serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
10,901 | __device__ int getBlockId()
// Flattens the 3-D block index into a linear block id (x fastest-varying,
// then y, then z).
{
return blockIdx.x
+ blockIdx.y * gridDim.x
+ gridDim.x * gridDim.y * blockIdx.z;
}
__device__ int getThreadId()
{
    // Linear thread id within the block: x varies fastest, then y, then z.
    const int planeSize = blockDim.x * blockDim.y;
    const int rowSize = blockDim.x;
    return threadIdx.z * planeSize + threadIdx.y * rowSize + threadIdx.x;
}
__device__ int getGlobalId()
{
    // Global linear thread id:
    // (threads per block) * linear block id + linear thread id in block.
    const int threadsPerBlock = blockDim.x * blockDim.y * blockDim.z;
    return getBlockId() * threadsPerBlock + getThreadId();
}
__device__ int index(int partid, int comp)
{
    // Offset of component `comp` (0..2) of particle `partid` in an
    // interleaved 3-component (xyz) array.
    return partid * 3 + comp;
}
|
10,902 | #include "includes.h"
// For every character i, writes pos[i] = distance back to the previous
// separator (any char <= ' '), i.e. the 1-based position of the character
// inside its word. Separator characters themselves get pos[i] = 0; if no
// separator precedes i, pos[i] = i+1 (word starts at the text beginning).
// NOTE(review): the backward scan is O(n) per thread / O(n^2) total — fine
// for short texts, consider a scan-based formulation for large inputs.
__global__ void labeling(const char *text, int *pos, int text_size){
// Flatten a 2-D grid of 2-D blocks into one linear character index.
int index = threadIdx.x*blockDim.y+threadIdx.y + blockDim.x*blockDim.y*(gridDim.y*blockIdx.x + blockIdx.y);
if (index >= text_size) {
return;
}
pos[index] = 0;
if (text[index] <= ' ')
return ;
for (int k = index; k >= 0; k--) {
if (text[k] <= ' ') {
pos[index] = index - k;
return;
}
}
// No separator found: the whole prefix [0..index] is one word.
pos[index] = index+1;
} |
10,903 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated fuzzing kernel (see banner above): routes the input scalars
// through a fixed chain of float operations and prints the final `comp`.
// The odd constants and calls (denormals, logf(floorf(-1.86e-14)) == logf(-1)
// == NaN, divisions that can hit 0) are intentional compiler/runtime probes —
// do not "fix" them. Launched from main as compute<<<1,1>>>.
__global__
void compute(float comp, float var_1,float var_2,float var_3,int var_4,int var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22) {
if (comp < var_1 * (-0.0f - -1.0488E-41f)) {
if (comp >= (+1.6809E-44f / atanf(+0.0f + (var_2 + var_3 + -1.9413E35f)))) {
float tmp_1 = var_6 / var_7 * logf(floorf(-1.8625E-14f));
float tmp_2 = -1.4014E-35f;
comp += tmp_2 / tmp_1 + (+1.1014E14f - var_8 - -1.0898E-43f);
for (int i=0; i < var_4; ++i) {
comp = var_9 + powf((+1.8927E36f * (var_10 * coshf(var_11 / (var_12 - -1.2219E-44f)))), var_13 + (+1.1021E13f / expf((-1.4460E-35f + -0.0f))));
}
for (int i=0; i < var_5; ++i) {
comp += (-1.4263E-41f / +1.4689E-37f * -1.7224E26f);
comp = var_14 * (+0.0f / atan2f(+1.8768E-42f, sinf(acosf(+0.0f))));
}
if (comp <= var_15 / var_16 + (-1.8000E-44f + +1.3862E-35f)) {
float tmp_3 = +0.0f;
comp += tmp_3 * var_17 / (-1.1685E-1f * -1.1878E35f + +1.0889E34f);
float tmp_4 = -1.3936E36f;
comp = tmp_4 * fmodf(var_18 / (-1.4626E-11f * log10f(-0.0f)), var_19 - (var_20 - -1.9467E-26f * (var_21 - (var_22 * +1.4935E35f))));
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
    // Allocate a 10-element host array and fill every slot with v.
    // The caller owns the returned buffer and must free() it.
    const int kCount = 10;
    float* buf = (float*) malloc(kCount * sizeof(float));
    for (int i = 0; i < kCount; ++i) {
        buf[i] = v;
    }
    return buf;
}
int main(int argc, char** argv) {
    /* Program variables: the kernel consumes 23 command-line scalars
       (argv[1]..argv[23]). */
    // BUG FIX: the original dereferenced argv[1..23] without checking argc,
    // reading past the argument vector (NULL/garbage) when fewer were given.
    if (argc < 24) {
        fprintf(stderr, "Usage: %s v1 v2 ... v23 (23 numeric arguments)\n", argv[0]);
        return 1;
    }
    float tmp_1 = atof(argv[1]);
    float tmp_2 = atof(argv[2]);
    float tmp_3 = atof(argv[3]);
    float tmp_4 = atof(argv[4]);
    int tmp_5 = atoi(argv[5]);
    int tmp_6 = atoi(argv[6]);
    float tmp_7 = atof(argv[7]);
    float tmp_8 = atof(argv[8]);
    float tmp_9 = atof(argv[9]);
    float tmp_10 = atof(argv[10]);
    float tmp_11 = atof(argv[11]);
    float tmp_12 = atof(argv[12]);
    float tmp_13 = atof(argv[13]);
    float tmp_14 = atof(argv[14]);
    float tmp_15 = atof(argv[15]);
    float tmp_16 = atof(argv[16]);
    float tmp_17 = atof(argv[17]);
    float tmp_18 = atof(argv[18]);
    float tmp_19 = atof(argv[19]);
    float tmp_20 = atof(argv[20]);
    float tmp_21 = atof(argv[21]);
    float tmp_22 = atof(argv[22]);
    float tmp_23 = atof(argv[23]);
    // Single-thread launch; the kernel prints its own result.
    compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23);
    cudaDeviceSynchronize();
    return 0;
}
|
10,904 | /*
* Copyright (c) 2018 Preferred Networks, Inc. All rights reserved.
*/
#include <cuda_fp16.h>
namespace chainer_trt {
namespace plugin {
template <typename T>
__global__ void slice_kernel(const T* src_gpu, T* dest_gpu,
int* mapping_gpu, int n_src, int n_dst) {
    // Gather kernel: one thread per destination element within a batch item;
    // blockIdx.y indexes the batch item. mapping_gpu[d] names the source
    // element copied to destination slot d.
    const int dst_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (dst_idx >= n_dst)
        return;
    const int batch = blockIdx.y;
    dest_gpu[batch * n_dst + dst_idx] = src_gpu[batch * n_src + mapping_gpu[dst_idx]];
}
template <typename T>
void apply_slice(const T* src_gpu, T* dest_gpu, int* mapping_gpu, int n_src,
int n_dst, int batch_size, cudaStream_t stream) {
    // Launch slice_kernel with one thread per destination element;
    // grid.y carries the batch dimension.
    const int block_size = 1024;
    // Integer ceiling division: same result as the original
    // std::ceil(1.0 * n_dst / block_size) for all int n_dst, but avoids the
    // float round-trip and the <cmath> dependency this file never includes.
    const int grid_size = (n_dst + block_size - 1) / block_size;
    dim3 grid(grid_size, batch_size);
    slice_kernel<T><<<grid, block_size, 0, stream>>>(
      src_gpu, dest_gpu, mapping_gpu, n_src, n_dst);
}
template void apply_slice(const float*, float*, int*, int, int, int,
cudaStream_t);
template void apply_slice(const __half*, __half*, int*, int, int, int,
cudaStream_t);
}
}
|
10,905 | #include "includes.h"
/* This file is copied from https://github.com/jzbonter/mc-cnn */
extern "C" {
}
#define TB 128
#define DISP_MAX 256
// Zeroes y[i] wherever the corresponding x[i] is pure white (255);
// all other entries of y are left untouched. One thread per element.
__global__ void remove_white(float *x, float *y, int size)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < size) {
if (x[id] == 255) {
y[id] = 0;
}
}
} |
10,906 | extern "C" __global__ void sum ( const float *A
, const float *B
, float *C
, int N
)
// Element-wise vector addition: C[i] = A[i] + B[i] for every i < N.
// One thread per element; the bounds check covers the grid tail.
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i<N) {
C[i] = A[i] + B[i];
}
}
|
10,907 | #include<stdio.h>
#include <stdlib.h>
#include<malloc.h>
#include <time.h>
#include<cuda.h>
typedef char* string;
__global__
// C = A * B for row-major matrices; one thread per output element.
// BUG FIX: the original did `C[..] += ..` on a device buffer that was never
// zeroed (main cudaMalloc's d_C without a memset), so results depended on
// uninitialized memory. Accumulate in a register and assign instead.
void multGPU(float* A, int rowsA, int colsA, float* B, int rowsB, int colsB, float* C){
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if((col < colsB)&&(row < rowsA)) {
        float acc = 0.0f;
        for(int M = 0; M < rowsB; M++) {
            acc += A[row * colsA + M] * B[M * colsB + col];
        }
        C[row * colsB + col] = acc;
    }
}
__host__
// Reference C = A * B on the host (row-major).
// BUG FIX: the original did `C[..] += ..` on a malloc'd (uninitialized)
// buffer, so results depended on garbage already in C. Accumulate locally
// and assign the final value.
void multCPU(float* A, int rowsA, int colsA, float* B, int rowsB, int colsB, float* C){
    for(int i = 0; i < rowsA; i++){
        for(int j = 0; j < colsB; j++){
            float acc = 0.0f;
            for(int M = 0; M < rowsB; M++){
                acc += A[i * colsA + M] * B[M * colsB + j];
            }
            C[i * colsB + j] = acc;
        }
    }
}
__host__
// Exact element-wise equality of two row-major rows x cols float matrices.
bool compare(float *A, float *B, int rows, int cols){
    const int total = rows * cols;
    for (int k = 0; k < total; k++) {
        if (A[k] != B[k])
            return false;
    }
    return true;
}
__host__
// Reads rows*cols comma-separated floats from `stream` into the row-major
// matrix M, then closes the stream — callers must not reuse the FILE* after
// this returns.
// NOTE(review): fscanf return values are not checked; a short or malformed
// file silently leaves trailing entries of M unmodified.
void load(float *M, FILE *stream, int rows, int cols) {
int i, j;
for(i = 0; i < rows; i++) {
for(j = 0; j < cols; j++) {
fscanf(stream, "%f,", &M[i * cols + j]);
}
}
fclose(stream);
}
__host__
// Writes M (rows x cols, row-major) to `file_name` as CSV with "%.2f"
// precision, preceded by two header lines holding the dimensions.
// NOTE(review): fopen's result is not checked — an unwritable path would
// crash in the first fprintf.
void save(float *M, int rows, int cols, string file_name) {
FILE *stream;
int i, j;
stream = fopen(file_name, "w");
fprintf(stream, "%d\n", rows);
fprintf(stream, "%d\n", cols);
for(i = 0; i < rows; i++) {
for(j = 0; j < cols; j++) {
// Last column gets no trailing comma.
if (j + 1 == cols) fprintf(stream, "%.2f", M[i * cols + j]);
else fprintf(stream, "%.2f,", M[i * cols + j]);
}
fprintf(stream, "%s\n","");
}
fclose(stream);
}
__host__
// Debug dump of a row-major rows x cols matrix to stdout, one row per line.
void print(float* M, int rows, int cols){
    printf("---------------print matrix--------------\n");
    for (int r = 0; r < rows; r++) {
        for (int c = 0; c < cols; c++)
            printf("%f ", M[r * cols + c]);
        printf("\n");
    }
}
// Loads two matrices from CSV files (argv[1], argv[2]), multiplies them on
// CPU and GPU, compares the results, and reports timings.
// NOTE(review): fopen results are unchecked (a bad path crashes in fscanf);
// `difftime` is applied to clock() values, so the printed "seconds" are
// actually clock ticks; timeGPU may be 0 on coarse clocks, making the
// speedup division undefined.
int main(int argc, char** argv){
if (argc != 3) {
printf("Must be called with the names of the files\n");
return 1;
}
//-------------------------------CPU--------------------------------------
time_t time_start, time_end;
float *A, *B, *C;
int rowsA, colsA, rowsB, colsB;
double timeCPU, timeGPU;
FILE *arc1, *arc2;
arc1 = fopen(argv[1], "r");
arc2 = fopen(argv[2], "r");
// Each file starts with two header lines: row count, then column count.
fscanf(arc1, "%d", &rowsA);
fscanf(arc1, "%d", &colsA);
fscanf(arc2, "%d", &rowsB);
fscanf(arc2, "%d", &colsB);
// Allocate host (CPU) buffers. NOTE(review): C is not zeroed here — see
// multCPU/multGPU, which accumulate into it.
A = (float*)malloc(rowsA * colsA * sizeof(float));
B = (float*)malloc(rowsB * colsB * sizeof(float));
C = (float*)malloc(rowsA * colsB * sizeof(float));
// load() also closes the file handles.
load(A, arc1, rowsA, colsA);
// printf("rowsA: %d\n", rowsA);
// printf("colsA: %d\n", colsA);
// print(A, rowsA, colsA);
load(B, arc2, rowsB, colsB);
// printf("rowsA: %d\n", rowsB);
// printf("colsA: %d\n", colsB);
// print(B, rowsB, colsB);
if (colsA != rowsB) return 1; // inner dimensions must match (cols A == rows B)
time_start = clock();
multCPU(A, rowsA, colsA, B, rowsB, colsB, C);
time_end = clock();
// print(C, rowsA, colsB);
timeCPU = difftime(time_end, time_start);
printf ("Elasped time in CPU: %.2lf seconds.\n", timeCPU);
// save(C, rowsA, colsB, "CPU.out");
//-------------------------------GPU--------------------------------------
cudaError_t error = cudaSuccess;
float *d_A, *d_B, *d_C, *h_C;
h_C = (float*)malloc(rowsA * colsB * sizeof(float));
error = cudaMalloc((void**)&d_A, rowsA * colsA * sizeof(float));
if (error != cudaSuccess) {
printf("Error al asignar memoria a d_A");
return 1;
}
error = cudaMalloc((void**)&d_B, rowsB * colsB * sizeof(float));
if (error != cudaSuccess) {
printf("Error al asignar memoria a d_B");
return 1;
}
error = cudaMalloc((void**)&d_C, rowsA * colsB * sizeof(float));
if (error != cudaSuccess) {
printf("Error al asignar memoria a d_C");
return 1;
}
cudaMemcpy(d_A, A, rowsA * colsA * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, rowsB * colsB * sizeof(float), cudaMemcpyHostToDevice);
// 32x32 thread blocks; grid sized by ceiling division to cover the output.
int blockSize = 32;
dim3 dimblock(blockSize, blockSize, 1);
dim3 dimGrid(ceil((colsB) / float(blockSize)), ceil((rowsA) / float(blockSize)), 1);
time_start = clock();
multGPU<<<dimGrid,dimblock>>>(d_A, rowsA, colsA, d_B, rowsB, colsB, d_C);
cudaDeviceSynchronize();
time_end = clock();
timeGPU = difftime(time_end, time_start);
printf ("Tiempo trasncurrido en GPU: %.2lf seconds.\n", timeGPU);
cudaMemcpy(h_C, d_C, rowsA * colsB * sizeof(float), cudaMemcpyDeviceToHost);
// print(h_C, rowsA, colsB);
if (!compare(h_C, C, rowsA, colsB)) {
printf("Error al multiplicar\n");
} else {
printf("tiempo acelerado: %lf\n", timeCPU / timeGPU);
// save(h_C, rowsA, colsB, "GPU.out");
}
free(A); free(B); free(C); free(h_C);
cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
return 0;
}
|
10,908 | #include "includes.h"
extern "C" {
}
// Backward pass of softmax-cross-entropy: dx[i] = dy[0] * (x[i] - t[i]).
// dy[0] is the single upstream gradient scalar broadcast to all elements;
// one thread per element with a tail guard.
__global__ void cross_entropy_backward(const float* x, float* dx, const float* t, float* dy, unsigned int len) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < len) {
dx[tid] = dy[0] * (x[tid] - t[tid]);
}
} |
10,909 | #include <string>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define AND 0
#define OR 1
#define NAND 2
#define NOR 3
#define XOR 4
#define NXOR 5
/*
* Evaluates logical expression.
*/
/*
 * Evaluates one two-input logic gate.
 * ops[0], ops[1] are the operand bits (0/1); ops[2] selects the gate
 * (AND..NXOR opcodes defined above). Returns the result as the ASCII
 * character '0' or '1'; an unknown opcode yields '0'.
 */
char evaluateLogicGate(int ops[]) {
    const int a = ops[0];
    const int b = ops[1];
    int result = 0;
    switch (ops[2]) {
    case AND:
        result = a & b;
        break;
    case OR:
        result = a | b;
        break;
    case NAND:
        result = !(a & b);
        break;
    case NOR:
        result = !(a | b);
        break;
    case XOR:
        result = a ^ b;
        break;
    case NXOR:
        result = !(a ^ b);
        break;
    }
    return (char)(result + '0');
}
/*
* Sequential computation of list of logical expressions.
* Takes input file, file length and output file name as arguments.
*/
/*
 * Sequential computation of list of logical expressions.
 * Takes input file, file length and output file name as arguments.
 * Each input line is parsed as "a b g" at fixed offsets 0/2/4
 * (operand, operand, gate opcode); one result character per line is
 * written to the output file.
 * NOTE(review): inputLength (argv[2]) is parsed but never used;
 * `char result = NULL;` relies on NULL being 0 — it is just '\0' here.
 * The output file handle is never checked against NULL.
 */
int main(int argc, char* argv[]) {
if (argc != 4) {
printf("Error: Please enter the input file path, input file length and output file path when running.\n");
return 1;
}
char line[10];
char* inputFileName = argv[1];
int inputLength = atoi(argv[2]);
char* outputFileName = argv[3];
FILE* inputFile = fopen(inputFileName, "r");
FILE* outputFile = fopen(outputFileName, "w");
if (inputFile == NULL) {
fprintf(stderr, "Error opening file.\n");
return 1;
}
clock_t start = clock();
int operands[3] = { 0 };
char result = NULL;
while (fgets(line, 10, inputFile)) {
operands[0] = atoi(line);
operands[1] = atoi(line + 2);
operands[2] = atoi(line + 4);
result = evaluateLogicGate(operands);
fprintf(outputFile, "%c\n", result);
}
clock_t end = clock();
// Convert elapsed clock ticks to integer milliseconds.
int duration = ((end - start) * 1000) / CLOCKS_PER_SEC;
fclose(outputFile);
fclose(inputFile);
printf("Completed\nTime: %ds %dms\n", duration / 1000, duration % 1000);
return 0;
} |
10,910 |
#include <cstdio>
#include <cstdlib>
#define DTYPE unsigned long long
#define ull unsigned long long
/*** CUDA API error checking ***/
#define cudaErrChk(ans) { cudaAssert((ans), __FILE__, __LINE__); }
// Reports a CUDA error with file/line context and (when abort is true,
// the default) terminates the process with the error code as exit status.
// Invoked through the cudaErrChk() macro above.
inline void cudaAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"CUDA assert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/****************************************************************
*** Kernel mode : 4
*** Blocked shared half unrolled reduction
****************************************************************/
/*** Kernel program ***/
// First-add-during-load reduction: each thread sums two elements spaced
// blockSize apart while loading into shared memory, then a compile-time
// unrolled tree reduction leaves one partial per block in d_out[blockIdx.x].
// Requires dynamic shared memory of blockSize * sizeof(DTYPE) and the
// template argument to equal the launch block size.
// NOTE(review): threads with idx >= remain never write smem[tidx]; the tree
// guard `idx+s<remain` appears to keep those slots out of the sum, but this
// should be confirmed with compute-sanitizer initcheck for partially filled
// blocks.
template <size_t blockSize>
__global__ void reduction_blocked_shared_half_unrolled (DTYPE* d_data, DTYPE* d_out, ull remain) {
ull tidx = threadIdx.x;
ull idx = blockIdx.x * (2*blockSize) + threadIdx.x;
extern __shared__ DTYPE smem[];
if (idx < remain) {
if (idx+blockSize<remain)
smem[tidx] = d_data[idx]+d_data[idx+blockSize];
else
smem[tidx] = d_data[idx];
}
__syncthreads();
#pragma unroll
for (ull s=blockSize>>1; s>0; s>>=1) {
if (tidx<s && idx+s<remain) {
smem[tidx]+=smem[tidx+s];
}
__syncthreads();
}
if (tidx == 0) {
d_out[blockIdx.x] = smem[tidx];
}
}
/*** Host program ***/
// Host driver: each pass reduces `remain` elements to one partial per block
// (each block consumes 2*threads.x inputs due to first-add-on-load), copies
// the partials back into d_data, and repeats until d_data[0] holds the sum.
// cudaErrChk expands to a braced block, hence no ';' after the sync call.
void run_kernel_blocked_shared_half_unrolled (DTYPE* d_data, const ull num_data) {
    DTYPE* d_out;
    cudaErrChk (cudaMalloc ((void**)&d_out, sizeof(DTYPE)*num_data));
    ull remain=num_data, next=0;
    dim3 threads (128);
    const size_t size_smem = sizeof (DTYPE) * threads.x;
    while (remain > 1) {
        // BUG FIX: `next` must equal the launched block count,
        // ceil(remain / (2*threads.x)). The original tested divisibility by
        // threads.x while dividing by 2*threads.x, so e.g. remain==3*threads.x
        // gave next==1 although two partials were produced — one was dropped.
        next = (remain + (2*threads.x) - 1) / (2*threads.x);
        dim3 blocks ((unsigned int)next);
        reduction_blocked_shared_half_unrolled<128><<<blocks, threads, size_smem>>> (d_data, d_out, remain);
        cudaErrChk (cudaMemcpy (d_data, d_out, next*sizeof(DTYPE), cudaMemcpyDeviceToDevice));
        cudaErrChk (cudaDeviceSynchronize ())
        cudaErrChk (cudaGetLastError() );
        remain = next;
    }
    cudaErrChk (cudaFree (d_out));
}
/****************************************************************
*** Kernel mode : 3
*** Blocked shared half reduction
****************************************************************/
/*** Kernel program ***/
// Same as the unrolled variant but with a runtime loop: each thread adds two
// elements blockDim.x apart while loading into shared memory, then a tree
// reduction leaves one partial per block in d_out[blockIdx.x].
// Requires blockDim.x * sizeof(DTYPE) dynamic shared memory.
// NOTE(review): smem[tidx] is left uninitialized when idx >= remain; the
// guard `idx+s<remain` appears to exclude those slots — verify with
// compute-sanitizer for partially filled blocks.
__global__ void reduction_blocked_shared_half (DTYPE* d_data, DTYPE* d_out, ull remain) {
ull tidx = threadIdx.x;
ull idx = blockIdx.x * (2*blockDim.x) + threadIdx.x;
extern __shared__ DTYPE smem[];
if (idx < remain) {
if (idx+blockDim.x<remain)
smem[tidx] = d_data[idx]+d_data[idx+blockDim.x];
else
smem[tidx] = d_data[idx];
}
__syncthreads();
for (ull s=blockDim.x>>1; s>0; s>>=1) {
if (tidx<s && idx+s<remain) {
smem[tidx]+=smem[tidx+s];
}
__syncthreads();
}
if (tidx == 0) {
d_out[blockIdx.x] = smem[tidx];
}
}
/*** Host program ***/
// Host driver for the shared-half kernel: each pass reduces `remain`
// elements to one partial per block (2*threads.x inputs per block), copies
// the partials back into d_data, and repeats until d_data[0] holds the sum.
// cudaErrChk expands to a braced block, hence no ';' after the sync call.
void run_kernel_blocked_shared_half (DTYPE* d_data, const ull num_data) {
    DTYPE* d_out;
    cudaErrChk (cudaMalloc ((void**)&d_out, sizeof(DTYPE)*num_data));
    ull remain=num_data, next=0;
    dim3 threads (128);
    const size_t size_smem = sizeof (DTYPE) * threads.x;
    while (remain > 1) {
        // BUG FIX: `next` must equal the launched block count,
        // ceil(remain / (2*threads.x)). The original tested divisibility by
        // threads.x while dividing by 2*threads.x, dropping one partial
        // whenever remain was an odd multiple of threads.x.
        next = (remain + (2*threads.x) - 1) / (2*threads.x);
        dim3 blocks ((unsigned int)next);
        reduction_blocked_shared_half<<<blocks, threads, size_smem>>> (d_data, d_out, remain);
        cudaErrChk (cudaMemcpy (d_data, d_out, next*sizeof(DTYPE), cudaMemcpyDeviceToDevice));
        cudaErrChk (cudaDeviceSynchronize ())
        cudaErrChk (cudaGetLastError() );
        remain = next;
    }
    cudaErrChk (cudaFree (d_out));
}
/****************************************************************
*** Kernel mode : 2
*** Blocked shared reduction
****************************************************************/
/*** Kernel program ***/
// Shared-memory tree reduction: each block loads blockDim.x elements into
// shared memory and folds them pairwise, leaving one partial per block in
// d_out[blockIdx.x]. Requires blockDim.x * sizeof(DTYPE) dynamic shared mem.
// NOTE(review): smem[tidx] is uninitialized for idx >= remain; the guard
// `idx+s<remain` appears to exclude those slots from the sum — verify for
// the final partially filled block.
__global__ void reduction_blocked_shared (DTYPE* d_data, DTYPE* d_out, ull remain) {
ull tidx = threadIdx.x;
ull idx = blockIdx.x * blockDim.x + threadIdx.x;
extern __shared__ DTYPE smem[];
if (idx < remain) {
smem[tidx] = d_data[idx];
}
__syncthreads();
for (ull s=blockDim.x>>1; s>0; s>>=1) {
if (tidx<s && idx+s<remain) {
smem[tidx]+=smem[tidx+s];
}
__syncthreads();
}
if (tidx == 0) {
d_out[blockIdx.x] = smem[tidx];
}
}
/*** Host program ***/
// Host driver: each pass reduces `remain` elements to ceil(remain/threads.x)
// block partials (the if/else computes exactly that ceiling), copies them
// back into d_data, and repeats until d_data[0] holds the total.
// cudaErrChk expands to a braced block, hence no ';' after the sync call.
// NOTE(review): d_out is sized for num_data elements — far larger than the
// maximum block count actually needed.
void run_kernel_blocked_shared (DTYPE* d_data, const ull num_data) {
DTYPE* d_out;
cudaErrChk (cudaMalloc ((void**)&d_out, sizeof(DTYPE)*num_data));
ull remain=num_data, next=0;
dim3 threads (128);
const size_t size_smem = sizeof (DTYPE) * threads.x;
while (remain > 1) {
if (remain%threads.x==0)
next = remain/threads.x;
else
next = remain/threads.x+1;
dim3 blocks ((remain+threads.x-1)/threads.x);
reduction_blocked_shared<<<blocks, threads, size_smem>>> (d_data, d_out, remain);
cudaErrChk (cudaMemcpy (d_data, d_out, next*sizeof(DTYPE), cudaMemcpyDeviceToDevice));
cudaErrChk (cudaDeviceSynchronize ())
cudaErrChk (cudaGetLastError() );
remain = next;
}
cudaErrChk (cudaFree (d_out));
}
/****************************************************************
*** Kernel mode : 1
*** Blocked reduction
****************************************************************/
/*** Kernel program ***/
// In-place tree reduction in global memory: each block folds its own
// blockDim.x-element segment of d_data pairwise (tidx<s keeps every add
// within the block's segment) and writes the segment total to
// d_out[blockIdx.x]. No shared memory used.
__global__ void reduction_blocked (DTYPE* d_data, DTYPE* d_out, ull remain) {
ull tidx = threadIdx.x;
ull idx = blockIdx.x * blockDim.x + threadIdx.x;
for (ull s=blockDim.x>>1; s>0; s>>=1) {
if (tidx<s && idx+s < remain) {
d_data[idx]+=d_data[idx+s];
}
__syncthreads();
}
if (tidx == 0) {
d_out[blockIdx.x] = d_data[idx];
}
}
/*** Host program ***/
// Host driver for the global-memory blocked kernel: each pass produces
// ceil(remain/threads.x) block partials (the if/else computes exactly that
// ceiling), copies them back into d_data, and repeats until one value is
// left. cudaErrChk expands to a braced block, hence no ';' after the sync.
void run_kernel_blocked (DTYPE* d_data, const ull num_data) {
DTYPE* d_out;
cudaErrChk (cudaMalloc ((void**)&d_out, sizeof(DTYPE)*num_data));
ull remain=num_data, next=0;
dim3 threads (128);
while (remain > 1) {
if (remain%threads.x==0)
next = remain/threads.x;
else
next = remain/threads.x+1;
dim3 blocks ((remain+threads.x-1)/threads.x);
reduction_blocked<<<blocks, threads>>> (d_data, d_out, remain);
cudaErrChk (cudaMemcpy (d_data, d_out, next*sizeof(DTYPE), cudaMemcpyDeviceToDevice));
cudaErrChk (cudaDeviceSynchronize ())
cudaErrChk (cudaGetLastError() );
remain = next;
}
cudaErrChk (cudaFree (d_out));
}
/****************************************************************
*** Kernel mode : 0
*** Basic reduction
****************************************************************/
/*** Kernel program ***/
__global__ void reduction (DTYPE* d_data, ull remain, ull next) {
    // Pairwise fold: add the upper half of the live range [0, remain)
    // onto the lower half, halving the element count per launch.
    const ull i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i + next < remain)
        d_data[i] += d_data[i + next];
}
/*** Host program ***/
// Host driver for the basic kernel: halves the live range each launch
// (next = ceil(remain/2)) until d_data[0] holds the total. No temporary
// buffer needed since the fold is done in place.
// cudaErrChk expands to a braced block, hence no ';' after the sync call.
void run_kernel_basic (DTYPE* d_data, const ull num_data) {
ull remain=num_data, next=0;
dim3 threads (128);
while (remain > 1) {
if (remain%2==0)
next = remain/2;
else
next = remain/2 +1;
dim3 blocks ((next+threads.x-1)/threads.x);
reduction<<<blocks, threads>>> (d_data, remain, next);
cudaErrChk (cudaDeviceSynchronize ())
cudaErrChk (cudaGetLastError() );
remain = next;
}
}
/****************************************************************
*** Host program
****************************************************************/
// Fills `data` with small values (rand()%5; rand() is never seeded here, so
// the sequence repeats across runs) and returns their exact integer sum,
// used later as ground truth for the GPU reduction.
DTYPE initial_data (DTYPE* data, const ull num_data) {
DTYPE sum = 0;
for (ull i=0; i<num_data; i++) {
data[i] = rand()%5;
sum += data[i];
}
return sum;
}
// Parses the requested kernel mode from argv[1] (default 0 when absent),
// announces it on stdout, and returns it. Exits the process with status 1
// for any mode outside 0..4.
int select_mode(const int argc, const char** argv) {
    static const char* kModeNames[] = {
        "0.Basic reduction",
        "1.Blocked reduction",
        "2.Blocked shared reduction",
        "3.Blocked shared half reduction",
        "4.Blocked shared half unrolled reduction",
    };
    const int mode = (argc > 1) ? atoi(argv[1]) : 0;
    if (mode < 0 || mode > 4) {
        printf("Selected not implemented mode...\n");
        exit(1);
    }
    printf("Kernel mode : %s\n", kModeNames[mode]);
    return mode;
}
// Benchmarks the selected reduction kernel: fills ~4e8 integers on the host,
// runs the reduction loop_exe times (re-uploading the data each iteration),
// averages the CUDA-event timings, and checks the device result against the
// host-computed ground-truth sum.
// NOTE(review): malloc/size_data is ~3.2 GB and its result is not
// NULL-checked; events and host memory are freed implicitly at exit only.
int main (const int argc, const char** argv) {
/*** Program Configuration ***/
const ull num_data = 4*1e+8;
const int loop_exe = 4;
const size_t size_data = sizeof (ull) * num_data;
printf("\n\n=======================================================================\n");
printf("== Parallel DTYPE reduction\n");
printf("=======================================================================\n");
const int mode_kernel = select_mode(argc, argv);
printf("Number of DTYPE : %llu\n", num_data);
printf(" size of mem : %.2f GB\n", size_data*1e-9);
/*** Initialize variables ***/
DTYPE* data = (DTYPE*) malloc (size_data);
// Ground-truth sum computed on the host while filling the buffer.
const DTYPE sum = initial_data (data, num_data);
float gops = 1e-9*num_data*loop_exe;
cudaEvent_t start, stop;
float msec_total=0.0f, msec=0.0f;
cudaErrChk (cudaEventCreate(&start));
cudaErrChk (cudaEventCreate(&stop));
/*** Set CUDA Memory ***/
DTYPE* d_data;
cudaErrChk (cudaMalloc ((void**)&d_data, size_data));
cudaErrChk (cudaMemcpy (d_data, data, size_data, cudaMemcpyHostToDevice));
cudaErrChk (cudaDeviceSynchronize ());
/*** Run kernel ***/
for (int loop=0; loop<loop_exe; loop++) {
// The reductions destroy d_data, so re-upload before every iteration.
cudaErrChk (cudaMemcpy (d_data, data, size_data, cudaMemcpyHostToDevice));
cudaErrChk (cudaEventRecord(start, NULL));
switch (mode_kernel) {
case 0:
run_kernel_basic (d_data, num_data);
break;
case 1:
run_kernel_blocked (d_data, num_data);
break;
case 2:
run_kernel_blocked_shared (d_data, num_data);
break;
case 3:
run_kernel_blocked_shared_half (d_data, num_data);
break;
case 4:
run_kernel_blocked_shared_half_unrolled (d_data, num_data);
break;
default:
printf("Not implemented\n");
exit(1);
break;
}
cudaErrChk (cudaEventRecord(stop, NULL));
cudaErrChk (cudaEventSynchronize(stop));
cudaErrChk (cudaEventElapsedTime(&msec, start, stop));
msec_total += msec;
}
/*** Check result ***/
DTYPE result;
// The final total ends up in d_data[0].
cudaErrChk (cudaMemcpy (&result, d_data, sizeof (DTYPE), cudaMemcpyDeviceToHost));
printf(" Total number of add inst. : %.2f Gops\n", gops);
printf(" Elaped time: %.4f msec\n", msec_total);
printf(" GFLOPS : %.4f gflops [Avg. of %d time(s)]\n", gops/(msec_total*1e-3), loop_exe);
printf("Check result ...\n");
if (sum != result) {
printf(" [Err] GT(%llu) != Pred(%llu)\n", sum, result);
} else {
printf(" [Pass] GT(%llu) == Pred(%llu)\n", sum, result);
}
printf("=======================================================================\n\n");
/*** Finalize program ***/
cudaErrChk (cudaFree (d_data));
free (data);
return 0;
}
|
10,911 | // Note: errors in this file will appear on the wrong line, since we copy another header file
// in to provide some utility functions (the include paths in Jitify are somewhat unreliable)
// Generic element-wise negation: dst[i] = -data[i]. One thread per element;
// callers must launch at least `elements` total threads.
template<typename Destination, typename DATA>
__global__ void negateArrays(size_t elements, Destination *dst, DATA *data) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) { dst[kernelIndex] = -data[kernelIndex]; }
}
// Specializations for CUDA float vector types: unary minus is not defined
// on float2/float3/float4, so each component is negated explicitly.
template<>
__global__ void negateArrays(size_t elements, float2 *dst, float2 *data) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) {
dst[kernelIndex].x = -data[kernelIndex].x;
dst[kernelIndex].y = -data[kernelIndex].y;
}
}
template<>
__global__ void negateArrays(size_t elements, float3 *dst, float3 *data) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) {
dst[kernelIndex].x = -data[kernelIndex].x;
dst[kernelIndex].y = -data[kernelIndex].y;
dst[kernelIndex].z = -data[kernelIndex].z;
}
}
template<>
__global__ void negateArrays(size_t elements, float4 *dst, float4 *data) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) {
dst[kernelIndex].x = -data[kernelIndex].x;
dst[kernelIndex].y = -data[kernelIndex].y;
dst[kernelIndex].z = -data[kernelIndex].z;
dst[kernelIndex].w = -data[kernelIndex].w;
}
}
// Specializations for CUDA double vector types (double2/double3/double4),
// mirroring the float versions above: component-wise negation.
template<>
__global__ void negateArrays(size_t elements, double2 *dst, double2 *data) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) {
dst[kernelIndex].x = -data[kernelIndex].x;
dst[kernelIndex].y = -data[kernelIndex].y;
}
}
template<>
__global__ void negateArrays(size_t elements, double3 *dst, double3 *data) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) {
dst[kernelIndex].x = -data[kernelIndex].x;
dst[kernelIndex].y = -data[kernelIndex].y;
dst[kernelIndex].z = -data[kernelIndex].z;
}
}
template<>
__global__ void negateArrays(size_t elements, double4 *dst, double4 *data) {
const size_t kernelIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (kernelIndex < elements) {
dst[kernelIndex].x = -data[kernelIndex].x;
dst[kernelIndex].y = -data[kernelIndex].y;
dst[kernelIndex].z = -data[kernelIndex].z;
dst[kernelIndex].w = -data[kernelIndex].w;
}
}
|
10,912 | #include <stdio.h>
#define N 10
__global__ void dev_add_n(float *a, float *b, float *c, int n) {
    // Element-wise c = a + b for a single block, staged through shared memory.
    // BUG FIX: the original guarded the computation with `id < N / 2`, so the
    // upper half of c was read from uninitialized shared memory, and it called
    // __syncthreads() inside a divergent branch (undefined behavior). The
    // barrier must be reached by every thread; guard the work, not the sync.
    __shared__ float tmp[N];
    int id = threadIdx.x;
    if (id < n && id < N) {
        tmp[id] = a[id] + b[id];
    }
    __syncthreads();
    if (id < n && id < N) {
        c[id] = tmp[id];
    }
}
// Host wrapper: uploads a and b, runs dev_add_n with a single block of n
// threads, and downloads the result into c.
// NOTE(review): no cudaError_t checks; assumes n <= N (the kernel's shared
// buffer size) and n within the device's per-block thread limit.
void add_n(float a[], float b[], float c[], int n) {
float *dev_a, *dev_b, *dev_c;
cudaMalloc(&dev_a, n * sizeof(float));
cudaMalloc(&dev_b, n * sizeof(float));
cudaMalloc(&dev_c, n * sizeof(float));
cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, n * sizeof(float), cudaMemcpyHostToDevice);
dev_add_n<<<1, n>>>(dev_a, dev_b, dev_c, n);
// Blocking cudaMemcpy also synchronizes with the kernel above.
cudaMemcpy(c, dev_c, n * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
}
// Demo driver: adds two 10-element vectors on the GPU and prints the result,
// space-separated with a trailing newline after the last element.
int main() {
float a[N] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
b[N] = {0, 1, 2 ,3, 4, 5, 6 ,7, 8, 9},
c[N] = {0};
add_n(a, b, c, N);
for (int i = 0; i < N; ++i)
printf("c[%d] = %.3f%c", i, c[i], i + 1 == N ? '\n' : ' ');
}
|
10,913 | #include <stdio.h>
// Demo kernel: the branch condition depends on warp_id (gid/32), so all 32
// lanes of any warp take the same path — no intra-warp divergence.
// `input` is unused; the kernel only prints which path each thread took.
__global__ void no_divergence(int* input, dim3 size) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
if (gid < size.x) {
float a = 0.0;
int warp_id = gid / 32;
if (warp_id % 2 == 0) {
a = 100.0;
printf("warp(%d), a(%.0f)\n", warp_id, a);
} else {
a = 200.0;
printf("warp(%d), a(%.0f)\n", warp_id, a);
}
}
}
// Demo kernel: the branch condition depends on gid%2, so adjacent lanes of
// every warp take opposite paths — maximal intra-warp divergence (both paths
// execute with masking). `input` is unused; note the printf label says
// "warp" but actually prints the thread id.
__global__ void divergence(int* input, dim3 size) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
if (gid < size.x) {
float a = 0.0;
if (gid % 2 == 0) {
a = 100.0;
printf("warp(%d), a(%.0f)\n", gid, a);
} else {
a = 200.0;
printf("warp(%d), a(%.0f)\n", gid, a);
}
}
}
// Fills input[0..size-1] with its own index value.
void initInput(int* input, int size) {
    for (int i = 0; i < size; ++i)
        input[i] = i;
}
// Driver for the divergence demos: 32 elements, one 32-thread block.
// NOTE(review): grid_dim.x = size.x/block_dim.x + 1 launches one extra block
// even when size.x divides evenly; the kernels' gid < size.x guard makes the
// surplus threads harmless. Only size.x is used; the (32, 0, 0) y/z values
// are never read.
int main(void) {
dim3 size(32, 0, 0);
dim3 block_dim(0);
dim3 grid_dim(0);
int* h_input = NULL;
int* d_input = NULL;
h_input = (int*)calloc(size.x, sizeof(int));
initInput(h_input, size.x);
cudaMalloc((void**)&d_input, size.x * sizeof(int));
cudaMemcpy(d_input, h_input, size.x * sizeof(int), cudaMemcpyHostToDevice);
block_dim.x = 32;
grid_dim.x = size.x / block_dim.x + 1;
printf("\nno warp divergence occurred:\n");
no_divergence<<<grid_dim, block_dim>>>(d_input, size);
cudaDeviceSynchronize();
printf("\nwarp divergence occurred:\n");
divergence<<<grid_dim, block_dim>>>(d_input, size);
cudaDeviceSynchronize();
cudaFree(d_input);
free(h_input);
//// reset
cudaDeviceReset();
}
|
10,914 | #include <cuda.h>
#include <cuda_runtime_api.h>
#define ID3_BASE_WIDTH 3
#define ID3_BASE_HEIGHT 6
#define ID3_MID_WIDTH 1
#define ID3_THRESHOLD .19f //definitely needs to be changed
#define ID3_SKIP_AMOUNT 1 //amount to skip in pixels, we can change this to be multiplied by scale if necessary/desirable
//This identifier is 3 vertical bars going dark light dark
// Haar-like feature detector over an integral image: the feature is three
// vertical bars (dark/light/dark, see defines above). One thread per
// sub-window: it slides the scaled feature across its window, keeps the
// best absolute response, normalizes by the feature area, and flags the
// window as a face when the result exceeds ID3_THRESHOLD.
// NOTE(review): offsets[] is decoded as offset = startX*stride + startY,
// i.e. `stride` is treated as the image pitch in elements — confirm against
// the caller. `results` and `heatMap` are currently unused (writes are
// commented out).
__global__
void ID3kernel(float* intImage, size_t stride, int* offsets, int windowSize, int numSubWindows, int scale, int* faceDetected, float* results, float* heatMap) {
int threadNum = blockIdx.x * blockDim.x + threadIdx.x;
if(threadNum < numSubWindows){
int startX = offsets[threadNum]/(stride);
int startY = offsets[threadNum]%stride;
float maxFitValue = 0.0f;
for (int i = startX; (i+ID3_BASE_WIDTH*scale) < (startX+windowSize); i = i+ID3_SKIP_AMOUNT){ //use ID3_SKIP_AMOUNT * scale for it to scale up as identifier scales
for (int j = startY; (j+ID3_BASE_HEIGHT*scale) < (startY + windowSize); j = j+ID3_SKIP_AMOUNT){
// take important corners from image
float upperLeft = intImage[i*stride + j];
float upperRight = intImage[(i+ID3_BASE_WIDTH*scale)*stride + j];
float midLeftTop = intImage[(i+ID3_BASE_WIDTH*scale/2 - ID3_MID_WIDTH*scale/2)*stride + j];
float midRightTop = intImage[(i+ID3_BASE_WIDTH*scale/2 + ID3_MID_WIDTH*scale/2)*stride + j];
float midLeftBot = intImage[(i+ID3_BASE_WIDTH*scale/2 - ID3_MID_WIDTH*scale/2)*stride + j+ID3_BASE_HEIGHT*scale];
float midRightBot = intImage[(i+ID3_BASE_WIDTH*scale/2 + ID3_MID_WIDTH*scale/2)*stride + j+ID3_BASE_HEIGHT*scale];
float lowerLeft = intImage[i*stride + j+(ID3_BASE_HEIGHT*scale)];
float lowerRight = intImage[(i+ID3_BASE_WIDTH*scale)*stride + j+(ID3_BASE_HEIGHT*scale)];
//calculate fit value based on identifier (hard-coded)
// float fitValue = (midRightBot + midLeftTop - midRightTop - midLeftBot)*2.0 - lowerRight - upperLeft + upperRight + lowerLeft;
// Middle-bar rectangle sum weighted 2x minus the whole-feature rectangle sum
// (integral-image corner arithmetic).
float fitValue = 2.0*(midRightBot - midLeftBot - midRightTop + midLeftTop) - (lowerRight - lowerLeft - upperRight + upperLeft) ;
if(fitValue < 0)
fitValue = -fitValue;
if(fitValue > maxFitValue){
maxFitValue = fitValue;
}
}
}
float goodnessValue = maxFitValue/(ID3_BASE_WIDTH*scale*ID3_BASE_HEIGHT*scale); // goodnessValue = fit/area
// results[threadNum] = goodnessValue;
if(goodnessValue > ID3_THRESHOLD){
faceDetected[threadNum] = 1;
// for(int i = 0; i < windowSize; ++i){
// for(int j = 0; j < windowSize; ++j){
// heatMap[offsets[threadNum] + i*stride + j] = heatMap[offsets[threadNum] + i*stride + j] + 1.0f;
// }
// }
}
}
}
|
10,915 | /**
* Programma che simula il comportamento del gpdt per
* la risoluzione di un kernel di una serie di
* valori di dimensione variabile utilizzando la
* tecnologia cuda.
* compilare con:
* nvcc -o simil_gpdt_si_cuda simil_gpdt_si_cuda.cu
* lanciare con:
* ./simil_gpdt_si_cuda [numero vettori] [numero componenti] [numero di righe da calcolare] [tipo di kernel] [grado(int)/sigma(float)]
**/
#include <iostream>
#include <ctime>
#include <cstdlib>
#include <cstdio>
#include <math.h>
#include <cuda.h>
using namespace std;
/**
* Funzione che riempie i vettori con numeri
* casuali compresi tra 0 e 99.
**/
/**
 * Fills the row-major [Nr_vet_elem x Nr_vet_comp] matrix with the
 * deterministic test pattern value(i, j) = 2*i + j.
 * (The original comment mentioned random values; the active code is this
 * deterministic pattern.)
 **/
void riempi_vettori(float *vettori, int Nr_vet_elem, int Nr_vet_comp)
{
    for (int riga = 0; riga < Nr_vet_elem; riga++) {
        float *base = vettori + riga * Nr_vet_comp;
        for (int col = 0; col < Nr_vet_comp; col++)
            base[col] = (float)(riga * 2 + col);
    }
}
/**
* Funzione che crea dei vettori contenente i valori significativi su cui
* calcolare la norma 2 al quadrato.
**/
/**
 * Fills the row-major [Nr_vet_elem x Nr_vet_comp] position matrix so that
 * every row holds the same pattern 3, 6, 9, ... ((j+1)*3): the indices of
 * the "significant" components used by the kernels.
 **/
void crea_vettori_termini_noti(int *vettori, int Nr_vet_elem, int Nr_vet_comp)
{
    for (int riga = 0; riga < Nr_vet_elem; riga++)
        for (int col = 0; col < Nr_vet_comp; col++)
            vettori[riga * Nr_vet_comp + col] = 3 * (col + 1);
}
/**
* Funzione che crea un vettore contenente il numero di valori significative.
**/
/**
 * Sets every entry of `vettore` (length Nr_vet_elem) to `numero_val`:
 * the per-vector count of significant components.
 **/
void crea_vettori_posizioni(int *vettore, int Nr_vet_elem, int numero_val)
{
    for (int i = Nr_vet_elem; i-- > 0; )
        vettore[i] = numero_val;
}
/**
* kernel per il calcolo delle norme al quadrato dei vettori.
**/
// Squared 2-norm of each sparse vector. Vd holds N dense rows of C floats;
// Vp[x*nr_max_val + i] lists the positions of the significant components of
// row x, and Vnp[x] is how many there are. One thread per vector; the sum of
// squares over the listed positions is written to Nd[x].
__global__ void Kernel_norme(float *Vd, float *Nd, int *Vp, int *Vnp, int N, int C, int nr_max_val)
{
long int x = threadIdx.x + blockIdx.x * blockDim.x;
int pos;
if(x < N)
{
float norma = 0;
int Nr_val = Vnp[x];
for(int i = 0; i < Nr_val; i++)
{
pos = Vp[x * nr_max_val + i];
norma = norma + (Vd[x * C + pos] * Vd[x * C + pos]);
}
Nd[x] = norma;
}
}
/**
* Kernel per il calcolo del del guassiano, basato sul metodo utilizzato nel gpdt,
* modificato per l'utilizzo con la tecnologia CUDA.
**/
/**
 * Gaussian (RBF) kernel rows: Ris[y*N + x] = exp(-sigma * ||v_x - v_ind[y]||^2),
 * expanded as -2*<v_x, v_ind[y]> + Nd[x] + Nd[ind[y]] using the precomputed
 * squared norms Nd and the sparsity pattern Vp/Vnp of row x.
 * 2-D grid-stride loops; results are stored column-major (one column per
 * requested index).
 **/
__global__ void Kernel_gaus(float *Vd, float *Ris, float *Nd, int N, int C, int dim_indici, int *ind, float sigma, int *Vp, int *Vnp, int nr_max_val)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    // BUG FIX: the original advanced a single `y` across all x iterations, so
    // once the inner loop was exhausted it never ran again and every x after
    // the first pass left its entries of Ris unwritten. `y` must restart from
    // this thread's origin for each x.
    const int y_first = blockIdx.y * blockDim.y + threadIdx.y;
    for ( ; x < N ; x += blockDim.x * gridDim.x)
    {
        for (int y = y_first; y < dim_indici; y += blockDim.y * gridDim.y)
        {
            const int tmp_ind = ind[y];
            const int Nr_val = Vnp[x];
            float gaus = 0.0;
            for (int j = 0; j < Nr_val; j++)
            {
                const int pos = Vp[x * nr_max_val + j];
                gaus = gaus + (Vd[x * C + pos] * Vd[tmp_ind * C + pos]);
            }
            // ||a-b||^2 = ||a||^2 + ||b||^2 - 2*<a,b> over the shared pattern.
            gaus = - 2.0*gaus + Nd[x] + Nd[tmp_ind];
            Ris[y * N + x] = (exp(-gaus*sigma));
        }
    }
}
/**
* Kernel per il calcolo del kernel lineare
* modificato per l'utilizzo con la tecnologia CUDA.
**/
/**
 * Linear kernel rows: Ris[y*N + x] = <v_x, v_ind[y]> computed over the
 * sparsity pattern Vp/Vnp of row x. 2-D grid-stride loops; results stored
 * column-major (one column per requested index).
 **/
__global__ void Kernel_lineare(float *Vd, float *Ris, int N, int C, int dim_indici, int *ind, int *Vp, int *Vnp, int nr_max_val)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    // BUG FIX: `y` must restart from this thread's origin for every x; the
    // original reused one exhausted `y`, so only the first x iteration of each
    // thread produced output.
    const int y_first = blockIdx.y * blockDim.y + threadIdx.y;
    for ( ; x < N ; x += blockDim.x * gridDim.x)
    {
        for (int y = y_first; y < dim_indici; y += blockDim.y * gridDim.y)
        {
            const int tmp_ind = ind[y];
            const int Nr_val = Vnp[x];
            float lin = 0.0;
            for (int j = 0; j < Nr_val; j++)
            {
                const int pos = Vp[x * nr_max_val + j];
                lin = lin + (Vd[x * C + pos] * Vd[tmp_ind * C + pos]);
            }
            Ris[y * N + x] = lin;
        }
    }
}
/**
* Kernel per il calcolo del kernel lineare
* modificato per l'utilizzo con la tecnologia CUDA.
**/
/**
 * Polynomial kernel rows: Ris[y * N + x] = (<vector x, vector ind[y]> + 1)^s,
 * the power computed by repeated multiplication (integer degree s).
 *
 * BUG FIX: the original reused a single `y` variable for the inner
 * grid-stride loop, so after the first outer (x) iteration `y` stayed
 * >= dim_indici and every subsequent x computed nothing. Each outer
 * iteration now restarts the inner loop from the thread's initial y.
 */
__global__ void Kernel_polimoniale(float *Vd, float *Ris, int N, int C, int dim_indici, int *ind, int *Vp, int *Vnp, int nr_max_val, int s)
{
    int x0 = blockIdx.x * blockDim.x + threadIdx.x;
    int y0 = blockIdx.y * blockDim.y + threadIdx.y;
    for (int x = x0; x < N; x += blockDim.x * gridDim.x)
    {
        for (int y = y0; y < dim_indici; y += blockDim.y * gridDim.y)
        {
            int tmp_ind = ind[y];            // global index of this chunk column
            float pol = 0.0f;
            int Nr_val = Vnp[x];
            for (int j = 0; j < Nr_val; j++)
            {
                int pos = Vp[x * nr_max_val + j];
                pol += Vd[x * C + pos] * Vd[tmp_ind * C + pos];
            }
            pol = pol + 1;
            float tmp = 1.0f;
            for (int j = 0; j < s; j++)      // tmp = pol^s
            {
                tmp = tmp * pol;
            }
            // column-major layout over the chunk, as in the original
            Ris[y * N + x] = tmp;
        }
    }
}
/**
 * Driver: builds random sparse vectors, uploads them to the GPU and
 * computes Gram-matrix rows in chunks with the selected kernel.
 *
 * argv: [1] number of vectors, [2] components per vector,
 *       [3] number of rows to compute, [4] kernel selector
 *       (1 = linear, 2 = polynomial, 3 = Gaussian),
 *       [5] polynomial degree (sel 2) or sigma (sel 3).
 *
 * NOTE(review): argc is never validated and no CUDA call is
 * error-checked; confirm callers always pass the required arguments.
 */
int main(int argc, char** argv)
{
    /**
     * Fraction of significant (non-zero) values inside each vector.
     **/
    float perc_val_noti= 1.0 - 0.82;
    /**
     * Matrix holding the vectors whose pairwise products/norms are
     * computed. Number of vectors and their size are user-defined.
     **/
    float *vettori;
    /**
     * Matrix holding, per vector, the positions of its significant values.
     **/
    int *vettore_posizioni;
    /**
     * Per-vector count of non-zero values.
     **/
    int *vett_numero_posizioni;
    /**
     * Squared 2-norms of the vectors.
     **/
    float *vett_norme;
    /**
     * Result matrix.
     **/
    float *risultati;
    /**
     * Column indices processed in the current chunk.
     **/
    int *indici;
    /**
     * Elapsed computation time.
     **/
    float elapsedTime;
    cudaEvent_t start, stop;
    // Number of vectors and of components per vector.
    int Nr_vet_elem = atoi(argv[1]);
    int Nr_vet_comp = atoi(argv[2]);
    // Number of rows to compute.
    int Nr_righe = atoi(argv[3]);
    /**
     * Kernel selector:
     * 1 = linear kernel.
     * 2 = polynomial kernel.
     * 3 = Gaussian kernel.
     **/
    int sel_kernel = atoi(argv[4]);
    /**
     * Sigma of the Gaussian function (parsed later, only for selector 3).
     **/
    //float sigma = atoi(argv[5]);
    //sigma = (1.0/(2.0*sigma*sigma));
    // Device-side copies.
    float *Vd;
    int *Vp;
    int *Vnp;
    float *Nd;
    float *Ris;
    int *ind;
    // Free/total device memory, used to size the chunks.
    size_t free_byte;
    size_t total_byte;
    /**
     * Number of significant values per vector.
     **/
    int numero_val_significativi = Nr_vet_comp * perc_val_noti;
    // Bytes needed for the vectors.
    int tot_vett_size = Nr_vet_elem * Nr_vet_comp * sizeof(float);
    // Bytes needed for the norms vector.
    int norme_size = Nr_vet_elem * sizeof(float);
    // Bytes needed for the positions matrix.
    int vett_pos_size = Nr_vet_elem * numero_val_significativi * sizeof(int);
    // Bytes needed for the per-vector significant-value counts.
    int vett_nrpos_size = Nr_vet_elem * sizeof(int);
    // Bytes needed for one column of results.
    int col_size = Nr_vet_elem * sizeof(float);
    // Host allocations.
    vettori = (float*)malloc(tot_vett_size);
    vett_norme = (float*)malloc(norme_size);
    vettore_posizioni = (int*)malloc(vett_pos_size);
    vett_numero_posizioni = (int*)malloc(vett_nrpos_size);
    // Device allocations.
    cudaMalloc((void **)&Vd, tot_vett_size);
    cudaMalloc((void **)&Nd, norme_size);
    cudaMalloc((void **)&Vp, vett_pos_size);
    cudaMalloc((void **)&Vnp, vett_nrpos_size);
    srand(time(0));
    // Fill the vectors (helper defined elsewhere in this file).
    riempi_vettori(vettori, Nr_vet_elem, Nr_vet_comp);
    // Fill the position vectors.
    crea_vettori_termini_noti(vettore_posizioni, Nr_vet_elem, numero_val_significativi);
    // Fill the per-vector significant-value counts.
    crea_vettori_posizioni(vett_numero_posizioni, Nr_vet_elem, numero_val_significativi);
    // Transfer the vectors to the device.
    cudaMemcpy(Vd, vettori, tot_vett_size, cudaMemcpyHostToDevice);
    // Transfer the position vectors to the device.
    cudaMemcpy(Vp, vettore_posizioni, vett_pos_size, cudaMemcpyHostToDevice);
    // Transfer the per-vector value counts to the device.
    cudaMemcpy(Vnp, vett_numero_posizioni, vett_nrpos_size, cudaMemcpyHostToDevice);
    cudaMemGetInfo( &free_byte, &total_byte );
    // Columns that fit in 70% of the currently free device memory.
    int col_ospitabili_mem = (free_byte*0.7)/col_size;
    int contatore = 0;
    /**
     * Values tuned for the target device.
     * These values are based on an Nvidia 230m.
     **/
    int dimXX =4;
    int dimYY =128;
    /**
     * Number of columns the kernel computes per launch. Because a CUDA
     * kernel is killed automatically if it runs longer than 5 seconds
     * (watchdog), a per-launch cap is needed.
     * This value is based on an Nvidia 230m.
     **/
    int col_ospitabili = 200;
    if (col_ospitabili > Nr_righe)
    {
        col_ospitabili = Nr_righe;
    }
    if (col_ospitabili > col_ospitabili_mem)
    {
        col_ospitabili = col_ospitabili_mem;
    }
    // Number of full chunks; the remainder is handled after the loop.
    int numero_cicli = Nr_righe/col_ospitabili;
    cout<<"Numero cicli necessari: "<<numero_cicli<<endl;
    int risultati_size = Nr_righe * Nr_vet_elem * sizeof(float);
    int indici_size = col_ospitabili * sizeof(int);
    int risultati_part_size = col_ospitabili * Nr_vet_elem * sizeof(float);
    risultati = (float*)malloc(risultati_size);
    indici = (int*)malloc(indici_size);
    int numSMs;
    cudaDeviceGetAttribute(&numSMs, cudaDevAttrMultiProcessorCount, 0);
    cudaMalloc((void **)&Ris, risultati_part_size);
    cudaMalloc((void **)&ind, indici_size);
    // 2D grid: x over vectors (tile dimXX), y over chunk columns (tile dimYY).
    dim3 blockGridRows;
    blockGridRows.x=Nr_vet_elem/dimXX + (Nr_vet_elem%dimXX== 0?0:1);;
    blockGridRows.y=col_ospitabili/dimYY + (col_ospitabili%dimYY== 0?0:1);
    dim3 threadBlockRows;
    threadBlockRows.x=dimXX;
    threadBlockRows.y=dimYY;
    cout<<"Memoria allocata, griglie definite:"<<endl;
    cout<<"blockGridRows.x: "<<blockGridRows.x<<endl;
    cout<<"blockGridRows.y: "<<blockGridRows.y<<endl;
    cout<<"threadBlockRows.x: "<<threadBlockRows.x<<endl;
    cout<<"threadBlockRows.y: "<<threadBlockRows.y<<endl;
    // Bytes copied back per full chunk.
    int cicle_dim = col_ospitabili * Nr_vet_elem * sizeof(float);
    // 1D grid for the norms kernel: one block per vector.
    dim3 blockGridRowsn(Nr_vet_elem, 1);
    int resto;
    cudaEventCreate(&start);
    cudaEventRecord(start,0);
    switch(sel_kernel){
    case(1):{
        //cout<<"Kernel Lineare\n";
        for(int i = 0; i < numero_cicli; i++)
        {
            // Global column indices for this chunk.
            for (int kk = 0; kk < col_ospitabili; kk++)
            {
                indici[kk] = contatore + kk;
            }
            cudaMemcpy(ind, indici, indici_size, cudaMemcpyHostToDevice);
            //__global__ void Kernel_lineare(float *Vd, float *Ris, int N, int C, int dim_indici, int *ind, int *Vp, int *Vnp, int nr_max_val)
            Kernel_lineare<<< blockGridRows, threadBlockRows>>>(Vd, Ris, Nr_vet_elem, Nr_vet_comp, col_ospitabili, ind, Vp, Vnp, numero_val_significativi);
            cudaMemcpy(risultati+(i*col_ospitabili*Nr_vet_elem), Ris, cicle_dim, cudaMemcpyDeviceToHost);
            contatore = contatore + col_ospitabili;
        }
        // Remainder chunk (fewer than col_ospitabili columns).
        resto = Nr_righe - contatore;
        if (resto > 0)
        {
            for (int kk = 0; kk < resto; kk++)
            {
                indici[kk] = contatore + kk;
            }
            cudaMemcpy(ind, indici, indici_size, cudaMemcpyHostToDevice);
            Kernel_lineare<<< blockGridRows, threadBlockRows>>>(Vd, Ris, Nr_vet_elem, Nr_vet_comp, resto, ind, Vp, Vnp, numero_val_significativi);
            cudaMemcpy(risultati+(numero_cicli)*(col_ospitabili*Nr_vet_elem), Ris, resto * Nr_vet_elem * sizeof(float), cudaMemcpyDeviceToHost);
        }
        break;
    }
    case(2):{
        //cout<<"Kernel Polimoniale\n";
        /**
         * Degree of the polynomial kernel.
         **/
        int grado = atoi(argv[5]);
        for(int i = 0; i < numero_cicli; i++)
        {
            for (int kk = 0; kk < col_ospitabili; kk++)
            {
                indici[kk] = contatore + kk;
            }
            cudaMemcpy(ind, indici, indici_size, cudaMemcpyHostToDevice);
            //__global__ void Kernel_polimoniale(float *Vd, float *Ris, int N, int C, int dim_indici, int *ind, int *Vp, int *Vnp, int nr_max_val, int s)
            Kernel_polimoniale<<< blockGridRows, threadBlockRows>>>(Vd, Ris, Nr_vet_elem, Nr_vet_comp, col_ospitabili, ind, Vp, Vnp, numero_val_significativi,grado);
            cudaMemcpy(risultati+(i*col_ospitabili*Nr_vet_elem), Ris, cicle_dim, cudaMemcpyDeviceToHost);
            contatore = contatore + col_ospitabili;
        }
        resto = Nr_righe - contatore;
        if (resto > 0)
        {
            for (int kk = 0; kk < resto; kk++)
            {
                indici[kk] = contatore + kk;
            }
            cudaMemcpy(ind, indici, indici_size, cudaMemcpyHostToDevice);
            Kernel_polimoniale<<< blockGridRows, threadBlockRows>>>(Vd, Ris, Nr_vet_elem, Nr_vet_comp, resto, ind, Vp, Vnp, numero_val_significativi,grado);
            cudaMemcpy(risultati+(numero_cicli)*(col_ospitabili*Nr_vet_elem), Ris, resto * Nr_vet_elem * sizeof(float), cudaMemcpyDeviceToHost);
        }
        break;
    }
    case(3):{
        //cout<<"Kernel gaussiano\n";
        /**
         * Sigma of the Gaussian function.
         **/
        float sigma = atof(argv[5]);
        sigma = (1.0/(2.0*sigma*sigma));
        // Precompute the squared norms (one block per vector, 256 threads).
        Kernel_norme<<< blockGridRowsn, 256 >>>(Vd, Nd, Vp, Vnp, Nr_vet_elem, Nr_vet_comp, numero_val_significativi);
        // Compute the kernel chunks.
        for(int i = 0; i < numero_cicli; i++)
        {
            for (int kk = 0; kk < col_ospitabili; kk++)
            {
                indici[kk] = contatore + kk;
            }
            cudaMemcpy(ind, indici, indici_size, cudaMemcpyHostToDevice);
            //Kernel_gaus(float *Vd, float *Ris, float *Nd, int N, int C, int dim_indici, int *ind, float sigma, float *Vp, float *Vnp)
            Kernel_gaus<<< blockGridRows, threadBlockRows>>>(Vd, Ris, Nd, Nr_vet_elem, Nr_vet_comp, col_ospitabili, ind, sigma, Vp, Vnp, numero_val_significativi);
            cudaMemcpy(risultati+(i*col_ospitabili*Nr_vet_elem), Ris, cicle_dim, cudaMemcpyDeviceToHost);
            contatore = contatore + col_ospitabili;
        }
        resto = Nr_righe - contatore;
        if (resto > 0)
        {
            for (int kk = 0; kk < resto; kk++)
            {
                indici[kk] = contatore + kk;
            }
            cudaMemcpy(ind, indici, indici_size, cudaMemcpyHostToDevice);
            Kernel_gaus<<< blockGridRows, threadBlockRows>>>(Vd, Ris, Nd, Nr_vet_elem, Nr_vet_comp, resto, ind, sigma, Vp, Vnp, numero_val_significativi);
            cudaMemcpy(risultati+(numero_cicli)*(col_ospitabili*Nr_vet_elem), Ris, resto * Nr_vet_elem * sizeof(float), cudaMemcpyDeviceToHost);
        }
        break;
    }
    default:
    {
        cout<<"Scelta non valida.\n";
        cout<<"4° argomento non esistente.\n";
        cout<<"1 = kernel lineare\t2 = kernel polimoniale\t 3 = kernel gaussiano\n";
        break;
    }
    }
    cudaEventCreate(&stop);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start,stop);
    cout<<"Tempo totale:\t"<<elapsedTime/1000<<" secondi\n";
    /*for (int i = 0; i < Nr_vet_elem*Nr_righe; i++)
    {
        cout<<risultati[i]<<endl;
    }*/
    // Release host and device resources.
    free(vettori);
    free(vett_norme);
    free(vettore_posizioni);
    free(vett_numero_posizioni);
    free(indici);
    free(risultati);
    cudaFree(Vd);
    cudaFree(Vp);
    cudaFree(Vnp);
    cudaFree(Nd);
    cudaFree(ind);
    cudaFree(Ris);
    return 0;
}
|
10,916 | #include "includes.h"
/**
 * Cross product of two vectors stored as 3 rows of N components each:
 * row b of v3 receives component b of (v1 x v2), one thread per column.
 * Launch layout per the original comment: blockIdx.x in {0,1,2},
 * threadIdx.x in [0, N).
 */
__global__ void compute(int *v1, int *v2, int *v3, int N){
    int col = threadIdx.x;
    // The two "other" rows combined for this component, in the same
    // order the original branches used.
    int rowA, rowB;
    if (blockIdx.x == 2)      { rowA = 0; rowB = 1; }   // z = x*y' - y*x'
    else if (blockIdx.x == 1) { rowA = 2; rowB = 0; }   // y = z*x' - x*z'
    else                      { rowA = 1; rowB = 2; }   // x = y*z' - z*y'
    v3[N * blockIdx.x + col] = v1[rowA * N + col] * v2[rowB * N + col]
                             - v1[rowB * N + col] * v2[rowA * N + col];
}
10,917 | /*
#include "Refine.hxx"
#define T_BLOCKS 1
#define T_THREADS 64
__device__ void printList(ListElt<int>* list, int size) {
if(threadIdx.x==0 && blockIdx.x==0) {
printf("size:%d\n", size);
for(int i=0; i<size; i++) {
printf("%d, %d, %d\n", list[i].id, list[i].dist, list[i].checkedFlag);
}
printf("\n");
}
__syncthreads();
}
__global__ void test_removeDuplicates(ListElt<int>* listMem) {
ListElt<int>* list = &listMem[blockIdx.x*LISTSIZE*2];
typedef cub::BlockRadixSort<int, T_THREADS, LISTCAP/T_THREADS, ListElt<int>> BlockRadixSortT;
__shared__ typename BlockRadixSortT::TempStorage temp_storage;
__shared__ int border_vals[T_THREADS];
for(int i=threadIdx.x; i<LISTSIZE; i+=blockDim.x) {
list[i].id = i;
list[i+LISTSIZE].id = i;
list[i].dist = -i;
list[i+LISTSIZE].dist = -i;
list[i].checkedFlag=true;
list[i+LISTSIZE].checkedFlag=false;
}
__syncthreads();
int listSize = LISTSIZE*2;
__syncthreads();
printList(list, listSize);
__syncthreads();
sortListById<int, int, 0, T_THREADS>(list, &listSize, &temp_storage);
removeDuplicatesAndCompact<int, int, 0, T_THREADS>(list, &listSize, &temp_storage, border_vals);
printList(list, listSize);
}
int main(int argc, char* argv[]) {
ListElt<int>* listMem;
cudaMalloc(&listMem, T_BLOCKS*LISTSIZE*2*sizeof(ListElt<int>));
test_removeDuplicates<<<T_BLOCKS, T_THREADS>>>(listMem);
printf("Error: %d\n", cudaDeviceSynchronize());
}
*/
|
10,918 | #include "includes.h"
/**
 * Transposes an nx x ny row-major matrix into `transpose`, remapping the
 * block y-coordinate diagonally ((blockIdx.x + blockIdx.y) % gridDim.x)
 * to spread partition-camping across memory partitions.
 *
 * BUG FIX: the global coordinates were computed as
 * `blockIdx.x * blk_x + threadIdx.x` / `blockIdx.y * blk_y + threadIdx.y`,
 * which multiplies two block indices together (dropping blockDim entirely)
 * and ignores the remapped block coordinate. They are now
 * `blk * blockDim + threadIdx`, the standard tile addressing.
 */
__global__ void transpose_diagonal_row(int * mat, int * transpose, int nx, int ny)
{
    int blk_x = blockIdx.x;
    int blk_y = (blockIdx.x + blockIdx.y) % gridDim.x;   // diagonal remap
    int ix = blk_x * blockDim.x + threadIdx.x;
    int iy = blk_y * blockDim.y + threadIdx.y;
    if (ix < nx && iy < ny)
    {
        transpose[ix * ny + iy] = mat[iy * nx + ix];
    }
}
10,919 | #include "includes.h"
/**
 * Row-wise max/argmax: for each of the `nrows` rows of `input`
 * (`ncols` wide, row-major), writes the maximum value to output[row]
 * and its 1-based column index to indices[row] (stored as float, as in
 * the original). One thread per row; grid-tail guarded.
 */
__global__ void max_output(float *input, float *output, float *indices, long nrows, long ncols)
{
    long row = threadIdx.x + blockDim.x * blockIdx.x;
    if (row >= nrows) return;

    const float *src = input + row * ncols;   // start of this thread's row
    float best = src[0];
    long bestIdx = 0;
    for (long c = 1; c < ncols; ++c) {
        float v = src[c];
        if (v > best) {
            best = v;
            bestIdx = c;
        }
    }
    output[row] = best;
    indices[row] = bestIdx + 1;   // 1-based index, matching the original
}
10,920 | #include <stdio.h>
#include <cuda.h>
#define N 100
/**
 * Element-wise addition of two n x n row-major matrices: c = a + b.
 * One thread per element on a 2D launch; out-of-range threads do nothing.
 *
 * FIX: the kernel previously ignored its `n` parameter and used the
 * compile-time macro N for both the bounds check and the row stride.
 * It now honors `n`, so it works for any matrix size; callers in this
 * file pass N, so behavior is unchanged for them.
 */
__global__ void matrixAddKernel(int * a, int * b, int * c, int n){
    int col = threadIdx.x + blockDim.x * blockIdx.x;
    int row = threadIdx.y + blockDim.y * blockIdx.y;
    if(col < n && row < n)
    {
        int index = row * n + col;
        c[index] = a[index] + b[index];
    }
}
/* CPU reference: element-wise sum of two n x n row-major matrices,
 * c[r][k] = a[r][k] + b[r][k]. */
void matrixAdd(int *a, int *b, int *c, int n)
{
    for(int row = 0; row < n; row++)
    {
        for(int col = 0; col < n; col++)
        {
            int idx = row * n + col;
            c[idx] = a[idx] + b[idx];
        }
    }
}
/**
 * Adds two N x N matrices on the GPU and on the CPU and compares.
 *
 * FIXES:
 *  - The original launched one block of N*N = 10000 threads, which
 *    exceeds the 1024-threads-per-block hardware limit, so the kernel
 *    never ran. The launch now uses 16x16 tiles with a ceil-div grid.
 *  - The verification loop had `break;` OUTSIDE the `if`, so only
 *    element 0 was ever compared; the break is now inside the mismatch
 *    branch.
 *  - Host buffers, device buffers and CUDA events are now released.
 */
int main(){
    // 16x16 threads per block (256 <= 1024 limit), grid covers N x N.
    dim3 block(16, 16, 1);
    dim3 grid((N + 15) / 16, (N + 15) / 16, 1);
    int *a_h;
    int *b_h;
    int *c_h;
    int *d_h;
    int *a_d;
    int *b_d;
    int *c_d;
    int size;
    cudaEvent_t start;
    cudaEvent_t stop;
    float elapsedTime;
    printf("Number of threads: %i (%ix%i)\n", block.x*block.y, block.x, block.y);
    printf("Number of blocks: %i (%ix%i)\n", grid.x*grid.y, grid.x, grid.y);
    size = N * N * sizeof(int);
    a_h = (int*) malloc(size);
    b_h = (int*) malloc(size);
    c_h = (int*) malloc(size);
    d_h = (int*) malloc(size);
    // Row i of both inputs holds the value i.
    for(int i=0; i<N; i++)
    {
        for(int j=0; j<N; j++)
        {
            a_h[i * N + j] = i;
            b_h[i * N + j] = i;
        }
    }
    cudaMalloc((void**)&a_d, size);
    cudaMalloc((void**)&b_d, size);
    cudaMalloc((void**)&c_d, size);
    cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b_h, size, cudaMemcpyHostToDevice);
    cudaMemcpy(c_d, c_h, size, cudaMemcpyHostToDevice);
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    matrixAddKernel<<<grid, block>>>(a_d, b_d, c_d, N);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("Time to calculate results on GPU: %f ms.\n",elapsedTime);
    cudaMemcpy(c_h, c_d, size ,cudaMemcpyDeviceToHost);
    cudaEventRecord(start, 0);
    matrixAdd(a_h, b_h, d_h, N);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop );
    printf("Time to calculate results on CPU: %f ms.\n",elapsedTime);
    // Compare every element; stop at the first mismatch.
    for(int i=0; i<N*N; i++)
    {
        if (c_h[i] != d_h[i])
        {
            printf("Error: CPU and GPU results do not match\n");
            break;
        }
    }
    // Release all resources (previously leaked).
    free(a_h); free(b_h); free(c_h); free(d_h);
    cudaFree(a_d); cudaFree(b_d); cudaFree(c_d);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
|
10,921 | #include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
/*
#define N 512
__global__ void dotProd( int *a, int *b, int *c ) {
__shared__ int temp[N];
temp[threadIdx.x] = a[threadIdx.x] * b[threadIdx.x];
__syncthreads(); // Evita condición de carrera.
if( 0 == threadIdx.x ) {
int sum = 0;
for(int i = 0; i < N; i++ ) {
sum += temp[i]; //lento
}
*c = sum;
}
}
#define N 2048
#define THREADS_PER_BLOCK 512
__global__ void dotProd( int *a, int *b, int *c ) {
__shared__ int temp[THREADS_PER_BLOCK];
int index = threadIdx.x + blockIdx.x * blockDim.x;
temp[threadIdx.x] = a[index] * b[index];
__syncthreads(); // Hasta que no rellenen todos los thread temp no puedo continuar...
if(threadIdx.x == 0) {
int sum = 0;
for( int i= 0; i < THREADS_PER_BLOCK; i++ ) {
sum += temp[i];
}
c[blockIdx.x] = sum;
}
}
*/
const int THREADS_PER_BLOCK = 32;
const int N = 2048;
// Element-wise product c[i] = a[i] * b[i] over the first N elements,
// one thread per element, guarded against the grid tail.
__global__ void mult(int *a, int *b, int *c)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N)
        c[i] = a[i] * b[i];
}
/**
 * Element-wise product staged through shared memory (demonstration:
 * the result is the same as `mult`, c[i] = a[i] * b[i]).
 *
 * FIX: added an `i < N` guard — the original unconditionally read
 * a[pos]/b[pos] and wrote c[pos], which is out of bounds whenever the
 * grid overshoots N. __syncthreads() stays OUTSIDE the guard so every
 * thread of the block reaches the barrier.
 */
__global__ void shared_mult(int *a, int *b, int *c)
{
    __shared__ int mem[THREADS_PER_BLOCK];
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < N)
        mem[threadIdx.x] = a[i] * b[i];
    __syncthreads();
    if (i < N)
        c[i] = mem[threadIdx.x];
}
/**
 * Fills two N-element vectors with 3, multiplies them element-wise on
 * the GPU via shared_mult and prints the result (expected 9 per slot).
 *
 * FIX: the three host buffers (a, b, c) were never freed; they are now
 * released together with the device buffers.
 */
int main(int argc, char const *argv[]) {
    int *a, *b, *c;
    int *dev_a, *dev_b, *dev_c;
    int size = sizeof(int) * N;
    a = (int *) malloc(size);
    b = (int *) malloc(size);
    c = (int *) malloc(size);
    for (int i = 0; i < N ; i++) {
        a[i] = b[i] = 3;
    }
    cudaMalloc(&dev_a, size);
    cudaMalloc(&dev_b, size);
    cudaMalloc(&dev_c, size);
    cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice);
    cudaMemset(dev_c, 0, size);
    // Ceil-div grid so every element is covered.
    shared_mult<<<(N - 1) / THREADS_PER_BLOCK + 1, THREADS_PER_BLOCK>>>(dev_a, dev_b, dev_c);
    cudaMemcpy(c, dev_c, size, cudaMemcpyDeviceToHost);
    for (int i = 0; i < N ; i++) {
        fprintf(stdout, "Numb : %d\n", c[i]);
    }
    cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c);
    free(a); free(b); free(c);   // previously leaked
    return 0;
}
|
10,922 | /*
* =====================================================================================
*
* Filename: jacobi_cpu.c
*
* Description:
*
* Version: 1.0
* Created: 12/05/11 02:30:51
* Revision: none
* Compiler: gcc
*
* Author: YOUR NAME (),
* Company:
*
* =====================================================================================
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#define SIZE 8192
#define BLOCK_SIZE 16
/* Returns the maximum entry of the SIZE x SIZE matrix `u`.
 * Every 10th iteration also prints the ratio of the new maximum against
 * the previous one (`ant`) for convergence monitoring. */
float ratio(float**u,float ant,int iter)
{
    float maxVal = 0.0;
    for (int r = 0; r < SIZE; r++)
    {
        for (int c = 0; c < SIZE; c++)
        {
            if (u[r][c] > maxVal)
                maxVal = u[r][c];
        }
    }
    if (iter % 10 == 0)
        printf(" iter=%d ratio=%f max=%f\n", iter, maxVal / ant, maxVal);
    return maxVal;
}
/* Prints the full SIZE x SIZE matrix `u`, one row per line. */
void muestra(float**u)
{
    for (int r = 0; r < SIZE; r++)
    {
        for (int c = 0; c < SIZE; c++)
            printf("%f ", u[r][c]);
        printf("\n");
    }
}
/**
 * Demo Jacobi sweep over a SIZE x SIZE grid using a BLOCK_SIZE x
 * BLOCK_SIZE shared-memory tile. NOTE: as in the original, the stencil
 * result is immediately overwritten by `pr`, so the kernel's observable
 * output is d_u_new[i][j] = pr for every in-range cell (main() checks
 * exactly that).
 *
 * FIX: the stencil read bf[k-1][l], bf[k+1][l], bf[k][l-1], bf[k][l+1]
 * with no bounds check, so border threads of every tile indexed outside
 * the shared array (undefined behavior). The (dead) stencil is now
 * restricted to interior threads of the tile.
 */
__global__ void jacobi(float *d_u_new,float *d_u, float *d_f,float h, float pr)
{
    __shared__ float bf[BLOCK_SIZE][BLOCK_SIZE];
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    int j = blockIdx.y*blockDim.y + threadIdx.y;
    int k = threadIdx.x;
    int l = threadIdx.y;
    bf[k][l] = d_u[i*SIZE+j];
    __syncthreads();   // tile fully loaded before any neighbor reads
    if(i>=0 && i < SIZE && j>=0 && j<SIZE)
    {
        // Stencil only where all four neighbors are inside the tile.
        if (k > 0 && k < BLOCK_SIZE-1 && l > 0 && l < BLOCK_SIZE-1)
        {
            d_u_new[i*SIZE+j]=0.25*(
                h*h*d_f[i *SIZE+j ]+
                bf[k-1][l]+
                bf[k+1][l]+
                bf[k][l-1]+
                bf[k][l+1]);
        }
        d_u_new[i*SIZE+j]=pr;   // final value, exactly as the original
    }
}
/**
 * Jacobi demo driver: allocates host/device grids, runs 100 jacobi
 * launches (whose observable effect is filling d_u_new with 1.0*i) and
 * verifies the last value (99.0) came back.
 *
 * FIX: the memory-size printf used "%d" with a size_t expression, which
 * is undefined behavior on LP64 platforms; it now uses "%zu".
 */
int main()
{
    float * h_u, * h_f;
    float * d_u, *d_u_new, *d_f;
    float * temp;
    float ant=1.0;
    int i,j;
    size_t size;
    float h = 1.0/SIZE;
    /* Allocate host memory. */
    size=SIZE*SIZE*sizeof(float);
    printf("Necesitamos %zu Mb\n",3*size/1024/1024);
    h_u = (float*)malloc(size);
    h_f = (float*)malloc(size);
    /* Allocate GPU memory. */
    cudaMalloc(&d_u,size);
    cudaMalloc(&d_u_new,size);
    cudaMalloc(&d_f,size);
    /* Initialize: zero forcing term, random interior. */
    for(i=0;i<SIZE;i++)
    {
        for(j=0;j<SIZE;j++)
        {
            h_f[i*SIZE+j]=0.0;
            h_u[i*SIZE+j]=rand();
        }
    }
    /* Zero the four boundary edges. */
    for(i=0;i<SIZE;i++)
    {
        h_u[i]=0.0;
        h_u[i*SIZE]=0.0;
        h_u[SIZE*(SIZE-1)+i]=0.0;
        h_u[i*SIZE+SIZE-1]=0.0;
    }
    /* Copy host data to the GPU. */
    cudaMemcpy(d_f,h_f,size,cudaMemcpyHostToDevice);
    cudaMemcpy(d_u,h_u,size,cudaMemcpyHostToDevice);
    cudaMemcpy(d_u_new,h_u,size,cudaMemcpyHostToDevice);
    /* Build the launch grid. */
    dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE);
    dim3 dimGrid(SIZE/BLOCK_SIZE,SIZE/BLOCK_SIZE);
    /* Main loop: call JACOBI (pr = 1.0*i on each launch). */
    for(i=0;i<100;i++)
    {
        jacobi<<<dimGrid,dimBlock>>>(d_u_new,d_u,d_f,h,1.0*i);
        if(i%10==0)
            printf("iter=%d\n",i);
    }
    /* Copy the result back. */
    cudaMemcpy(h_u,d_u_new,size,cudaMemcpyDeviceToHost);
    /* Verify: every cell should hold the last pr value (99). */
    for(i=0;i<SIZE*SIZE;i++)
        if(h_u[i]!=1.0*99)
            printf("Error en %d, h_u[%d]=%f\n",i,i,h_u[i]);
    /* Free all memory. */
    free(h_u);
    free(h_f);
    cudaFree(d_u_new);
    cudaFree(d_u);
    cudaFree(d_f);
}
|
10,923 | #include <iostream>
#include <cuda_runtime.h>
// Queries and reports how many CUDA-capable devices are visible.
int main(){
    int deviceCount = 0;
    cudaGetDeviceCount(&deviceCount);
    std::cout << deviceCount << " device(s) found.\n";
    return 0;
}
|
10,924 | /* Performance testing for assignment operator '+='
Comparison between '+=' and '='
Two kernels should be executed separately or else the kernels will influence each other
Kernel that executed later was faster regardless of operator
Result : assignment operator '=' is faster than '+='
100 ms vs 94 ms for 256*100000 nodes
This result will be beneficial to optimize Peridynamics kernel
*/
#include <cmath> //for calculating power & NaN
#include<iostream>
#include<cstdio>
#include <vector>
#include <cstdlib>
#include <fstream> // for writing to file
#include <math.h> //exp, pi
#include <chrono> //for time measurement
#include <fstream>
#include <ctime>
using namespace std;
using namespace std::chrono;
//Use assignment operator '+='
// '+=' variant under test: c[i] += (a[i] + b[i]) * sqrt(a[i]) * sqrt(b[i]).
// Flattened index over (threadIdx.x, threadIdx.y, blockIdx.x), as launched
// with block (256,1,1) in main().
__global__ void add_01 (float *array_a,
                        float *array_b,
                        float *array_c ){
    size_t idx = threadIdx.x
               + blockDim.x * threadIdx.y
               + blockDim.x * blockDim.y * blockIdx.x;
    float term = (array_a[idx] + array_b[idx]) * sqrt(array_a[idx]) * sqrt(array_b[idx]);
    array_c[idx] += term;
}
//Use assignment operator '='
// '=' variant under test: c[i] = (a[i] + b[i]) * sqrt(a[i]) * sqrt(b[i]).
// Same flattened indexing as add_01 so the two kernels are comparable.
__global__ void add_02 (float *array_a,
                        float *array_b,
                        float *array_c ){
    size_t idx = threadIdx.x
               + blockDim.x * threadIdx.y
               + blockDim.x * blockDim.y * blockIdx.x;
    float term = (array_a[idx] + array_b[idx]) * sqrt(array_a[idx]) * sqrt(array_b[idx]);
    array_c[idx] = term;
}
/**
 * Micro-benchmark comparing '+=' (add_01) vs '=' (add_02) assignment in
 * a kernel. The two kernels must be run in SEPARATE executions (one is
 * commented out) or they influence each other's timing — see the file
 * header comment.
 */
int main(int argc, char **argv){
    cout<<"Start of program assignment operator test"<<endl;
    const size_t node = 256*100000;
    // Separate a/b copies for the 01 and 02 runs so neither kernel can
    // reuse values warmed by the other.
    float *array_a_01= (float*) calloc (node, sizeof(float));
    float *array_b_01= (float*) calloc (node, sizeof(float));
    float *array_a_02= (float*) calloc (node, sizeof(float));
    float *array_b_02= (float*) calloc (node, sizeof(float));
    float *array_c_01= (float*) calloc (node, sizeof(float));
    float *array_c_02= (float*) calloc (node, sizeof(float));
    cout<<"No of nodes = "<<node<<endl;
    // Initialization with dummy values; 02 mirrors 01 exactly.
    for (size_t i = 0; i < node; ++i) {
        array_a_01[i]= 2.30*i; // dummy value
        array_b_01[i]= 1.45*i; // dummy value
        array_a_02 [i] = array_a_01 [i];
        array_b_02 [i] = array_b_01 [i];
    }
    //###########################################################################
    /*
    int blockSize; // The launch configurator returned block size
    int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch
    int gridSize; // The actual grid size needed, based on input size
    cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, cal_dilatation, 0, node);
    gridSize = (node + blockSize - 1) / blockSize;
    printf("\t Blocksize= %i\n", blockSize);
    printf("\t minGridSize= %i \n",minGridSize);
    printf("\t gridSize= %i \n",gridSize);*/
    // Create memory buffers on the device for each vector.
    float* buffer_array_a_01, * buffer_array_b_01;
    float* buffer_array_a_02, * buffer_array_b_02;
    float * buffer_array_c_01, * buffer_array_c_02;
    cudaMalloc(&buffer_array_a_01, node*sizeof(float));
    cudaMalloc(&buffer_array_a_02, node*sizeof(float));
    cudaMalloc(&buffer_array_b_01, node*sizeof(float));
    cudaMalloc(&buffer_array_b_02, node*sizeof(float));
    cudaMalloc(&buffer_array_c_01, node*sizeof(float));
    cudaMalloc(&buffer_array_c_02, node*sizeof(float));
    cudaMemcpy(buffer_array_a_01, array_a_01, node*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(buffer_array_a_02, array_a_02, node*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(buffer_array_b_01, array_b_01, node*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(buffer_array_b_02, array_b_02, node*sizeof(float), cudaMemcpyHostToDevice);
    // NOTE(review): these locals shadow the CUDA built-ins gridDim/blockDim
    // on the host side — legal here, but confusing; consider renaming.
    dim3 gridDim(node/256,1,1); // 512 x 1 x 1
    dim3 blockDim(256, 1, 1); // 1024 x 1024 x 1
    printf("Using manual gridDim %i, ", gridDim.x);
    printf("blockDim %i, ", blockDim.x);
    printf("= %i;", (gridDim.x*blockDim.x));
    printf("\t Gap = %zi \n", (gridDim.x*blockDim.x)-node);
    /*printf("Using optimized blockSize %i, ", blockSize);
    printf("gridSize %i, ", gridSize);
    printf("= %i;", (gridSize*blockSize));
    printf("\t Gap = %zi \n", (gridSize*blockSize)-node);*/
    //Comment out one of the 2 kernels below---------------------------
    /*
    system_clock::time_point start_01 = system_clock::now();
    add_01<<< gridDim , blockDim>>>(buffer_array_a_01, buffer_array_b_01, buffer_array_c_01);
    cudaMemcpy(array_c_01, buffer_array_c_01, node *sizeof(float), cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    system_clock::time_point stop_01 = system_clock::now();
    std::chrono::duration<float, std::milli> duration_01 = stop_01 - start_01;
    cout << "Assignment operator '+=' time = "<<duration_01.count()<<" millisecond"<<endl;
    */
    //------------------------------------------------------------
    // Timed '=' run: launch, copy back, then synchronize before stopping
    // the wall clock.
    system_clock::time_point start_02 = system_clock::now();
    add_02<<< gridDim , blockDim>>>(buffer_array_a_02, buffer_array_b_02, buffer_array_c_02);
    cudaMemcpy(array_c_02, buffer_array_c_02, node *sizeof(float), cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    system_clock::time_point stop_02 = system_clock::now();
    std::chrono::duration<float, std::milli> duration_02 = stop_02 - start_02;
    cout << "Assignment operator '=' time = "<<duration_02.count()<<" millisecond"<<endl;
    //##########################################################
    // Spot-check the first few results. NOTE(review): with the 01 run
    // commented out, array_c_01 is still the calloc'd zeros here.
    printf("\t array_c_01 - array_c_02 : \n");
    for (size_t i = 0; i < 5; ++i) {
        printf("\t\t %f", array_c_01[i]);
        printf(" - %f \n", array_c_02[i]);
    }
    cudaFree(buffer_array_a_01); cudaFree(buffer_array_a_02);
    cudaFree(buffer_array_b_01); cudaFree(buffer_array_b_02);
    cudaFree(buffer_array_c_01);
    cudaFree(buffer_array_c_02);
    free(array_a_01); free(array_a_02);
    free (array_b_01); free(array_b_02);
    free (array_c_01); free (array_c_02);
    printf("End of program!\n\n");
}
10,925 | #include <stdio.h>
#include <cuda_runtime_api.h>
/****************************************************************************
This program gives an example of a poor way to implement a password cracker
in CUDA C. It is poor because it acheives this with just one thread, which
is obviously not good given the scale of parallelism available to CUDA
programs.
The intentions of this program are:
1) Demonstrate the use of __device__ and __global__ functions
2) Enable a simulation of password cracking in the absence of library
with equivalent functionality to libcrypt. The password to be found
is hardcoded into a function called is_a_match.
Compile and run with:
nvcc -o cuda_crack.cu cuda_crack
./cuda_crack
Dr Kevan Buckley, University of Wolverhampton, 2018
*****************************************************************************/
/****************************************************************************
This function returns 1 if the attempt at cracking the password is
identical to the plain text password string stored in the program.
Otherwise,it returns 0.
*****************************************************************************/
/****************************************************************************
 Returns 1 when `attempt` is byte-for-byte identical to the hard-coded
 plain-text password ("RG"), 0 otherwise.
*****************************************************************************/
__device__ int is_a_match(char *attempt)
{
    char plain_password[] = "RG";
    char *p = plain_password;
    for (char *a = attempt; *a == *p; ++a, ++p)
    {
        if (*a == '\0')   // matched all the way to both terminators
        {
            return 1;
        }
    }
    return 0;
}
/****************************************************************************
The kernel function assume that there will be only one thread and uses
nested loops to generate all possible passwords and test whether they match
the hidden password.
*****************************************************************************/
/****************************************************************************
 Every thread brute-forces ALL two-letter uppercase passwords (AA..ZZ)
 against is_a_match and reports a hit along with its own global id.

 FIX: the success printf's format string was malformed —
 "password found: %s\n, Thread Id is %d\n threadID" pasted the
 identifier `threadID` inside the string literal and left a stray
 comma after the newline. It now prints the password and thread id
 cleanly.
*****************************************************************************/
__global__ void kernel()
{
    char i, j;
    int threadID = threadIdx.x;
    int blockID = blockIdx.x;
    int tID = blockID * blockDim.x + threadID;
    // prints a unique thread id.
    printf("Thread id is %d\n", tID);
    char password[3];
    password[2] = '\0';
    for(i='A'; i<='Z'; i++)
    {
        password[0] = i;
        for(j='A'; j<='Z'; j++)
        {
            password[1] = j;
            if(is_a_match(password))
            {
                printf("password found: %s, Thread Id is %d\n", password, tID);
            }
            else
            {
                //printf("tried: %s\n", password);
            }
        }
    }
}
/**
 * Launches the cracking kernel on 26 blocks of 26 threads.
 *
 * FIXES: both cudaMemcpy calls had destination and source swapped
 * (the H2D copy read from the uninitialized device pointer into the
 * host array, and the D2H copy the reverse); the device buffer is now
 * also freed. The letters buffer itself is still unused by the kernel —
 * kept for parity with the original program.
 */
int main()
{
    char arrayLetters[26] = {'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
                             'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'};
    char *gpuLetters;
    cudaMalloc((void**) &gpuLetters, 26*sizeof(char));
    // host -> device: destination first, then source
    cudaMemcpy(gpuLetters, arrayLetters, 26*sizeof(char), cudaMemcpyHostToDevice);
    // 26 blocks x 26 threads
    kernel <<<26, 26>>>();
    cudaThreadSynchronize();
    // device -> host
    cudaMemcpy(arrayLetters, gpuLetters, 26*sizeof(char), cudaMemcpyDeviceToHost);
    cudaFree(gpuLetters);
    return 0;
}
|
10,926 | #include "includes.h"
/**
 * For each row r < row of the row-major probability matrix (`col`
 * columns), picks the entry at column indices[r] and stores its
 * negative logarithm in minus_log_ps[r]. One thread per row, tail-guarded.
 */
__global__ void pick_minus_log_ps(float *matrix, float *minus_log_ps, unsigned int *indices, unsigned int row, unsigned int col) {
    int r = blockIdx.x * blockDim.x + threadIdx.x;
    if (r >= row)
        return;
    float p = matrix[r * col + indices[r]];
    minus_log_ps[r] = -log(p);
}
10,927 | #include <thrust/scan.h>
#include <thrust/device_vector.h>
#include <iostream>
/**
 * Thrust scan demo: runs an inclusive scan and an exclusive scan
 * in place on the same 6-element device vector.
 *
 * FIX: the exclusive_scan call passed data.end() as the OUTPUT
 * iterator, writing the six scan results past the end of the vector
 * (out-of-bounds) and leaving `data` untouched. For the in-place scan
 * the comment describes, the output must start at data.begin().
 */
int main(){
    thrust::device_vector<int> data(6, 0);
    data[0] = 1;
    data[1] = 0;
    data[2] = 2;
    data[3] = 2;
    data[4] = 1;
    data[5] = 3;
    thrust::inclusive_scan(data.begin(), data.end(), data.begin()); // in-place scan
    // data is now {1, 1, 3, 5, 6, 9}
    /* data[0] = data[0]
     * data[1] = data[0] + data[1]
     * data[2] = data[0] + data[1] + data[2]
     * ...
     * data[5] = data[0] + data[1] + ... + data[5]
     */
    data[0] = 1;
    data[1] = 0;
    data[2] = 2;
    data[3] = 2;
    data[4] = 1;
    data[5] = 3;
    thrust::exclusive_scan(data.begin(), data.end(), data.begin()); // in-place scan
    // data is now {0, 1, 1, 3, 5, 6}
    /* data[0] = 0
     * data[1] = data[0]
     * data[2] = data[0] + data[1]
     * ...
     * data[5] = data[0] + data[1] + ... + data[4]
     */
}
10,928 | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda.h>
#include <device_launch_parameters.h>
#define LIST_SIZE_GLOBAL 5000000
#define LIST_SIZE 10000
extern "C" __device__ unsigned long long load_store_index[LIST_SIZE];
extern "C" __device__ unsigned long long load_store_value[LIST_SIZE];
extern "C" __device__ double load_store_double[LIST_SIZE];
extern "C" __device__ unsigned long long load_store_double_index[LIST_SIZE];
extern "C" __device__ unsigned long long record_flag;
extern "C" __device__ unsigned long long call_count;
int memPro_kernel = 0;
// Clears the device-side record_flag symbol so instrumentation stops logging.
void bambooLogRecordOff(){
    long long off = 0;
    cudaMemcpyToSymbol(record_flag, &off, sizeof(long long), 0, cudaMemcpyHostToDevice);
}
// Stores the kernel invocation counter `i` in the device call_count symbol,
// then raises record_flag (1) to enable logging for the upcoming kernel.
void bambooLogKernelBegin(long long i) {
    cudaMemcpyToSymbol(call_count, &i, sizeof(long long), 0, cudaMemcpyHostToDevice);
    long long on = 1;
    cudaMemcpyToSymbol(record_flag, &on, sizeof(long long), 0, cudaMemcpyHostToDevice);
}
// Drains the device-side load/store logs in LIST_SIZE-sized windows and
// appends "index value" pairs to profile_mem_val_result.txt, first for the
// integer log, then for the double log.
// NOTE(review): the j offsets walk up to LIST_SIZE_GLOBAL elements into the
// symbols, but the extern declarations above size them at LIST_SIZE —
// presumably the real definitions (elsewhere) use LIST_SIZE_GLOBAL; confirm,
// otherwise these cudaMemcpyFromSymbol calls read past the symbol.
// NOTE(review): the four local buffers total ~320 KB of stack and fopen()
// is not checked for NULL — consider static/heap buffers and a NULL check.
void bambooLogKernelEnd()
{
    // Host staging buffers, one window (LIST_SIZE entries) at a time.
    unsigned long long loadStoreIndex[LIST_SIZE] = {0};
    unsigned long long loadStoreValue[LIST_SIZE] = {0};
    unsigned long long loadStoreIndex_double[LIST_SIZE] = {0};
    double loadStoreValue_double[LIST_SIZE] = {0};
    FILE *profileFile = fopen("profile_mem_val_result.txt", "a");
    // Integer load/store log: copy each window and dump non-zero-indexed
    // entries (a zero index marks the end of valid data in the window).
    for (int j=0; j < LIST_SIZE_GLOBAL; j+=LIST_SIZE)
    {
        cudaMemcpyFromSymbol(&loadStoreIndex, load_store_index, LIST_SIZE * sizeof(unsigned long long), j*sizeof(unsigned long long), cudaMemcpyDeviceToHost);
        cudaMemcpyFromSymbol(&loadStoreValue, load_store_value, LIST_SIZE * sizeof(unsigned long long), j*sizeof(unsigned long long), cudaMemcpyDeviceToHost);
        for(long long i=0; i < LIST_SIZE && loadStoreIndex[i] != 0; i++)
        {
            fprintf(profileFile, "%lld %lld\n", loadStoreIndex[i], loadStoreValue[i]);
        }
    }
    // Double-precision log: same windowed copy, values printed with full
    // 40-digit precision.
    for (int j=0; j < LIST_SIZE_GLOBAL; j+=LIST_SIZE)
    {
        cudaMemcpyFromSymbol(&loadStoreIndex_double, load_store_double_index, LIST_SIZE * sizeof(unsigned long long), j*sizeof(unsigned long long), cudaMemcpyDeviceToHost);
        cudaMemcpyFromSymbol(&loadStoreValue_double, load_store_double, LIST_SIZE * sizeof(double), j*sizeof(double), cudaMemcpyDeviceToHost);
        for(long long i=0; i < LIST_SIZE && loadStoreIndex_double[i] != 0; i++)
        {
            fprintf(profileFile, "%lld %.40f\n", loadStoreIndex_double[i], loadStoreValue_double[i]);
        }
    }
    fclose(profileFile);
}
|
10,929 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define N 60
/* El siguiente es un kernel CUDA que se ejecuta en la GPU.
* Todas las hebras ejecutan esta funcion en paralelo. */
/* CUDA kernel run by every thread in parallel: each thread writes its own
 * flattened global index into its slot of a[]. */
__global__ void consecutivos(float a[]) {
    int hebra = threadIdx.x;          /* thread index within its block */
    int bloque = blockIdx.x;          /* block index within the grid */
    int ancho = blockDim.x;           /* threads per block */
    int pos = bloque * ancho + hebra; /* global element owned by this thread */
    a[pos] = pos;
}
int main() {
    float *a_gpu;
    float *a_cpu;
    /* Allocate N-element arrays on the GPU and on the host. */
    cudaMalloc((void **) &a_gpu, N * sizeof(float));
    a_cpu = (float *) malloc(N * sizeof(float));
    /* Launch the kernel with 4 blocks of 15 threads (4 * 15 == N). */
    consecutivos<<<4, 15>>>(a_gpu);
    /* Copy the array back from the GPU to the CPU (blocking, so it also
     * synchronizes with the kernel). */
    cudaMemcpy(a_cpu, a_gpu, N * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; ++i)
        printf("%.1f\n", a_cpu[i]);
    /* Release both allocations. */
    free(a_cpu);
    cudaFree(a_gpu);
    return 0;
}
|
10,930 |
#include <type_traits>
// Minimal C++11 feature probe: compiles only when <type_traits> and
// alias/constant expressions are available; always exits with 0.
int main(int argc, char** argv)
{
    return std::integral_constant<int, 0>::value;
}
|
10,931 | #define CONSTANT_MEMORY_SIZE 100
__constant__ float constantMemoryData[CONSTANT_MEMORY_SIZE];
// Copies the __constant__ buffer into `array`, one element per thread.
// Guarded against both the destination length and the fixed capacity of
// the constant buffer.
extern "C"
__global__ void constantMemoryKernel(float* array, int size)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size || i >= CONSTANT_MEMORY_SIZE)
        return;
    array[i] = constantMemoryData[i];
}
|
10,932 | //
// Created by klaus on 09.07.21.
//
#include "ImmediateFunctionsThrust.cuh"
#include "StreamFunctionHelper.cuh"
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
enum ImmOp {
ADD,
MUL,
DIVI,
DIVII,
SUBI,
SUBII,
MODI,
MODII
};
// Functor applying one immediate-operand integer operation per element.
// `imm` is stored as size_t but arithmetic is performed on int (the
// stream's element type), matching the original behavior.
struct immFunctor {
    size_t imm;
    ImmOp op;
    immFunctor(size_t _imm, ImmOp _op) {
        imm = _imm;
        op = _op;
    }
    __host__ __device__
    int operator()(int input) const {
        switch (op) {
            case ADD:
                return input + imm;
            case MUL:
                return input * imm;
            case SUBI:
                return input - imm;
            case SUBII:
                return imm - input;
            case DIVI:
                return input / imm;
            case DIVII:
                return imm / input;
            case MODI:
                return input % imm;
            case MODII:
                return imm % input;
        }
        // Fix: the original fell off the end of a non-void function for an
        // out-of-range op (undefined behavior); pass the value through.
        return input;
    }
};
// Applies `op` with immediate `imm` to every value of `input` past its
// offset, returning a new stream with copied timestamps.
// Bug fix: the transform output pointer previously aliased
// input->device_values, overwriting the INPUT stream and leaving the
// result's value buffer uninitialized; it now writes into the result.
shared_ptr<GPUIntStream> exec_imm_op(shared_ptr<GPUIntStream> input, size_t imm, ImmOp op) {
    // prepare result
    shared_ptr<GPUIntStream> result = make_shared<GPUIntStream>();
    cudaMalloc((void**) &result->device_timestamp, input->size*sizeof(int));
    cudaMalloc((void**) &result->device_values, input->size*sizeof(int));
    // NOTE(review): the offset pointer is shared with the input stream, so
    // both streams alias the same device offset word — confirm intended.
    result->device_offset = input->device_offset;
    // copy timestamps
    auto input_ts = thrust::device_pointer_cast(input->device_timestamp);
    auto result_ts = thrust::device_pointer_cast(result->device_timestamp);
    thrust::copy_n(input_ts, input->size, result_ts);
    // transform values starting at the stream offset (*offset performs a
    // device->host read through the thrust device_ptr)
    auto offset = thrust::device_pointer_cast(input->device_offset);
    auto input_vals_start = thrust::device_pointer_cast(input->device_values + *offset);
    auto input_vals_end = thrust::device_pointer_cast(input->device_values + input->size);
    auto result_vals = thrust::device_pointer_cast(result->device_values + *offset);
    immFunctor f(imm, op);
    thrust::transform(input_vals_start, input_vals_end, result_vals, f);
    return result;
}
// Thin public wrappers: each binds one ImmOp and forwards to exec_imm_op.
std::shared_ptr<GPUIntStream> add_imm_thrust(shared_ptr<GPUIntStream> input, size_t imm) {
return exec_imm_op(input, imm, ADD);
}
shared_ptr<GPUIntStream> mul_imm_thrust (shared_ptr<GPUIntStream> input, size_t imm) {
return exec_imm_op(input, imm, MUL);
}
// Subtract immediate: value - imm.
shared_ptr<GPUIntStream> sub_imm_thrust (shared_ptr<GPUIntStream> input, size_t imm) {
return exec_imm_op(input, imm, SUBI);
}
// Reverse subtract: imm - value.
shared_ptr<GPUIntStream> sub_inv_imm_thrust (shared_ptr<GPUIntStream> input, size_t imm) {
return exec_imm_op(input, imm, SUBII);
}
// Divide by immediate: value / imm.
shared_ptr<GPUIntStream> div_imm_thrust (shared_ptr<GPUIntStream> input, size_t imm) {
return exec_imm_op(input, imm, DIVI);
}
// Reverse divide: imm / value.
shared_ptr<GPUIntStream> div_inv_imm_thrust (shared_ptr<GPUIntStream> input, size_t imm){
return exec_imm_op(input, imm, DIVII);
}
// Modulo by immediate: value % imm.
shared_ptr<GPUIntStream> mod_imm_thrust (shared_ptr<GPUIntStream> input, size_t imm){
return exec_imm_op(input, imm, MODI);
}
// Reverse modulo: imm % value.
shared_ptr<GPUIntStream> mod_inv_imm_thrust (shared_ptr<GPUIntStream> input, size_t imm){
return exec_imm_op(input, imm, MODII);
}
|
10,933 | typedef unsigned uint32_t;
extern "C" {
// NOTE(review): despite the "dot" name this computes an element-wise SUM,
// c[ix] = a[ix] + b[ix], not a dot product — confirm the name is intended.
__global__ void my_dot( float const * const a, float const * const b, float * const c, uint32_t const n ) {
uint32_t const ix = blockDim.x * blockIdx.x + threadIdx.x;
if( ix < n ) { c[ix] = a[ix] + b[ix]; }
}
// Length wrapper used to exercise pass-by-value struct kernel arguments.
struct n_t {
uint32_t n;
};
// Same element-wise sum, but the length arrives wrapped in a struct.
__global__ void my_dot_struct( float const * const a, float const * const b, float * const c, struct n_t const n ) {
uint32_t const ix = blockDim.x * blockIdx.x + threadIdx.x;
if( ix < n.n ) { c[ix] = a[ix] + b[ix]; }
}
}
|
10,934 | #include "stdio.h"
#include<iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include <math.h>
#define TILE_SIZE 2
// Naive matrix multiply: each thread accumulates one element of d_c.
// Preconditions: d_c must be ZERO-INITIALIZED before launch (the kernel
// accumulates with "+="), and the grid must exactly cover a size x size
// matrix (no bounds guard; size divisible by TILE_SIZE).
__global__ void gpu_Matrix_Mul_nonshared(float *d_a, float *d_b, float *d_c, const int size)
{
int row, col;
col = TILE_SIZE * blockIdx.x + threadIdx.x;
row = TILE_SIZE * blockIdx.y + threadIdx.y;
for(int k = 0; k < size; k++){
d_c[row * size + col] += d_a[row * size + k] * d_b[k * size + col];
}
}
// Tiled matrix multiply using shared memory. Each TILE_SIZE x TILE_SIZE
// block walks the tile strip of A's rows and B's columns, staging each
// tile in shared memory between barriers.
// Preconditions: size is a multiple of TILE_SIZE and d_c is
// zero-initialized (the kernel accumulates with "+=").
// Bug fix: the B tile was loaded with threadIdx.x as the row offset
// (d_b[(i*TILE_SIZE + threadIdx.x) * size + col]), which made every row of
// share_b identical; the row offset must be threadIdx.y.
__global__ void gpu_Matrix_Mul_shared(float *d_a, float *d_b, float *d_c, const int size)
{
    int row, col;
    col = TILE_SIZE * blockIdx.x + threadIdx.x;
    row = TILE_SIZE * blockIdx.y + threadIdx.y;
    __shared__ float share_a[TILE_SIZE][TILE_SIZE];
    __shared__ float share_b[TILE_SIZE][TILE_SIZE];
    for(int i = 0; i < size/TILE_SIZE; i++){
        share_a[threadIdx.y][threadIdx.x] = d_a[row * size + (i*TILE_SIZE + threadIdx.x)];
        share_b[threadIdx.y][threadIdx.x] = d_b[(i*TILE_SIZE + threadIdx.y) * size + col];
        __syncthreads();  // tile fully staged before use
        for(int j = 0; j < TILE_SIZE; j++){
            d_c[row * size + col] += share_a[threadIdx.y][j] * share_b[j][threadIdx.x];
        }
        __syncthreads();  // tile fully consumed before overwrite
    }
}
// Host driver: builds two 4x4 matrices, multiplies them on the GPU with
// the shared-memory kernel, and prints the result.
// Bug fixes: allocations/copies used sizeof(int) for float data, and the
// output buffer was never zeroed although the kernels accumulate with
// "+=" — it is now cleared with cudaMemset before the launch.
int main(void){
    const int size = 4;
    float h_a[size][size];
    float h_b[size][size];
    float h_result[size][size];
    float *d_a, *d_b, *d_result;
    // Init inputs: A[i][j] = i, B[i][j] = j.
    for(int i=0; i < size; i++){
        for(int j=0; j<size;j++){
            h_a[i][j] = i;
            h_b[i][j] = j;
        }
    }
    cudaMalloc((void **)&d_a, size * size * sizeof(float));
    cudaMalloc((void **)&d_b, size * size * sizeof(float));
    cudaMalloc((void **)&d_result, size * size * sizeof(float));
    cudaMemcpy(d_a, h_a, size * size * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, size * size * sizeof(float), cudaMemcpyHostToDevice);
    // Kernels accumulate into d_result, so it must start at zero.
    cudaMemset(d_result, 0, size * size * sizeof(float));
    dim3 dimGrid(size/TILE_SIZE, size/TILE_SIZE, 1);
    dim3 dimBlock(TILE_SIZE, TILE_SIZE, 1);
    gpu_Matrix_Mul_shared<<<dimGrid, dimBlock>>> (d_a, d_b, d_result,size);
    // Blocking copy: also synchronizes with the kernel.
    cudaMemcpy(h_result, d_result, size*size*sizeof(float), cudaMemcpyDeviceToHost);
    printf("The result of Matrix multiplication is: \n");
    for (int i = 0; i< size; i++)
    {
        for (int j = 0; j < size; j++)
        {
            printf("%f ", h_result[i][j]);
        }
        printf("\n");
    }
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_result);
    return 0;
}
10,935 | #include "includes.h"
// Dot-product kernel: each in-range thread adds vec1[i]*vec2[i] into *res
// via atomicAdd; out-of-range threads add 0 (harmless).
// Precondition: *res must be zeroed before launch.
// NOTE(review): one atomic per thread serializes under contention — a
// block-level reduction before the atomic would scale better.
__global__ void gpu_dotp_kernel(int size, float* vec1, float* vec2, float* res){
float cache = 0;
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i < size ){
cache = vec1[i]*vec2[i];
}
atomicAdd(res, cache);
}
10,936 | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <assert.h>
#include <iostream>
// CUDA kernel for vector addition
// Square matrix multiply: thread (row, col) computes c[row][col] for
// n x n row-major integer matrices.
__global__ void MatrixMul(int* a, int* b, int* c, int n) {
    int row = (blockIdx.y * blockDim.y) + threadIdx.y;
    int col = (blockIdx.x * blockDim.x) + threadIdx.x;
    // Boundary guard: skip threads outside the matrix.
    if (row >= n || col >= n)
        return;
    int acc = 0;
    for (int k = 0; k < n; k++)
        acc += a[row * n + k] * b[k * n + col];
    c[row * n + col] = acc;
}
// Initialize
// Fill an n x n matrix with pseudo-random values in [0, 100).
// Single flat loop visits elements in the same row-major order (and hence
// the same rand() sequence) as the original nested loops.
void Mat_init(int* a, int n) {
    for (int idx = 0; idx < n * n; idx++) {
        a[idx] = rand() % 100;
    }
}
// Check MatrixMul add result
// Verify GPU result c against a CPU reference product of a and b (n x n,
// row-major). Asserts on the first mismatch.
// Bug fixes: the reference buffer was malloc'd (uninitialized) yet
// accumulated with "+=" — now zero-initialized via calloc — and it was
// never freed (memory leak).
void check_answer(int* a, int* b, int* c, int n) {
    int* result = (int*)calloc((size_t)n * n, sizeof(int));
    for (int i = 0; i < n; i++)
    {
        for (int j = 0; j < n; j++)
        {
            for (int k = 0; k < n; k++)
            {
                result[i * n + j] += a[i * n + k] * b[k * n + j];
            }
        }
    }
    for (int i = 0; i < n; i++)
    {
        for (int j = 0; j < n; j++)
        {
            assert(c[i * n + j] == result[i * n + j]);
        }
    }
    free(result);
}
// Host driver: 1024x1024 integer matrix multiply on the GPU, verified
// against a CPU reference.
// Bug fix: GRID was computed as (int)ceil(n / BLOCKS) — integer division
// truncates BEFORE ceil(), under-covering the matrix whenever n is not a
// multiple of BLOCKS; replaced with integer ceil-division.
int main() {
    // matrix of size 1024 x 1024
    int n = 1 << 10;
    // host memory pointers
    int* h_a, * h_b, * h_c;
    // Allocation size for all matrices.
    size_t bytes = sizeof(int) * n * n;
    h_a = (int*)malloc(bytes);
    h_b = (int*)malloc(bytes);
    h_c = (int*)malloc(bytes);
    // device memory pointers
    int* d_a, * d_b, * d_c;
    cudaMalloc(&d_a, bytes);
    cudaMalloc(&d_b, bytes);
    cudaMalloc(&d_c, bytes);
    // Initialize inputs with random values between 0 and 99.
    Mat_init(h_a, n);
    Mat_init(h_b, n);
    cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);
    // Threadblock edge (16 x 16 threads per block).
    int BLOCKS = 16;
    // Grid edge: integer ceil-division covers the whole matrix.
    int GRID = (n + BLOCKS - 1) / BLOCKS;
    dim3 grid(GRID, GRID);
    dim3 threads(BLOCKS, BLOCKS);
    MatrixMul <<<grid, threads >>> (d_a, d_b, d_c, n);
    // Blocking copy back to host (also synchronizes with the kernel).
    cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);
    // Check result for errors.
    check_answer(h_a, h_b, h_c, n);
    free(h_a);
    free(h_b);
    free(h_c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    printf("COMPLETED SUCCESFULLY\n");
    return 0;
}
10,937 | #include "includes.h"
// Writes `value` into `count` consecutive floats starting at buffer+offset.
// Thread id is flattened over a 2D grid of 1D blocks.
// NOTE(review): the flattening assumes 1D blocks (blockDim.y == 1) — the
// row term is blockDim.x*blockIdx.y*gridDim.x; confirm the launch config.
__global__ void SetKernel(float *buffer, int offset, float value, int count)
{
int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid
+ blockDim.x*blockIdx.x //blocks preceeding current block
+ threadIdx.x;
if(threadId < count)
{
(buffer + offset)[threadId] = value;
}
}
10,938 | #include "includes.h"
// Accumulates src onto dest: dest[i] += src[i % blockDim.x].
// NOTE(review): src is indexed by threadIdx.x only, so the same
// blockDim.x-sized src block is broadcast across all grid blocks (a
// bias-add pattern); src must hold at least blockDim.x elements — confirm.
__global__ void dense_add(size_t sz, float_t* src, float_t* dest)
{
size_t srcIndex = threadIdx.x;
size_t destIndex = blockIdx.x*blockDim.x + threadIdx.x;
if(destIndex < sz)
{
dest[destIndex] += src[srcIndex];
}
}
10,939 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <ctime>
// Element-wise vector add with a tail guard; one element per thread.
__global__ void sumArraysZeroCopyUVA(float* A, float* B, float* C, int N)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i >= N)
        return;
    C[i] = A[i] + B[i];
}
// CPU reference: C[i] = A[i] + B[i] for all i in [0, N).
void sumArraysOnHost(float* A, float* B, float* C, int N)
{
    int i = 0;
    while (i < N)
    {
        C[i] = A[i] + B[i];
        ++i;
    }
}
// Fills ip with pseudo-random values in [0.0, 25.5] (low byte of rand(),
// divided by 10).
// NOTE(review): srand() is re-seeded from time() on EVERY call, so two
// calls within the same second produce identical arrays — consider
// seeding once in main instead.
void initialData(float* ip, int size)
{
time_t t;
srand((unsigned int)time(&t));
for (int i = 0; i<size; ++i)
{
ip[i] = (float)(rand() & 0xFF) / 10.0f;
}
}
// Compares host and device results element-wise within a tight epsilon and
// reports the first mismatch (or a match message).
void checkResult(float* hostResult, float* deviceResult, const int N)
{
    const double epsilon = 1.0E-8;
    int allEqual = 1;
    for (int idx = 0; idx < N; ++idx)
    {
        if (abs(hostResult[idx] - deviceResult[idx]) > epsilon)
        {
            allEqual = 0;
            printf("Array do not match\n");
            printf("host %5.2f gpu %5.2f at current %d\n", hostResult[idx], deviceResult[idx], idx);
            break;
        }
    }
    if (allEqual)
        printf("Array match\n");
    return;
}
// Zero-copy (mapped host memory) vector-add demo.
// Bug fixes: the kernel launch is asynchronous and no blocking cudaMemcpy
// follows (the output is read directly from mapped memory), so a
// cudaDeviceSynchronize() is required before checkResult; the unused
// gpuRef pointer was removed.
int main(int argc, char* argv[])
{
    int dev = 0;
    cudaSetDevice(dev);
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, dev);
    if (deviceProp.canMapHostMemory == false)
    {
        printf("Device %d dose not support mapping CPU host memory!\n", dev);
        cudaDeviceReset();
        return -1;
    }
    printf("Using Device %d, %s\n", dev, deviceProp.name);
    // Problem size: 2^iPower elements (overridable from argv[1]).
    int iPower = 10;
    if (argc > 1)
    {
        iPower = atoi(argv[1]);
    }
    int nElem = 1 << iPower;
    size_t nBytes = nElem * sizeof(float);
    // Mapped (zero-copy) host allocations, usable directly by the kernel.
    float* h_a;
    float* h_b;
    float* d_c;
    float* hostRef;
    unsigned int flags = cudaHostAllocMapped;
    cudaHostAlloc((void**)&h_a, nBytes, flags);
    cudaHostAlloc((void**)&h_b, nBytes, flags);
    cudaHostAlloc((void**)&d_c, nBytes, flags);
    hostRef = (float*)malloc(nBytes);
    initialData(h_a, nElem);
    initialData(h_b, nElem);
    memset(hostRef, 0, nBytes);
    sumArraysOnHost(h_a, h_b, hostRef, nElem);
    int nLen = 32;
    dim3 block(nLen);
    dim3 grid((nElem + block.x - 1) / block.x);
    sumArraysZeroCopyUVA<<<grid, block >>>(h_a, h_b, d_c, nElem);
    // Wait for the kernel before reading the mapped output on the host.
    cudaDeviceSynchronize();
    checkResult(hostRef, d_c, nElem);
    cudaFreeHost(d_c);
    cudaFreeHost(h_a);
    cudaFreeHost(h_b);
    free(hostRef);
    cudaDeviceReset();
    system("Pause");
    return 0;
}
|
10,940 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#define N 4;
//cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
//__global__ void addKernel(int *c, const int *a, const int *b)
//{
// int i = threadIdx.x;
// c[i] = a[i] + b[i];
//}
// Matrix-vector product c += A * b for a size x size row-major matrix;
// one thread per output row. Launch with at least `size` threads.
__global__ void addKernel(double *a, double *b, double *c,int size)
{
    int row = threadIdx.x;
    if (row >= size)
        return;
    for (int k = 0; k < size; k++)
    {
        c[row] += a[row * size + k] * b[k];
    }
}
// CPU reference dense matrix-vector product: C = A * B, where A is a
// size x size row-major matrix and B is a size-element vector.
void simple_dgemv(double *A, double *B, double *C,int size)
{
    for (int row = 0; row < size; ++row)
    {
        double acc = 0;
        for (int col = 0; col < size; ++col)
        {
            acc += A[row * size + col] * B[col];
        }
        C[row] = acc;
    }
}
//__global__ void MVKernel_gm(double *A, double *X, double *Y,int ARRAY_SIZE)
//{
// //int bx = blockIdx.x;
// //int by = blockIdx.y;
// int tid = threadIdx.x;
// //int ty = threadIdx.y;
// // Calculate the row index of the Pd element and M
// //int Row = bx * BLOCK_SIZE + tx;
// // Calculate the column idenx of Pd and N
// //int Col = bx * BLOCK_SIZE + tx;
//
// double tmpSum = 0;
//
// for (int k = 0; k < ARRAY_SIZE; k++)
// {
// if(tid < ARRAY_SIZE)
// tmpSum += A[tid*ARRAY_SIZE+k] * X[k];
// }
//
// __syncthreads();
//
// if(tid < ARRAY_SIZE)
// Y[tid] = tmpSum;
//
// __syncthreads();
//}
// Host driver: builds a 5x5 matrix A (A[i]=i row-major) and vector x
// (x[i]=i), computes y = A*x on the GPU with addKernel, and prints y.
// Cleanup: removed the unused locals a/b/c/arraySize and the large blocks
// of commented-out scaffolding; runtime behavior (output) is unchanged.
int main()
{
    int ARRAY_SIZE = 5;
    int ARRAY_SIZE2 = ARRAY_SIZE*ARRAY_SIZE;
    // Host buffers
    double *h_a;
    double *h_b;
    double *h_c;
    // Device buffers
    double *d_a;
    double *d_b;
    double *d_c;
    // Allocate host arrays.
    h_a=(double*)malloc(sizeof(double)*ARRAY_SIZE2);
    h_b=(double*)malloc(sizeof(double)*ARRAY_SIZE);
    h_c=(double*)malloc(sizeof(double)*ARRAY_SIZE);
    // Initialize A[i] = i, x[i] = i, y = 0.
    for(int i = 0;i<ARRAY_SIZE2;i++){
        h_a[i] = double(i);
    }
    for(int i = 0;i<ARRAY_SIZE;i++){
        h_b[i] = double(i);
    }
    for(int i = 0;i<ARRAY_SIZE;i++){
        h_c[i] = double(0);
    }
    // Allocate GPU memory.
    cudaMalloc((void**)&d_a, sizeof(double)*ARRAY_SIZE2);
    cudaMalloc((void**)&d_b, sizeof(double)*ARRAY_SIZE);
    cudaMalloc((void**)&d_c, sizeof(double)*ARRAY_SIZE);
    // Transfer inputs (and the zeroed output, since the kernel uses +=).
    cudaMemcpy(d_a, h_a, sizeof(double)*ARRAY_SIZE2, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, sizeof(double)*ARRAY_SIZE, cudaMemcpyHostToDevice);
    cudaMemcpy(d_c, h_c, sizeof(double)*ARRAY_SIZE, cudaMemcpyHostToDevice);
    // Run the matrix-vector multiplication kernel.
    printf("\n\nRunning Kernel...\n\n");
    addKernel<<<1, ARRAY_SIZE>>>(d_a,d_b,d_c,ARRAY_SIZE);
    // Transfer the result back (blocking; synchronizes with the kernel).
    cudaMemcpy(h_c, d_c, sizeof(double)*ARRAY_SIZE, cudaMemcpyDeviceToHost);
    // Print the result vector.
    for(int i = 0; i<ARRAY_SIZE;i++){
        printf("%f\n", h_c[i]);
    }
    // Free GPU memory.
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    // Free host memory.
    free(h_a);
    free(h_b);
    free(h_c);
    system("pause");
    return 0;
}
// Helper function for using CUDA to add vectors in parallel.
//cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
//{
// int *dev_a = 0;
// int *dev_b = 0;
// int *dev_c = 0;
// cudaError_t cudaStatus;
//
// // Choose which GPU to run on, change this on a multi-GPU system.
// cudaStatus = cudaSetDevice(0);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
// goto Error;
// }
//
// // Allocate GPU buffers for three vectors (two input, one output) .
// cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error;
// }
//
// cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error;
// }
//
// cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error;
// }
//
// // Copy input vectors from host memory to GPU buffers.
// cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMemcpy failed!");
// goto Error;
// }
//
// cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMemcpy failed!");
// goto Error;
// }
//
// // Launch a kernel on the GPU with one thread for each element.
// //addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
//
// // Check for any errors launching the kernel
// cudaStatus = cudaGetLastError();
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
// goto Error;
// }
//
// // cudaDeviceSynchronize waits for the kernel to finish, and returns
// // any errors encountered during the launch.
// cudaStatus = cudaDeviceSynchronize();
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
// goto Error;
// }
//
// // Copy output vector from GPU buffer to host memory.
// cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMemcpy failed!");
// goto Error;
// }
//
//Error:
// cudaFree(dev_c);
// cudaFree(dev_a);
// cudaFree(dev_b);
//
//
//
// return cudaStatus;
//}
|
10,941 |
#define M_PI 3.14159265358979323846 // pi
#define DEG2RAD (M_PI/180.0f)
// Hough-transform voting kernel. For every "white" pixel in `result`
// (r, g, b all > 128), votes for all 180 theta values in the polar
// accumulator `accum` (r is measured from the image center), and writes a
// binarized copy of the pixel into `bw_image`.
// Channel layout is planar: r, g, b, a planes of w*h elements each.
// NOTE(review): `help`, `ind`, `alma`, `korte` are a dead alternative
// indexing scheme left from debugging — the atomicAdd below uses the
// direct index; consider removing them.
__global__ void computeAccum(unsigned char* result, unsigned char* bw_image, unsigned int* accum, int w, int h, int w_accum, int h_accum, double hough_h)
{
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
int tid = y * w + x;
// Plane offsets into the planar RGBA layout.
int _r = w * h * 0;
int _g = w * h * 1;
int _b = w * h * 2;
int _a = w * h * 3;
if (x >= w || y >= h)
return;
double center_x = w / 2;
double center_y = h / 2;
if (result[tid + _r] > 128 && result[tid + _g] > 128 && result[tid + _b] > 128) {
// Vote once per theta in [0, 180) degrees.
for (int t = 0; t < 180; t++) {
double r = (((double)x - center_x) * cos((double)t * DEG2RAD)) + (((double)y - center_y) * sin((double)t * DEG2RAD));
//accum[(int)((round(r + hough_h) * 180.0)) + t]++;
int help = (int)((round(r + hough_h) * 180.0)) + t;
int ind = 0;
int alma = help / h_accum;
int korte = help % h_accum;
ind = alma + (korte * w_accum);
// atomicAdd: many pixels may vote for the same (r, theta) cell.
atomicAdd(&accum[(int)((round(r + hough_h) * 180.0)) + t], 1);
//atomicAdd(&accum[ind], 1);
}
bw_image[tid + _r] = 255;
bw_image[tid + _g] = 255;
bw_image[tid + _b] = 255;
bw_image[tid + _a] = 255;
}
else {
bw_image[tid + _r] = 0;
bw_image[tid + _g] = 0;
bw_image[tid + _b] = 0;
bw_image[tid + _a] = 255;
}
return;
}
|
10,942 | #include <iostream>
#include <stdio.h>
using namespace std;
#define BLOCK_SIZE_1D 1024
#define BLOCK_SIZE_2D 32
/**
* CUDA kernel responsible for multiplying two matrices 'A' and 'B', using the
* naive approach, and storing result in matrix 'Y'
*
* @param use2D Defines whether 2D blocks and grid are used. This only affects
* the way the indices 'i' and 'j' are calculated.
*/
/**
 * Computes Y = A * B for n x n row-major matrices, one thread per output
 * element.
 *
 * @param use2D When true, row/col come from 2D block/grid coordinates;
 *              otherwise a flat 1D index is decomposed into row and col.
 */
__global__ void naiveMMKernel(int n, double *A, double *B, double *Y, bool use2D) {
    int row, col;
    if (use2D) {
        row = threadIdx.y + (blockIdx.y * blockDim.y);
        col = threadIdx.x + (blockIdx.x * blockDim.x);
    } else {
        int flat = threadIdx.x + (blockIdx.x * blockDim.x);
        row = flat / n;
        col = flat % n;
    }
    // Tail guard for grids that over-cover the matrix.
    if (row >= n || col >= n) {
        return;
    }
    double acc = 0;
    for (int k = 0; k < n; k++) {
        acc += A[row * n + k] * B[k * n + col];
    }
    Y[row * n + col] = acc;
}
/**
* Multiplies matrices 'd_A' and 'd_B' using the naive approach and stores the
* result in matrix 'd_Y'
*
* The input matrices have to reference the device memory
*
* @param use2D Defines whether 2D blocks and grid should be used for the
* kernel configuration
*/
/**
 * Launches naiveMMKernel over device matrices d_A, d_B into d_Y, with
 * either a 2D (32x32) or 1D (1024) block configuration, then prints the
 * chosen block/grid dimensions.
 *
 * Fix: cudaThreadSynchronize() is deprecated; replaced with the
 * equivalent cudaDeviceSynchronize().
 */
void naiveMM(int n, double *d_A, double *d_B, double *d_Y, bool use2D) {
    dim3 dimGrid, dimBlock;
    if (use2D) {
        // Total of 1024 threads per block (32 x 32).
        dimBlock = dim3(BLOCK_SIZE_2D, BLOCK_SIZE_2D);
        int dimSize = (n + BLOCK_SIZE_2D - 1) / BLOCK_SIZE_2D;
        dimGrid = dim3(dimSize, dimSize);
    } else {
        dimBlock = dim3(BLOCK_SIZE_1D);
        dimGrid = dim3((n * n + BLOCK_SIZE_1D - 1) / BLOCK_SIZE_1D);
    }
    naiveMMKernel<<<dimGrid, dimBlock>>>(n, d_A, d_B, d_Y, use2D);
    cudaDeviceSynchronize();
    cout << dimBlock.x << "x" << dimBlock.y << "\t\t";
    cout << dimGrid.x << "x" << dimGrid.y << "\t\t";
}
|
10,943 | #include <cstdio>
#include <stdio.h>
#define BUF_SIZ 1000000
// Element-wise sum c[i] = a[i] + b[i]; one element per thread.
__global__ void addKernel(int *c , const int *a, const int *b){
    int idx = threadIdx.x;
    c[idx] = a[idx] + b[idx];
}
// Scales this thread's element of b by 10, in place.
__global__ void GetKernel(int *b)
{
    int idx = threadIdx.x;
    b[idx] *= 10;
}
// Demo: upload a[0..BUF_SIZ), scale the first 3 elements by 10 on the GPU,
// copy everything back and print the first 500 values.
// Bug fix: the two BUF_SIZ-int arrays (~8 MB total) were stack-allocated,
// which can overflow the default stack limit; they now live on the heap.
int main(void)
{
    int *a = (int*)malloc(BUF_SIZ * sizeof(int));
    int *b = (int*)malloc(BUF_SIZ * sizeof(int));
    int *ary1 = 0;
    for(int i=0;i<BUF_SIZ;i++)
    {
        a[i] = i;
    }
    cudaMalloc((void**)&ary1 , BUF_SIZ*sizeof(int));
    cudaMemcpy(ary1, a, BUF_SIZ*sizeof(int),cudaMemcpyHostToDevice);
    printf("addkernel start\n");
    // NOTE(review): only 3 threads are launched, so just b[0..2] get
    // scaled; the remaining elements copy back unchanged — confirm intended.
    GetKernel<<<1,3>>>(ary1);
    printf("addkernel end\n");
    cudaMemcpy(b, ary1 ,BUF_SIZ*sizeof(int),cudaMemcpyDeviceToHost);
    for(int i =0; i < BUF_SIZ; i++)
    {
        if(i/500 ==0)  // equivalent to i < 500: print only the first 500
        {
            printf("%d ",b[i]);
        }
    }
    cudaFree(ary1);
    free(a);
    free(b);
    return 0;
}
|
10,944 | #include <stdio.h>
#include <time.h>
#define SIZE 100
// Element-wise vector add with a tail guard; one element per thread.
__global__ void VectorAdd(int *a, int *b, int *c, int n){
    int idx = threadIdx.x;
    if (idx >= n)
        return;
    c[idx] = a[idx] + b[idx];
}
// Unified-memory vector add: fill a and b, add on the GPU, print the
// first ten results.
int main(){
    int *a, *b, *c;
    // Managed allocations are visible to both host and device.
    cudaMallocManaged(&a, SIZE * sizeof(int));
    cudaMallocManaged(&b, SIZE * sizeof(int));
    cudaMallocManaged(&c, SIZE * sizeof(int));
    for (int i = 0; i < SIZE; i++) {
        a[i] = i;
        b[i] = i;
        c[i] = 0;
    }
    VectorAdd <<<1,SIZE>>> (a,b,c,SIZE);
    // Wait for the kernel before touching managed memory on the host.
    cudaDeviceSynchronize();
    for (int i = 0; i < 10; ++i) {
        printf("c[%d] = %d\n", i, c[i]);
    }
    cudaFree(a);
    cudaFree(b);
    cudaFree(c);
    return 0;
}
10,945 | /**
* Nearest neighbor search
* マップ内に、店、工場などのゾーンがある確率で配備されている時、
* 住宅ゾーンから直近の店、工場までのマンハッタン距離を計算する。
*
* 各店、工場から周辺に再帰的に距離を更新していくので、O(N)で済む。
* しかも、GPUで並列化することで、さらに計算時間を短縮できる。
*/
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include <list>
#include <time.h>
#define CITY_SIZE 200
#define NUM_GPU_BLOCKS 4
#define NUM_GPU_THREADS 128
#define NUM_FEATURES 5
#define QUEUE_SIZE 5000
#define CUDA_CALL(x) {if((x) != cudaSuccess){ \
printf("CUDA error at %s:%d\n",__FILE__,__LINE__); \
printf(" %s\n", cudaGetErrorString(cudaGetLastError())); \
exit(EXIT_FAILURE);}}
// A single city cell: zone category and its development level.
struct ZoneType {
int type;
int level;
};
// The whole-city grid of zone assignments.
struct ZoningPlan {
ZoneType zones[CITY_SIZE][CITY_SIZE];
};
// Per-cell, per-feature Manhattan distance to the nearest feature source.
struct DistanceMap {
int distances[CITY_SIZE][CITY_SIZE][NUM_FEATURES];
};
// Integer 2D point, constructible on both host and device.
struct Point2D {
int x;
int y;
__host__ __device__
Point2D() : x(0), y(0) {}
__host__ __device__
Point2D(int x, int y) : x(x), y(y) {}
};
// 2D point tagged with the feature whose BFS front it belongs to.
struct Point2DAndFeature {
int x;
int y;
int featureId;
__host__ __device__
Point2DAndFeature() : x(0), y(0), featureId(0) {}
__host__ __device__
Point2DAndFeature(int x, int y, int featureId) : x(x), y(y), featureId(featureId) {}
};
// Linear-congruential step: advances *randx and returns its low 31 bits.
__host__ __device__
unsigned int rand(unsigned int* randx) {
    *randx = *randx * 1103515245u + 12345u;
    return *randx & 2147483647u;
}
// Uniform float in [0, 1), driven by the LCG state in *randx.
__host__ __device__
float randf(unsigned int* randx) {
    const float kRange = float(2147483647) + 1;
    return rand(randx) / kRange;
}
// Uniform float in [a, b), driven by the LCG state in *randx.
__host__ __device__
float randf(unsigned int* randx, float a, float b) {
    return a + (b - a) * randf(randx);
}
// Inverse-transform sampling: draws an index from a cumulative
// distribution (cdf need not be normalized; cdf[num-1] is the total mass).
__host__ __device__
int sampleFromCdf(unsigned int* randx, float* cdf, int num) {
    float threshold = randf(randx, 0, cdf[num-1]);
    int i = 0;
    while (i < num) {
        if (threshold <= cdf[i]) return i;
        ++i;
    }
    // Floating-point edge case: fall back to the last bucket.
    return num - 1;
}
// Draws an index with probability proportional to the non-negative
// entries of pdf (negative entries contribute zero mass).
// NOTE(review): uses a fixed cdf[40] scratch buffer, so callers must pass
// num <= 40 — confirm upstream call sites.
__host__ __device__
int sampleFromPdf(unsigned int* randx, float* pdf, int num) {
    if (num == 0) return 0;
    float cdf[40];
    cdf[0] = pdf[0];
    for (int i = 1; i < num; ++i) {
        cdf[i] = (pdf[i] >= 0) ? (cdf[i - 1] + pdf[i]) : cdf[i - 1];
    }
    return sampleFromCdf(randx, cdf, num);
}
/**
* ゾーンプランを生成する。
*/
// Generates a zoning plan: each cell's type is sampled with probability
// proportional to that type's remaining quota, where the initial quota is
// CITY_SIZE^2 * zoneTypeDistribution[type].
__host__
void generateZoningPlan(ZoningPlan& zoningPlan, std::vector<float> zoneTypeDistribution) {
std::vector<float> numRemainings(zoneTypeDistribution.size());
for (int i = 0; i < zoneTypeDistribution.size(); ++i) {
numRemainings[i] = CITY_SIZE * CITY_SIZE * zoneTypeDistribution[i];
}
// Fixed seed (0): the generated plan is deterministic across runs.
unsigned int randx = 0;
for (int r = 0; r < CITY_SIZE; ++r) {
for (int c = 0; c < CITY_SIZE; ++c) {
// Sample a type weighted by remaining quota, then consume one unit.
int type = sampleFromPdf(&randx, numRemainings.data(), numRemainings.size());
zoningPlan.zones[r][c].type = type;
numRemainings[type] -= 1;
}
}
}
// Initializes the per-feature distance map and seeds the per-thread BFS
// queues. Each thread scans a stride-sized slice of the city: cells whose
// zone type matches a feature get distance 0 and are pushed onto that
// thread's queue segment; all other cells start at the 9999 sentinel.
__global__
void initDistance(ZoningPlan* zoningPlan, DistanceMap* distanceMap, Point2DAndFeature* queue, int* queueEnd) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
queueEnd[idx] = 0;
int stride = ceilf((float)(CITY_SIZE * CITY_SIZE) / NUM_GPU_BLOCKS / NUM_GPU_THREADS);
// Scan this thread's slice of cells for feature sources
// (translated from Japanese: "search for stores in the partition").
for (int i = 0; i < stride; ++i) {
int r = (idx * stride + i) / CITY_SIZE;
int c = (idx * stride + i) % CITY_SIZE;
if (r >= CITY_SIZE) continue;
for (int feature_id = 0; feature_id < NUM_FEATURES; ++feature_id) {
// Zone type t provides feature t-1 (type 0 matches no feature).
if (zoningPlan->zones[r][c].type - 1 == feature_id) {
queue[idx * QUEUE_SIZE + queueEnd[idx]++] = Point2DAndFeature(c, r, feature_id);
distanceMap->distances[r][c][feature_id] = 0;
} else {
distanceMap->distances[r][c][feature_id] = 9999;
}
}
}
}
/**
* 直近の店までの距離を計算する(マルチスレッド版)
*/
// GPU BFS (translated: "compute distance to nearest store, multithreaded"):
// each thread drains its own queue segment, relaxing the four grid
// neighbors of every popped cell with atomicMin and re-queueing cells
// whose distance improved.
// NOTE(review): both queue_begin and queueEnd wrap at QUEUE_SIZE with no
// overflow detection, and improvements discovered via atomicMin are only
// re-queued locally — correctness relies on QUEUE_SIZE being large enough
// for the workload; the host code cross-checks against the CPU version.
__global__
void computeDistanceToStore(ZoningPlan* zoningPlan, DistanceMap* distanceMap, Point2DAndFeature* queue, int* queueEnd) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int queue_begin = 0;
// Grow the distance map until this thread's queue is empty.
while (queue_begin < queueEnd[idx]) {
Point2DAndFeature pt = queue[idx * QUEUE_SIZE + queue_begin++];
if (queue_begin >= QUEUE_SIZE) queue_begin = 0;
int d = distanceMap->distances[pt.y][pt.x][pt.featureId];
if (pt.y > 0) {
int old = atomicMin(&distanceMap->distances[pt.y-1][pt.x][pt.featureId], d + 1);
if (old > d + 1) {
queue[idx * QUEUE_SIZE + queueEnd[idx]++] = Point2DAndFeature(pt.x, pt.y-1, pt.featureId);
if (queueEnd[idx] >= QUEUE_SIZE) queueEnd[idx] = 0;
}
}
if (pt.y < CITY_SIZE - 1) {
int old = atomicMin(&distanceMap->distances[pt.y+1][pt.x][pt.featureId], d + 1);
if (old > d + 1) {
queue[idx * QUEUE_SIZE + queueEnd[idx]++] = Point2DAndFeature(pt.x, pt.y+1, pt.featureId);
if (queueEnd[idx] >= QUEUE_SIZE) queueEnd[idx] = 0;
}
}
if (pt.x > 0) {
int old = atomicMin(&distanceMap->distances[pt.y][pt.x-1][pt.featureId], d + 1);
if (old > d + 1) {
queue[idx * QUEUE_SIZE + queueEnd[idx]++] = Point2DAndFeature(pt.x-1, pt.y, pt.featureId);
if (queueEnd[idx] >= QUEUE_SIZE) queueEnd[idx] = 0;
}
}
if (pt.x < CITY_SIZE - 1) {
int old = atomicMin(&distanceMap->distances[pt.y][pt.x+1][pt.featureId], d + 1);
if (old > d + 1) {
queue[idx * QUEUE_SIZE + queueEnd[idx]++] = Point2DAndFeature(pt.x+1, pt.y, pt.featureId);
if (queueEnd[idx] >= QUEUE_SIZE) queueEnd[idx] = 0;
}
}
}
}
/**
* 直近の店までの距離を計算する(CPU版)
*/
// CPU reference (translated: "compute distance to nearest store, CPU"):
// multi-source BFS over the 4-connected grid computing, for every cell and
// every feature, the Manhattan distance to the nearest cell whose zone
// type provides that feature (type t => feature t-1). Unreached cells keep
// the 9999 sentinel.
__host__
void computeDistanceToStoreCPU(ZoningPlan* zoningPLan, DistanceMap* distanceMap) {
std::list<Point2DAndFeature> queue;
// Seed: feature cells get distance 0, everything else "infinity".
for (int feature_id = 0; feature_id < NUM_FEATURES; ++feature_id) {
for (int cell_id = 0; cell_id < CITY_SIZE * CITY_SIZE; ++cell_id) {
int r = cell_id / CITY_SIZE;
int c = cell_id % CITY_SIZE;
if (zoningPLan->zones[r][c].type - 1== feature_id) {
queue.push_back(Point2DAndFeature(c, r, feature_id));
distanceMap->distances[r][c][feature_id] = 0;
} else {
distanceMap->distances[r][c][feature_id] = 9999;
}
}
}
// Relax the four neighbors of each queued cell.
while (!queue.empty()) {
Point2DAndFeature pt = queue.front();
queue.pop_front();
int d = distanceMap->distances[pt.y][pt.x][pt.featureId];
if (pt.y > 0) {
if (distanceMap->distances[pt.y-1][pt.x][pt.featureId] > d + 1) {
distanceMap->distances[pt.y-1][pt.x][pt.featureId] = d + 1;
queue.push_back(Point2DAndFeature(pt.x, pt.y-1, pt.featureId));
}
}
if (pt.y < CITY_SIZE - 1) {
if (distanceMap->distances[pt.y+1][pt.x][pt.featureId] > d + 1) {
distanceMap->distances[pt.y+1][pt.x][pt.featureId] = d + 1;
queue.push_back(Point2DAndFeature(pt.x, pt.y+1, pt.featureId));
}
}
if (pt.x > 0) {
if (distanceMap->distances[pt.y][pt.x-1][pt.featureId] > d + 1) {
distanceMap->distances[pt.y][pt.x-1][pt.featureId] = d + 1;
queue.push_back(Point2DAndFeature(pt.x-1, pt.y, pt.featureId));
}
}
if (pt.x < CITY_SIZE - 1) {
if (distanceMap->distances[pt.y][pt.x+1][pt.featureId] > d + 1) {
distanceMap->distances[pt.y][pt.x+1][pt.featureId] = d + 1;
queue.push_back(Point2DAndFeature(pt.x+1, pt.y, pt.featureId));
}
}
}
}
// Benchmark driver: generates a zoning plan, computes the nearest-feature
// distance maps with both the CPU BFS and the GPU kernels (1000 timed
// iterations each), then cross-checks the two results.
int main()
{
time_t start, end;
ZoningPlan* hostZoningPlan = (ZoningPlan*)malloc(sizeof(ZoningPlan));
DistanceMap* hostDistanceMap = (DistanceMap*)malloc(sizeof(DistanceMap));
DistanceMap* hostDistanceMap2 = (DistanceMap*)malloc(sizeof(DistanceMap));
// Initialize distances (translated).
// NOTE(review): memset fills each BYTE with 9999 & 0xFF = 0x0F, not the
// value 9999; harmless here only because both maps are fully rewritten by
// the init routines before being read.
memset(hostDistanceMap, 9999, sizeof(DistanceMap));
memset(hostDistanceMap2, 9999, sizeof(DistanceMap));
std::vector<float> zoneTypeDistribution(6);
zoneTypeDistribution[0] = 0.5f;
zoneTypeDistribution[1] = 0.2f;
zoneTypeDistribution[2] = 0.1f;
zoneTypeDistribution[3] = 0.1f;
zoneTypeDistribution[4] = 0.05f;
zoneTypeDistribution[5] = 0.05f;
// Generate the initial zoning plan (translated).
start = clock();
generateZoningPlan(*hostZoningPlan, zoneTypeDistribution);
end = clock();
printf("generateZoningPlan: %lf\n", (double)(end-start)/CLOCKS_PER_SEC);
// Debug dump of the plan (small cities only; translated).
if (CITY_SIZE <= 100) {
for (int r = CITY_SIZE - 1; r >= 0; --r) {
for (int c = 0; c < CITY_SIZE; ++c) {
printf("%d, ", hostZoningPlan->zones[r][c].type);
}
printf("\n");
}
printf("\n");
}
// Copy the initial plan to a device buffer (translated).
ZoningPlan* devZoningPlan;
CUDA_CALL(cudaMalloc((void**)&devZoningPlan, sizeof(ZoningPlan)));
CUDA_CALL(cudaMemcpy(devZoningPlan, hostZoningPlan, sizeof(ZoningPlan), cudaMemcpyHostToDevice));
// Device buffer for the distance map (translated).
DistanceMap* devDistanceMap;
CUDA_CALL(cudaMalloc((void**)&devDistanceMap, sizeof(DistanceMap)));
// Device buffers for the per-thread BFS queues (translated).
Point2DAndFeature* devQueue;
CUDA_CALL(cudaMalloc((void**)&devQueue, sizeof(Point2DAndFeature) * QUEUE_SIZE * NUM_GPU_BLOCKS * NUM_GPU_THREADS));
int* devQueueEnd;
CUDA_CALL(cudaMalloc((void**)&devQueueEnd, sizeof(int) * NUM_GPU_BLOCKS * NUM_GPU_THREADS));
///////////////////////////////////////////////////////////////////////
// CPU version: nearest-store distances, timed over 1000 iterations.
start = clock();
for (int iter = 0; iter < 1000; ++iter) {
computeDistanceToStoreCPU(hostZoningPlan, hostDistanceMap2);
}
end = clock();
printf("computeDistanceToStore CPU: %lf\n", (double)(end-start)/CLOCKS_PER_SEC);
///////////////////////////////////////////////////////////////////////
// GPU version: nearest-store distances, timed over 1000 iterations.
// NOTE(review): clock() timing is valid only because of the
// cudaDeviceSynchronize() calls; cudaEvent timing would be more precise.
float elapsed1 = 0.0f;
float elapsed2 = 0.0f;
for (int iter = 0; iter < 1000; ++iter) {
start = clock();
initDistance<<<NUM_GPU_BLOCKS, NUM_GPU_THREADS>>>(devZoningPlan, devDistanceMap, devQueue, devQueueEnd);
cudaDeviceSynchronize();
end = clock();
elapsed1 += (double)(end-start)/CLOCKS_PER_SEC;
start = clock();
computeDistanceToStore<<<NUM_GPU_BLOCKS, NUM_GPU_THREADS>>>(devZoningPlan, devDistanceMap, devQueue, devQueueEnd);
cudaDeviceSynchronize();
end = clock();
elapsed2 += (double)(end-start)/CLOCKS_PER_SEC;
}
printf("computeDistanceToStore: initDistance = %lf, updateDistance = %lf\n", elapsed1, elapsed2);
// Copy the distances back to the host (translated).
CUDA_CALL(cudaMemcpy(hostDistanceMap, devDistanceMap, sizeof(DistanceMap), cudaMemcpyDeviceToHost));
// Compare the CPU and GPU results (translated).
int bad_k = 0;
bool err = false;
{
for (int r = CITY_SIZE - 1; r >= 0 && !err; --r) {
for (int c = 0; c < CITY_SIZE && !err; ++c) {
for (int k = 0; k < NUM_FEATURES && !err; ++k) {
if (hostDistanceMap->distances[r][c][k] != hostDistanceMap2->distances[r][c][k]) {
err = true;
printf("ERROR! %d,%d k=%d, %d != %d\n", r, c, k, hostDistanceMap->distances[r][c][k], hostDistanceMap2->distances[r][c][k]);
bad_k = k;
}
}
}
}
}
// Debug dump of the first mismatching feature layer (translated).
if (CITY_SIZE <= 100 && err) {
for (int r = CITY_SIZE - 1; r >= 0; --r) {
for (int c = 0; c < CITY_SIZE; ++c) {
printf("%d, ", hostDistanceMap->distances[r][c][bad_k]);
}
printf("\n");
}
printf("\n");
for (int r = CITY_SIZE - 1; r >= 0; --r) {
for (int c = 0; c < CITY_SIZE; ++c) {
printf("%d, ", hostDistanceMap2->distances[r][c][bad_k]);
}
printf("\n");
}
printf("\n");
}
// Release device buffers (translated).
// NOTE(review): devQueue and devQueueEnd are never cudaFree'd — leak.
cudaFree(devZoningPlan);
cudaFree(devDistanceMap);
// Release host buffers (translated).
free(hostZoningPlan);
free(hostDistanceMap);
free(hostDistanceMap2);
cudaDeviceReset();
}
|
10,946 | #include <stdint.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
// Print a readable diagnostic and abort the process when a CUDA runtime
// call reports failure; no-op on cudaSuccess.
static void HandleError( cudaError_t err, const char *file, int line ) {
if (err == cudaSuccess)
return;
printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line );
exit( EXIT_FAILURE );
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#define HANDLE_NULL( a ) {if (a == NULL) { \
printf( "Host memory failed in %s at line %d\n", \
__FILE__, __LINE__ ); \
exit( EXIT_FAILURE );}}
int
main( int argc, char *argv[] )
{
int ITERATIONS = 1;
//int numBytes = 131072;
// NOTE(review): despite the name, numBytes counts uint64_t ELEMENTS, not
// bytes (each allocation below multiplies by sizeof(uint64_t)).
int numBytes = 131072*2;
uint64_t *memory_to_access;
//HANDLE_ERROR(cudaHostAlloc(&memory_to_access,sizeof(uint64_t)*numBytes,0));
// Unified (managed) memory: accessible from both host and device.
HANDLE_ERROR(cudaMallocManaged(&memory_to_access,sizeof(uint64_t)*numBytes));
for(int k=0;k< numBytes ;k++)
memory_to_access[k]=5;
printf("address = %p\n",memory_to_access);
// Pause so the process can be inspected (e.g. memory maps) before the
// host touches the managed buffer.
printf("Press enter to continue...\n");
getchar();
// Accumulate into `fake` so the reads below are not optimized away;
// the value itself is never used.
uint64_t fake=0;
for(int i=0; i<ITERATIONS; i++) {
// Manually unrolled x8 sequential read sweep over the buffer.
for (int j = 0; j < (numBytes); j += 8) {
fake += memory_to_access[j];
fake += memory_to_access[j + 1];
fake += memory_to_access[j + 2];
fake += memory_to_access[j + 3];
fake += memory_to_access[j + 4];
fake += memory_to_access[j + 5];
fake += memory_to_access[j + 6];
fake += memory_to_access[j + 7];
}
}
printf("Press enter to continue...\n");
getchar();
//cudaFreeHost(memory_to_access);
cudaFree(memory_to_access);
return 0;
}
|
10,947 | #include <stdio.h>
#include <cuda_runtime.h>
// Dense matrix multiply C = A * B, row-major storage.
// One thread computes one output element; requires a 2D launch whose grid
// covers nCol columns (x) by nFil rows (y).
// NOTE(review): B is indexed with stride nCol for both dimensions, so this
// effectively assumes B is nCol x nCol (square case) — confirm for
// non-square inputs.
__global__ void matMul(float* A, float* B, float* C, int nFil, int nCol)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int idy = blockDim.y * blockIdx.y + threadIdx.y;
int i = idx + idy * nCol;
// Bounds guard: grid may overshoot the matrix edge.
if(idx < nCol && idy < nFil)
{
float sum = 0;
// Dot product of row idy of A with column idx of B.
for(int k = 0; k < nCol; k++)
{
sum += A[idy * nCol + k] * B[k * nCol + idx];
}
C[i] = sum;
}
}
// Multiplies two 5x5 matrices on the GPU and prints the result.
int main()
{
int nFil = 5;
int nCol = 5;
int N = nFil * nCol;
size_t size = N * sizeof(float);
float* h_A = (float*)malloc(size);
float* h_B = (float*)malloc(size);
float* h_C = (float*)malloc(size);
//Initialialize input vectors
for(int i = 0; i < nFil; i++)
{
for(int j = 0; j < nCol; j++)
{
h_A[i * nCol + j] = 1;
h_B[i * nCol + j] = 2;
}
}
float* d_A;
cudaMalloc(&d_A, size);
float* d_B;
cudaMalloc(&d_B, size);
float* d_C;
cudaMalloc(&d_C, size);
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
// Bug fix: matMul indexes threads in TWO dimensions, but the previous 1D
// launch left idy == 0 for every thread, so only row 0 of C was computed.
// Launch a 2D grid covering the full nCol x nFil output instead.
dim3 threadsPerBlock(16, 16);
dim3 blocksPerGrid((nCol + threadsPerBlock.x - 1) / threadsPerBlock.x,
(nFil + threadsPerBlock.y - 1) / threadsPerBlock.y);
matMul<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, nFil, nCol);
cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
printf("\n\nMatriz resultante:\n");
// Bug fix: the print loops previously iterated i,j < N (= 25), reading
// h_C far past the end of the 25-element buffer. Iterate rows x cols.
for(int i = 0; i < nFil; i++){
for(int j = 0; j < nCol; j++){
printf("%.2f", h_C[i*nCol+j]);
}
printf("\n");
}
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
free(h_A);
free(h_B);
free(h_C);
return(0);
}
|
10,948 | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
// Prints the first element of each of the 3 device-side rows pointed to by
// arr (an array of 3 device pointers). Intended for a <<<1,1>>> launch;
// device printf is for debugging only.
__global__ void kernel(int** arr)
{
for (int i=0; i<3; i++)
printf("%d\n", arr[i][0]);
}
// Demonstrates building a device-side array of device pointers
// (pointer-to-pointer indirection) and reading it from a kernel.
int main()
{
int arr[][3] = {{1},{2},{3}}; // 3 rows; the kernel only reads arr[i][0]
// Bug fix: 0b100000100 (== 260) does not fit in a char and was silently
// truncated to 4; hold it in an int so the printed value matches the
// literal.
int x = 0b100000100;
printf("Bin: %d\n", x);
int **d_arr;
cudaMalloc((void***)(&d_arr), sizeof(int*)*3); // allocate for 3 int pointers
for (int i=0; i<3; i++)
{
int* temp;
cudaMalloc( (void**) &(temp), sizeof(int) * 1 ); // allocate for 1 int in each int pointer
cudaMemcpy(temp, arr[i], sizeof(int) * 1, cudaMemcpyHostToDevice); // copy data
// Store the device pointer itself into slot i of the device pointer array.
cudaMemcpy(d_arr+i, &temp, sizeof(int*), cudaMemcpyHostToDevice);
}
kernel<<<1,1>>>(d_arr);
cudaDeviceSynchronize();
// cudaDeviceReset releases all device allocations made above.
cudaDeviceReset();
return 0;
}
|
10,949 | #include "includes.h"
// Multiply one 3x3-kernel tap against the input pixel at (x, y), clamping
// coordinates to the image border (replicate-edge behavior).
// (kx, ky) in [-1, 1] select the tap inside D_KERNEL, a 3x3 row-major
// filter declared elsewhere (presumably in __constant__ memory — confirm
// in includes.h).
__device__ float mulWithKernel(int x, int y, int kx, int ky, float* input, int width, int height)
{
// Clamp to valid pixel coordinates (replicate edge pixels).
int px = min(max(x, 0), width - 1);
int py = min(max(y, 0), height - 1);
return D_KERNEL[3 * (ky + 1) + kx + 1] * input[py * width + px];
/* CROP
if (x >= 0 && y >= 0 && x < width && y < height)
{
return D_KERNEL[3 * (ky + 1) + kx + 1] * input[y * width + x];
}
else
{
return 0;
}
*/
}
// 3x3 convolution of `input` into `output` (both width x height, row-major),
// one pixel per thread, border handled by clamping inside mulWithKernel.
// Supports a 2D grid of 1D blocks (see threadId computation below).
__global__ void Convolution3x3Single(float* input, float* output, int width, int height)
{
int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid
+ blockDim.x*blockIdx.x //blocks preceeding current block
+ threadIdx.x;
int size = width * height;
// Guard: grid may contain more threads than pixels.
if(threadId < size)
{
float result = 0;
// Recover 2D pixel coordinates from the flat thread id.
int x = threadId % width;
int y = threadId / width;
// Accumulate all nine kernel taps around (x, y).
result += mulWithKernel(x - 1, y - 1, -1, -1, input, width, height);
result += mulWithKernel(x - 1, y , -1, 0, input, width, height);
result += mulWithKernel(x - 1, y + 1, -1, 1, input, width, height);
result += mulWithKernel(x, y - 1, 0, -1, input, width, height);
result += mulWithKernel(x, y , 0, 0, input, width, height);
result += mulWithKernel(x, y + 1, 0, 1, input, width, height);
result += mulWithKernel(x + 1, y - 1, 1, -1, input, width, height);
result += mulWithKernel(x + 1, y , 1, 0, input, width, height);
result += mulWithKernel(x + 1, y + 1, 1, 1, input, width, height);
output[y * width + x] = result;
}
}
10,950 | #include "shared.cuh"
// Advance each particle i along its direction vector (u, v, w), scaled by
// its per-particle travel distance. One particle per thread; thread_id()
// comes from shared.cuh.
__global__ void move(int size, double *__restrict__ x, double *__restrict__ y,
                     double *__restrict__ z, const double *__restrict__ u,
                     const double *__restrict__ v, const double *__restrict__ w,
                     const double *__restrict__ distance) {
  const int i = thread_id();
  if (i < size) {
    const double d = distance[i];  // load once, reuse for all three axes
    x[i] += u[i] * d;
    y[i] += v[i] * d;
    z[i] += w[i] * d;
  }
}
|
10,951 | #include <cuda.h>
#include <stdio.h>
#include <iostream>
#include <string>
#include <cstdio>
#include <cstdlib>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
// Fills a device vector with n ones via Thrust, reduces it on the GPU, and
// reports the sum and the kernel time in milliseconds.
int main(int argc, char** argv) {
using namespace std;
using namespace thrust;
// Bug fix: atol(argv[1]) previously dereferenced a null pointer when the
// element count argument was missing.
if (argc < 2) {
fprintf(stderr, "usage: %s <n>\n", argv[0]);
return 1;
}
unsigned int n = atol(argv[1]);
thrust::host_vector<int> h_vec(n);
thrust::fill(h_vec.begin(), h_vec.end(), 1);
// cout << "start copying" << endl;
thrust::device_vector<int> d_vec(n);
thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin());
// Time only the reduction with CUDA events.
cudaEvent_t start;
cudaEvent_t stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
int tot = thrust::reduce(d_vec.begin(), d_vec.end());
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float ms;
cudaEventElapsedTime(&ms, start, stop);
cout << tot << endl;
cout << ms << endl;
// Bug fix: events were previously leaked.
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
}
|
10,952 | // incrementArray.cu
#include <stdio.h>
#include <assert.h>
#include <cuda.h>
// CPU reference: add 1.0f to every element of a[0..N-1].
void incrementArrayOnHost(float *a, int N)
{
for (int k = 0; k < N; ++k) {
a[k] += 1.f;
}
}
// GPU version: each thread increments at most one element of a[0..N-1].
__global__ void incrementArrayOnDevice(float *a, int N)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x;
if (tid < N) {
a[tid] += 1.f;
}
}
// Debug helper: print n floats space-separated on one line.
void printarray(float *a, int n)
{
for (int k = 0; k < n; ++k) {
printf("%f ", a[k]);
}
printf("\n");
}
// Repeatedly increments an array of doubling size on both host and device
// and verifies the results agree. usage: incrementArray <reps> <startSize>
int main(int argc, char** argv)
{
/* init program args & global variable */
if(argc < 2){
printf("usage: incrementArray [repetitions] [startSizeArray]\n");
return EXIT_SUCCESS;
}
int repetitions = atoi(argv[1]);
double N = atoi(argv[2]);
float *a_h, *b_h; // pointers to host memory
float *a_d; // pointer to device memory
int i,rep;
int totalSuccess = 0;
/* end init */
for (rep=0; rep<=repetitions;rep++){
size_t size = N*sizeof(float);
// allocate arrays on host
a_h = (float *)malloc(size);
b_h = (float *)malloc(size);
// allocate array on device
cudaMalloc((void **) &a_d, size);
// initialization of host data
for (i=0; i<N; i++) a_h[i] = (float)i;
// copy data from host to device
cudaMemcpy(a_d, a_h, sizeof(float)*N, cudaMemcpyHostToDevice);
// do calculation on host
incrementArrayOnHost(a_h, N);
/* launch config: fixed oversized grid; kernel guards with idx < N */
dim3 threadPerBlock(1024);
dim3 numBlocks(65535,2);
incrementArrayOnDevice <<<numBlocks,threadPerBlock>>> (a_d, N);
// Retrieve result from device and store in b_h
cudaMemcpy(b_h, a_d, sizeof(float)*N, cudaMemcpyDeviceToHost);
/* compare host and device results */
int success = 1;
for (i=0; i<N; i++) {
if (a_h[i] != b_h[i]) {
success = 0;
break;
}
}
printf("rep %d a[%f] = %s ", rep, N, (success == 1) ? "true" : "false");
if (success == 1) totalSuccess += 1;
// Bug fix: the buffers were previously re-allocated every repetition but
// only freed once after the loop, leaking every allocation but the last.
// Free them here, at the end of each repetition.
free(a_h);
free(b_h);
cudaFree(a_d);
N = N*2; // double N size
}/* end looping */
printf("\nsuccess rate: %f%%\n", totalSuccess / ((float)repetitions) * 100.0);
return EXIT_SUCCESS;
}
|
10,953 | //Based on the work of Andrew Krepps
#include <iostream>
#include <random>
#include <chrono>
#include <stdio.h>
__constant__ static const int VAL_A = 1;
__constant__ static const int VAL_B = 3;
// Device GPU add c[i] = a[i] + b[i]
// Device GPU add: c[i] = a[i] + b[i], one element per thread.
__device__ void add(int * a, int * b, int * c)
{
const unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x;
c[i] = a[i] + b[i];
}
// Device GPU subtract c[i] = a[i] - b[i]
// Device GPU subtract: c[i] = a[i] - b[i], one element per thread.
__device__ void subtract(int * a, int * b, int * c)
{
const unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x;
c[i] = a[i] - b[i];
}
// Device GPU multiply c[i] = a[i] * b[i]
// Device GPU multiply: c[i] = a[i] * b[i], one element per thread.
__device__ void mult(int * a, int * b, int * c)
{
const unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x;
c[i] = a[i] * b[i];
}
// Device GPU div c[i] = a[i] / b[i]
// Device GPU div c[i] = a[i] / b[i]; yields 0 when b[i] == 0.
// Bug fix: inputs include 0 (dist(0,4) on the host side) and integer
// division by zero is undefined on the device. The host reference
// (hostDiv) returns 0 in that case, so match it explicitly.
__device__ void div(int *a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = (b[thread_idx] != 0) ? (a[thread_idx] / b[thread_idx]) : 0;
}
// Device GPU mod c[i] = a[i] % b[i]
// Device GPU mod c[i] = a[i] % b[i]; yields 0 when b[i] == 0.
// Bug fix: inputs include 0 and modulo-by-zero is undefined on the
// device. hostMod documents/implements "0 on zero divisor" — match it.
__device__ void mod(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = (b[thread_idx] != 0) ? (a[thread_idx] % b[thread_idx]) : 0;
}
// Executes all 5 shared math operations
// Executes all 5 math operations, staging the inputs through dynamic
// shared memory laid out as [a | b | scratch], each `size` ints
// (the launch must pass 3 * size * sizeof(int) as shared-memory bytes).
// Each thread only touches its own slot, so no __syncthreads() is needed
// between the operations.
// NOTE(review): there is no tid < size guard; correctness relies on the
// launch supplying exactly `size` threads — confirm at call sites.
__global__ void executeSharedMathOperations(int * a, int * b, int * addDest, int * subDest, int * multDest, int * divDest, int * modDest, const int size)
{
const int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
extern __shared__ int sharedMem[];
// Use offsets in the shared mem to create arrays.
int * sharedA = &sharedMem[0];
int * sharedB = &sharedMem[size];
int * sharedRet = &sharedMem[2*size];
sharedA[tid] = a[tid];
sharedB[tid] = b[tid];
// Add sharedA to sharedB and store in addDest
add(sharedA, sharedB, sharedRet);
addDest[tid] = sharedRet[tid];
// Subtract sharedB from sharedA and store in subDest
subtract(sharedA, sharedB, sharedRet);
subDest[tid] = sharedRet[tid];
// Multiply sharedA to sharedB and store in mutlDest
mult(sharedA, sharedB, sharedRet);
multDest[tid] = sharedRet[tid];
// Divide sharedA by sharedB and store in divDest
div(sharedA, sharedB, sharedRet);
divDest[tid] = sharedRet[tid];
// Mod sharedA by sharedB and store in modDest
mod(sharedA, sharedB, sharedRet);
modDest[tid] = sharedRet[tid];
}
// Executes all 5 global math operations
// Executes all 5 math operations directly on global-memory operands.
// Each helper indexes by the thread's own global id, so one thread handles
// one element of each destination array.
__global__ void executeGlobalMathOperations(int * a, int * b, int * addDest, int * subDest, int * multDest, int * divDest, int * modDest, const int size)
{
// Add a to b and store in addDest
add(a, b, addDest);
// Subtract a from b and store in subDest
subtract(a, b, subDest);
// Multiply a to b and store in mutlDest
mult(a, b, multDest);
// Divide a by b and store in divDest
div(a, b, divDest);
// Mod a by b and store in modDest
mod(a, b, modDest);
}
// Executes all 5 constant math operations
// Executes all 5 math operations on the __constant__ operands VAL_A (1)
// and VAL_B (3); each thread fills its own slot of each destination array.
__global__ void executeConstantMathOperations(int * addDest, int * subDest, int * multDest, int * divDest, int * modDest, const int size)
{
const int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
// Add VAL_A to VAL_B and store in addDest
addDest[tid] = VAL_A + VAL_B;
// Subtract a from b and store in subDest
subDest[tid] = VAL_A - VAL_B;
// Multiply a to b and store in mutlDest
multDest[tid] = VAL_A * VAL_B;
// Divide a by b and store in divDest
divDest[tid] = VAL_A / VAL_B; // B is chosen to not be 0.
// Mod a by b and store in modDest
// Bug fix: this previously duplicated the division (`/`) instead of
// performing the documented modulo.
modDest[tid] = VAL_A % VAL_B; // B is chosen to not be 0.
}
// Host (Cpu) add c[i] = a[i] + b[i]
// Host (CPU) reference add: c[i] = a[i] + b[i] for i in [0, size).
void hostAdd(int * a, int * b, int *c, const int size)
{
for (int k = 0; k < size; k++)
c[k] = a[k] + b[k];
}
// Host (Cpu) sub c[i] = a[i] - b[i]
// Host (CPU) reference subtract: c[i] = a[i] - b[i] for i in [0, size).
void hostSub(int * a, int * b, int *c, const int size)
{
for (int k = 0; k < size; k++)
c[k] = a[k] - b[k];
}
// Host (Cpu) multiply c[i] = a[i] * b[i]
// Host (CPU) reference multiply: c[i] = a[i] * b[i] for i in [0, size).
void hostMult(int * a, int * b, int *c, const int size)
{
for (int k = 0; k < size; k++)
c[k] = a[k] * b[k];
}
// Host (Cpu) divide c[i] = a[i] / b[i]
// Host (CPU) reference divide: c[i] = a[i] / b[i], or 0 when b[i] == 0
// (protects against integer division by zero).
void hostDiv(int * a, int * b, int *c, const int size)
{
for (int k = 0; k < size; k++)
c[k] = (b[k] != 0) ? (a[k] / b[k]) : 0;
}
// Host (Cpu) mod c[i] = a[i] % b[i]
// Host (CPU) reference modulo: c[i] = a[i] % b[i], or 0 when b[i] == 0
// (protects against modulo by zero).
void hostMod(int * a, int * b, int *c, const int size)
{
for (int k = 0; k < size; k++)
c[k] = (b[k] != 0) ? (a[k] % b[k]) : 0;
}
// Executes each of the host (cpu) tests by creating local memory and executing all 5 math operations on the data.
// The data is filled with random numbers that uses the same seed as the GPU tests.
// Runs all 5 CPU math operations on seeded random data (same seed as the
// GPU tests so outputs are comparable). blockSize/numBlocks are unused on
// the host path but kept for a signature parallel to the GPU tests.
void executeHostTest(const int totalThreads, const int blockSize, const int numBlocks)
{
// Bug fix: `int a[totalThreads]` is a runtime-sized stack array — a
// compiler extension in C++ that overflows the stack for large thread
// counts. Allocate on the heap instead.
int *a = new int[totalThreads];
int *b = new int[totalThreads];
int *c = new int[totalThreads];
// Create a random generate that will generate random numbers from 0 to 4.
// Use a set seed so output is deterministic
unsigned seed = 12345;
std::default_random_engine gen(seed);
std::uniform_int_distribution<int> dist(0,4);
for (int i = 0; i < totalThreads; ++i)
{
a[i] = i;
b[i] = dist(gen);
}
// Add all of the numbers c[i] = a[i] + b[i];
hostAdd(a,b,c, totalThreads);
// Subtract all of the numbers c[i] = a[i] - b[i];
hostSub(a,b,c, totalThreads);
// Multiply all of the numbers c[i] = a[i] * b[i];
hostMult(a,b,c, totalThreads);
// Divides all of the numbers c[i] = a[i] / b[i]; if b[i] == 0, c[i] = 0
hostDiv(a,b,c, totalThreads);
// Mod all of the numbers c[i] = a[i] % b[i];
hostMod(a,b,c, totalThreads);
delete[] a;
delete[] b;
delete[] c;
}
// Executes each of the global memory gpu tests by creating local memory, copying it global memory, and then performing
// all 5 math operations on the data. The data is filled with random numbers that uses the same seed as the CPU tests.
// Runs the global-memory GPU math test: seeds host buffers, copies them to
// the device, launches executeGlobalMathOperations, and copies all five
// results back. Same RNG seed as the CPU test for comparability.
// NOTE(review): the stack arrays below are runtime-sized (VLA extension);
// large totalThreads will overflow the stack. cuda* return codes are not
// checked — failures here are silent.
void executeGlobalTest(const int totalThreads, const int blockSize, const int numBlocks)
{
int a[totalThreads], b[totalThreads], add_dest[totalThreads], sub_dest[totalThreads], mult_dest[totalThreads], div_dest[totalThreads], mod_dest[totalThreads];
int *gpu_a, *gpu_b, *gpu_add_dest, *gpu_sub_dest, *gpu_mult_dest, *gpu_div_dest, *gpu_mod_dest;
cudaMalloc((void**)&gpu_a, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_b, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_add_dest, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_sub_dest, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_mult_dest, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_div_dest, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_mod_dest, totalThreads * sizeof(int));
// Create a random generate that will generate random numbers from 0 to 4.
// Use a set seed so output is deterministic
unsigned seed = 12345;
std::default_random_engine gen(seed);
std::uniform_int_distribution<int> dist(0,4);
for (size_t i = 0; i < totalThreads; ++i)
{
a[i] = i;
b[i] = dist(gen);
}
cudaMemcpy(gpu_a, a, totalThreads * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(gpu_b, b, totalThreads * sizeof(int), cudaMemcpyHostToDevice);
executeGlobalMathOperations<<<numBlocks, blockSize>>>(gpu_a,gpu_b,gpu_add_dest,gpu_sub_dest,gpu_mult_dest,gpu_div_dest,gpu_mod_dest, numBlocks * blockSize);
// Blocking copies below also synchronize with the kernel.
cudaMemcpy(add_dest, gpu_add_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(sub_dest, gpu_sub_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(mult_dest, gpu_mult_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(div_dest, gpu_div_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(mod_dest, gpu_mod_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(gpu_a);
cudaFree(gpu_b);
cudaFree(gpu_add_dest);
cudaFree(gpu_sub_dest);
cudaFree(gpu_mult_dest);
cudaFree(gpu_div_dest);
cudaFree(gpu_mod_dest);
}
// Executes each of the shared memory gpu tests by creating local memory, copying it global memory, and then performing
// all 5 math operations on the data after creating shared memory. The data is filled with random numbers that uses the same seed as the CPU tests.
// Runs the shared-memory GPU math test: seeds host buffers, copies them to
// the device, launches executeSharedMathOperations with a dynamic
// shared-memory allocation of 3 * totalThreads ints ([a | b | scratch]),
// and copies the five results back. Same RNG seed as the CPU test.
// NOTE(review): runtime-sized stack arrays (VLA extension) and unchecked
// cuda* return codes, as in executeGlobalTest.
void executeSharedTest(const int totalThreads, const int blockSize, const int numBlocks)
{
int a[totalThreads], b[totalThreads], add_dest[totalThreads], sub_dest[totalThreads], mult_dest[totalThreads], div_dest[totalThreads], mod_dest[totalThreads];
int *gpu_a, *gpu_b, *gpu_add_dest, *gpu_sub_dest, *gpu_mult_dest, *gpu_div_dest, *gpu_mod_dest;
cudaMalloc((void**)&gpu_a, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_b, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_add_dest, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_sub_dest, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_mult_dest, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_div_dest, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_mod_dest, totalThreads * sizeof(int));
// Create a random generate that will generate random numbers from 0 to 4.
// Use a set seed so output is deterministic
unsigned seed = 12345;
std::default_random_engine gen(seed);
std::uniform_int_distribution<int> dist(0,4);
for (size_t i = 0; i < totalThreads; ++i)
{
a[i] = i;
b[i] = dist(gen);
}
cudaMemcpy(gpu_a, a, totalThreads * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(gpu_b, b, totalThreads * sizeof(int), cudaMemcpyHostToDevice);
// The third parameter is the size of the shared memory
// We multiply by 3 because we need to copy A and B and then also have room for the return in shared memory.
executeSharedMathOperations<<<numBlocks, blockSize, 3 * totalThreads * sizeof(int)>>>(gpu_a,gpu_b,gpu_add_dest,gpu_sub_dest,gpu_mult_dest,gpu_div_dest,gpu_mod_dest, totalThreads);
cudaMemcpy(add_dest, gpu_add_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(sub_dest, gpu_sub_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(mult_dest, gpu_mult_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(div_dest, gpu_div_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(mod_dest, gpu_mod_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(gpu_a);
cudaFree(gpu_b);
cudaFree(gpu_add_dest);
cudaFree(gpu_sub_dest);
cudaFree(gpu_mult_dest);
cudaFree(gpu_div_dest);
cudaFree(gpu_mod_dest);
}
// Executes each of the consnt memory gpu tests by creating local memory, copying it global memory, and then performing
// all 5 math operations on the data using constant values. The data is filled with random numbers that uses the same seed as the CPU tests.
// Runs the __constant__-memory GPU math test: no input buffers needed —
// the kernel computes from VAL_A/VAL_B — so only the five destination
// buffers are allocated and copied back.
// NOTE(review): runtime-sized stack arrays (VLA extension) and unchecked
// cuda* return codes, as in the other GPU test drivers.
void executeConstantTest(const int totalThreads, const int blockSize, const int numBlocks)
{
int add_dest[totalThreads], sub_dest[totalThreads], mult_dest[totalThreads], div_dest[totalThreads], mod_dest[totalThreads];
int *gpu_add_dest, *gpu_sub_dest, *gpu_mult_dest, *gpu_div_dest, *gpu_mod_dest;
cudaMalloc((void**)&gpu_add_dest, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_sub_dest, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_mult_dest, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_div_dest, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_mod_dest, totalThreads * sizeof(int));
executeConstantMathOperations<<<numBlocks, blockSize>>>(gpu_add_dest,gpu_sub_dest,gpu_mult_dest,gpu_div_dest,gpu_mod_dest, totalThreads);
cudaMemcpy(add_dest, gpu_add_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(sub_dest, gpu_sub_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(mult_dest, gpu_mult_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(div_dest, gpu_div_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(mod_dest, gpu_mod_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(gpu_add_dest);
cudaFree(gpu_sub_dest);
cudaFree(gpu_mult_dest);
cudaFree(gpu_div_dest);
cudaFree(gpu_mod_dest);
}
// Print a row-major xSize x ySize integer array, one row per line.
void printArray(const int * const arr, const int xSize, const int ySize)
{
for (int row = 0; row < xSize; ++row)
{
for (int col = 0; col < ySize; ++col)
{
std::cout << arr[row * ySize + col] << " ";
}
std::cout << '\n';
}
std::cout << std::flush;
}
// Benchmarks the same 5-operation workload on the CPU and on the GPU using
// global, shared, and constant memory, printing wall-clock timings.
// argv[1] = total threads (default 256), argv[2] = block size (default 256).
int main(int argc, char** argv)
{
// read command line arguments
int totalThreads = 256;
int blockSize = 256;
if (argc >= 2) {
totalThreads = atoi(argv[1]);
}
if (argc >= 3) {
blockSize = atoi(argv[2]);
}
int numBlocks = totalThreads/blockSize;
// validate command line arguments
// Round totalThreads up to a whole number of blocks so the launch covers it.
if (totalThreads % blockSize != 0) {
++numBlocks;
totalThreads = numBlocks*blockSize;
printf("Warning: Total thread count is not evenly divisible by the block size\n");
printf("The total number of threads will be rounded up to %d\n", totalThreads);
}
// Wall-clock timing; note the GPU timings include allocation and copies.
auto startTime = std::chrono::system_clock::now();
executeHostTest(totalThreads, blockSize, numBlocks);
auto endTime = std::chrono::system_clock::now();
std::chrono::duration<double> totalTime = endTime-startTime;
std::cout << "Host execution took: " << totalTime.count() << " seconds." << std::endl;
startTime = std::chrono::system_clock::now();
executeGlobalTest(totalThreads, blockSize, numBlocks);
endTime = std::chrono::system_clock::now();
totalTime = endTime-startTime;
std::cout << "Global Memory execution took: " << totalTime.count() << " seconds." << std::endl;
startTime = std::chrono::system_clock::now();
executeSharedTest(totalThreads, blockSize, numBlocks);
endTime = std::chrono::system_clock::now();
totalTime = endTime-startTime;
std::cout << "Shared Memory execution took: " << totalTime.count() << " seconds." << std::endl;
startTime = std::chrono::system_clock::now();
executeConstantTest(totalThreads, blockSize, numBlocks);
endTime = std::chrono::system_clock::now();
totalTime = endTime-startTime;
std::cout << "Constant Memory execution took: " << totalTime.count() << " seconds." << std::endl;
return 0;
}
|
10,954 | #include "includes.h"
// Helper function for using CUDA to call kernel functions
cudaError_t cuda_code(float* , float*, int , int );
__device__ float sum = 0;
// One step of back-substitution for row i of an N x (N+1) augmented matrix:
// each thread j > i computes matrix[i][j] * resultVector[j] and accumulates
// it into the file-scope __device__ float `sum` via atomicAdd.
// NOTE(review): `sum` is shared across the whole grid and is not reset
// here — presumably the host zeroes it between calls; confirm at call site.
__global__ void substitution(int i, int N, float *row, float *matrix, float*resultVector) {
int j = i + blockIdx.x * blockDim.x + threadIdx.x;
//From previous line, "i" assigns the initial thread index, so threads are not
//created for indexes that will not affect the results
int ij; //element i,j of the matrix
if (j > i && j < N)
{
// Row-major index into the (N+1)-column augmented matrix.
ij = j + (N + 1)*i;
row[j] = matrix[ij] * resultVector[j];
atomicAdd(&sum, row[j]);
}
__syncthreads();//Barrier to wait all threads to finish their tasks
}
10,955 | /* Linear convection equation with periodic BC
* solved using MUSCL scheme
* CUDA implementation of hyp.c using only global memory
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define minmod(a,b) ( (fabs(a) < fabs(b)) ? (a) : (b) )
//function prototypes
void initCond(int, float, float*);
__global__ void fluxFun(float*, float*);
__global__ void update(int, float, float, float*, float*, float*);
__global__ void periodic(float*, int);
// Driver: solves the linear convection equation with periodic BCs using a
// MUSCL scheme and 3-stage RK time stepping, entirely with global-memory
// kernels. Writes the initial and final solutions to init.dat / final.dat.
int main(){
float *u;
float *uold_d, *u_d, *fl_d;
int np = 101, ns;
float dx = 1.0/(np-1);
float dt, cfl;
int niter, maxiter;
int nirk, rkmax=3;
int i;
FILE *fpt;
dim3 grid, block;
// ns = np interior points + 2 ghost cells on each side.
ns = np + 2 + 2;
u = (float*)malloc(ns*sizeof(float));
cudaMalloc((void**)&uold_d, (ns)*sizeof(float));
cudaMalloc((void**)&u_d, (ns)*sizeof(float));
cudaMalloc((void**)&fl_d, (np+1)*sizeof(float));
// CFL-limited time step; integrate to t = 1 (one period).
cfl = 0.9;
dt = cfl*dx;
maxiter = 1.0/dt + 1;
//set initial conditions
initCond(np, dx, u);
fpt = fopen("init.dat", "w");
for(i=0; i<np; i++) fprintf(fpt, "%e %e\n", dx*i, u[i+2]);
fclose(fpt);
cudaMemcpy(uold_d, u, (ns)*sizeof(float),
cudaMemcpyHostToDevice);
cudaMemcpy(u_d, u, (ns)*sizeof(float),
cudaMemcpyHostToDevice);
//time-step loop
for(niter=0; niter<maxiter; niter++){
//RK stages
for(nirk=0; nirk<rkmax; nirk++){
//flux computation: np+1 interface fluxes
block.x = 3;
grid.x = (np+1)/block.x;
fluxFun<<<grid,block>>>(u_d, fl_d);
//update conserved variable (one thread per interior point)
block.x = 1;
grid.x = (np)/block.x;
update<<<grid,block>>>(nirk, dt, dx, uold_d, fl_d, u_d);
//set periodicity (refresh ghost cells)
block.x = 1;
grid.x = (ns)/block.x;
periodic<<<grid,block>>>(u_d, np);
}
// Save the completed step as the "old" state for the next RK cycle.
cudaMemcpy(uold_d, u_d, (ns)*sizeof(float),
cudaMemcpyDeviceToDevice);
}
cudaMemcpy(u, u_d, (ns)*sizeof(float),
cudaMemcpyDeviceToHost);
fpt = fopen("final.dat", "w");
for(i=0; i<np; i++) fprintf(fpt, "%e %e\n", dx*i, u[i+2]);
fclose(fpt);
free(u);
cudaFree(uold_d);
cudaFree(u_d);
cudaFree(fl_d);
}
//set initial condition
// Initialize u with one period of sin(2*pi*x) on np points, then fill the
// two ghost cells at each end for periodicity. Interior values live in
// u[2..np+1]; u must hold np+4 floats.
void initCond(int np, float dx, float *u){
for(int i = 0; i < np; ++i)
u[i+2] = sin(2.0*M_PI*(dx*i));
// ghost cells: copy from the opposite end of the domain
u[0] = u[np];
u[1] = u[np+1];
u[np+2] = u[2];
u[np+3] = u[3];
}
//flux function
// MUSCL interface states at flux point idx: reconstruct left (ul) and
// right (ur) states with minmod-limited slopes from the 4-point stencil
// u[idx..idx+3] (u includes 2 ghost cells on each side).
// NOTE(review): only ul is stored as the flux — upwinding for a positive
// advection speed — and ur is computed but unused; confirm this is
// intentional.
__global__ void fluxFun(float *u, float *fl){
float uj, ujp1, ujm1, ujp2;
float ul, ur;
int idx = blockIdx.x*blockDim.x + threadIdx.x;
ujm1 = *(u+idx);
uj = *(u+idx+1);
ujp1 = *(u+idx+2);
ujp2 = *(u+idx+3);
ul = uj + 0.5*minmod( (uj-ujm1), (ujp1-uj) );
ur = ujp1 - 0.5*minmod( (ujp1-uj), (ujp2-ujp1) );
fl[idx] = ul;
}
//perform one stage of RK
// One Runge-Kutta stage: u_new = a*u_old + (1-a)*(u - (dt/dx)*residual),
// where the residual is the flux difference across cell idx. The weights
// airk match a 3-stage convex-combination (SSP-type) RK scheme — stage
// index nirk selects the coefficient. Interior cells start at offset 2
// (ghost cells).
__global__ void update(int nirk, float dt, float dx, float *uold, float *fl,
float *u){
int idx = blockIdx.x*blockDim.x + threadIdx.x;
float res;
float airk[3] = {0.0, 3.0/4.0, 1.0/3.0};
res = fl[idx+1] - fl[idx];
u[idx+2] = airk[nirk]*uold[idx+2] +
(1.0-airk[nirk])*(u[idx+2] - (dt/dx)*res);
}
//set periodic BC
// Refresh the two ghost cells at each end of u from the opposite side of
// the periodic domain. Threads whose index is not a ghost cell do nothing
// (the original wrote u[idx] = u[idx] there, which is equivalent).
__global__ void periodic(float *u, int np){
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx==0){
u[0] = u[np];
} else if(idx==1){
u[1] = u[np+1];
} else if(idx==np+2){
u[np+2] = u[2];
} else if(idx==np+3){
u[np+3] = u[3];
}
}
|
10,956 | __global__
void vecAdd(float *l, float *r, float *result, size_t N) {
for (size_t i = 0; i < N; ++i) {
result[i] = l[i] + r[i];
}
}
|
10,957 | #include "includes.h"
// Element-wise 2D matrix add: c = a + b for a rows x columns matrix,
// one element per thread (2D launch).
__global__ void add(int *a, int *b, int *c,int columns,int rows)
{
// get the global id for the thread
int x = (blockIdx.x * blockDim.x + threadIdx.x);
int y = (blockIdx.y * blockDim.y + threadIdx.y);
// Bug fix: the columns/rows parameters were previously unused — threads
// past the matrix edge (when the grid does not divide the matrix exactly)
// read and wrote out of bounds. Guard against that here.
if (x < columns && y < rows)
{
// calculate the index of the input data
int index = y * columns + x;
c[index] = a[index] + b[index];
}
}
10,958 | #include "includes.h"
// In place: vec1[i] = sqrt(vec1[i] + vec2[i]) for every i < width.
__global__ void cuAddQNormAndSqrt(float *vec1, float *vec2, int width){
const unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= width) {
return;  // guard threads past the end of the vectors
}
vec1[i] = sqrt(vec1[i] + vec2[i]);
}
10,959 | #include <fstream>
#include <string>
#include <chrono>
#include <string>
#include <iostream>
#include <iomanip>
//For GPU Access
int* grid = nullptr;
int* gridGPU = nullptr;
int* tempGPU = nullptr;
int width = 0;
int height = 0;
//Error checking for cuda calls
// Abort the process with a readable message when a CUDA API call fails.
void checkError(cudaError_t e)
{
if (e == cudaSuccess)
return;
std::cerr << "CUDA error: " << int(e) << " : " << cudaGetErrorString(e) << '\n';
abort();
}
//Changes the mod operator to work like python
// Python-style modulo for the host: result is always in [0, dimension)
// for dimension > 0.
// Bug fix: the previous `dimension + (x % dimension)` form returned
// `dimension` itself (out of range) when x was a negative multiple of
// dimension, e.g. modFixH(-5, 5) == 5.
__host__
int modFixH(int x, int dimension) {
    return ((x % dimension) + dimension) % dimension;
}
//Changes the mod operator to work like python
// Python-style modulo for the device: result is always in [0, dimension)
// for dimension > 0.
// Bug fix: the previous `dimension + (x % dimension)` form returned
// `dimension` itself (out of range) when x was a negative multiple of
// dimension.
__device__
int modFix(int x, int dimension) {
    return ((x % dimension) + dimension) % dimension;
}
/**
* Given the state of a cell the GoL rules apply:
* - Any live call with fewer than 2 neighbors dies = underpopulation
* - Any live cell with two or three live neighbors lives on to the next gen
* - Any live cell with more than 3 live neighbors dies = overpopulation
* - Any dead cell with exactly 3 live neighbors becomes living = reproduction
*
* After these rules have been checked, the grid is then updated.
*
* @gridGPU: Representation of the grid that lives on the GPU memory.
* @tempGPU: Representation of the grid that lives on the GPU memory that is used for inbetween states.
* @width: The width of the grid.
* @height: The height of the grid.
*/
/**
 * One Game-of-Life generation: reads the current state from gridGPU and
 * writes the next state into tempGPU (cells with exactly 2 live neighbors
 * are left untouched in tempGPU, which the host keeps in sync with the
 * current state between generations).
 * Uses grid-stride loops in both dimensions, so any launch configuration
 * covers the full board.
 * NOTE(review): cell (a, b) is addressed as a*width + b with a < width and
 * b < height — the row stride mixes width/height relative to intialGrid's
 * j*width + i layout; consistent only when width == height. Confirm for
 * non-square boards.
 */
__global__
void evolve(int* gridGPU, int* tempGPU, int width, int height) {
//Defineing alive and dead
int deadValue = 0;
int aliveValue = 1;
//Gpu loop set up
int index = blockIdx.x*blockDim.x + threadIdx.x;
int index2 = blockIdx.y*blockDim.y + threadIdx.y;
int stride = blockDim.x*gridDim.x;
int stride2 = blockDim.y*gridDim.y;
//Gpu iteration of the cells universe
for (int a=index; a < width; a += stride){
for(int b=index2; b < height; b += stride2) {
//Checks for alive neighbors (8-neighborhood with periodic wrap)
int aliveCells = 0;
for (int k=-1; k <= 1; k++) {
for (int l=-1; l <= 1; l++) {
if (!(k == 0 && l == 0)) {
int neigh = gridGPU[(modFix(k + a,width) * width) + modFix(l + b,height)];
if ((neigh == aliveValue)) {
aliveCells++;
}
}
}
}
//Modifies the grid depending on surrounding:
//<2 or >3 neighbors -> dead; exactly 3 -> alive; exactly 2 -> unchanged.
if (aliveCells < 2 || aliveCells > 3) {
tempGPU[(modFix(a, width)*width)+modFix(b, height)] = deadValue;
} else if (aliveCells == 3) {
tempGPU[(modFix(a, width)*width)+modFix(b, height)] = aliveValue;
}
}
}
}
/**
* Sets up the grid so that every cell of the environment starts off
* as being dead.
*
* @grid: The grid that the game of life will be played on.
* @width: The width of the environment.
* @height: The height of the environment.
*/
// Clear the environment: every one of the width*height cells starts dead (0).
void intialGrid(int* grid, int width, int height){
const int cells = width * height;
for (int c = 0; c < cells; ++c) {
grid[c] = 0;
}
}
// void debugGrid(int* grid, int width, int height) {
// for (int i = 0; i < width; ++i) {
// for (int j = 0; j < height; ++j) {
// std::cout<<grid[j*width + i]<<" ";
// }
// std::cout<<std::endl;
// }
// std::cout<<std::endl;
// }
//Runs Program
// Game of Life driver: loads a pattern file (argv[1]) centered on a
// width x height board, evolves it `generations` times on the GPU, and
// optionally dumps every generation to game_of_life_save.txt.
// argv: pattern-file [width height generations visFlag]
int main(int argc, char const *argv[]) {
int generations;
std::string visOut = "NoVis";
const std::string visStatus = "NoVis";
if (argc == 6) {
//argv[1] is the file you want to insert
width = atoi(argv[2]); //argv[2]
height = atoi(argv[3]); //argv[3]
generations = atoi(argv[4]); //argv[4]
visOut = argv[5];
} else {
//argv[1] is the file you want to insert
width = 256; //argv[2]
height = 256; //argv[3]
generations = 50; //argv[4]
}
//Initalize the grid for the game
grid = new int[width*height];
intialGrid(grid, width, height);
//Add pattern to the grid: 'O' characters mark live cells, placed roughly
//centered using the first line's length.
std::ifstream file(argv[1]);
std::string line;
int centering = 0;
bool centered = false;
int i = 0;
while(getline(file, line)) {
if(centered == false){
centering += line.size();
centered = true;
}
for (size_t j=0; j<line.length(); j++) {
if (line[j] == 'O') {
grid[(((i+(width/2)) - (centering/2)) * width) + (j+(height/2) - (centering/2))] = 1;
}
}
i++;
}
file.close();
std::ofstream outFile;
if (visStatus.compare(visOut)) { //For visualizing and verification
outFile.open("game_of_life_save.txt");
}
//Allocate GPU memory for calculations
checkError(cudaMalloc(&gridGPU, width*height*sizeof(int)));
checkError(cudaMalloc(&tempGPU, width*height*sizeof(int)));
//Copy intial array to GPU memory locations
checkError(cudaMemcpy(gridGPU, grid, width*height*sizeof(int), cudaMemcpyHostToDevice));
checkError(cudaMemcpy(tempGPU, grid, width*height*sizeof(int), cudaMemcpyHostToDevice));
//Launch configuration (evolve uses grid-stride loops, so any size works)
int Threads = 256;
int Blocks = (width+Threads-1)/Threads;
for (int i = 0; i < generations; i++) {
//plays gol
evolve<<<Blocks, Threads>>>(gridGPU, tempGPU, width, height);
//Sync host and device
cudaDeviceSynchronize();
// copy from GPU to CPU, then push the new state back as the input of
// the next generation
checkError(cudaMemcpy(grid, tempGPU, width*height*sizeof(int), cudaMemcpyDeviceToHost));
checkError(cudaMemcpy(gridGPU, grid, width*height*sizeof(int), cudaMemcpyHostToDevice));
if (visStatus.compare(visOut)) { //For visualizing and verification
for (int i=0; i < width; i++) {
for (int j=0; j < height; j++) {
outFile << grid[(modFixH(i, width)*width)+modFixH(j, height)];
}
outFile << std::endl;
}
outFile << std::endl;
}
}
if (visStatus.compare(visOut)) {
outFile.close();
}
cudaFree(gridGPU);
cudaFree(tempGPU);
// Bug fix: grid was allocated with new[], so it must be released with
// delete[] — free() on new[]-allocated memory is undefined behavior.
delete[] grid;
return 0;
}
10,960 | #include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
typedef struct vertex vertex;
/* One node (page) of the link graph used for the pagerank computation. */
struct vertex {
unsigned int vertex_id;     // index of this vertex in the vertices array
float pagerank;             // pagerank value of the current iteration
float pagerank_next;        // accumulator for the next iteration's pagerank
unsigned int n_successors;  // number of outgoing links from this vertex
vertex ** successors;       // array (length n_successors) of pointers to linked vertices
};
/* Absolute value of a float (host-side helper used for convergence checks). */
float abs_float(float in) {
    return (in >= 0) ? in : -in;
}
// Clears each vertex's next-iteration accumulator.
// One thread per vertex; the launch must supply exactly one thread per
// vertex in a single block (no bounds check is performed).
__global__ void setPagerankNextToZero(vertex * vertices) {
    vertices[threadIdx.x].pagerank_next = 0;
}
// Sets every vertex's pagerank to the uniform value 1/n_vertices.
// One thread per vertex, single block, no bounds check.
__global__ void initializePageranks(vertex * vertices, int n_vertices) {
    const int v = threadIdx.x;
    vertices[v].pagerank = 1.0/(float)n_vertices;
}
// Distributes each vertex's damped pagerank mass to its successors.
// Vertices with no outgoing links ("dangling" nodes) add their mass to the
// shared *dangling_value instead. One thread per vertex, single block.
__global__ void addToNextPagerank(vertex * vertices, float * dangling_value) {
    const int i = threadIdx.x;
    if (vertices[i].n_successors == 0) {
        // dangling node: contribute the whole damped mass to the global pot
        atomicAdd(dangling_value, 0.85*vertices[i].pagerank);
        return;
    }
    // split the damped mass equally among all successors
    for (int j = 0; j < vertices[i].n_successors; j++) {
        atomicAdd(&(vertices[i].successors[j]->pagerank_next), 0.85*(vertices[i].pagerank)/vertices[i].n_successors);
    }
}
// Adds the redistributed dangling mass plus the random-jump term
// (1 - 0.85) to each vertex, both spread uniformly over all vertices.
__global__ void finalPagerankForIteration(vertex * vertices, int n_vertices, float dangling_value){
    const int v = threadIdx.x;
    vertices[v].pagerank_next += (dangling_value + (1-0.85))/((float)n_vertices);
}
// Commits the accumulated next-iteration value as the current pagerank.
// One thread per vertex, single block.
__global__ void setPageranksFromNext(vertex * vertices) {
    const int v = threadIdx.x;
    vertices[v].pagerank = vertices[v].pagerank_next;
}
// Builds a link graph from "testInput.txt", runs pagerank on the GPU, then
// recomputes it on the host with the original CPU algorithm for comparison.
int main(void) {
    // Error code to check return values for CUDA calls
    cudaError_t err = cudaSuccess;
    /*************************************************************************/
    // build up the graph
    int i,j;
    unsigned int n_vertices = 0;
    unsigned int n_edges = 0;
    unsigned int vertex_from = 0, vertex_to = 0;
    vertex * vertices;
    FILE * fp;
    if ((fp = fopen("testInput.txt", "r")) == NULL) {
        fprintf(stderr,"ERROR: Could not open input file.\n");
        exit(-1);
    }
    // parse input file to count the number of vertices
    // expected format: vertex_from vertex_to
    while (fscanf(fp, "%u %u", &vertex_from, &vertex_to) != EOF) {
        if (vertex_from > n_vertices)
            n_vertices = vertex_from;
        else if (vertex_to > n_vertices)
            n_vertices = vertex_to;
    }
    n_vertices++;
    // CALC NUMBER OF OUTGOING LINKS PER PAGE ***********************************
    unsigned int * outgoingLinks = (unsigned int *) calloc(n_vertices,sizeof(unsigned int));
    fseek(fp,0L, SEEK_SET);
    while(fscanf(fp,"%u %u", &vertex_from, &vertex_to) != EOF) {
        outgoingLinks[vertex_from] += 1;
    }
    // the counts are never read again below; release them (they were leaked)
    free(outgoingLinks);
    // allocate memory for vertices
    //vertices = (vertex *)malloc(n_vertices*sizeof(vertex));
    err = cudaMallocManaged((void **)&vertices, n_vertices*sizeof(vertex));
    // err = cudaMemcpy(d_vertices, vertices, sizeOfVertices, cudaMemcpyHostToDevice);
    // SET Initial values **********************************************************
    unsigned int n_iterations = 25;
    float alpha = 0.85;
    float eps = 0.000001;
    if (!vertices) {
        fprintf(stderr,"Malloc failed for vertices.\n");
        exit(-1);
    }
    memset((void *)vertices, 0, (size_t)(n_vertices*sizeof(vertex)));
    // parse input file to count the number of successors of each vertex
    fseek(fp, 0L, SEEK_SET);
    while (fscanf(fp, "%u %u", &vertex_from, &vertex_to) != EOF) {
        vertices[vertex_from].n_successors++;
        n_edges++;
    }
    // allocate memory for successor pointers
    for (i=0; i<n_vertices; i++) {
        vertices[i].vertex_id = i;
        if (vertices[i].n_successors > 0) {
            // vertices[i].successors = (vertex **)malloc(vertices[i].n_successors*sizeof(vertex *));
            err = cudaMallocManaged((void***)&vertices[i].successors,vertices[i].n_successors*sizeof(vertex*));
            if (!vertices[i].successors) {
                fprintf(stderr,"Malloc failed for successors of vertex %d.\n",i);
                exit(-1);
            }
            memset((void *)vertices[i].successors, 0, (size_t)(vertices[i].n_successors*sizeof(vertex *)));
        }
        else
            vertices[i].successors = NULL;
    }
    // parse input file to set up the successor pointers
    fseek(fp, 0L, SEEK_SET);
    // FIX: this scan previously used "%d %d" with unsigned arguments, which
    // is a format-string mismatch; use "%u %u" like the other scans.
    while (fscanf(fp, "%u %u", &vertex_from, &vertex_to) != EOF) {
        for (i=0; i<vertices[vertex_from].n_successors; i++) {
            if (vertices[vertex_from].successors[i] == NULL) {
                vertices[vertex_from].successors[i] = &vertices[vertex_to];
                break;
            }
            else if (i==vertices[vertex_from].n_successors-1) {
                printf("Setting up the successor pointers of virtex %u failed",vertex_from);
                return -1;
            }
        }
    }
    fclose(fp);
    // PRINT THE DATASTRUCTURE
    /* for(i = 0; i < n_vertices; i++) {
    printf("Page: %d, Suc: ", (vertices+i)->vertex_id);
    for(j = 0; j < (vertices+i)->n_successors; j++) {
    printf("%d, ",(vertices+i)->successors[j]->vertex_id);
    }
    printf("\n");
    }*/
    /*************************************************************************/
    // compute the pagerank on the GPU
    float dangling_value_h = 0;
    float * dangling_value_d;
    err = cudaMalloc((void **)&dangling_value_d, sizeof(float));
    err = cudaMemcpy(dangling_value_d, &dangling_value_h, sizeof(float), cudaMemcpyHostToDevice);
    //err = cudaMallocManaged((void *)&dangling_value, sizeof(float));
    // NOTE: the <<<1,46>>> launches assume at most 46 vertices in one block
    initializePageranks<<<1,46>>>(vertices, n_vertices);
    cudaDeviceSynchronize();
    for(i = 0; i < 23; i++) {
        // set the next pagerank values to 0
        setPagerankNextToZero<<<1,46>>>(vertices);
        cudaDeviceSynchronize();
        // set the dangling value to 0
        dangling_value_h = 0;
        err = cudaMemcpy(dangling_value_d, &dangling_value_h, sizeof(float), cudaMemcpyHostToDevice);
        // initial parallel pagerank_next computation
        addToNextPagerank<<<1,46>>>(vertices, dangling_value_d);
        cudaDeviceSynchronize();
        // get the dangling value
        err = cudaMemcpy(&dangling_value_h, dangling_value_d, sizeof(float), cudaMemcpyDeviceToHost);
        printf("the dangling_value is now: %.3f\n",dangling_value_h);
        // final parallel pagerank_next computation
        finalPagerankForIteration<<<1,46>>>(vertices, n_vertices, dangling_value_h);
        cudaDeviceSynchronize();
        setPageranksFromNext<<<1,46>>>(vertices);
        cudaDeviceSynchronize();
    }
    // print the pagerank values computed on the GPU
    for (i=0;i<n_vertices;i++) {
        printf("AFTER GPU | Vertex %u:\tpagerank = %.6f\n", i, vertices[i].pagerank);
    }
    /*****************************************************************************************/
    // Compute pagerank on host using old method for comparison purposes
    unsigned int i_iteration;
    float value, diff;
    float pr_dangling_factor = alpha / (float)n_vertices;   // pagerank to redistribute from dangling nodes
    float pr_dangling;
    float pr_random_factor = (1-alpha) / (float)n_vertices; // random portion of the pagerank
    float pr_random;
    float pr_sum, pr_sum_inv, pr_sum_dangling;
    float temp;
    // initialization of values before pagerank loop
    for (i=0;i<n_vertices;i++) {
        vertices[i].pagerank = 1 / (float)n_vertices;
        vertices[i].pagerank_next = 0;
    }
    pr_sum = 0;
    pr_sum_dangling = 0;
    for (i=0; i<n_vertices; i++) {
        pr_sum += vertices[i].pagerank;
        if (!vertices[i].n_successors)
            pr_sum_dangling += vertices[i].pagerank;
    }
    i_iteration = 0;
    diff = eps+1;
    //****** transfer data structure to CUDA memory ************************************************
    // NOTE: CAN PROBABLY REMOVE THIS SECTION FOR NOW
    /*size_t sizeOfVertices = n_vertices * sizeof(struct vertex);
    vertex *d_vertices = NULL;
    err = cudaMalloc((void **)&d_vertices, sizeOfVertices);
    err = cudaMemcpy(d_vertices, vertices, sizeOfVertices, cudaMemcpyHostToDevice);
    vertex ** d_testVar;
    cudaMalloc(&d_testVar, 3*sizeof(vertex*));
    cudaMemcpy(d_testVar, vertices[0].successors, 3*sizeof(vertex*), cudaMemcpyHostToDevice);
    cudaMemcpy(&(d_vertices[0].successors),&d_testVar,sizeof(vertex**), cudaMemcpyHostToDevice);
    */
    //*********************************************************************************************
    while ( (diff > eps) && (i_iteration < n_iterations) ) {
        for (i=0;i<n_vertices;i++) {
            if (vertices[i].n_successors)
                value = (alpha/vertices[i].n_successors)*vertices[i].pagerank; //value = vote value after splitting equally
            else
                value = 0;
            //printf("vertex %d: value = %.6f \n",i,value);
            for (j=0;j<vertices[i].n_successors;j++) { // pagerank_next = sum of votes linking to it
                vertices[i].successors[j]->pagerank_next += value;
            }
        }
        // for normalization
        pr_sum_inv = 1/pr_sum;
        // alpha
        pr_dangling = pr_dangling_factor * pr_sum_dangling;
        pr_random = pr_random_factor * pr_sum;
        pr_sum = 0;
        pr_sum_dangling = 0;
        diff = 0;
        for (i=0;i<n_vertices;i++) {
            // update pagerank
            temp = vertices[i].pagerank;
            vertices[i].pagerank = vertices[i].pagerank_next*pr_sum_inv + pr_dangling + pr_random;
            vertices[i].pagerank_next = 0;
            // for normalization in next cycle
            pr_sum += vertices[i].pagerank;
            if (!vertices[i].n_successors)
                pr_sum_dangling += vertices[i].pagerank;
            // convergence
            diff += abs_float(temp - vertices[i].pagerank);
        }
        printf("Iteration %u:\t diff = %.12f\n", i_iteration, diff);
        i_iteration++;
    }
    /*************************************************************************/
    // print the pageranks from this host computation
    for (i=0;i<n_vertices;i++) {
        printf("Vertex %u:\tpagerank = %.6f\n", i, vertices[i].pagerank);
    }
    /*************************************************************************/
    // Free device global memory (was leaked before)
    cudaFree(dangling_value_d);
    // Reset the device and exit
    // cudaDeviceReset causes the driver to clean up all state. While
    // not mandatory in normal operation, it is good practice. It is also
    // needed to ensure correct operation when the application is being
    // profiled. Calling cudaDeviceReset causes all profile data to be
    // flushed before the application exits
    err = cudaDeviceReset();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    printf("Done\n");
    return 0;
}
|
10,961 | #include<stdio.h>
#define N 10
// Round-trips a small int vector through device memory and prints one
// element to show the copy worked.
int main(int argc, char** argv){
    int vec_in[N] ={6,1,7,3,2,9,10,5,4,8};
    int vec_out[N];
    int* d_vec;
    cudaMalloc(&d_vec, N*sizeof(int));
    cudaMemcpy(d_vec, vec_in, N*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(vec_out, d_vec, N*sizeof(int), cudaMemcpyDeviceToHost);
    printf("vec_out[3]= %d \n",vec_out[3]);
    // release the device buffer (it was leaked in the original)
    cudaFree(d_vec);
    return 0;
}
|
10,962 | #include <stdio.h>
#include <sys/time.h>
#define initTimer struct timeval tv1, tv2; struct timezone tz
#define startTimer gettimeofday(&tv1, &tz)
#define stopTimer gettimeofday(&tv2, &tz)
#define tpsCalcul (tv2.tv_sec-tv1.tv_sec)*1000000L + (tv2.tv_usec-tv1.tv_usec)
#define MAX_DIM_GRID 65535
#define MAX_DIM_BLOCK 1024
long tailleVecteur ;
/* KERNEL CUDA: res[i] = vec[i] + a for every i < N, one element per thread. */
__global__ void add_vec_scalaire_gpu(int *vec, int *res, int a, long N) {
    const long idx = (long)blockIdx.x * (long)blockDim.x + (long)threadIdx.x;
    if (idx >= N) {
        return; // surplus threads past the end of the vector do nothing
    }
    res[idx] = vec[idx] + a;
}
/* CPU reference: res[i] = vec[i] + a for every i < N. */
void add_vec_scalaire_cpu(int *vec, int *res, int a, long N) {
    for (int k = 0; k < N; k++)
        res[k] = vec[k] + a;
}
// Adds a scalar to a vector on GPU and CPU, checks the GPU result and
// reports the time taken by each version.
int main(int argc, char *argv[]) {
    int alpha = 10;
    if (argc < 2) {
        printf("Erreur, manque un argument\n");
        exit(0);
    }
    tailleVecteur = atol(argv[1]);
    long blocksize = 1;
    if (argc ==3) {
        blocksize = atoi(argv[2]);
    }
    int *vecteur;
    int *resultat;
    int *cudaVec;
    int *cudaRes;
    initTimer;
    long size = sizeof(int)*tailleVecteur;
    vecteur = (int *)malloc(size);
    resultat = (int *)malloc(size);
    if (vecteur == NULL) {
        printf("Allocation memoire qui pose probleme (vecteur) \n");
    }
    if (resultat == NULL) {
        printf("Allocation memoire qui pose probleme (resultat) \n");
    }
    long i ;
    for (i= 0 ; i < tailleVecteur ; i++) {
        vecteur[i] = rand() % 100;
        resultat[i] = 0;
    }
    // cudaSetDevice(1);
    if (cudaMalloc((void **)&cudaVec, size) == cudaErrorMemoryAllocation) {
        printf("Allocation memoire qui pose probleme (cudaVec) \n");
    }
    if (cudaMalloc((void **)&cudaRes, size) == cudaErrorMemoryAllocation) {
        printf("Allocation memoire qui pose probleme (cudaRes) \n");
    }
    long dimBlock = blocksize;
    long dimGrid = tailleVecteur/blocksize;
    if ((tailleVecteur % blocksize) != 0) {
        dimGrid++; // one extra block for the remainder
    }
    int res = cudaMemcpy(&cudaVec[0], &vecteur[0], size, cudaMemcpyHostToDevice);
    printf("Copy CPU -> GPU %d \n",res);
    startTimer;
    add_vec_scalaire_gpu<<<dimGrid, dimBlock>>>(cudaVec, cudaRes, alpha, tailleVecteur);
    // Kernel launches are asynchronous: without this synchronization the
    // timer only measured the launch overhead, not the kernel execution.
    cudaDeviceSynchronize();
    stopTimer;
    cudaMemcpy(&resultat[0], &cudaRes[0], size, cudaMemcpyDeviceToHost);
    /* Test bon fonctionnement */
    bool ok = true;
    int indice = -1;
    for (i= 0 ; i < tailleVecteur ; i++) {
        // printf("Resultat GPU %d Resultat CPU %d \n",resultat[i], vecteur[i]+alpha);
        if (resultat[i] != vecteur[i] + alpha) {
            ok = false;
            if (indice ==-1) {
                indice = i; // remember the first failing index
            }
        }
    }
    printf("------ ");
    printf("dimGrid %ld dimBlock %ld ",dimGrid, dimBlock);
    if (ok) {
        printf("Resultat ok\n");
    } else {
        printf("resultat NON ok (%d)\n", indice);
    }
    printf("Vecteur %ld => Temps calcul GPU %ld \n", tailleVecteur, tpsCalcul);
    startTimer;
    add_vec_scalaire_cpu (vecteur, resultat, alpha, tailleVecteur);
    stopTimer;
    printf("Vecteur %ld => Temps total CPU %ld \n", tailleVecteur, tpsCalcul);
    cudaFree(cudaVec);
    cudaFree(cudaRes);
    // release host buffers (they were leaked in the original)
    free(vecteur);
    free(resultat);
    return 0;
}
|
10,963 | /*
* Copyright (c) 2012, Kristopher Wuollett
* All rights reserved.
*
* This file is part of kriswuollett/compute.
*
* kriswuollett/compute is free software: you can redistribute it and/or modify
* it under the terms of the BSD 3-Clause License as written in the COPYING
* file.
*/
#ifndef _CUDA_ADD_GPU_CU_
#define _CUDA_ADD_GPU_CU_
#include <cuda_runtime.h>
// Element-wise vector addition: c[i] = a[i] + b[i] for i < len.
__global__
void add_gpu_kernel(float * a, float * b, float * c, int len)
{
    const int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx < len) {
        c[idx] = a[idx] + b[idx];
    }
}
// Declare the launcher with C linkage so plain-C translation units can call it.
extern "C"
void add_gpu(dim3 &dimGrid, dim3 &dimBlock,
float * a, float * b, float * c, int len);
// Definition picks up the extern "C" linkage from the declaration above.
// Host-side wrapper: launches add_gpu_kernel with the given configuration.
// All three pointers must reference device memory; the launch is asynchronous.
void add_gpu(dim3 &dimGrid, dim3 &dimBlock,
float * a, float * b, float * c, int len)
{
add_gpu_kernel<<<dimGrid, dimBlock>>>(a, b, c, len);
}
#endif // define _CUDA_ADD_GPU_CU_
|
10,964 | #include <cuda_runtime.h>
#include <stdio.h>
/* Naive kernel for transposing a rectangular host array. */
const int TILE_DIM = 32;
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
cudaGetErrorString(error)); \
exit(1); \
} \
}
void initialData(float *in, const int size)
{ // fill the array with pseudo-random values in [0.0, 25.5]
    for (int k = 0; k < size; k++)
        in[k] = (float)(rand() & 0xFF) / 10.0f;
}
void printData(float *in, const int size)
{ // dump the matrix contents, space separated, followed by a newline
    for (int k = 0; k < size; k++)
        printf("%3.0f ", in[k]);
    printf("\n");
}
void checkResult(float *hostRef, float *gpuRef, int rows, int cols)
{ // verify the GPU result against the host reference; report the first mismatch
    double epsilon = 1.0E-8;
    bool match = 1;
    // a single row-major scan visits elements in the same order as the
    // original nested loops, so the first mismatch reported is identical
    for (int index = 0; index < rows * cols && match; index++)
    {
        if (abs(hostRef[index] - gpuRef[index]) > epsilon)
        {
            match = 0;
            int i = index / cols;
            int j = index % cols;
            printf("different on (%d, %d) (offset=%d) element in transposed matrix: host %f gpu %f\n", i, j, index, hostRef[index], gpuRef[index]);
        }
    }
    if (!match) printf("Arrays do not match.\n\n");
}
void transposeHost(float *out, float *in, const int nrows, const int ncols)
{ // CPU reference transpose: out(c,r) = in(r,c), where in is read with
  // row stride nrows and out is written with row stride ncols
    for (int r = 0; r < ncols; ++r)
        for (int c = 0; c < nrows; ++c)
            out[c * ncols + r] = in[r * nrows + c];
}
__global__ void justcopy(float *out, float *in, const int nrows, const int ncols)
{ // straight element copy (no transposition) -- used as a bandwidth baseline
    // matrix coordinate handled by this thread
    const unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
    // copy in place with a boundary test
    if (x < nrows && y < ncols)
        out[x * ncols + y] = in[x * ncols + y];
}
__global__ void naivetranspose(float *out, float *in, const int nrows, const int ncols)
{ // naive routine to transpose a matrix -- no optimisations considered
// get matrix coordinate (ix,iy)
unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y;
// transpose with boundary test
// Threads with consecutive threadIdx.x have consecutive ix, so the read
// in[iy * nrows + ix] touches consecutive addresses, while the write
// out[ix * ncols + iy] is strided by ncols -- hence "naive".
if (ix < nrows && iy < ncols)
{ out[ix * ncols + iy] = in[iy * nrows + ix]; }
}
// Tiled transpose without shared memory: each block covers a
// blockDim.x x blockDim.x tile, processed blockDim.y rows per loop step
// (so blockDim.y should divide blockDim.x). Note the y coordinate is
// deliberately built from blockDim.x (the tile side), not blockDim.y.
// nrows/ncols are ignored: width is derived from the grid, which
// presumably assumes a square matrix exactly covered by the launch --
// TODO confirm against the launch configuration in main.
__global__ void lessnaivetranspose(float *out, float *in, const int nrows, const int ncols)
{
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.x + threadIdx.y;
unsigned int width = gridDim.x * blockDim.x;
for (int j=0; j<blockDim.x; j+=blockDim.y) {
out[x*width + (y+j)] = in[(y+j)*width + x];
}
}
// Shared-memory tiled transpose: a TILE_DIM x TILE_DIM tile is staged in
// shared memory so both the global read and the global write are along
// consecutive addresses for consecutive threads. Requires blockDim.x ==
// TILE_DIM, with blockDim.y dividing blockDim.x; the grid must cover the
// matrix exactly (no bounds checks). nrows/ncols are unused.
__global__ void opttranspose(float *out, float *in, const int nrows, const int ncols)
{
    // The +1 column of padding skews rows across shared-memory banks so the
    // column read below (tile[threadIdx.x][...]) is conflict-free; without
    // it, all threads of a warp hit the same bank and the reads serialize.
    __shared__ float tile[TILE_DIM][TILE_DIM + 1];
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.x + threadIdx.y;
    int width = gridDim.x * blockDim.x;
    // stage one tile: coalesced reads from global memory
    for (int j = 0; j < blockDim.x; j += blockDim.y)
        tile[threadIdx.y+j][threadIdx.x] = in[(y+j)*width + x];
    __syncthreads(); // tile fully populated before anyone reads it back
    x = blockIdx.y * blockDim.x + threadIdx.x; // transpose block offset
    y = blockIdx.x * blockDim.x + threadIdx.y;
    // write the transposed tile: coalesced writes to global memory
    for (int j = 0; j < blockDim.x; j += blockDim.y)
        out[(y+j)*width + x] = tile[threadIdx.x][threadIdx.y + j];
}
// Benchmarks a plain copy kernel and a transpose kernel on an
// nrows x ncols float matrix, reporting effective bandwidth and checking
// the transposed result against a CPU reference.
int main(int argc, char **argv)
{
    // set up device
    int dev = 0;
    cudaDeviceProp deviceProp;
    CHECK(cudaGetDeviceProperties(&deviceProp, dev));
    printf("%s starting transpose at ", argv[0]);
    printf("device %d: %s ", dev, deviceProp.name);
    CHECK(cudaSetDevice(dev));
    // initialise CUDA timing
    float milli;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    bool iprint = 0;
    // set up array size 1024
    int nrows = 1 << 10;
    int ncols = 1 << 10;
    int blockx = 16;
    int blocky = 16;
    // interpret command line arguments if present
    if (argc > 1) iprint = atoi(argv[1]);
    if (argc > 2) blockx = atoi(argv[2]);
    if (argc > 3) blocky = atoi(argv[3]);
    if (argc > 4) nrows = atoi(argv[4]);
    if (argc > 5) ncols = atoi(argv[5]);
    printf(" with matrix nrows %d ncols %d\n", nrows, ncols);
    size_t ncells = nrows * ncols;
    size_t nBytes = ncells * sizeof(float);
    // execution configuration
    dim3 block (blockx, blocky);
    dim3 grid ((nrows + block.x - 1) / block.x, (ncols + block.y - 1) / block.y);
    dim3 grid_opt (nrows/block.x, ncols/block.x); // tiled kernels use block.x-square tiles
    // allocate host memory
    float *h_A = (float *)malloc(nBytes);
    float *hostRef = (float *)malloc(nBytes);
    float *gpuRef = (float *)malloc(nBytes);
    // initialize host array
    initialData(h_A, nrows * ncols);
    // transpose at host side
    transposeHost(hostRef, h_A, nrows, ncols);
    // allocate device memory
    float *d_A, *d_C;
    CHECK(cudaMalloc((float**)&d_A, nBytes));
    CHECK(cudaMalloc((float**)&d_C, nBytes));
    // copy data from host to device
    CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));
    // execute justcopy kernel
    CHECK(cudaMemset(d_C, 0, nBytes));
    memset(gpuRef, 0, nBytes);
    cudaEventRecord(start); // start timing
    justcopy<<<grid, block>>>(d_C, d_A, nrows, ncols);
    CHECK(cudaDeviceSynchronize());
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&milli, start, stop); // stop timing actual kernel execution
    CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
    if(iprint) printData(gpuRef, nrows * ncols);
    float ibnd = 2 * ncells * sizeof(float) / (1024.0 * 1024.0 * 1024.0) / (milli/1000); // convert bytes and millisec to GB/sec
    printf("justcopy kernel elapsed %f msec <<< grid (%d,%d) block (%d,%d)>>> effective bandwidth %f GB/s\n", milli, grid.x, grid.y, block.x, block.y, ibnd);
    // execute transpose kernel
    CHECK(cudaMemset(d_C, 0, nBytes));
    memset(gpuRef, 0, nBytes);
    cudaEventRecord(start); // start timing
    // naivetranspose<<<grid, block>>>(d_C, d_A, nrows, ncols);
    // lessnaivetranspose<<<grid_opt, block>>>(d_C, d_A, nrows, ncols);
    opttranspose<<<grid_opt, block>>>(d_C, d_A, nrows, ncols);
    CHECK(cudaDeviceSynchronize());
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&milli, start, stop); // stop timing actual kernel execution
    CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
    if(iprint) printData(gpuRef, ncells);
    ibnd = 2 * ncells * sizeof(float) / (1024.0 * 1024.0 * 1024.0) / (milli/1000);
    printf("optimised transpose elapsed %f msec <<< grid (%d,%d) block (%d,%d)>>> effective bandwidth %f GB/s\n", milli, grid_opt.x, grid_opt.y, block.x, block.y, ibnd);
    checkResult(hostRef, gpuRef, ncols, nrows);
    // destroy timing events (they were leaked in the original)
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // free host and device memory
    CHECK(cudaFree(d_A));
    CHECK(cudaFree(d_C));
    free(h_A);
    free(hostRef);
    free(gpuRef);
    // reset device
    CHECK(cudaDeviceReset());
    return EXIT_SUCCESS;
}
|
10,965 | #include <iostream>
#include <stdio.h>
// Empty placeholder kernel: demonstrates a minimal device launch; does nothing.
__global__ void kernel(void){
}
// Minimal CUDA program: launches an empty kernel and prints two messages.
int main(void){
    kernel<<<1,1>>>();
    // Kernel launches are asynchronous; wait for completion so the program
    // does not exit while the device is still working and so any launch
    // error would be surfaced here.
    cudaDeviceSynchronize();
    printf("Hello World!\n");
    printf("This is the first CUDA C code!\n");
    return 0;
}
|
10,966 | #include "includes.h"
// Flattens a (row, col) coordinate into a row-major index with row stride W.
// H is unused here; kept so callers can pass both image dimensions uniformly.
__device__ size_t GIDX(size_t row, size_t col, int H, int W) {
return row * W + col;
}
// Central-difference partial derivatives of an H x W grayscale image:
// d_f1dx/d_f1dy receive (f(x+1)-f(x-1))/2 and (f(y+1)-f(y-1))/2.
// Border pixels are skipped (their outputs are left untouched).
__global__ void kernel_partials( float* d_f1ptr, float* d_f1dx, float* d_f1dy, int H, int W ) {
size_t row = threadIdx.y + blockDim.y * blockIdx.y;
size_t col = threadIdx.x + blockDim.x * blockIdx.x;
size_t idx = GIDX(row, col, H, W);
// The stencil reads row+1 / col+1, so the LAST row and column must be
// excluded too: the original guard (row >= H) let row == H-1 through and
// read one full row past the end of the buffer.
if (row + 1 >= (size_t)H || row <= 1 || col + 1 >= (size_t)W || col <= 1) {
return;
}
float gray_x1 = d_f1ptr[GIDX(row, col - 1, H, W)];
float gray_x2 = d_f1ptr[GIDX(row, col + 1, H, W)];
float gray_y1 = d_f1ptr[GIDX(row - 1, col, H, W)];
float gray_y2 = d_f1ptr[GIDX(row + 1, col, H, W)];
d_f1dx[idx] = (gray_x2 - gray_x1) / 2.0f;
d_f1dy[idx] = (gray_y2 - gray_y1) / 2.0f;
}
10,967 | #include <stdio.h>
# define N 64
# define TPB 32
// Maps index i in [0, n-1] linearly onto [0.0, 1.0].
__device__ float scale(int i, int n)
{
    float num = (float)i;
    return num/(n-1);
}
// Euclidean distance between two scalars: sqrt((x2-x1)^2).
__device__ float distance(float x1, float x2)
{
    const float diff = x2 - x1;
    return sqrt(diff * diff);
}
// For each thread, stores the distance from ref to its scaled position.
// No bounds guard: the launch is expected to supply exactly len threads.
__global__ void distanceKernel(float *d_out, float ref, int len)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const float x = scale(idx, len);
    d_out[idx] = distance(x, ref);
    printf("i = %2d: dist from %f to %f is %f.\n", idx, ref, x, d_out[idx]);
}
// Computes per-point distances to a reference value on the device,
// printing each from within the kernel.
int main()
{
    float const ref = 0.5f;
    // Declare pointer for an array of floats
    float *d_out = 0;
    // Allocate device memory to store the output array
    cudaMalloc(&d_out, N * sizeof(float));
    // (Removed a stray exit(0) here: it terminated the program before the
    // kernel below ever launched, and leaked d_out.)
    // Launch kernel to compute and store distance values
    distanceKernel<<<N/TPB, TPB>>>(d_out, ref, N);
    // Wait for the kernel so its printf output is flushed before exit.
    cudaDeviceSynchronize();
    cudaFree(d_out); // Free the memory.
}
10,968 | #include <iostream>
// Forward declaration; the implementation lives in a separate translation unit.
void run_cuda();
// Entry point: delegates all GPU work to run_cuda().
int main() {
    run_cuda();
    return 0;
}
10,969 | #include "includes.h"
// Interleaved-pair block reduction: sums this block's slice of g_idata
// in place and writes the per-block total to g_odata[blockIdx.x].
// blockDim.x must be a power of two. Fixes the original's early
// `return` for idx >= n: with a partial final block, the returned threads
// skipped __syncthreads() while the rest waited -- undefined behavior.
// All threads now reach every barrier, and partial blocks sum correctly.
__global__ void reduceNeighboredLess(int *g_idata, int *g_odata, unsigned int n)
{
    // set the thread id.
    unsigned int tid = threadIdx.x;
    unsigned int base = blockIdx.x * blockDim.x;
    // convert global data pointer to the local pointer of this block.
    int *idata = g_idata + base;
    // number of valid elements owned by this block (0 for surplus blocks).
    unsigned int valid = (base < n) ? min(blockDim.x, n - base) : 0;
    for (int stride = 1; stride < blockDim.x; stride *= 2)
    {
        // convert tid into local array index (left element of this pair).
        int index = 2 * stride * tid;
        // add only when both elements of the pair are inside the valid range.
        if (index < blockDim.x && index + stride < valid)
        {
            idata[index] += idata[index + stride];
        }
        // synchronize within threadblock -- reached by ALL threads.
        __syncthreads();
    }
    // write result for this block to global mem (skip empty surplus blocks).
    if (tid == 0 && valid > 0)
    {
        g_odata[blockIdx.x] = idata[0];
    }
}
10,970 | #include <stdio.h>
#include <iostream>
#include <cuda_profiler_api.h>
//#include <cutil.h>
#include <cuda_runtime.h>
#include <string>
float* h_A;
float* h_B;
float* h_C;
float* h_res;
float* d_A;
float* d_B;
float* d_C;
float* d_res;
// Microbenchmark kernel. The first `div` lanes of each warp execute a long
// stream of FP64 adds written in inline PTX: 16 accumulator registers
// (%r113-%r128) are each repeatedly incremented by the constant in %r129,
// giving 16 independent loop-carried dependency chains of 200 adds per
// iteration of the k-loop, repeated n times. Presumably used to measure
// FP64 add throughput/latency as a function of warp divergence (div =
// active lanes per warp) -- TODO confirm against the harness in main.
__global__
//void compute(const float* A, const float* B, const float* C, float* D, int n) {
void shared_latency(float* D, int n, int div) {
int tid = blockDim.x * blockIdx.x + threadIdx.x; // global thread id
double I1 = tid * 2.0; // per-thread value eventually stored through D
int thread_id = threadIdx.x % 32; // lane index within the warp
// only the first `div` lanes of each warp run the PTX workload
if (thread_id < div) {
// declare and initialise the 16 accumulators plus the shared addend %r129
__asm volatile (
" .reg .f64 %r129;\n\t"
" .reg .f64 %r113;\n\t"
" .reg .f64 %r114;\n\t"
" .reg .f64 %r115;\n\t"
" .reg .f64 %r116;\n\t"
" .reg .f64 %r117;\n\t"
" .reg .f64 %r118;\n\t"
" .reg .f64 %r119;\n\t"
" .reg .f64 %r120;\n\t"
" .reg .f64 %r121;\n\t"
" .reg .f64 %r122;\n\t"
" .reg .f64 %r123;\n\t"
" .reg .f64 %r124;\n\t"
" .reg .f64 %r125;\n\t"
" .reg .f64 %r126;\n\t"
" .reg .f64 %r127;\n\t"
" .reg .f64 %r128;\n\t"
"mov.f64 %r129, 4.4;\n\t"
"mov.f64 %r113, %r129;\n\t"
"mov.f64 %r114, 2.2;\n\t"
"mov.f64 %r115, 3.3;\n\t"
"mov.f64 %r116, 1.23;\n\t"
"mov.f64 %r117, 2.42;\n\t"
"mov.f64 %r118, 3.34;\n\t"
"mov.f64 %r119, 5.62;\n\t"
"mov.f64 %r120, 2.56;\n\t"
"mov.f64 %r121, 1.56;\n\t"
"mov.f64 %r122, 2.56;\n\t"
"mov.f64 %r123, 5.56;\n\t"
"mov.f64 %r124, 8.56;\n\t"
"mov.f64 %r125, 3.56;\n\t"
"mov.f64 %r126, 5.56;\n\t"
"mov.f64 %r127, 6.56;\n\t"
"mov.f64 %r128, 0.56;\n\t"
);
// n iterations of 200 unrolled self-accumulating adds (rX = r129 + rX)
for (int k = 0; k < n; k++) {
__asm volatile (
"add.rn.f64 %r121, %r129, %r121;\n\t"
"add.rn.f64 %r122, %r129, %r122;\n\t"
"add.rn.f64 %r123, %r129, %r123;\n\t"
"add.rn.f64 %r124, %r129, %r124;\n\t"
"add.rn.f64 %r125, %r129, %r125;\n\t"
"add.rn.f64 %r126, %r129, %r126;\n\t"
"add.rn.f64 %r127, %r129, %r127;\n\t"
"add.rn.f64 %r128, %r129, %r128;\n\t"
"add.rn.f64 %r113, %r129, %r113;\n\t"
"add.rn.f64 %r114, %r129, %r114;\n\t"
"add.rn.f64 %r115, %r129, %r115;\n\t"
"add.rn.f64 %r116, %r129, %r116;\n\t"
"add.rn.f64 %r117, %r129, %r117;\n\t"
"add.rn.f64 %r118, %r129, %r118;\n\t"
"add.rn.f64 %r119, %r129, %r119;\n\t"
"add.rn.f64 %r120, %r129, %r120;\n\t"
"add.rn.f64 %r121, %r129, %r121;\n\t"
"add.rn.f64 %r122, %r129, %r122;\n\t"
"add.rn.f64 %r123, %r129, %r123;\n\t"
"add.rn.f64 %r124, %r129, %r124;\n\t"
"add.rn.f64 %r125, %r129, %r125;\n\t"
"add.rn.f64 %r126, %r129, %r126;\n\t"
"add.rn.f64 %r127, %r129, %r127;\n\t"
"add.rn.f64 %r128, %r129, %r128;\n\t"
"add.rn.f64 %r113, %r129, %r113;\n\t"
"add.rn.f64 %r114, %r129, %r114;\n\t"
"add.rn.f64 %r115, %r129, %r115;\n\t"
"add.rn.f64 %r116, %r129, %r116;\n\t"
"add.rn.f64 %r117, %r129, %r117;\n\t"
"add.rn.f64 %r118, %r129, %r118;\n\t"
"add.rn.f64 %r119, %r129, %r119;\n\t"
"add.rn.f64 %r120, %r129, %r120;\n\t"
"add.rn.f64 %r121, %r129, %r121;\n\t"
"add.rn.f64 %r122, %r129, %r122;\n\t"
"add.rn.f64 %r123, %r129, %r123;\n\t"
"add.rn.f64 %r124, %r129, %r124;\n\t"
"add.rn.f64 %r125, %r129, %r125;\n\t"
"add.rn.f64 %r126, %r129, %r126;\n\t"
"add.rn.f64 %r127, %r129, %r127;\n\t"
"add.rn.f64 %r128, %r129, %r128;\n\t"
"add.rn.f64 %r113, %r129, %r113;\n\t"
"add.rn.f64 %r114, %r129, %r114;\n\t"
"add.rn.f64 %r115, %r129, %r115;\n\t"
"add.rn.f64 %r116, %r129, %r116;\n\t"
"add.rn.f64 %r117, %r129, %r117;\n\t"
"add.rn.f64 %r118, %r129, %r118;\n\t"
"add.rn.f64 %r119, %r129, %r119;\n\t"
"add.rn.f64 %r120, %r129, %r120;\n\t"
"add.rn.f64 %r121, %r129, %r121;\n\t"
"add.rn.f64 %r122, %r129, %r122;\n\t"
"add.rn.f64 %r123, %r129, %r123;\n\t"
"add.rn.f64 %r124, %r129, %r124;\n\t"
"add.rn.f64 %r125, %r129, %r125;\n\t"
"add.rn.f64 %r126, %r129, %r126;\n\t"
"add.rn.f64 %r127, %r129, %r127;\n\t"
"add.rn.f64 %r128, %r129, %r128;\n\t"
"add.rn.f64 %r113, %r129, %r113;\n\t"
"add.rn.f64 %r114, %r129, %r114;\n\t"
"add.rn.f64 %r115, %r129, %r115;\n\t"
"add.rn.f64 %r116, %r129, %r116;\n\t"
"add.rn.f64 %r117, %r129, %r117;\n\t"
"add.rn.f64 %r118, %r129, %r118;\n\t"
"add.rn.f64 %r119, %r129, %r119;\n\t"
"add.rn.f64 %r120, %r129, %r120;\n\t"
"add.rn.f64 %r121, %r129, %r121;\n\t"
"add.rn.f64 %r122, %r129, %r122;\n\t"
"add.rn.f64 %r123, %r129, %r123;\n\t"
"add.rn.f64 %r124, %r129, %r124;\n\t"
"add.rn.f64 %r125, %r129, %r125;\n\t"
"add.rn.f64 %r126, %r129, %r126;\n\t"
"add.rn.f64 %r127, %r129, %r127;\n\t"
"add.rn.f64 %r128, %r129, %r128;\n\t"
"add.rn.f64 %r113, %r129, %r113;\n\t"
"add.rn.f64 %r114, %r129, %r114;\n\t"
"add.rn.f64 %r115, %r129, %r115;\n\t"
"add.rn.f64 %r116, %r129, %r116;\n\t"
"add.rn.f64 %r117, %r129, %r117;\n\t"
"add.rn.f64 %r118, %r129, %r118;\n\t"
"add.rn.f64 %r119, %r129, %r119;\n\t"
"add.rn.f64 %r120, %r129, %r120;\n\t"
"add.rn.f64 %r121, %r129, %r121;\n\t"
"add.rn.f64 %r122, %r129, %r122;\n\t"
"add.rn.f64 %r123, %r129, %r123;\n\t"
"add.rn.f64 %r124, %r129, %r124;\n\t"
"add.rn.f64 %r125, %r129, %r125;\n\t"
"add.rn.f64 %r126, %r129, %r126;\n\t"
"add.rn.f64 %r127, %r129, %r127;\n\t"
"add.rn.f64 %r128, %r129, %r128;\n\t"
"add.rn.f64 %r113, %r129, %r113;\n\t"
"add.rn.f64 %r114, %r129, %r114;\n\t"
"add.rn.f64 %r115, %r129, %r115;\n\t"
"add.rn.f64 %r116, %r129, %r116;\n\t"
"add.rn.f64 %r117, %r129, %r117;\n\t"
"add.rn.f64 %r118, %r129, %r118;\n\t"
"add.rn.f64 %r119, %r129, %r119;\n\t"
"add.rn.f64 %r120, %r129, %r120;\n\t"
"add.rn.f64 %r121, %r129, %r121;\n\t"
"add.rn.f64 %r122, %r129, %r122;\n\t"
"add.rn.f64 %r123, %r129, %r123;\n\t"
"add.rn.f64 %r124, %r129, %r124;\n\t"
"add.rn.f64 %r125, %r129, %r125;\n\t"
"add.rn.f64 %r126, %r129, %r126;\n\t"
"add.rn.f64 %r127, %r129, %r127;\n\t"
"add.rn.f64 %r128, %r129, %r128;\n\t"
"add.rn.f64 %r113, %r129, %r113;\n\t"
"add.rn.f64 %r114, %r129, %r114;\n\t"
"add.rn.f64 %r115, %r129, %r115;\n\t"
"add.rn.f64 %r116, %r129, %r116;\n\t"
"add.rn.f64 %r117, %r129, %r117;\n\t"
"add.rn.f64 %r118, %r129, %r118;\n\t"
"add.rn.f64 %r119, %r129, %r119;\n\t"
"add.rn.f64 %r120, %r129, %r120;\n\t"
"add.rn.f64 %r121, %r129, %r121;\n\t"
"add.rn.f64 %r122, %r129, %r122;\n\t"
"add.rn.f64 %r123, %r129, %r123;\n\t"
"add.rn.f64 %r124, %r129, %r124;\n\t"
"add.rn.f64 %r125, %r129, %r125;\n\t"
"add.rn.f64 %r126, %r129, %r126;\n\t"
"add.rn.f64 %r127, %r129, %r127;\n\t"
"add.rn.f64 %r128, %r129, %r128;\n\t"
"add.rn.f64 %r113, %r129, %r113;\n\t"
"add.rn.f64 %r114, %r129, %r114;\n\t"
"add.rn.f64 %r115, %r129, %r115;\n\t"
"add.rn.f64 %r116, %r129, %r116;\n\t"
"add.rn.f64 %r117, %r129, %r117;\n\t"
"add.rn.f64 %r118, %r129, %r118;\n\t"
"add.rn.f64 %r119, %r129, %r119;\n\t"
"add.rn.f64 %r120, %r129, %r120;\n\t"
"add.rn.f64 %r121, %r129, %r121;\n\t"
"add.rn.f64 %r122, %r129, %r122;\n\t"
"add.rn.f64 %r123, %r129, %r123;\n\t"
"add.rn.f64 %r124, %r129, %r124;\n\t"
"add.rn.f64 %r125, %r129, %r125;\n\t"
"add.rn.f64 %r126, %r129, %r126;\n\t"
"add.rn.f64 %r127, %r129, %r127;\n\t"
"add.rn.f64 %r128, %r129, %r128;\n\t"
"add.rn.f64 %r113, %r129, %r113;\n\t"
"add.rn.f64 %r114, %r129, %r114;\n\t"
"add.rn.f64 %r115, %r129, %r115;\n\t"
"add.rn.f64 %r116, %r129, %r116;\n\t"
"add.rn.f64 %r117, %r129, %r117;\n\t"
"add.rn.f64 %r118, %r129, %r118;\n\t"
"add.rn.f64 %r119, %r129, %r119;\n\t"
"add.rn.f64 %r120, %r129, %r120;\n\t"
"add.rn.f64 %r121, %r129, %r121;\n\t"
"add.rn.f64 %r122, %r129, %r122;\n\t"
"add.rn.f64 %r123, %r129, %r123;\n\t"
"add.rn.f64 %r124, %r129, %r124;\n\t"
"add.rn.f64 %r125, %r129, %r125;\n\t"
"add.rn.f64 %r126, %r129, %r126;\n\t"
"add.rn.f64 %r127, %r129, %r127;\n\t"
"add.rn.f64 %r128, %r129, %r128;\n\t"
"add.rn.f64 %r113, %r129, %r113;\n\t"
"add.rn.f64 %r114, %r129, %r114;\n\t"
"add.rn.f64 %r115, %r129, %r115;\n\t"
"add.rn.f64 %r116, %r129, %r116;\n\t"
"add.rn.f64 %r117, %r129, %r117;\n\t"
"add.rn.f64 %r118, %r129, %r118;\n\t"
"add.rn.f64 %r119, %r129, %r119;\n\t"
"add.rn.f64 %r120, %r129, %r120;\n\t"
"add.rn.f64 %r121, %r129, %r121;\n\t"
"add.rn.f64 %r122, %r129, %r122;\n\t"
"add.rn.f64 %r123, %r129, %r123;\n\t"
"add.rn.f64 %r124, %r129, %r124;\n\t"
"add.rn.f64 %r125, %r129, %r125;\n\t"
"add.rn.f64 %r126, %r129, %r126;\n\t"
"add.rn.f64 %r127, %r129, %r127;\n\t"
"add.rn.f64 %r128, %r129, %r128;\n\t"
"add.rn.f64 %r113, %r129, %r113;\n\t"
"add.rn.f64 %r114, %r129, %r114;\n\t"
"add.rn.f64 %r115, %r129, %r115;\n\t"
"add.rn.f64 %r116, %r129, %r116;\n\t"
"add.rn.f64 %r117, %r129, %r117;\n\t"
"add.rn.f64 %r118, %r129, %r118;\n\t"
"add.rn.f64 %r119, %r129, %r119;\n\t"
"add.rn.f64 %r120, %r129, %r120;\n\t"
"add.rn.f64 %r121, %r129, %r121;\n\t"
"add.rn.f64 %r122, %r129, %r122;\n\t"
"add.rn.f64 %r123, %r129, %r123;\n\t"
"add.rn.f64 %r124, %r129, %r124;\n\t"
"add.rn.f64 %r125, %r129, %r125;\n\t"
"add.rn.f64 %r126, %r129, %r126;\n\t"
"add.rn.f64 %r127, %r129, %r127;\n\t"
"add.rn.f64 %r128, %r129, %r128;\n\t"
);
}
// double temp;
// float output = 0.0;
// asm("add.rn.f64 %0, r113, r114" : "=d"(temp));
// asm("cvt.rn.f32.f64 %0, %1" : "=f"(output) : "d"(temp));
// printf("%lf \n", output);
}
__syncthreads();
// if ((blockDim.x * blockIdx.x + threadIdx.x) == 0)
// every thread stores its own I1 to the single location *D; the final
// value is whichever thread happens to write last (intentionally racy).
*D = I1;
__syncthreads();
}
// Prints command-line usage. Kept in sync with main(), which requires
// exactly five arguments (argc == 6): the original message accidentally
// concatenated two string literals and omitted <divergence> and <stride>.
void usage() {
    std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations> <divergence (threads active per warp)> <stride>" << std::endl;
}
// Host harness: parses the launch configuration from the command line,
// launches the shared_latency kernel (defined above this excerpt), and
// prints the elapsed GPU time in milliseconds on stdout.
// Expects five arguments: num_blocks num_threads_per_block iterations
// divergence stride.
int main(int argc, char **argv)
{
if (argc != 6) {
usage();
exit(1);
}
int num_blocks = atoi(argv[1]);
int num_threads_per_block = atoi(argv[2]);
int iterations = atoi(argv[3]);
// NOTE(review): 'divergence' is forwarded to the kernel below, but
// 'stride' is parsed and never used in this excerpt — confirm whether it
// is consumed elsewhere or is dead.
int divergence = atoi(argv[4]);
int stride = atoi(argv[5]);
// h_A = new float(2.0);
// h_B = new float(3.0);
// h_C = new float(4.0);
// cudaMalloc((void**)&d_A, sizeof(float));
// cudaMalloc((void**)&d_B, sizeof(float));
// cudaMalloc((void**)&d_C, sizeof(float));
// d_res is a file-scope device pointer (declared above this excerpt);
// the kernel writes a single double into it.
cudaMalloc((void**)&d_res, sizeof(double));
// cudaMemcpy(d_A, h_A, sizeof(float), cudaMemcpyHostToDevice);
// cudaMemcpy(d_B, h_B, sizeof(float), cudaMemcpyHostToDevice);
// cudaMemcpy(d_C, h_C, sizeof(float), cudaMemcpyHostToDevice);
// Time the launch with CUDA events; cudaEventElapsedTime reports ms.
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
cudaProfilerStart();
// compute<<<num_blocks, num_threads_per_block>>>(d_A, d_B, d_C, d_res, iterations);
shared_latency<<<num_blocks, num_threads_per_block>>>(d_res, iterations, divergence);
// NOTE(review): no cudaGetLastError() after the launch — an invalid
// launch configuration would go unnoticed and time ~0 ms.
cudaDeviceSynchronize();
cudaProfilerStop();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
std::cout << time << std::endl;
cudaEventDestroy(start);
cudaEventDestroy(stop);
// h_res is declared at file scope (above this excerpt).
cudaMemcpy(h_res, d_res, sizeof(double), cudaMemcpyDeviceToHost);
return 0;
}
|
10,971 | __global__ void sum_atomic(int nx, int *sum, int *data){
// One thread per input element: each in-range thread folds data[x] into
// *sum with an atomic add. The order of additions is nondeterministic,
// but the integer total is exact.
const int x = blockDim.x * blockIdx.x + threadIdx.x;
// Guard the tail — the grid may provide more threads than nx elements.
if (x < nx){
atomicAdd(sum, data[x]);
}
// NOTE(review): every thread contends on the single address 'sum'; for
// large nx a per-block reduction before one atomic would serialize less.
} |
10,972 | // VectorAdd.cu
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define N 10 // size of vectors
#define B 1 // blocks in the grid
#define T 10 // threads in a block
// Element-wise vector sum: c[i] = a[i] + b[i] for every i in [0, N).
// Threads mapped past N (grid over-provisioning) exit immediately.
__global__ void add (int *a,int *b, int *c) {
    int gid = threadIdx.x + blockDim.x * blockIdx.x;
    if (gid >= N)
        return;
    c[gid] = a[gid] + b[gid];
}
// Drives the add kernel: fills two N-element host vectors, uploads them,
// launches B blocks of T threads, downloads the sum, prints each
// "a+b=c" line, and releases device memory.
int main(void) {
    int a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;
    cudaMalloc((void**)&dev_a, N * sizeof(int));
    cudaMalloc((void**)&dev_b, N * sizeof(int));
    cudaMalloc((void**)&dev_c, N * sizeof(int));
    for (int i = 0; i < N; i++) {
        a[i] = i;
        b[i] = i * 1;
    }
    cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice);
    // Fix: the original also uploaded the *uninitialized* host array c —
    // an unnecessary transfer that read indeterminate memory. The kernel
    // fully overwrites dev_c, so no upload is needed.
    add<<<B, T>>>(dev_a, dev_b, dev_c);
    cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; i++) {
        printf("%d+%d=%d\n", a[i], b[i], c[i]);
    }
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return 0;
}
|
10,973 | #include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <time.h>
#define N 1000
////////////////////////////Each thread 1 row 1 column
// One thread per matrix element: A[r][c] = B[r][c] + C[r][c].
// Threads mapped outside the size x size region do nothing.
__global__ void kernel_1t1e(float A[N][N], float B[N][N], float C[N][N], int size) {
    int row = threadIdx.x + blockIdx.x * blockDim.x;
    int col = threadIdx.y + blockIdx.y * blockDim.y;
    if (row >= size || col >= size)
        return;
    A[row][col] = B[row][col] + C[row][col];
}
///////////////////////////Each thread 1 row
// One thread per row: thread 'row' (x dimension of the launch) walks every
// column of its row, storing A[row][c] = B[row][c] + C[row][c].
__global__ void kernel_1t1r(float A[N][N], float B[N][N], float C[N][N], int size) {
    int row = threadIdx.x + blockIdx.x * blockDim.x;
    if (row >= size)
        return;
    for (int col = 0; col < size; ++col)
        A[row][col] = B[row][col] + C[row][col];
}
///////////////////////////Each thread 1 column
// One thread per column: thread 'col' (y dimension of the launch) walks
// every row of its column, storing A[r][col] = B[r][col] + C[r][col].
__global__ void kernel_1t1c(float A[N][N], float B[N][N], float C[N][N], int size) {
    int col = threadIdx.y + blockIdx.y * blockDim.y;
    if (col >= size)
        return;
    for (int row = 0; row < size; ++row)
        A[row][col] = B[row][col] + C[row][col];
}
// Benchmarks GPU element-wise matrix addition A = B + C for N x N floats:
// prints device properties, fills B and C with pseudo-random values,
// times the one-thread-per-element kernel with CUDA events, then copies
// A back to the host and prints it.
int main(void){
    int nDevices;
    int i, j;
    // Fix: A, B and C were automatic (stack) arrays — ~3.8 MB each,
    // ~11.4 MB total, beyond typical default stack limits. Static storage
    // keeps the same names and types without overflowing the stack.
    static float A[N][N], B[N][N], C[N][N];
    float (*A_d)[N], (*B_d)[N], (*C_d)[N];
    //Print device properties
    cudaGetDeviceCount(&nDevices);
    for (int dev = 0; dev < nDevices; dev++) {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, dev);
        printf("Device Number: %d\n", dev);
        printf(" Device name: %s\n", prop.name);
        printf(" MaxThreadPerBlock: %d\n", prop.maxThreadsPerBlock);
        printf(" MaxThreadsDim0: %d\n", prop.maxThreadsDim[0]);
        printf(" MaxThreadsDim1: %d\n", prop.maxThreadsDim[1]);
        printf(" MaxThreadsDim2: %d\n", prop.maxThreadsDim[2]);
        // Fix: the y-limit ([1]) was printed under the generic label;
        // report the x-dimension grid limit instead.
        printf(" MaxGridSize: %d\n", prop.maxGridSize[0]);
        printf(" Warp Size: %d\n", prop.warpSize);
    }
    //Populate first input matrix with uniform values in [0, 100).
    srand(1);
    for (i = 0; i < N; i++){
        for (j = 0; j < N; j++) {
            B[i][j] = ((float)rand()/(float)(RAND_MAX)) * 100;
        }
        printf("\n");
    }
    printf("\n");
    //Populate second input matrix.
    for (i = 0; i < N; i++){
        for (j = 0; j < N; j++) {
            C[i][j] = ((float)rand()/(float)(RAND_MAX)) * 100;
        }
        printf("\n");
    }
    printf("\n");
    printf("===============================");
    printf("\n");
    //Allocate memory in the device
    cudaMalloc((void**) &A_d, (N*N)*sizeof(float));
    cudaMalloc((void**) &B_d, (N*N)*sizeof(float));
    cudaMalloc((void**) &C_d, (N*N)*sizeof(float));
    //Mem copy from host to device
    cudaMemcpy(A_d, A, (N*N)*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(B_d, B, (N*N)*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(C_d, C, (N*N)*sizeof(float), cudaMemcpyHostToDevice);
    // Fix: the original requested dim3(N, N) = 1,000,000 threads per
    // block, which exceeds the 1024 threads/block hardware limit, so the
    // launch could never execute (and the derived grid collapsed to 1x1).
    // Use 16x16 blocks with a grid that covers the whole N x N matrix.
    dim3 threadsPerBlock(16, 16);
    dim3 numBlocks((N + threadsPerBlock.x - 1) / threadsPerBlock.x,
                   (N + threadsPerBlock.y - 1) / threadsPerBlock.y);
    cudaEvent_t start, stop;
    float elapsed = 0;
    //Time the element-per-thread kernel with CUDA events.
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    kernel_1t1e<<<numBlocks,threadsPerBlock>>>(A_d, B_d, C_d, N);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf("GPU Run TIme threadsall %.2f ms \n", elapsed);
    ////////////////////////////////////Thread Row (disabled)
    /*
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    kernel_1t1r<<<numBlocks,threadsPerBlock>>>(A_d, B_d, C_d, N);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf("GPU Run TIme threadsrow %.2f ms \n", elapsed);
    ////////////////////////////////////Thread Column (disabled)
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    kernel_1t1c<<<numBlocks,threadsPerBlock>>>(A_d, B_d, C_d, N);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf("GPU Run TIme threadscol %.2f ms \n", elapsed);
    */
    //Copy the result back to the host.
    cudaMemcpy(A, A_d, (N*N)*sizeof(float), cudaMemcpyDeviceToHost);
    /////////////////////////////////////Print matrix A
    for (i = 0; i < N; i++){
        for (j = 0; j < N; j++) {
            printf("%f ", A[i][j]);
        }
        printf("\n");
    }
    printf("\n");
    /////////////////////////////////////Free up device memory
    cudaFree(A_d);
    cudaFree(B_d);
    cudaFree(C_d);
}
|
10,974 | #include "includes.h"
// Naive dense matrix multiply C = A * B for N x N row-major matrices;
// one thread computes one output element C[i][j].
__global__ void MxM_naive(double* A, double* B, double* C, const int N) {
    int i = blockIdx.y * blockDim.y + threadIdx.y; // Row i of matrix C
    int j = blockIdx.x * blockDim.x + threadIdx.x; // Column j of matrix C
    // Fix: guard against threads mapped outside the matrix — when the grid
    // does not divide N exactly, the unguarded original read and wrote out
    // of bounds in global memory.
    if (i < N && j < N) {
        double C_temp = 0;
        for (int k = 0; k < N; k++) {
            // 1D indexing into row-major storage
            C_temp += A[i*N + k] * B[k*N + j];
        }
        // One thread owns one element, so no synchronization is needed.
        C[i*N + j] = C_temp;
    }
} |
10,975 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**********************************
When updating a kernel or adding a new one,
please compile the ptx file and commit it:
nvcc -ptx SystemML.cu
***********************************/
// dim => rlen (Assumption: rlen == clen); N = dense array length.
// Mirrors the strict upper triangle of a dim x dim dense matrix into its
// lower triangle, in place.
extern "C"
__global__ void copyUpperToLowerTriangleDense(double* ret, int dim, int N) {
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int dest = row * dim + col;
    // Only threads strictly below the diagonal copy; the transposed source
    // element lives at (col, row). (Thread count could be halved — TODO.)
    if (row > col && dest < N) {
        ret[dest] = ret[col * dim + row];
    }
}
// Maps an integer predicate to the double encoding used for booleans:
// 0 -> 0.0 (false), anything else -> 1.0 (true).
extern "C"
__device__ double getBoolean(int val) {
    return (val == 0) ? 0.0 : 1.0;
}
// op = {0=plus, 1=minus, 2=multiply, 3=divide, 4=power,
//       5=less, 6=lessequal, 7=greater, 8=greaterequal, 9=equal, 10=notequal,
//       11=min, 12=max, 13=and, 14=or, 15=log}
// Applies the scalar binary operator selected by 'op' to (x, y).
// Opcodes 13..15 are listed in the table but not implemented here; they
// fall through to the -999 sentinel, matching the original behavior.
extern "C"
__device__ double binaryOp(double x, double y, int op) {
    switch (op) {
        case 0:  return x + y;
        case 1:  return x - y;
        case 2:  return x * y;
        case 3:  return x / y;
        case 4:  return pow(x, y);
        case 5:  return getBoolean(x < y);
        case 6:  return getBoolean(x <= y);
        case 7:  return getBoolean(x > y);
        case 8:  return getBoolean(x >= y);
        case 9:  return getBoolean(x == y);
        case 10: return getBoolean(x != y);
        case 11: return min(x, y);
        case 12: return max(x, y);
        default: return -999; // unknown/unimplemented opcode sentinel
    }
}
// Fills the rlen x clen dense matrix A with 'scalar', one element per thread.
extern "C"
__global__ void dense_matrix_set(double* A, double scalar, int rlen, int clen) {
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    int iy = blockIdx.y * blockDim.y + threadIdx.y;
    // Fix: guard each dimension separately (as dense_matrix_copy does).
    // The old test 'ix*clen+iy < rlen*clen' let threads with ix >= rlen but
    // small iy alias other elements' linear indices — harmless here only
    // because every write stores the same value, and still redundant work.
    if (ix < rlen && iy < clen) {
        A[ix * clen + iy] = scalar;
    }
}
// Copies the rlen x clen dense matrix A into ret, one element per thread.
extern "C"
__global__ void dense_matrix_copy(double* A, double* ret, int rlen, int clen) {
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    int iy = blockIdx.y * blockDim.y + threadIdx.y;
    if (ix >= rlen || iy >= clen)
        return;
    int index = ix * clen + iy;
    ret[index] = A[index];
}
// Element-wise rectified linear unit: ret = max(A, 0), one thread per element.
extern "C"
__global__ void relu(double* A, double* ret, int rlen, int clen) {
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    int iy = blockIdx.y * blockDim.y + threadIdx.y;
    if (ix >= rlen || iy >= clen)
        return;
    int index = ix * clen + iy;
    ret[index] = max(0.0, A[index]);
}
// Element-wise three-way compare against compareVal (with tolerance tol):
// |A - compareVal| < tol  -> ifEqualsVal
// A < compareVal          -> ifLessThanVal
// otherwise               -> ifGreaterThanVal
extern "C"
__global__ void compareAndSet(double* A, double* ret, int rlen, int clen, double compareVal, double tol, double ifEqualsVal, double ifLessThanVal, double ifGreaterThanVal) {
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    int iy = blockIdx.y * blockDim.y + threadIdx.y;
    if (ix >= rlen || iy >= clen)
        return;
    int index = ix * clen + iy;
    double v = A[index];
    if (abs(v - compareVal) < tol)
        ret[index] = ifEqualsVal;
    else if (v < compareVal)
        ret[index] = ifLessThanVal;
    else
        ret[index] = ifGreaterThanVal;
}
// Cell-wise binary operation C = A (op) B over a maxRlen x maxClen result.
// vectorAStatus/vectorBStatus implement broadcasting for each operand:
// 0 = full matrix (same linear index as the output),
// 1 = column vector (clen == 1, indexed by row),
// 2 = row vector (rlen == 1, indexed by column).
extern "C"
__global__ void binCellOp(double* A, double* B, double* C,
    int maxRlen, int maxClen, int vectorAStatus, int vectorBStatus, int op) {
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    int iy = blockIdx.y * blockDim.y + threadIdx.y;
    if (ix >= maxRlen || iy >= maxClen)
        return;
    int outIndex = ix * maxClen + iy;
    int aIndex = (vectorAStatus == 1) ? ix
               : (vectorAStatus == 2) ? iy : outIndex;
    int bIndex = (vectorBStatus == 1) ? ix
               : (vectorBStatus == 2) ? iy : outIndex;
    C[outIndex] = binaryOp(A[aIndex], B[bIndex], op);
}
// Cell-wise op between matrix A and a scalar; isLeftScalar selects whether
// the scalar is the left operand (scalar op A) or the right (A op scalar).
extern "C"
__global__ void binCellScalarOp(double* A, double scalar, double* C, int rlenA, int clenA, int op, int isLeftScalar) {
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    int iy = blockIdx.y * blockDim.y + threadIdx.y;
    // Fix: per-dimension guard (matching binCellOp). The old linearized
    // test 'ix*clenA+iy < rlenA*clenA' let out-of-range (ix, iy) pairs
    // alias valid linear indices, producing redundant duplicate writes.
    if (ix < rlenA && iy < clenA) {
        int index = ix * clenA + iy;
        if (isLeftScalar)
            C[index] = binaryOp(scalar, A[index], op);
        else
            C[index] = binaryOp(A[index], scalar, op);
    }
}
/**
 * Sets every element of a double array of the given length to a scalar.
 * @param A array to fill
 * @param scalar value stored into each element
 * @param lenA number of elements in A
 */
extern "C"
__global__ void fill(double* A, double scalar, int lenA) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= lenA)
        return;
    A[i] = scalar;
}
// Block-level sum reduction: each block sums a strided slice of g_idata
// and writes its partial sum to g_odata[blockIdx.x]. Requires dynamic
// shared memory of blockDim.x doubles at launch.
// NOTE(review): the shared-memory tree assumes blockDim.x is a power of
// two and <= 512; the volatile tail relies on implicit warp lockstep (the
// classic pre-Volta warp-synchronous idiom) — on Volta+ independent thread
// scheduling this needs __syncwarp() between steps. Confirm target arch.
extern "C"
__global__ void reduce(double *g_idata, double *g_odata, unsigned int n)
{
extern __shared__ double sdata[];
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
// Each block starts at a 2*blockDim.x-wide window; threads also stride
// over the whole grid so any grid size covers all n elements.
unsigned int i = blockIdx.x*blockDim.x*2 + threadIdx.x;
unsigned int gridSize = blockDim.x*2*gridDim.x;
double mySum = 0;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n)
{
mySum += g_idata[i];
// ensure we don't read out of bounds
if (i + blockDim.x < n)
mySum += g_idata[i+blockDim.x];
i += gridSize;
}
// each thread puts its local sum into shared memory
sdata[tid] = mySum;
__syncthreads();
// do reduction in shared mem (tree halving; barriers keep phases ordered)
if (blockDim.x >= 512) { if (tid < 256) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); }
if (blockDim.x >= 256) { if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); }
if (blockDim.x >= 128) { if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); }
if (tid < 32)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile double* smem = sdata;
if (blockDim.x >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; }
if (blockDim.x >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; }
if (blockDim.x >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; }
if (blockDim.x >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; }
if (blockDim.x >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; }
if (blockDim.x >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; }
}
// write result for this block to global mem
if (tid == 0)
g_odata[blockIdx.x] = sdata[0];
}
|
10,976 | #include "includes.h"
// For each in-range element whose mask entry equals 1, writes an output
// position of total_pre + d_scan[i] into d_index. (d_scan presumably holds
// a prefix scan of d_mask and total_pre an offset from earlier chunks —
// NOTE(review): confirm against the caller.)
__global__ void getIndex(unsigned int *d_index, unsigned int *d_scan, unsigned int *d_mask, unsigned int in_size, unsigned int total_pre) {
    unsigned int i = threadIdx.x + blockDim.x * blockIdx.x;
    if (i < in_size && d_mask[i] == 1) {
        d_index[i] = total_pre + d_scan[i];
    }
} |
10,977 | //
// main.cpp
//
//
// Created by Elijah Afanasiev on 25.09.2018.
//
//
// System includes
#include <assert.h>
#include <stdio.h>
#include <chrono>
#include <cstdlib>
#include <iostream>
// CUDA runtime
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#ifndef MAX
#define MAX(a, b) (a > b ? a : b)
#endif
// c[i] = a[i] + b[i] for i in [0, N); surplus threads exit early.
__global__ void vectorAddGPU(float* a, float* b, float* c, int N) {
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    if (i >= N)
        return;
    c[i] = a[i] + b[i];
}
// Times vectorAddGPU with inputs/outputs in CUDA managed (unified) memory
// and prints the elapsed kernel time in ms. 'size' is the element count.
void unified_sample(int size = 1048576) {
    int n = size;
    int nBytes = n * sizeof(float);
    float *a, *b, *c;
    cudaEvent_t unifiedStart, unifiedStop;
    cudaEventCreate(&unifiedStart);
    cudaEventCreate(&unifiedStop);
    dim3 block(256);
    dim3 grid((unsigned int)ceil(n / (float)block.x));
    printf("Allocating managed(unified) memory on both host and device..\n");
    cudaMallocManaged(&a, nBytes);
    cudaMallocManaged(&b, nBytes);
    cudaMallocManaged(&c, nBytes);
    for (int i = 0; i < n; i++) {
        a[i] = rand() / (float)RAND_MAX;
        b[i] = rand() / (float)RAND_MAX;
    }
    cudaEventRecord(unifiedStart);
    printf("Doing GPU Vector add\n");
    vectorAddGPU<<<grid, block>>>(a, b, c, n);
    cudaEventRecord(unifiedStop);
    cudaDeviceSynchronize();
    float elapsedUnified;
    cudaEventElapsedTime(&elapsedUnified, unifiedStart, unifiedStop);
    std::cout << "Unified-Memory copying Elapsed Time: " << elapsedUnified
              << " ms.\n";
    // Fix: the original leaked all three managed buffers and both events,
    // and ended with a deprecated cudaThreadSynchronize() that was
    // redundant after cudaDeviceSynchronize() above.
    cudaEventDestroy(unifiedStart);
    cudaEventDestroy(unifiedStop);
    cudaFree(a);
    cudaFree(b);
    cudaFree(c);
}
// Times vectorAddGPU with page-locked (pinned) host buffers and explicit
// H2D copies; the timed region covers both copies plus the kernel.
void pinned_sample(int size = 1048576) {
    int n = size;
    int nBytes = n * sizeof(float);
    float *h_a, *h_b, *h_c;
    float *d_a, *d_b, *d_c;
    cudaEvent_t pinnedStart, pinnedStop;
    cudaEventCreate(&pinnedStart);
    cudaEventCreate(&pinnedStop);
    dim3 block(256);
    dim3 grid((unsigned int)ceil(n / (float)block.x));
    printf("Allocating device pinned memory on host\n");
    cudaMallocHost(&h_a, nBytes);
    cudaMallocHost(&h_b, nBytes);
    cudaMallocHost(&h_c, nBytes);
    cudaMalloc(&d_a, nBytes);
    cudaMalloc(&d_b, nBytes);
    cudaMalloc(&d_c, nBytes);
    for (int i = 0; i < n; i++) {
        h_a[i] = rand() / (float)RAND_MAX;
        h_b[i] = rand() / (float)RAND_MAX;
        h_c[i] = 0;
    }
    printf("Copying to device..\n");
    printf("Doing GPU Vector Add\n");
    cudaEventRecord(pinnedStart);
    cudaMemcpy(d_a, h_a, nBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, nBytes, cudaMemcpyHostToDevice);
    vectorAddGPU<<<grid, block>>>(d_a, d_b, d_c, n);
    cudaEventRecord(pinnedStop);
    cudaDeviceSynchronize();
    float elapsedPinned;
    cudaEventElapsedTime(&elapsedPinned, pinnedStart, pinnedStop);
    std::cout << "Pinned-Memory copying Elapsed Time: " << elapsedPinned
              << " ms.\n";
    // Fix: the original leaked every allocation and both events. Pinned
    // host buffers must be released with cudaFreeHost. The trailing
    // deprecated cudaThreadSynchronize() was redundant and is dropped.
    cudaEventDestroy(pinnedStart);
    cudaEventDestroy(pinnedStop);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cudaFreeHost(h_a);
    cudaFreeHost(h_b);
    cudaFreeHost(h_c);
}
// Times vectorAddGPU with ordinary pageable host memory; the timed region
// covers both H2D copies plus the kernel. The result is never copied back:
// this function only measures transfer + launch cost.
void usual_sample(int size = 1048576) {
    int n = size;
    int nBytes = n * sizeof(float);
    float *a, *b; // host input data
    float* c;     // host result buffer (initialized but never read back)
    a = (float*)malloc(nBytes);
    b = (float*)malloc(nBytes);
    c = (float*)malloc(nBytes);
    float *a_d, *b_d, *c_d;
    dim3 block(256);
    dim3 grid((unsigned int)ceil(n / (float)block.x));
    for (int i = 0; i < n; i++) {
        a[i] = rand() / (float)RAND_MAX;
        b[i] = rand() / (float)RAND_MAX;
        c[i] = 0;
    }
    printf("Allocating device memory on host..\n");
    cudaMalloc((void**)&a_d, n * sizeof(float));
    cudaMalloc((void**)&b_d, n * sizeof(float));
    cudaMalloc((void**)&c_d, n * sizeof(float));
    printf("Copying to device..\n");
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    cudaMemcpy(a_d, a, n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b, n * sizeof(float), cudaMemcpyHostToDevice);
    printf("Doing GPU Vector add\n");
    vectorAddGPU<<<grid, block>>>(a_d, b_d, c_d, n);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("time: %f ms\n", milliseconds);
    // Fix: destroy the timing events (previously leaked); the deprecated
    // cudaThreadSynchronize() was redundant after cudaEventSynchronize().
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);
    free(a);
    free(b);
    free(c);
}
// Runs the three transfer-strategy benchmarks with the element count
// taken from argv[1].
int main(int argc, char** argv) {
    // Fix: argv[1] was dereferenced unconditionally; running the binary
    // without an argument was undefined behavior.
    if (argc < 2) {
        std::cerr << "Usage: " << argv[0] << " <num_elements>\n";
        return 1;
    }
    int size = atoi(argv[1]);
    std::cout << "-------> USUAL SAMPLE <-------\n";
    usual_sample(size);
    std::cout << "-------> USUAL SAMPLE <-------\n\n";
    std::cout << "-------> PINNED SAMPLE <-------\n";
    pinned_sample(size);
    std::cout << "-------> PINNED SAMPLE <-------\n\n";
    std::cout << "-------> UNIFIED SAMPLE <-------\n";
    unified_sample(size);
    std::cout << "-------> UNIFIED SAMPLE <-------\n";
    return 0;
} |
10,978 | __global__ void scrambleGammaToSigma( float* Sigma, float* Gamma, float lambda, int* nonEmptyRows, int* nonEmptyCols, int GammaDim, int HOGDim, int nNonEmptyCells )
{
// Assembles the (HOGDim * nNonEmptyCells)^2 matrix Sigma (column-major:
// element (r, c) at Sigma[r + c*sigmaDim]) by gathering entries of Gamma
// indexed by the absolute row/column grid offsets between the two cells
// involved, then adds 'lambda' on the diagonal.
// NOTE(review): assumes Gamma is laid out as (offset*HOGDim + component)
// in both dimensions with leading dimension GammaDim — confirm with caller.
int r = blockDim.x * blockIdx.x + threadIdx.x; // rows
int c = blockDim.y * blockIdx.y + threadIdx.y; // cols
int sigmaDim = HOGDim * nNonEmptyCells;
if( r < sigmaDim && c < sigmaDim ){
// Position of (r, c) within its HOGDim x HOGDim sub-block ...
int HOG_row_idx = r % HOGDim;
int HOG_col_idx = c % HOGDim;
// ... and which non-empty cells the row/column belong to.
int currCellIdx = r / HOGDim;
int otherCellIdx = c / HOGDim;
// Gamma is addressed by the absolute offset between the two cells, so
// only the magnitude of the row/column distance matters here.
int gammaRowIdx = abs( nonEmptyRows[currCellIdx] - nonEmptyRows[otherCellIdx] );
int gammaColIdx = abs( nonEmptyCols[currCellIdx] - nonEmptyCols[otherCellIdx] );
Sigma[r + c * sigmaDim] = Gamma[ ((gammaRowIdx * HOGDim) + HOG_row_idx) + ( ( gammaColIdx * HOGDim ) + HOG_col_idx ) * GammaDim ]; // + (r==c)?lambda:0 ;
if (r == c) Sigma[r + c * sigmaDim] += lambda;
}
} |
10,979 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <float.h>
#include <math.h>
#include <cuda_runtime.h>
// Prints an m x n row-major float matrix to stdout. When gpu is true
// (the default), x is treated as a device pointer: the data is copied
// into a temporary host buffer first; otherwise x is printed directly.
void showMatrix(float *x, int m, int n, bool gpu = 1){
float *y;
if(gpu){
y = (float*)calloc(m*n, sizeof(float));
// NOTE(review): neither calloc nor cudaMemcpy is checked; a failed
// allocation would make cudaMemcpy write through a null pointer.
cudaMemcpy(y, x, m*n*sizeof(float), cudaMemcpyDeviceToHost);
}else{
y = x;
}
for(int i = 0;i < m;++i){
for(int j = 0; j< n;++j){
printf("%f ", y[i*n+j]);
}
printf("\n");
}
// Only the temporary host copy is owned by this function.
if(gpu){
free(y);
}
} |
10,980 | #include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
// Device kernel: element-wise vector sum C[i] = A[i] + B[i] for i < N.
// (The original "// Host code" comment was misleading — this runs on the GPU.)
__global__ void VecAdd(float* A, float* B, float* C, int N){
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= N)
        return;
    C[i] = A[i] + B[i];
}
// Benchmark: element-wise add of two 2^15-element vectors, timed for six
// block sizes (2^5 .. 2^10). For each configuration avg_loop launches run
// (each followed by a blocking D2H copy); the event starts on the second
// iteration so the first acts as a warm-up.
int main() {
    int N = pow(2,15);
    int avg_loop=1000;
    int threadsPerBlock_array[6];
    size_t size = N * sizeof(float);
    //Helper variables
    int loop;
    int thread_loop;
    int clock_loop;
    float time_spent;
    // Allocate input vectors h_A and h_B in host memory
    float* h_A = (float*)malloc(size);
    float* h_B = (float*)malloc(size);
    float* h_C = (float*)malloc(size);
    printf("\nThreads per Block per array\n");
    for(loop = 5; loop < 11; loop++){
        threadsPerBlock_array[loop-5] = pow(2,loop);
        printf("%d ", threadsPerBlock_array[loop-5]);
    }
    // Initialize input vectors with values in [1, 100]
    printf("Array A (first 10 values) \n ");
    for(loop = 0; loop < N; loop++){
        h_A[loop] = rand() % 100 + 1;
        if (loop<10){
            printf("%f ", h_A[loop]);
        }
    }
    printf("\nArray B (first 10 values) \n ");
    for(loop = 0; loop < N; loop++){
        h_B[loop] = rand() % 100 + 1;
        if (loop<10){
            printf("%f ", h_B[loop]);
        }
    }
    // Allocate vectors in device memory
    float* d_A; cudaMalloc(&d_A, size);
    float* d_B; cudaMalloc(&d_B, size);
    float* d_C; cudaMalloc(&d_C, size);
    //GPU timing
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Copy vectors from host memory to device memory
    cudaMemcpy(d_A, h_A, size,cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, size,cudaMemcpyHostToDevice);
    for (thread_loop=0;thread_loop<6;thread_loop++){
        for(clock_loop=0;clock_loop<avg_loop;clock_loop++){
            // Start timing after the first (warm-up) iteration.
            if (clock_loop==1){
                cudaEventRecord(start, 0);
            }
            // Invoke kernel
            int threadsPerBlock = threadsPerBlock_array[thread_loop];
            int blocksPerGrid = (N + threadsPerBlock - 1) /threadsPerBlock;
            VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A,d_B, d_C, N);
            // Blocking D2H copy also synchronizes each timed iteration.
            cudaMemcpy(h_C, d_C, size,cudaMemcpyDeviceToHost);
        }
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&time_spent, start, stop);
        time_spent=time_spent/(avg_loop-1)*10;
        printf("\n Average Time spent in loop %d is %f",thread_loop,time_spent);
    }
    // Free device memory
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    // Fix: destroy the timing events (previously leaked).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // Free host memory
    free(h_A);
    free(h_B);
    // Fix: h_C was allocated but never freed.
    free(h_C);
    return 0;
} |
10,981 | #include "includes.h"
// Per-pixel CIELAB -> linear RGB conversion (D65 white point); the alpha
// channel (w) passes through unchanged. 'stride' is the row pitch in
// float4 elements — NOTE(review): confirm callers pass an element pitch,
// not a byte pitch.
__global__ void cuConvertLABToRGBKernel(const float4* src, float4* dst, size_t stride, int width, int height)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
// Linear offset is computed unconditionally but only dereferenced inside
// the bounds guard below.
int c = y*stride + x;
if (x<width && y<height)
{
// Read
float4 in = src[c];
float L = in.x;
float a = in.y;
float b = in.z;
// convert to XYZ
// T1 = cbrt(216/24389) = 6/29, the CIELAB linear/cubic threshold;
// 24389/27 is the companion constant kappa = (29/3)^3.
const float T1 = cbrtf(216/24389.0f);
const float fy = (L+16) / 116.0f;
float4 XYZ;
if (L > 8)
XYZ.y = fy*fy*fy;
else
XYZ.y = L / (24389/27.0f);
float fx = a/500.0f + fy;
if (fx > T1)
XYZ.x = fx*fx*fx;
else
XYZ.x = (116*fx-16) / (24389/27.0f);
float fz = fy - b/200.0f;
if (fz > T1)
XYZ.z = fz*fz*fz;
else
XYZ.z = (116*fz-16) / (24389/27.0f);
// Normalize for D65 white point
XYZ.x *= 0.950456f;
XYZ.z *= 1.088754f;
// XYZ -> linear sRGB primaries (3x3 matrix applied row by row).
float4 rgb;
rgb.x = 3.2404542f*XYZ.x + -1.5371385f*XYZ.y + -0.4985314f*XYZ.z;
rgb.y = -0.9692660f*XYZ.x + 1.8760108f*XYZ.y + 0.0415560f*XYZ.z;
rgb.z = 0.0556434f*XYZ.x + -0.2040259f*XYZ.y + 1.0572252f*XYZ.z;
rgb.w = in.w;
dst[c] = rgb;
}
} |
10,982 | #include <cstdio>
#include <ctime>
#include <cstring> // memset
#include <cstdlib> // rand, RAND_MAX
#include <cmath> // sqrtf
#include <string>
#include <vector>
using namespace std;
// Pseudo-random value in (0, 1): the midpoint of rand()'s bucket, divided
// by the (open) upper bound RAND_MAX + 1.
float randomf(){
    double bucket_mid = rand() + 0.5;
    return bucket_mid / (RAND_MAX + 1.0);
}
// Monotonic wall-clock timestamp in seconds (nanosecond resolution).
static double get_time(){
    timespec now;
    clock_gettime(CLOCK_MONOTONIC, &now);
    return (double)now.tv_sec + 1e-9 * (double)now.tv_nsec;
}
// input: radius (1), nsample (1), xyz1 (b,n,3), xyz2 (b,m,3)
// output: idx (b,m,nsample)
// For each of the m query points in xyz2, records the indices (into xyz1)
// of up to nsample points whose clamped Euclidean distance is < radius.
// One thread handles one whole batch element: only threadIdx.x is used,
// so this must be launched <<<1, b>>> (as main() below does).
__global__ void query_ball_point_gpu(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, int *idx) {
int index = threadIdx.x;
// Advance all pointers to this batch element's slice.
xyz1 += n*3*index;
xyz2 += m*3*index;
idx += m*nsample*index;
for (int j=0;j<m;++j) {
int cnt = 0;
for (int k=0;k<n;++k) {
if (cnt == nsample)
break; // only pick the FIRST nsample points in the ball
float x2=xyz2[j*3+0];
float y2=xyz2[j*3+1];
float z2=xyz2[j*3+2];
float x1=xyz1[k*3+0];
float y1=xyz1[k*3+1];
float z1=xyz1[k*3+2];
// Distance is clamped to at least 1e-20 before the radius test.
float d=max(sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)),1e-20f);
if (d<radius) {
if (cnt==0) { // set ALL indices to k, s.t. if there are less points in ball than nsample, we still have valid (repeating) indices
for (int l=0;l<nsample;++l)
idx[j*nsample+l] = k;
}
idx[j*nsample+cnt] = k;
cnt+=1;
}
}
}
}
// input: points (b,n,c), idx (b,m,nsample)
// output: out (b,m,nsample,c)
// Gathers point features by index: out[j][k][:] = points[idx[j][k]][:].
// One thread handles one whole batch element (selected by threadIdx.x),
// so launch as <<<1, b>>>.
__global__ void group_point_gpu(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out) {
    int batch = threadIdx.x;
    const float *pts = points + n*c*batch;
    const int *ind = idx + m*nsample*batch;
    float *dst = out + m*nsample*c*batch;
    for (int j = 0; j < m; ++j) {
        for (int k = 0; k < nsample; ++k) {
            int src = ind[j*nsample + k];
            for (int l = 0; l < c; ++l)
                dst[(j*nsample + k)*c + l] = pts[src*c + l];
        }
    }
}
// input: grad_out (b,m,nsample,c), idx (b,m,nsample)
// output: grad_points (b,n,c)
// Scatter-add of gradients: accumulates grad_out[j][k][:] into
// grad_points[idx[j][k]][:]. One thread owns an entire batch element
// (launch <<<1, b>>>), so repeated indices within a batch accumulate
// sequentially without atomics.
__global__ void group_point_grad_gpu(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points) {
    int batch = threadIdx.x;
    const int *ind = idx + m*nsample*batch;
    const float *gout = grad_out + m*nsample*c*batch;
    float *gpts = grad_points + n*c*batch;
    for (int j = 0; j < m; ++j) {
        for (int k = 0; k < nsample; ++k) {
            int dst = ind[j*nsample + k];
            for (int l = 0; l < c; ++l)
                gpts[dst*c + l] += gout[(j*nsample + k)*c + l];
        }
    }
}
// Host driver: builds a synthetic batch (b=32 batches, n=512 source
// points, m=128 queries, c=64 features) in managed memory and times the
// three kernels. All launches are <<<1, b>>> because each kernel selects
// its batch element with threadIdx.x only.
int main()
{
    int b=32,n=512,m=128,nsample=64,c=64;
    float radius=0.1;
    float *xyz1, *xyz2, *points;
    cudaMallocManaged(&xyz1, b*n*3*sizeof(float));
    cudaMallocManaged(&xyz2, b*m*3*sizeof(float));
    cudaMallocManaged(&points, b*n*c*sizeof(float));
    int *idx;
    cudaMallocManaged(&idx, b*m*nsample*sizeof(int));
    memset(idx, 0, sizeof(int)*b*m*nsample);
    float *out, *grad_out;
    cudaMallocManaged(&out, b*m*nsample*c*sizeof(float));
    cudaMallocManaged(&grad_out, b*m*nsample*c*sizeof(float));
    // Fix: memset takes an int fill byte; the original passed the double
    // literal 0.0 (implicitly converted, but a type error in intent).
    memset(grad_out, 0, sizeof(float)*b*m*nsample*c);
    float *grad_points;
    cudaMallocManaged(&grad_points, b*n*c*sizeof(float));
    // Fix: group_point_grad_gpu accumulates with +=, so the gradient
    // buffer must start zeroed; it was previously left uninitialized.
    memset(grad_points, 0, sizeof(float)*b*n*c);
    for (int i=0;i<b*n*3;i++)
        xyz1[i]=randomf();
    for (int i=0;i<b*m*3;i++)
        xyz2[i]=randomf();
    for (int i=0;i<b*n*c;i++)
        points[i]=randomf();
    double t0=get_time();
    query_ball_point_gpu<<<1,b>>>(b,n,m,radius,nsample,xyz1,xyz2,idx);
    cudaDeviceSynchronize();
    printf("query_ball_point gpu time %f\n",get_time()-t0);
    t0=get_time();
    group_point_gpu<<<1,b>>>(b,n,c,m,nsample,points,idx,out);
    cudaDeviceSynchronize();
    printf("grou_point gpu time %f\n",get_time()-t0);
    t0=get_time();
    group_point_grad_gpu<<<1,b>>>(b,n,c,m,nsample,grad_out,idx,grad_points);
    cudaDeviceSynchronize();
    printf("grou_point_grad gpu time %f\n",get_time()-t0);
    cudaFree(xyz1);
    cudaFree(xyz2);
    cudaFree(points);
    cudaFree(idx);
    cudaFree(out);
    cudaFree(grad_out);
    cudaFree(grad_points);
    return 0;
}
|
10,983 | #include "bounding_box.cuh"
// Out-of-line definitions for BoundingBox (declared in bounding_box.cuh).
// All members are callable from both host and device code.

// Default box spans [-1, 1] x [-1, 1].
__host__ __device__ BoundingBox::BoundingBox(){
m_p_min = make_float2(-1.0f, -1.0f);
m_p_max = make_float2(1.0f, 1.0f);
}
// Writes the box's geometric center (midpoint of the corners) into 'center'.
__host__ __device__ void BoundingBox::compute_center(float2 &center) const{
center.x = (m_p_min.x + m_p_max.x)/2;
center.y = (m_p_min.y + m_p_max.y )/2;
}
// Accessor for the maximum (upper-right) corner.
__host__ __device__ const float2& BoundingBox::get_max() const{
return m_p_max;
}
// Accessor for the minimum (lower-left) corner.
__host__ __device__ const float2& BoundingBox::get_min() const{
return m_p_min;
}
// Inclusive point-in-box test: points exactly on the boundary count as inside.
__host__ __device__ bool BoundingBox::contains(float2& p) const{
return p.x >= m_p_min.x && p.y >= m_p_min.y && p.x <= m_p_max.x && p.y <= m_p_max.y;
}
// Overwrites both corners; no validation that min <= max is performed.
__host__ __device__ void BoundingBox::set(float min_x, float min_y, float max_x, float max_y){
m_p_min.x = min_x;
m_p_min.y = min_y;
m_p_max.x = max_x;
m_p_max.y = max_y;
}
|
10,984 | #include <stdio.h>
#include <sys/time.h>
#define P (1 << 14)
// Wall-clock time in seconds since the epoch, with microsecond resolution.
double cpuSecond()
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return (double)tv.tv_sec + 1e-6 * (double)tv.tv_usec;
}
// Copies an m x n row-major matrix A into B. The inner loop walks ix
// (the fast index of idx = iy*m + ix), i.e. contiguous memory order.
void copymat_host_x(int m, int n, int* A, int *B)
{
    for (int iy = 0; iy < n; iy++) {
        for (int ix = 0; ix < m; ix++) {
            int idx = iy*m + ix;
            B[idx] = A[idx];
        }
    }
}
// Copies an m x n matrix A into B with the loop order swapped relative to
// copymat_host_x: the inner loop steps idx by m (strided access), which is
// the cache-unfriendly traversal this benchmark compares against.
void copymat_host_y(int m, int n, int* A, int *B)
{
    for (int ix = 0; ix < m; ix++) {
        for (int iy = 0; iy < n; iy++) {
            int idx = iy*m + ix;
            B[idx] = A[idx];
        }
    }
}
// Host-side benchmark: times copying a 2^14 x 2^14 int matrix (1 GiB per
// buffer) either contiguously (copymat_host_x) or with stride-m accesses
// (copymat_host_y), selected by the #if below.
int main(int argc, char** argv)
{
    int *A, *B;
    size_t m, n, nbytes;
    double etime, start;
    m = 1 << 14;
    n = 1 << 14;
    nbytes = m*n*sizeof(int);
    printf("P = %d\n",P);
    A = (int*) malloc(nbytes);
    B = (int*) malloc(nbytes);
    // Fix: each buffer is 1 GiB and malloc was unchecked — a failed
    // allocation led straight to a NULL dereference in the copy loop.
    if (A == NULL || B == NULL) {
        fprintf(stderr, "allocation of %zu bytes failed\n", nbytes);
        free(A);
        free(B);
        return 1;
    }
    // NOTE(review): A is copied while uninitialized — acceptable for a
    // pure bandwidth benchmark, but the values are indeterminate.
    start = cpuSecond();
#if 0
    copymat_host_x(m,n,A,B);
#else
    copymat_host_y(m,n,A,B);
#endif
    etime = cpuSecond() - start;
    printf("Host %10.3g (s)\n",etime);
    free(A);
    free(B);
}
|
10,985 | /**********************************************************************
* DESCRIPTION:
* Parallel Concurrent Wave Equation w/ CUDA acceleration
* This program implements the concurrent wave equation
*********************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#define MAXPOINTS 1000000
#define MAXSTEPS 1000000
#define MINPOINTS 20
#define PI 3.14159265
// CUDA
#define BLOCK_SIZE 1024
// function declaration
void check_param(void);
void update (void);
void printfinal (void);
// global variable
int nsteps, /* number of time steps */
tpoints, /* total points along string */
rcode; /* generic return code */
float values[MAXPOINTS + 2]; /* values at time t */
// CUDA
float *Vd;
/**********************************************************************
 * Validates the global tpoints/nsteps values, prompting interactively
 * on stdin until each lies within its legal range.
 *********************************************************************/
void check_param(void)
{
	char tchar[20];
	// check number of points, number of iterations
	while ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)) {
		printf("Enter number of points along vibrating string [%d-%d]: "
			,MINPOINTS, MAXPOINTS);
		// Fix: unbounded %s could overflow the 20-byte buffer; %19s
		// leaves room for the terminating NUL.
		scanf("%19s", tchar);
		tpoints = atoi(tchar);
		if ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS))
			printf("Invalid. Please enter value between %d and %d\n",
				MINPOINTS, MAXPOINTS);
	}
	while ((nsteps < 1) || (nsteps > MAXSTEPS)) {
		printf("Enter number of time steps [1-%d]: ", MAXSTEPS);
		scanf("%19s", tchar);
		nsteps = atoi(tchar);
		if ((nsteps < 1) || (nsteps > MAXSTEPS))
			printf("Invalid. Please enter value between 1 and %d\n", MAXSTEPS);
	}
	printf("Using points = %d, steps = %d\n", tpoints, nsteps);
}
/**********************************************************************
 * CUDA acceleration code block
 * - Initialize points on line
 * - Calculate new values using wave equation and update accordingly
 * One thread owns one string point k (1-based); it initializes its value
 * to sin(2*pi*x), iterates the update nsteps times in registers, and
 * writes only the final value to Vd[k].
 * NOTE(review): k is derived from BLOCK_SIZE, so blockDim.x must equal
 * BLOCK_SIZE at launch (main() guarantees this). The 2.0/0.3/1.0 literals
 * are doubles mixed into float math — confirm that precision is intended.
 *********************************************************************/
__global__ void runOnGPU(float *Vd, int tpoints, int nsteps)
{
// variable declaration
int i, k;
float x, fac, tmp;
float dtime, c, dx, tau, sqtau;
float value, newVal, oldVal;
// init_line(): x in [0, 1] is the normalized position of point k.
fac = 2.0 * PI;
k = 1 + blockIdx.x * BLOCK_SIZE + threadIdx.x;
tmp = tpoints - 1;
x = (k - 1) / tmp;
value = sin(fac * x);
oldVal = value;
// do_math() + update(): sqtau = (c*dtime/dx)^2 is the squared Courant factor.
dtime = 0.3;
c = 1.0;
dx = 1.0;
tau = (c * dtime / dx);
sqtau = tau * tau;
if(k <= tpoints) {
// propagate for nsteps iterations
for (i = 1; i <= nsteps; i++) {
// check boundary: endpoints of the string are pinned to zero
if ((k == 1) || (k == tpoints))
newVal = 0.0;
else
newVal = (2.0 * value) - oldVal + (sqtau * (-2.0) * value);
// update oldVal and value
oldVal = value;
value = newVal;
}
// update final value to Vd
Vd[k] = value;
}
}
/**********************************************************************
 * Prints the final string values (global 'values', indices 1..tpoints),
 * ten per line.
 *********************************************************************/
void printfinal()
{
	for (int i = 1; i <= tpoints; i++) {
		printf("%6.4f ", values[i]);
		if (i % 10 == 0)
			printf("\n");
	}
}
/**********************************************************************
* Main program
*********************************************************************/
/* Driver: parse <points> <steps> from the command line, run the GPU wave
 * simulation, copy the result back into `values` and print it.
 * Fixes: guards argc before touching argv[1]/argv[2] (previously a segfault
 * with no arguments) and uses the standard ceiling-division idiom for the
 * block count. */
int main(int argc, char *argv[])
{
	int size;
	int blockNum;
	// parse arguments -- refuse to run without both of them
	if (argc < 3) {
		printf("Usage: %s <tpoints> <nsteps>\n", argv[0]);
		return 1;
	}
	sscanf(argv[1], "%d", &tpoints);
	sscanf(argv[2], "%d", &nsteps);
	check_param();
	printf("Initializing points on the line...\n");
	printf("Updating all points for all time steps...\n");
	// cuda memory allocation (index 0 is unused: points are 1-based)
	size = (tpoints + 1) * sizeof(float);
	cudaMalloc((void**) &Vd, size);
	// enough BLOCK_SIZE-thread blocks to cover all tpoints (ceiling division)
	blockNum = (tpoints + BLOCK_SIZE - 1) / BLOCK_SIZE;
	runOnGPU<<<blockNum, BLOCK_SIZE>>>(Vd, tpoints, nsteps);
	cudaMemcpy(values, Vd, size, cudaMemcpyDeviceToHost);
	cudaFree(Vd);
	printf("Printing final results...\n");
	printfinal();
	printf("\nDone.\n\n");
	return 0;
}
|
10,986 | #include<iostream>
#include<cstdlib>
#include<cmath>
#include<time.h>
using namespace std;
// One thread per output row: c[row] = sum_j a[j*n+row] * b[j]
// (element (j,row) of the matrix is stored at a[j*n + row]).
// Threads past row n-1 exit via the guard.
__global__ void matrixVectorMultiplication(int *a, int *b, int *c, int n)
{
	int row = threadIdx.x + blockDim.x * blockIdx.x;
	if (row >= n)
		return;
	int acc = 0;
	for (int j = 0; j < n; ++j)
		acc += a[(j * n) + row] * b[j];
	c[row] = acc;
}
/* Multiply a 10x10 matrix by a vector on the GPU, check against a CPU
 * reference, and print both results plus the accumulated difference.
 * Fixes vs. original:
 *  - matrix buffer was allocated/copied with size*size == (n*sizeof(int))^2
 *    bytes (gross over-allocation); now exactly n*n*sizeof(int);
 *  - removed the dead dim3 setup (it even configured an illegal 512x512
 *    thread block) -- the actual launch config is the one used;
 *  - device and host allocations are now freed. */
int main()
{
	int *a, *b, *c;
	int *a_dev, *b_dev, *c_dev;
	int n = 10;
	a = new int[n * n];
	b = new int[n];
	c = new int[n];
	int *d = new int[n];
	int vecBytes = n * sizeof(int);
	int matBytes = n * n * sizeof(int);   // exact matrix size in bytes
	cudaMalloc(&a_dev, matBytes);
	cudaMalloc(&b_dev, vecBytes);
	cudaMalloc(&c_dev, vecBytes);
	cout << "\n\nMatrix is :\n\n";
	for (int i = 0; i < n; i++)
	{
		for (int j = 0; j < n; j++)
		{
			a[i * n + j] = i * n + j + 1; //rand()%n;
			cout << a[i * n + j] << " ";
		}
		b[i] = i + 1; //rand()%n;
		cout << "\n";
	}
	cout << "\n\nVector is: \n\n";
	for (int i = 0; i < n; i++)
		cout << b[i] << " ";
	cout << "\n\n";
	cudaMemcpy(a_dev, a, matBytes, cudaMemcpyHostToDevice);
	cudaMemcpy(b_dev, b, vecBytes, cudaMemcpyHostToDevice);
	// One thread per output row, 256-thread blocks, ceiling division.
	matrixVectorMultiplication<<<(n + 255) / 256, 256>>>(a_dev, b_dev, c_dev, n);
	cudaMemcpy(c, c_dev, vecBytes, cudaMemcpyDeviceToHost);
	//CPU matrixVector multiplication (reference result)
	clock_t t = clock();
	for (int row = 0; row < n; row++)
	{
		int sum = 0;
		for (int col = 0; col < n; col++)
		{
			sum = sum + a[col * n + row] * b[col];
		}
		d[row] = sum;
	}
	t = clock() - t;
	cout << "\nCPU Time Elapsed: " << ((double)t); //((double)t)/CLOCKS_PER_SEC;
	int error = 0;
	cout << "\n\n";
	for (int i = 0; i < n; i++) {
		error += d[i] - c[i];
		cout << " gpu " << c[i] << " CPU " << d[i] << endl;
	}
	cout << "\nError : " << error << "\n\n";
	// Release device and host memory (previously leaked).
	cudaFree(a_dev);
	cudaFree(b_dev);
	cudaFree(c_dev);
	delete[] a;
	delete[] b;
	delete[] c;
	delete[] d;
	return 0;
}
/*
Output
==9336== NVPROF is profiling process 9336, command: ./a.out
Matrix is :
1 2 3 4 5 6 7 8 9 10
11 12 13 14 15 16 17 18 19 20
21 22 23 24 25 26 27 28 29 30
31 32 33 34 35 36 37 38 39 40
41 42 43 44 45 46 47 48 49 50
51 52 53 54 55 56 57 58 59 60
61 62 63 64 65 66 67 68 69 70
71 72 73 74 75 76 77 78 79 80
81 82 83 84 85 86 87 88 89 90
91 92 93 94 95 96 97 98 99 100
Vector is:
1 2 3 4 5 6 7 8 9 10
CPU Time Elapsed: 2
gpu 3355 CPU 3355
gpu 3410 CPU 3410
gpu 3465 CPU 3465
gpu 3520 CPU 3520
gpu 3575 CPU 3575
gpu 3630 CPU 3630
gpu 3685 CPU 3685
gpu 3740 CPU 3740
gpu 3795 CPU 3795
gpu 3850 CPU 3850
Error : 0
==9336== Profiling application: ./a.out
==9336== Profiling result:
Type Time(%) Time Calls Avg Min Max Name
GPU activities: 48.61% 3.9360us 1 3.9360us 3.9360us 3.9360us matrixVectorMultiplication(int*, int*, int*, int)
30.84% 2.4970us 2 1.2480us 1.0250us 1.4720us [CUDA memcpy HtoD]
20.55% 1.6640us 1 1.6640us 1.6640us 1.6640us [CUDA memcpy DtoH]
API calls: 99.69% 142.59ms 3 47.529ms 4.8320us 142.57ms cudaMalloc
0.17% 239.79us 94 2.5500us 239ns 99.603us cuDeviceGetAttribute
0.07% 96.375us 1 96.375us 96.375us 96.375us cuDeviceTotalMem
0.03% 42.822us 3 14.274us 7.1920us 18.151us cudaMemcpy
0.02% 35.703us 1 35.703us 35.703us 35.703us cuDeviceGetName
0.02% 22.820us 1 22.820us 22.820us 22.820us cudaLaunch
0.00% 3.0470us 3 1.0150us 274ns 2.4380us cuDeviceGetCount
0.00% 1.7430us 2 871ns 296ns 1.4470us cuDeviceGet
0.00% 1.1430us 4 285ns 166ns 524ns cudaSetupArgument
0.00% 818ns 1 818ns 818ns 818ns cudaConfigureCall
*/ |
10,987 | #include "includes.h"
#define HANDLE_ERROR( err ) ( HandleError( err, __FILE__, __LINE__ ))
// Element-wise vector addition: CG[i] = AG[i] + BG[i] for every i < N
// (N is a file-scope constant); one element per thread with a tail guard.
__global__ void Vector_Plus ( int *AG , int *BG , int *CG)
{
	int id = blockDim.x * blockIdx.x + threadIdx.x;
	if (id >= N)
		return;
	CG[id] = AG[id] + BG[id];
}
10,988 | #include "includes.h"
using namespace std;
#ifndef MAP_FILE
#define MAP_FILE MAP_SHARED
#endif
// Backward pass for a broadcast elementwise multiply out = in1 * in2:
// accumulates d(in1) and writes a full-size temporary for d(in2).
// in2 is the smaller operand; its index wraps modulo in2ScalarCount.
__global__ void mul_sub_grad(float* in1_x, float* in1_d, float* in2_x, float* in2_d, float* out, int in1ScalarCount, int in2ScalarCount) {
// Grid-stride loop: each thread covers tid, tid+stride, ... so any launch
// geometry processes all in1ScalarCount elements.
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (; tid < in1ScalarCount; tid += stride) {
int index = tid % in2ScalarCount;   // broadcast index into in2
// d(in1)[tid] += upstream grad * broadcast in2 value
in1_d[tid] += out[tid] * in2_x[index];
// d(in2) is written at in1's size; the caller must reduce it over the
// broadcast dimension afterwards.
in2_d[tid] = in1_x[tid] * out[tid]; // this is the temp array, need to be reduced!
}
}
10,989 | #include <stdio.h>
#include <stdlib.h>
#include <cufft.h>
const int DEFAULT_SIGNAL_LENGTH = 4096;
const int DEFAULT_FFT_TRIALS = 10000;
const int DEFAULT_META_TRIALS = 10;
const int BATCH_SIZE = 1;
/* Benchmark repeated in-order forward C2C FFTs of a fixed-length random
 * signal with cuFFT, printing per-batch and average wall times.
 * Fixes: the FFT plan, CUDA events, device buffers and pinned host buffer
 * are now released (all previously leaked), the long->int plan-size
 * narrowing is explicit, and main returns a status. */
int main(int argc, char **argv) {
    int fft_trials = DEFAULT_FFT_TRIALS;
    int meta_trials = DEFAULT_META_TRIALS;
    printf("[INFO] META trials: %d\n", meta_trials);
    printf("[INFO] FFT trials: %d\n", fft_trials);
    long signal_length = DEFAULT_SIGNAL_LENGTH;
    printf("[INFO] Signal Length: %ld\n", signal_length);
    // Pinned host staging buffer (required for fast/async transfers).
    cufftComplex *h_original_signal;
    cudaMallocHost((void **) &h_original_signal, sizeof(cufftComplex) * signal_length);
    cufftComplex *d_original_signal, *d_applied_fft_signal;
    cudaMalloc((void **) &d_original_signal, sizeof(cufftComplex) * signal_length);
    cudaMalloc((void **) &d_applied_fft_signal, sizeof(cufftComplex) * signal_length);
    /*
     * generate random signal as original signal
     */
    srand(0); // fixed seed => reproducible input
    for (int i = 0; i < signal_length; i++) {
        h_original_signal[i].x = (float)rand() / RAND_MAX;
        h_original_signal[i].y = 0.0;
    }
    cudaMemcpy(d_original_signal, h_original_signal, sizeof(cufftComplex) * signal_length, cudaMemcpyHostToDevice);
    cufftHandle fft_plan;
    // cufftPlan1d takes the transform size as int; make the narrowing explicit.
    cufftPlan1d(&fft_plan, (int)signal_length, CUFFT_C2C, BATCH_SIZE);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float sum_of_elapsed_times = 0.0;
    printf("[INFO] Run benchmark...\n");
    for (int i = 0; i < meta_trials; i++) {
        cudaEventRecord(start, 0);
        for (int j = 0; j < fft_trials; j++) {
            cufftExecC2C(fft_plan, d_original_signal, d_applied_fft_signal, CUFFT_FORWARD);
        }
        // The stop-event sync also drains the queued, asynchronous FFTs.
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        float elapsed_time_ms;
        cudaEventElapsedTime(&elapsed_time_ms, start, stop);
        float elapsed_time_sec = elapsed_time_ms / 1000.0;
        sum_of_elapsed_times += elapsed_time_sec;
        printf("%f sec\n", elapsed_time_sec);
    }
    printf("[INFO] Finished!\n");
    printf("[INFO] Average: %lf sec\n", sum_of_elapsed_times / meta_trials);
    // Release everything that was previously leaked.
    cufftDestroy(fft_plan);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_original_signal);
    cudaFree(d_applied_fft_signal);
    cudaFreeHost(h_original_signal);
    return 0;
}
|
10,990 |
// Builds a 256-bin byte histogram of d_text in d_histgram.
// Phase 1: each block accumulates into its own 256-int slice of
// d_partialHistgrams (global memory; assumed zero-initialized by the
// host before launch -- TODO confirm against the caller).
// Phase 2: each block folds its slice into the shared output with atomics.
extern "C" __global__
void histgramMakerKernel_GlobalMemAtomics(int *d_histgram,int *d_partialHistgrams,
const unsigned char *d_text, int textLength) {
// This block's private 256-bin slice.
int *d_myHistgram = &d_partialHistgrams[blockIdx.x * 256];
// Grid-stride traversal of the input bytes.
int stride = gridDim.x * blockDim.x;
int gid = blockDim.x * blockIdx.x + threadIdx.x;
for (int pos = gid; pos < textLength; pos += stride) {
int ch = d_text[pos];
atomicAdd(&d_myHistgram[ch], 1);
}
// All threads of this block must finish counting before the fold below.
__syncthreads();
for (int histPos = threadIdx.x; histPos < 256; histPos += blockDim.x)
atomicAdd(&d_histgram[histPos], d_myHistgram[histPos]);
}
|
10,991 | #include "includes.h"
/*
* Read TODO items below
*/
/*
 * Tiled matrix multiply c = a * b using 32x32 shared-memory tiles.
 * Assumes a 32x32 thread block and a square grid with gridDim.x*32 equal to
 * the matrix width (there are no edge guards) -- TODO confirm with callers.
 * Fix: the original declared `__global__` twice on consecutive lines, which
 * is an invalid duplicate qualifier; it is declared exactly once here.
 * (`n` is unused but kept for interface compatibility.)
 */
__global__ void sharedMatmul(float *a, float *b, float *c, int n)
{
    __shared__ float A_tile[32][32];
    __shared__ float B_tile[32][32];
    int width = gridDim.x * blockDim.x;
    float acc = 0;
    int i = blockIdx.x * 32 + threadIdx.x;   // output column
    int j = blockIdx.y * 32 + threadIdx.y;   // output row
    /* Accumulate C tile by tile. */
    for (int tileIdx = 0; tileIdx < gridDim.x; tileIdx += 1)
    {
        /* Load one tile of A and one tile of B into shared mem */
        A_tile[threadIdx.y][threadIdx.x] = a[j * width + tileIdx * 32 + threadIdx.x];
        B_tile[threadIdx.y][threadIdx.x] = b[(tileIdx * 32 + threadIdx.y) * width + i];
        __syncthreads();   // tiles fully staged before anyone reads them
        /* Accumulate one tile of C from tiles of A and B in shared mem */
        for (int k = 0; k < 32; k++)
        {
            acc += A_tile[threadIdx.y][k] * B_tile[k][threadIdx.x];
        }
        __syncthreads();   // everyone done reading before the next overwrite
    }
    c[j * width + i] = acc;
}
10,992 | #include "includes.h"
// 2D SAXPY over an NX x NY array (NX/NY are file-scope constants):
// y = scalar * x + y, one element per thread, guarded at the grid tail.
__global__ void saxpy2D(float scalar, float * x, float * y)
{
	int row = blockIdx.x * blockDim.x + threadIdx.x;
	int col = blockIdx.y * blockDim.y + threadIdx.y;
	if (row >= NX || col >= NY)
		return;   // don't do more work than we have data
	int idx = row * NY + col;
	y[idx] = scalar * x[idx] + y[idx];
}
10,993 | // Compile: nvcc -g -G -arch=sm_61 -std=c++11 assignment5-p3.cu -o assignment5-p3
// Execute: ./assignment5-p3
#include <cmath>
#include <iostream>
#include <sys/time.h>
#include <cuda.h>
#define SIZE 4096
#define THRESHOLD (0.000001)
#define BLOCK_SIZE 16
using std::cerr;
using std::cout;
using std::endl;
// Wall-clock time in seconds (microsecond resolution via gettimeofday).
// Prints a diagnostic if the syscall fails and still returns whatever the
// (then unspecified) timeval holds, matching the original behavior.
double rtclock() { // Seconds
  struct timezone tz;
  struct timeval tv;
  int stat = gettimeofday(&tv, &tz);
  if (stat != 0) {
    std::cout << "Error return from gettimeofday: " << stat << "\n";
  }
  return tv.tv_sec + tv.tv_usec * 1.0e-6;
}
// CPU reference: accumulate P += M^T * M as a sum of rank-1 updates over k,
// i.e. P[r][c] += M[k][r] * M[k][c] for every k. P must be zeroed by caller.
__host__ void ATAonCPU(double* M, double* P) {
  for (int k = 0; k < SIZE; k++)
    for (int r = 0; r < SIZE; r++)
      for (int c = 0; c < SIZE; c++)
        P[r*SIZE + c] += M[k*SIZE + r] * M[k*SIZE + c];
}
// Compare Test against Ref element-wise; report the count of entries whose
// signed difference exceeds THRESHOLD and the largest such difference.
__host__ void check_result(double* Test, double* Ref) {
  int numdiffs = 0;
  double maxdiff = 0;
  for (int r = 0; r < SIZE; r++) {
    for (int c = 0; c < SIZE; c++) {
      double rel_diff = Test[r*SIZE + c] - Ref[r*SIZE + c];
      if (fabs(rel_diff) > THRESHOLD) {
        numdiffs++;
        if (rel_diff > maxdiff)
          maxdiff = rel_diff;
      }
    }
  }
  if (numdiffs > 0)
    cout << numdiffs << " Diffs found over THRESHOLD " << THRESHOLD << " Max Diff = " << maxdiff
         << "\n";
  else
    cout << "No differences found between base and test versions\n";
}
// Zero the SIZE x SIZE output buffer before re-use by the next kernel run.
__host__ void reset(double* h_dev_out){
  const long total = (long)SIZE * SIZE;
  for (long idx = 0; idx < total; idx++)
    h_dev_out[idx] = 0;
}
// Naive B = A^T * A: one thread per output element, reading A straight from
// global memory. Launched with 32x32 blocks covering the SIZE x SIZE output.
__global__ void ATAkernel1(double* A, double* B) {
int i = blockIdx.y * blockDim.y + threadIdx.y;   // output row
int j = blockIdx.x * blockDim.x + threadIdx.x;   // output column
double val = 0;
// B[i][j] = sum_k A[k][i] * A[k][j]
for(int k = 0; k < SIZE; k++){
val += A[k*SIZE + i] * A[k*SIZE + j];
}
B[i*SIZE + j] = val;
}
// Tiled B = A^T * A using shared memory. Launched with BLOCK_SIZE x
// BLOCK_SIZE blocks; SIZE must be a multiple of BLOCK_SIZE (exact division
// below, no edge guards).
__global__ void ATAkernel2(double* A, double* B) {
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
// First output row/column covered by this block.
int top_left_i = blockIdx.y * BLOCK_SIZE;
int top_left_j = blockIdx.x * BLOCK_SIZE;
double val = 0;
// Walk the k (summation) dimension one BLOCK_SIZE-tall tile at a time.
for(int block_num = 0; block_num < SIZE/BLOCK_SIZE; block_num++){
__shared__ double mat1[BLOCK_SIZE][BLOCK_SIZE];
__shared__ double mat2[BLOCK_SIZE][BLOCK_SIZE];
// Stage the two column-slices of A needed for this step (i-range, j-range).
mat1[threadIdx.y][threadIdx.x] = A[(block_num * BLOCK_SIZE + threadIdx.y)*SIZE + (top_left_i + threadIdx.x)];
mat2[threadIdx.y][threadIdx.x] = A[(block_num * BLOCK_SIZE + threadIdx.y)*SIZE + (top_left_j + threadIdx.x)];
__syncthreads();   // tiles fully loaded before use
for(int k = 0; k < BLOCK_SIZE; k++){
val += mat1[k][threadIdx.y] * mat2[k][threadIdx.x];
}
__syncthreads();   // all reads done before the next iteration overwrites
}
B[i*SIZE + j] = val;
}
// Same shared-memory tiling as ATAkernel2 but with the inner product
// manually unrolled 4x; additionally requires BLOCK_SIZE % 4 == 0.
__global__ void ATAkernel3(double* A, double* B) {
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
// First output row/column covered by this block.
int top_left_i = blockIdx.y * BLOCK_SIZE;
int top_left_j = blockIdx.x * BLOCK_SIZE;
double val = 0;
for(int block_num = 0; block_num < SIZE/BLOCK_SIZE; block_num++){
__shared__ double mat1[BLOCK_SIZE][BLOCK_SIZE];
__shared__ double mat2[BLOCK_SIZE][BLOCK_SIZE];
mat1[threadIdx.y][threadIdx.x] = A[(block_num * BLOCK_SIZE + threadIdx.y)*SIZE + (top_left_i + threadIdx.x)];
mat2[threadIdx.y][threadIdx.x] = A[(block_num * BLOCK_SIZE + threadIdx.y)*SIZE + (top_left_j + threadIdx.x)];
__syncthreads();   // tiles fully loaded before use
// 4-way manual unroll of the BLOCK_SIZE-length dot product.
for(int k = 0; k < BLOCK_SIZE; k += 4){
val += mat1[k][threadIdx.y] * mat2[k][threadIdx.x];
val += mat1[k+1][threadIdx.y] * mat2[k+1][threadIdx.x];
val += mat1[k+2][threadIdx.y] * mat2[k+2][threadIdx.x];
val += mat1[k+3][threadIdx.y] * mat2[k+3][threadIdx.x];
}
__syncthreads();   // all reads done before the next iteration overwrites
}
B[i*SIZE + j] = val;
}
// Run one GPU kernel version: copy input H2D, launch, copy D2H, validate
// against the CPU reference and print timing. The copies are inside the
// timed region, exactly as in the original code. Returns false on any
// cudaMemcpy failure (after printing the original diagnostic).
static bool runKernelVersion(void (*kernel)(double*, double*), int ver,
                             double* d_in, double* d_out,
                             double* h_in, double* h_dev_out, double* h_cpu_out,
                             dim3 numBlocks, dim3 threadsPerBlock, size_t size) {
  cudaError_t status;
  cudaEvent_t start, end;
  float kernel_time;
  cudaEventCreate(&start);
  cudaEventCreate(&end);
  cudaEventRecord(start, 0);
  status = cudaMemcpy(d_in, h_in, size, cudaMemcpyHostToDevice);
  if (status != cudaSuccess) {
    fprintf(stderr, "cudaMemcpy() failed");
    return false;
  }
  kernel<<<numBlocks, threadsPerBlock>>>(d_in, d_out);
  status = cudaMemcpy(h_dev_out, d_out, size, cudaMemcpyDeviceToHost);
  if (status != cudaSuccess) {
    fprintf(stderr, "cudaMemcpy() failed");
    return false;
  }
  cudaEventRecord(end, 0);
  cudaEventSynchronize(end);
  cudaEventElapsedTime(&kernel_time, start, end);
  cudaEventDestroy(start);
  cudaEventDestroy(end);
  check_result(h_cpu_out, h_dev_out);
  cout << "A^T.A version" << ver << " on GPU: "
       << ((2.0 * SIZE * SIZE * SIZE) / (kernel_time * 1.0e-03))
       << " GFLOPS; Time = " << kernel_time << " msec" << endl;
  return true;
}
/* Driver: compute A^T.A on the CPU, then run the three GPU kernel versions
 * and validate each against the CPU result. The three copy/launch/check
 * stanzas (previously triplicated inline) are factored into
 * runKernelVersion(); output is unchanged. */
int main() {
  cout << "Matrix Size = " << SIZE << "\n";
  double* h_in = new double[SIZE*SIZE];
  double* h_cpu_out = new double[SIZE*SIZE];
  double* h_dev_out = new double[SIZE*SIZE];
  // Deterministic input; outputs zeroed (ATAonCPU accumulates with +=).
  for (int i = 0; i < SIZE; i++) {
    for (int j = 0; j < SIZE; j++) {
      h_in[i*SIZE + j] = i * j * 0.25;
      h_cpu_out[i*SIZE + j] = 0;
      h_dev_out[i*SIZE + j] = 0;
    }
  }
  double clkbegin = rtclock();
  ATAonCPU(h_in, h_cpu_out);
  double clkend = rtclock();
  double cpu_time = clkend - clkbegin;
  cout << "A^T.A on CPU: " << ((2.0 * SIZE * SIZE * SIZE) / cpu_time)
       << " GFLOPS; Time = " << cpu_time * 1000 << " msec" << endl;
  cudaError_t status;
  double* d_in;
  double* d_out;
  size_t size = SIZE * SIZE * sizeof(double);
  status = cudaMalloc(&d_in, size);
  if (status != cudaSuccess) {
    fprintf(stderr, "cudaMalloc() failed");
    return EXIT_FAILURE;
  }
  status = cudaMalloc(&d_out, size);
  if (status != cudaSuccess) {
    fprintf(stderr, "cudaMalloc() failed");
    return EXIT_FAILURE;
  }
  // Version 1: naive kernel, 32x32 thread blocks.
  dim3 threadsPerBlock(32, 32);
  dim3 numBlocks(SIZE / threadsPerBlock.x, SIZE / threadsPerBlock.y);
  if (!runKernelVersion(ATAkernel1, 1, d_in, d_out, h_in, h_dev_out, h_cpu_out,
                        numBlocks, threadsPerBlock, size))
    return EXIT_FAILURE;
  // Version 2: shared-memory tiling, BLOCK_SIZE x BLOCK_SIZE thread blocks.
  reset(h_dev_out);
  threadsPerBlock = dim3(BLOCK_SIZE, BLOCK_SIZE);
  numBlocks = dim3(SIZE / threadsPerBlock.x, SIZE / threadsPerBlock.y);
  if (!runKernelVersion(ATAkernel2, 2, d_in, d_out, h_in, h_dev_out, h_cpu_out,
                        numBlocks, threadsPerBlock, size))
    return EXIT_FAILURE;
  // Version 3: tiling + manual 4-way unroll (same launch geometry).
  reset(h_dev_out);
  if (!runKernelVersion(ATAkernel3, 3, d_in, d_out, h_in, h_dev_out, h_cpu_out,
                        numBlocks, threadsPerBlock, size))
    return EXIT_FAILURE;
  cudaFree(d_in);
  cudaFree(d_out);
  delete[] h_in;
  delete[] h_cpu_out;
  delete[] h_dev_out;
  return EXIT_SUCCESS;
}
|
10,994 | /******************************************************************************
* PROGRAM: copyStruture
* PURPOSE: This program is a test which test the ability to transfer multilevel
* C++ structured data from host to device, modify them and transfer back.
*
*
* NAME: Vuong Pham-Duy.
* College student.
* Faculty of Computer Science and Technology.
* Ho Chi Minh University of Technology, Viet Nam.
* vuongpd95@gmail.com
*
* DATE: 5/10/2017
*
******************************************************************************/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a failed CUDA call (file/line context) and optionally terminate
// the process with the error code; success is a no-op.
void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
	if (code == cudaSuccess)
		return;
	fprintf(stderr, "GPUassert: %s %s %d\n",
		cudaGetErrorString(code), file, line);
	if (abort)
		exit(code);
}
// Do-nothing kernel used only to exercise the launch path; the side-effect-
// free loop may well be optimized away by the compiler.
__global__ void func() {
	int i = 0;
	while (i < 1000)
		i++;
}
// Launch 1024 blocks x 1024 threads of the no-op kernel, then surface any
// launch-configuration error immediately and any runtime error after sync.
int main(int argc, char *argv[])
{
	const int blocksPerGrid = 1024;
	const dim3 threadsPerBlock(1024);
	func<<<blocksPerGrid, threadsPerBlock>>>();
	gpuErrchk(cudaPeekAtLastError());
	gpuErrchk(cudaDeviceSynchronize());
	return 0;
}
|
10,995 | #include <cuda.h>
#include <stdio.h>
#include <math.h>
// Bilinear interpolation: for each output pixel (i,j), sample imgin at the
// fractional coordinates supplied by fCol/fRow and store the result in
// imgout. The -1 offsets convert from what appear to be 1-based (MATLAB
// interp2-style) coordinates to 0-based array indices -- TODO confirm.
__global__ void interp2(double *imgout, double *fCol, double *fRow, double *imgin, int rows, int cols)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
// Guard the grid tail.
if (i >= rows || j >= cols)
return;
// Fractional sample position for this pixel.
double i_o_f = fCol[i * cols + j];
double j_o_f = fRow[i * cols + j];
// Clamp into the valid (1-based) coordinate range.
i_o_f = fmax(1.0, fmin(i_o_f, (double) cols));
j_o_f = fmax(1.0, fmin(j_o_f, (double) rows));
//we will interpolate x direction first, giving R1 and R2//
double R1 = (floor(i_o_f + 1) - i_o_f) * imgin[(int) floor(i_o_f - 1) * cols + (int) floor(j_o_f - 1)] + (i_o_f - floor(i_o_f)) * imgin[(int) ceil(i_o_f - 1) * cols + (int) floor(j_o_f - 1)];
double R2 = (floor(i_o_f + 1) - i_o_f) * imgin[(int) floor(i_o_f - 1) * cols + (int) ceil(j_o_f - 1)] + (i_o_f - floor(i_o_f)) * imgin[(int) ceil(i_o_f - 1) * cols + (int) ceil(j_o_f - 1)];
// Then blend R1 and R2 along the second axis to finish.
imgout[i * cols + j] = (floor(j_o_f + 1) - j_o_f) * R1 + (j_o_f - floor(j_o_f)) * R2;
}
10,996 | /**
* @file : constant_eg1.cu
* @brief : Examples of using constant memory for CUDA, with smart pointers
* @details : constant memory for CUDA examples
*
* @author : Ernest Yeung <ernestyalumni@gmail.com>
* @date : 20170103
* @ref : http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#device-memory-specifiers
*
* https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=ernestsaveschristmas%2bpaypal%40gmail%2ecom&lc=US&item_name=ernestyalumni¤cy_code=USD&bn=PP%2dDonationsBF%3abtn_donateCC_LG%2egif%3aNonHosted
*
* which won't go through a 3rd. party such as indiegogo, kickstarter, patreon.
* Otherwise, I receive emails and messages on how all my (free) material on
* physics, math, and engineering have helped students with their studies,
* and I know what it's like to not have money as a student, but love physics
* (or math, sciences, etc.), so I am committed to keeping all my material
* open-source and free, whether or not
* sufficiently crowdfunded, under the open-source MIT license:
* feel free to copy, edit, paste, make your own versions, share, use as you wish.
* Just don't be an asshole and not give credit where credit is due.
* Peace out, never give up! -EY
*
* */
/*
* COMPILATION TIP
* nvcc constant_eg.cu -o constant_eg
*
* */
#include <iostream>
#include <memory> // std::unique_ptr
#include <array> // std::array
#include <math.h> // std::exp
/*
// custom deleter as lambda function for float arrays (RR=real numbers=floats)
auto del_RRarr_lambda=[&](float* ptr) { cudaFree(ptr); };
// custom deleter as lambda function for int arrays (ZZ=integers=ints)
auto del_ZZarr_lambda=[&](int* ptr) { cudaFree(ptr); };
/* error: expected a type specifier // constant_eg1.cu(39): error: variable "del_RRarr_lambda" is not a type name
* obtained when trying to initialize in type declaration in struct */
/*
struct S1_unique_lambda {
// (data) members
// std::unique_ptr<float[],decltype(del_RRarr_lambda)> dev_X_uptr(nullptr, del_RRarr_lambda);
// std::unique_ptr<int[], decltype(del_ZZarr_lambda)> dev_S_uptr(nullptr, del_ZZarr_lambda);
// std::unique_ptr<float[],decltype(del_RRarr_lambda)> dev_X_uptr;
// std::unique_ptr<int[], decltype(del_ZZarr_lambda)> dev_S_uptr;
// std::unique_ptr<float[]> dev_X_uptr; // error:
/* error: no operator "=" matches these operands
operand types are: std::unique_ptr<float [], std::default_delete<float []>> = std::unique_ptr<float [], lambda [](float *)->void>
*/
/*
std::unique_ptr<int[]> dev_S_uptr;
size_t Lx; // 8 bytes
size_t Ly; // 8 bytes
unsigned long long Nx; // 8 bytes
unsigned long long Ny; // 8 bytes
// constructor
S1_unique_lambda(size_t Lx,size_t Ly,unsigned long long Nx,unsigned long long Ny);
// move constructor
S1_unique_lambda(S1_unique_lambda &&);
// operator overload assignment =
S1_unique_lambda &operator=(S1_unique_lambda &&);
};
// constructor
S1_unique_lambda::S1_unique_lambda(size_t Lx,size_t Ly,unsigned long long Nx,unsigned long long Ny) :
Lx {Lx}, Ly {Ly}, Nx {Nx}, Ny{Ny}
{
std::unique_ptr<float[],decltype(del_RRarr_lambda)> dev_X_uptr_new(nullptr,del_RRarr_lambda);
dev_X_uptr = std::move(dev_X_uptr_new);
std::unique_ptr<int[],decltype(del_ZZarr_lambda)> dev_S_uptr_new(nullptr,del_ZZarr_lambda);
dev_S_uptr = std::move(dev_S_uptr_new);
};
// move constructor
S1_unique_lambda::S1_unique_lambda(S1_unique_lambda&& old_struct) :
Lx { old_struct.Lx }, Ly { old_struct.Ly}, Nx { old_struct.Nx }, Ny { old_struct.Ny },
dev_X_uptr{std::move(old_struct.dev_X_uptr) }, dev_S_uptr{std::move(old_struct.dev_S_uptr) } {};
// operator overload assignment =
S1_unique_lambda & S1_unique_lambda::operator=(S1_unique_lambda && old_struct) {
Lx = old_struct.Lx;
Ly = old_struct.Ly;
Nx = old_struct.Nx;
Ny = old_struct.Ny;
// unique_ptrs moved
dev_X_uptr = std::move( old_struct.dev_X_uptr) ;
dev_S_uptr = std::move( old_struct.dev_S_uptr);
return *this;
};
*/
// Custom deleter (functor form) for device float arrays: releases the
// allocation with cudaFree when a std::unique_ptr using it goes out of scope.
struct del_RRarr_struct {
	void operator()(float* p) { cudaFree(p); }
};
// Custom deleter (functor form) for device int arrays: releases the
// allocation with cudaFree when a std::unique_ptr using it goes out of scope.
struct del_ZZarr_struct {
	void operator()(int* p) { cudaFree(p); }
};
/*
 * Trivially-copyable parameter bundle intended for __constant__ memory and
 * cudaMemcpyToSymbol. __constant__ variables cannot be dynamically
 * initialized, which is why the smart-pointer members and all constructors
 * were abandoned (kept below as commentary) in favour of a plain aggregate.
 */
struct S1_unique_struct {
// (data) members , no dynamic initialization, but then no suitable constructor
// std::unique_ptr<float[],del_RRarr_struct> dev_X_uptr; //(nullptr, del_RRarr_struct());
// std::unique_ptr<int[], del_ZZarr_struct> dev_S_uptr; // (nullptr, del_ZZarr_struct());
std::array<float, 17> transProb;
size_t Lx; // 8 bytes
size_t Ly; // 8 bytes
unsigned long long Nx; // 8 bytes
unsigned long long Ny; // 8 bytes
/* no dynamic initialization
// default constructor, needed by __constant__
S1_unique_struct();
// constructor
S1_unique_struct(size_t Lx, size_t Ly,unsigned long long Nx, unsigned long long Ny);
// move constructor
S1_unique_struct(S1_unique_struct &&);
// operator overload assignment =
S1_unique_struct &operator=(S1_unique_struct &&);
*/
};
/* no dynamic initialization
// default constructor
S1_unique_struct::S1_unique_struct() {
/* std::unique_ptr<float[],del_RRarr_struct> dev_X_uptr_new(nullptr,del_RRarr_struct());
dev_X_uptr = std::move(dev_X_uptr_new);
std::unique_ptr<int[],del_ZZarr_struct> dev_S_uptr_new(nullptr,del_ZZarr_struct());
dev_S_uptr = std::move(dev_S_uptr_new);
};
/*
* error: dynamic initialization is not supported for __device__, __constant__ and __shared__ variables.
*/
// constructor
/*
S1_unique_struct::S1_unique_struct(size_t Lx,size_t Ly,unsigned long long Nx,unsigned long long Ny) :
Lx {Lx}, Ly {Ly}, Nx {Nx}, Ny{Ny}
{
std::unique_ptr<float[],del_RRarr_struct> dev_X_uptr_new(nullptr,del_RRarr_struct());
dev_X_uptr = std::move(dev_X_uptr_new);
std::unique_ptr<int[],del_ZZarr_struct> dev_S_uptr_new(nullptr,del_ZZarr_struct());
dev_S_uptr = std::move(dev_S_uptr_new);
};
// move constructor
S1_unique_struct::S1_unique_struct(S1_unique_struct&& old_struct) :
Lx { old_struct.Lx }, Ly { old_struct.Ly}, Nx { old_struct.Nx }, Ny { old_struct.Ny },
dev_X_uptr{std::move(old_struct.dev_X_uptr) }, dev_S_uptr{std::move(old_struct.dev_S_uptr) } {};
// operator overload assignment =
S1_unique_struct & S1_unique_struct::operator=(S1_unique_struct && old_struct) {
Lx = old_struct.Lx;
Ly = old_struct.Ly;
Nx = old_struct.Nx;
Ny = old_struct.Ny;
// unique_ptrs moved
dev_X_uptr = std::move( old_struct.dev_X_uptr) ;
dev_S_uptr = std::move( old_struct.dev_S_uptr);
return *this;
};
*/
__constant__ S1_unique_struct constS1_uniq_struct;
/*
* struct with a constructor (but very trivial, or what CUDA doc calls "empty"
* */
/*
 * Plain aggregate of three floats, kept constructor-free so it can be placed
 * in __constant__ memory (dynamic initialization is not allowed there; the
 * constructors below stay commented out for that reason).
 */
struct S2 {
float E;
float M;
float T;
// default constructor
// S2();
// constructors
// S2(float, float, float);
// S2(float); // only given T
};
// error: dynamic initialization is not supported for __device__, __constant__ and __shared__ variables.
/*
S2::S2() : E {0.f}, M {0.f}, T {0.f} {};
S2::S2(float E, float M, float T) : E{E}, M{M}, T{T} {};
S2::S2(float T) : E{0.f}, M{0.f}, T{T} {};
*/
// error: dynamic initialization is not supported for __device__, __constant__ and __shared__ variables.
//S2::S2() { E = 0.f; M=0.f; T=0.f; } ;
__constant__ S2 constS2 ;
// Aggregate like S1_unique_struct plus a coupling constant J and a table
// lookup helper; remains trivially copyable for __constant__ placement.
struct S3 {
// (data) members , no dynamic initialization, but then no suitable constructor
std::array<float, 17> transProb;
size_t Lx; // 8 bytes
size_t Ly; // 8 bytes
unsigned long long Nx; // 8 bytes
unsigned long long Ny; // 8 bytes
float J;
// Look up the transition probability for energy change DeltaE; the table
// index is DeltaE+8, so DeltaE is expected in [-8, 8] -- TODO confirm.
float get_by_DeltaE(int DeltaE) {
return transProb[DeltaE+8]; }
};
__constant__ S3 constS3;
// Demonstrates filling __constant__ structs from the host with
// cudaMemcpyToSymbol and reading one back with cudaMemcpyFromSymbol.
int main(int argc, char* argv[]) {
// std::cout << " sizeof S1_unique_lambda : " << sizeof(S1_unique_lambda) << std::endl;
std::cout << " sizeof S1_unique_struct : " << sizeof(S1_unique_struct) << std::endl;
/* "boilerplate" test values */
// on host: transProb[i] = e^i
std::array<float,17> h_transProb;
for (int i=0; i<17; i++) {
h_transProb[i] = std::exp( (float) i) ;
}
// Aggregate-initialize the host copy, then upload into the __constant__ symbol.
S1_unique_struct s1_uniq_struct { h_transProb, 256,128,64,32 };
cudaMemcpyToSymbol(constS1_uniq_struct, &s1_uniq_struct, sizeof(S1_unique_struct));
// this works as well
// cudaMemcpyToSymbol(constS1_uniq_struct, &s1_uniq_struct, sizeof(s1_uniq_struct));
/* sanity check: read the symbol back and print what the device would see */
S1_unique_struct h_s1_uniq_struct;
cudaMemcpyFromSymbol(&h_s1_uniq_struct, constS1_uniq_struct, sizeof(S1_unique_struct));
std::cout << " Lx : " << h_s1_uniq_struct.Lx << " Ly : " << h_s1_uniq_struct.Ly <<
" Nx : " << h_s1_uniq_struct.Nx << " Ny : " << h_s1_uniq_struct.Ny << std::endl;
for (int i =0; i<17; i++) {
std::cout << h_s1_uniq_struct.transProb[i] << " ";
}
std::cout << std::endl << std::endl;
/* "boilerplate" test values */
// on host: aggregate init sets E = 1.f, remaining members zero-initialized
S2 hS2 { 1.f };
cudaMemcpyToSymbol(constS2, &hS2, sizeof(S2));
/* ********** testing struct S3 ********** */
S3 hS3 { h_transProb, 256,128,64,32, 1.0f } ;
cudaMemcpyToSymbol(constS3, &hS3, sizeof(S3));
}
|
10,997 | /******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
/********************************************************************
 *
 * Compute C = A + B
 * where A, B and C are (1 * n) vectors.
 * One element per thread; threads past the end of the data exit early.
 *
 ********************************************************************/
__global__ void VecAdd(int n, const float *A, const float *B, float* C) {
	int idx = threadIdx.x + blockDim.x * blockIdx.x;
	if (idx >= n)
		return;
	C[idx] = A[idx] + B[idx];
}
/* Host wrapper: launch VecAdd with one thread per element in 256-thread
 * blocks.
 * Fix: the original grid size was (n-1/BLOCK_SIZE)+1, which by operator
 * precedence is n - (1/256) + 1 == n+1 blocks -- roughly 256x too many
 * (correct output only thanks to the kernel's bounds guard). Now uses the
 * standard ceiling division, and returns early for non-positive n to avoid
 * the int/unsigned wrap in (n-1)/BLOCK_SIZE. */
void basicVecAdd( float *A, float *B, float *C, int n)
{
    const unsigned int BLOCK_SIZE = 256;
    if (n <= 0) return;   // nothing to do; also avoids (n-1) wrap-around
    dim3 DimGrid((n - 1) / BLOCK_SIZE + 1, 1, 1);   // ceil(n / BLOCK_SIZE)
    dim3 DimBlock(BLOCK_SIZE, 1, 1);
    VecAdd<<<DimGrid, DimBlock>>>(n, A, B, C);
}
|
10,998 | //
// Created by mustafa on 6/3/20.
//
#include "../headers/MC_Point.cuh"
// Construct a point from its three Cartesian coordinates.
__device__ __host__ MC_Point::MC_Point(float const x, float const y, float const z)
    : _x(x), _y(y), _z(z) {}
// Read-only coordinate accessors.
__device__ __host__ float MC_Point::x() const { return _x; }
__device__ __host__ float MC_Point::y() const { return _y; }
__device__ __host__ float MC_Point::z() const { return _z; }
// Component-wise difference of two points.
__device__ __host__ MC_Point MC_Point::operator-(MC_Point const &other) const {
    return {_x - other.x(), _y - other.y(), _z - other.z()};
}
// Component-wise sum of two points.
__device__ __host__ MC_Point MC_Point::operator+(MC_Point const &other) const {
    return {_x + other.x(), _y + other.y(), _z + other.z()};
}
// Uniform scaling of a point by a scalar factor.
__device__ __host__ MC_Point MC_Point::operator*(float const &other) const {
    return {_x * other, _y * other, _z * other};
}
10,999 | #include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <iostream>
#include <ctype.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cuda.h"
#define CEIL(a,b) ((a+b-1)/b)
#define SWAP(a,b,t) t=b; b=a; a=t;
#define PI 3.1415926
#define EDGE 0
#define NOEDGE 255
#define DATAKB(bytes) (bytes/1024)
#define DATAMB(bytes) (bytes/1024/1024)
#define DATABW(bytes,timems) ((float)bytes/(timems * 1.024*1024.0*1024.0))
#define MAXSTREAMS 32
typedef unsigned char uch;
typedef unsigned long ul;
typedef unsigned int ui;
uch *TheImg, *CopyImg; // Where images are stored in CPU
int ThreshLo=50, ThreshHi=100; // "Edge" vs. "No Edge" thresholds
// Where images and temporary results are stored in GPU
uch *GPUImg, *GPUResultImg;
double *GPUBWImg, *GPUGaussImg, *GPUGradient, *GPUTheta;
struct ImgProp{
ui Hpixels;
ui Vpixels;
uch HeaderInfo[54];
ul Hbytes;
} ip;
#define IPHB ip.Hbytes
#define IPH ip.Hpixels
#define IPV ip.Vpixels
#define IMAGESIZE (IPHB*IPV)
#define IMAGEPIX (IPH*IPV)
// Horizontally mirror one stripe of a 3-byte/pixel image: the pixel at
// column MYcol of a row is written to the mirrored column of ImgDst.
// Grid layout: blockIdx.x*blockDim.x+threadIdx.x spans columns and
// blockIdx.y selects the row offset from StartRow (stripe processing).
__global__
void Hflip3S(uch *ImgDst, uch *ImgSrc, ui Hpixels, ui Vpixels, ui RowBytes, ui StartRow)
{
ui ThrPerBlk = blockDim.x;
ui MYbid = blockIdx.x;
ui MYtid = threadIdx.x;
ui MYrow = StartRow + blockIdx.y;
ui MYcol = MYbid*ThrPerBlk + MYtid;
if (MYcol >= Hpixels) return; // col out of range
if (MYrow >= Vpixels) return; // row out of range
ui MYmirrorcol = Hpixels - 1 - MYcol;
ui MYoffset = MYrow * RowBytes;   // rows are RowBytes apart (padded rows)
// Byte offsets of the source pixel and its mirrored destination (3 bytes each).
ui MYsrcIndex = MYoffset + 3 * MYcol;
ui MYdstIndex = MYoffset + 3 * MYmirrorcol;
// swap pixels RGB @MYcol , @MYmirrorcol
ImgDst[MYdstIndex] = ImgSrc[MYsrcIndex];
ImgDst[MYdstIndex + 1] = ImgSrc[MYsrcIndex + 1];
ImgDst[MYdstIndex + 2] = ImgSrc[MYsrcIndex + 2];
}
// Convert one stripe of a 3-byte/pixel image to grayscale doubles:
// ImgBW[pixel] = mean of the three channel bytes. blockIdx.y selects the
// row offset from StartRow (stripe processing for multi-stream use).
__global__
void BWKernel2S(double *ImgBW, uch *ImgGPU, ui Hpixels, ui Vpixels, ui RowBytes, ui StartRow)
{
ui ThrPerBlk = blockDim.x;
ui MYbid = blockIdx.x;
ui MYtid = threadIdx.x;
ui R, G, B;
ui MYrow = StartRow + blockIdx.y;
ui MYcol = MYbid*ThrPerBlk + MYtid;
if (MYcol >= Hpixels) return; // col out of range
if (MYrow >= Vpixels) return; // row out of range
// Source rows are RowBytes apart (padded); output is dense Hpixels-wide.
ui MYsrcIndex = MYrow * RowBytes + 3 * MYcol;
ui MYpixIndex = MYrow * Hpixels + MYcol;
B = (ui)ImgGPU[MYsrcIndex];       // channel bytes taken in B,G,R order
G = (ui)ImgGPU[MYsrcIndex + 1];
R = (ui)ImgGPU[MYsrcIndex + 2];
ImgBW[MYpixIndex] = (double)(R + G + B) * 0.333333;   // average of the three
}
__constant__
double GaussC[5][5] = { { 2, 4, 5, 4, 2 },
{ 4, 9, 12, 9, 4 },
{ 5, 12, 15, 12, 5 },
{ 4, 9, 12, 9, 4 },
{ 2, 4, 5, 4, 2 } };
// Processes multiple rows (as many as gridDim.y, starting at StartRow).
// Applies the 5x5 Gaussian in GaussC to the B&W image, one thread per pixel.
__global__
void GaussKernel3S(double *ImgGauss, double *ImgBW, ui Hpixels, ui Vpixels, ui StartRow)
{
	ui col = blockIdx.x * blockDim.x + threadIdx.x;
	ui row = StartRow + blockIdx.y;
	if (col >= Hpixels) return; // col out of range
	if (row >= Vpixels) return; // row out of range
	ui out = row * Hpixels + col;
	// pixels within 2 of any border lack a full 5x5 neighborhood: emit 0
	if ((row < 2) || (row > Vpixels - 3) || (col < 2) || (col > Hpixels - 3)) {
		ImgGauss[out] = 0.0;
		return;
	}
	double acc = 0.0;
	for (int dy = -2; dy <= 2; dy++) {
		for (int dx = -2; dx <= 2; dx++) {
			ui nbr = (row + dy) * Hpixels + (col + dx);
			acc += ImgBW[nbr] * GaussC[dy + 2][dx + 2]; // taps from constant memory
		}
	}
	ImgGauss[out] = acc * 0.0062893; // normalize: (1/159)=0.0062893
}
// Sobel convolution masks: Gx responds to horizontal intensity change
// (vertical edges), Gy to vertical change (horizontal edges).
// NOTE(review): SobelKernel2S hand-unrolls these coefficients and never
// actually indexes Gx/Gy; they are kept here as documentation of the masks.
__device__
double Gx[3][3] = { { -1, 0, 1 },
		    { -2, 0, 2 },
		    { -1, 0, 1 } };
__device__
double Gy[3][3] = { { -1, -2, -1 },
		    { 0, 0, 0 },
		    { 1, 2, 1 } };
// Sobel edge detection on the Gauss-smoothed image: writes gradient magnitude
// into ImgGrad and edge direction (degrees) into ImgTheta, one thread/pixel.
// The 3x3 convolution is hand-unrolled below with the zero-weight taps
// dropped; the Gx/Gy arrays above document the masks but are not read here.
__global__
void SobelKernel2S(double *ImgGrad, double *ImgTheta, double *ImgGauss, ui Hpixels, ui Vpixels, ui StartRow)
{
	ui ThrPerBlk = blockDim.x;
	ui MYbid = blockIdx.x;
	ui MYtid = threadIdx.x;
	int indx;
	double GX,GY;
	ui MYrow = StartRow + blockIdx.y; // blockIdx.y = row offset within this chunk
	ui MYcol = MYbid*ThrPerBlk + MYtid;
	if (MYcol >= Hpixels) return; // col out of range
	if (MYrow >= Vpixels) return; // row out of range
	ui MYpixIndex = MYrow * Hpixels + MYcol;
	if ((MYrow<1) || (MYrow>Vpixels - 2) || (MYcol<1) || (MYcol>Hpixels - 2)){
		// 1-pixel border lacks a full 3x3 neighborhood; emit zeros
		ImgGrad[MYpixIndex] = 0.0;
		ImgTheta[MYpixIndex] = 0.0;
		return;
	}else{
		// indx walks the three window rows, one Hpixels stride at a time.
		// NOTE(review): indx starts at (row-1)*Hpixels + col-1 and the taps read
		// indx-1 / indx / indx+1, i.e. columns col-2..col -- a window centered one
		// pixel LEFT of (row,col). Verify against the reference implementation;
		// a global one-pixel shift of the gradient image is likely unintended.
		indx=(MYrow-1)*Hpixels + MYcol-1;
		GX = (-ImgGauss[indx-1]+ImgGauss[indx+1]);                  // top row of Gx: -1,0,1
		GY = (-ImgGauss[indx-1]-2*ImgGauss[indx]-ImgGauss[indx+1]); // top row of Gy: -1,-2,-1
		indx+=Hpixels;
		GX += (-2*ImgGauss[indx-1]+2*ImgGauss[indx+1]);             // middle row of Gx: -2,0,2
		indx+=Hpixels;
		GX += (-ImgGauss[indx-1]+ImgGauss[indx+1]);                 // bottom row of Gx: -1,0,1
		GY += (ImgGauss[indx-1]+2*ImgGauss[indx]+ImgGauss[indx+1]); // bottom row of Gy: 1,2,1
		ImgGrad[MYpixIndex] = sqrt(GX*GX + GY*GY);
		// NOTE(review): conventional Sobel direction is atan2(GY,GX); the angle
		// buckets in ThresholdKernel2S are written against this atan(GX/GY)
		// value, so change both or neither. GX==GY==0 yields a NaN theta, but
		// Threshold only consults theta for mid-range gradient magnitudes.
		ImgTheta[MYpixIndex] = atan(GX / GY)*57.2957795; // 180.0/PI = 57.2957795;
	}
}
// Kernel that calculates the threshold image from Gradient, Theta
// resulting image has an RGB for each pixel, same RGB for each pixel
// (i.e., the output is a gray edge map stored in 24-bit BMP layout).
// Classification: G<=ThreshLo -> NOEDGE, G>=ThreshHi -> EDGE; otherwise the
// two neighbors across the edge direction decide (simplified hysteresis).
__global__
void ThresholdKernel2S(uch *ImgResult, double *ImgGrad, double *ImgTheta, ui Hpixels, ui Vpixels, ui RowBytes, ui ThreshLo, ui ThreshHi, ui StartRow)
{
	ui ThrPerBlk = blockDim.x;
	ui MYbid = blockIdx.x;
	ui MYtid = threadIdx.x;
	unsigned char PIXVAL;
	double L, H, G, T;
	ui MYrow = StartRow + blockIdx.y; // blockIdx.y = row offset within this chunk
	ui MYcol = MYbid*ThrPerBlk + MYtid;
	if (MYcol >= Hpixels) return; // col out of range
	if (MYrow >= Vpixels) return; // row out of range
	ui MYresultIndex = MYrow * RowBytes + 3 * MYcol; // byte index, padded BGR rows
	ui MYpixIndex = MYrow * Hpixels + MYcol;         // pixel index, double buffers
	if ((MYrow<1) || (MYrow>Vpixels - 2) || (MYcol<1) || (MYcol>Hpixels - 2)){
		// 1-pixel border cannot be classified (neighbor lookups below would
		// fall outside the image); force NOEDGE
		ImgResult[MYresultIndex] = NOEDGE;
		ImgResult[MYresultIndex + 1] = NOEDGE;
		ImgResult[MYresultIndex + 2] = NOEDGE;
		return;
	}else{
		L = (double)ThreshLo; H = (double)ThreshHi;
		G = ImgGrad[MYpixIndex];
		PIXVAL = NOEDGE;
		if (G <= L){ // no edge
			PIXVAL = NOEDGE;
		}else if (G >= H){ // edge
			PIXVAL = EDGE;
		}else{
			// Ambiguous gradient: bucket theta into 45-degree sectors and
			// promote to EDGE if either neighbor along that sector is strong.
			// (Theta here is atan(GX/GY) in degrees, from SobelKernel2S.)
			T = ImgTheta[MYpixIndex];
			if ((T<-67.5) || (T>67.5)){
				// Look at left and right: [row][col-1] and [row][col+1]
				PIXVAL = ((ImgGrad[MYpixIndex - 1]>H) || (ImgGrad[MYpixIndex + 1]>H)) ? EDGE : NOEDGE;
			}
			else if ((T >= -22.5) && (T <= 22.5)){
				// Look at top and bottom: [row-1][col] and [row+1][col]
				PIXVAL = ((ImgGrad[MYpixIndex - Hpixels]>H) || (ImgGrad[MYpixIndex + Hpixels]>H)) ? EDGE : NOEDGE;
			}
			else if ((T>22.5) && (T <= 67.5)){
				// Look at upper right, lower left: [row-1][col+1] and [row+1][col-1]
				PIXVAL = ((ImgGrad[MYpixIndex - Hpixels + 1]>H) || (ImgGrad[MYpixIndex + Hpixels - 1]>H)) ? EDGE : NOEDGE;
			}
			else if ((T >= -67.5) && (T<-22.5)){
				// Look at upper left, lower right: [row-1][col-1] and [row+1][col+1]
				PIXVAL = ((ImgGrad[MYpixIndex - Hpixels - 1]>H) || (ImgGrad[MYpixIndex + Hpixels + 1]>H)) ? EDGE : NOEDGE;
			}
		}
		ImgResult[MYresultIndex] = PIXVAL;
		ImgResult[MYresultIndex + 1] = PIXVAL;
		ImgResult[MYresultIndex + 2] = PIXVAL;
	}
}
// helper function that wraps CUDA API calls, reports any error and exits
void chkCUDAErr(cudaError_t error_id)
{
	// Compare against the runtime-API constant cudaSuccess. The original
	// compared a cudaError_t against the driver-API constant CUDA_SUCCESS
	// (a CUresult), which only works because both happen to be 0 and the
	// driver header happens to be pulled in transitively.
	if (error_id != cudaSuccess){
		printf("CUDA ERROR :::%s\n", cudaGetErrorString(error_id));
		exit(EXIT_FAILURE);
	}
}
// Read a 24-bit/pixel BMP file into a 1D linear array.
// Allocate memory to store the 1D image and return its pointer.
// Side effect: fills the global 'ip' (width, height, padded row bytes, header).
// Returns NULL if the pixel buffer cannot be allocated; exits on I/O errors.
uch *ReadBMPlin(char* fn)
{
	static uch *Img;
	FILE* f = fopen(fn, "rb");
	if (f == NULL) { printf("\n\n%s NOT FOUND\n\n", fn); exit(EXIT_FAILURE); }
	uch HeaderInfo[54];
	// read the 54-byte header; a short read means this is not a valid BMP
	if (fread(HeaderInfo, sizeof(uch), 54, f) != 54) {
		printf("\n\n%s is not a valid BMP file\n\n", fn);
		fclose(f);
		exit(EXIT_FAILURE);
	}
	// extract image height and width from header
	int width = *(int*)&HeaderInfo[18]; ip.Hpixels = width;
	int height = *(int*)&HeaderInfo[22]; ip.Vpixels = height;
	int RowBytes = (width * 3 + 3) & (~3); ip.Hbytes = RowBytes; // rows pad to 4 bytes
	//save header for re-use
	memcpy(ip.HeaderInfo, HeaderInfo, 54);
	printf("\n Input File name: %17s (%u x %u) File Size=%u", fn,
	ip.Hpixels, ip.Vpixels, IMAGESIZE);
	// allocate memory to store the main image (1 Dimensional array)
	Img = (uch *)malloc(IMAGESIZE);
	if (Img == NULL) { fclose(f); return Img; } // Cannot allocate memory (was leaking f)
	// read the pixel data from disk; reject truncated files
	if (fread(Img, sizeof(uch), IMAGESIZE, f) != IMAGESIZE) {
		printf("\n\n%s: truncated pixel data\n\n", fn);
		fclose(f);
		free(Img);
		exit(EXIT_FAILURE);
	}
	fclose(f);
	return Img;
}
// Read a 24-bit/pixel BMP file into a 1D linear array.
// Allocate PINNED memory to store the 1D image and return its pointer.
// Pinned (page-locked) memory is required for async copies and gives higher
// PCIe bandwidth. Side effect: fills the global 'ip'.
// Returns NULL if the pinned buffer cannot be allocated; exits on I/O errors.
uch *ReadBMPlinPINNED(char* fn)
{
	static uch *Img;
	void *p;
	cudaError_t AllocErr;
	FILE* f = fopen(fn, "rb");
	if (f == NULL){ printf("\n\n%s NOT FOUND\n\n", fn); exit(EXIT_FAILURE); }
	uch HeaderInfo[54];
	// read the 54-byte header; a short read means this is not a valid BMP
	if (fread(HeaderInfo, sizeof(uch), 54, f) != 54) {
		printf("\n\n%s is not a valid BMP file\n\n", fn);
		fclose(f);
		exit(EXIT_FAILURE);
	}
	// extract image height and width from header
	int width = *(int*)&HeaderInfo[18]; ip.Hpixels = width;
	int height = *(int*)&HeaderInfo[22]; ip.Vpixels = height;
	int RowBytes = (width * 3 + 3) & (~3); ip.Hbytes = RowBytes; // rows pad to 4 bytes
	//save header for re-use
	memcpy(ip.HeaderInfo, HeaderInfo,54);
	printf("\n Input File name: %17s (%u x %u) File Size=%u", fn, IPH, IPV, IMAGESIZE);
	// allocate PINNED memory to store the main 1D image.
	// Check against any failure, not only cudaErrorMemoryAllocation: a prior
	// sticky runtime error would otherwise be silently treated as success.
	AllocErr=cudaMallocHost((void**)&p, IMAGESIZE);
	if (AllocErr != cudaSuccess){
		fclose(f);   // was leaking the file handle on this path
		Img=NULL;    // Cannot allocate memory
		return Img;
	}else{
		Img=(uch *)p;
	}
	// read the pixel data from disk; reject truncated files
	if (fread(Img, sizeof(uch), IMAGESIZE, f) != IMAGESIZE) {
		printf("\n\n%s: truncated pixel data\n\n", fn);
		fclose(f);
		cudaFreeHost(Img);
		exit(EXIT_FAILURE);
	}
	fclose(f);
	return Img;
}
// Write the 1D linear-memory stored image into file.
// Re-uses the 54-byte header captured in the global 'ip' by ReadBMPlin*().
void WriteBMPlin(uch *Img, char* fn)
{
	FILE* f = fopen(fn, "wb");
	if (f == NULL){ printf("\n\nFILE CREATION ERROR: %s\n\n", fn); exit(1); }
	//write header, then the pixel data; verify both writes completed
	if (fwrite(ip.HeaderInfo, sizeof(uch), 54, f) != 54 ||
	    fwrite(Img, sizeof(uch), IMAGESIZE, f) != IMAGESIZE){
		printf("\n\nFILE WRITE ERROR: %s\n\n", fn);
		fclose(f);
		exit(1);
	}
	printf("\nOutput File name: %17s (%u x %u) File Size=%u", fn, ip.Hpixels, ip.Vpixels, IMAGESIZE);
	fclose(f);
}
// Print a separator between messages
void PrintSep()
{
	// single fixed-width dashed line, same width as the report columns
	fputs("-----------------------------------------------------------------------------------------\n", stdout);
}
// Driver: loads a BMP, runs horizontal flip ('H') or Canny-style edge
// detection ('E') on the GPU -- synchronously (NumberOfStreams==0) or split
// across 1..32 CUDA streams -- then writes the result and a timing report.
// Fixes vs. previous revision: (1) first event sync was time2 twice, now
// time1; (2) the GPUDataTransfer accounting for 'E' and 'H' was swapped;
// (3) host buffers are freed with the API that allocated them (free for the
// malloc'd synchronous path, cudaFreeHost for the pinned streaming path).
int main(int argc, char **argv)
{
	char Operation = 'E';
	float totalTime, Time12, Time23, Time34; // GPU code run times
	cudaError_t cudaStatus;
	cudaEvent_t time1, time2, time3, time4;
	char InputFileName[255], OutputFileName[255], ProgName[255];
	ui BlkPerRow, ThrPerBlk=256;
	cudaDeviceProp GPUprop;
	void *GPUptr; // Pointer to the bulk-allocated GPU memory
	ul GPUtotalBufferSize;
	ul SupportedKBlocks, SupportedMBlocks, MaxThrPerBlk; char SupportedBlocks[100];
	int deviceOverlap, SMcount;
	ul ConstMem, GlobalMem;
	ui NumberOfStreams=1,RowsPerStream;
	cudaStream_t stream[MAXSTREAMS];
	void *p; // temporary pointer for the pinned memory
	ui i;
	strcpy(ProgName, "imGStr");
	// Command-line parsing; cases fall through on purpose so trailing args are optional
	switch (argc){
	case 6: NumberOfStreams = atoi(argv[5]);
	case 5: ThrPerBlk = atoi(argv[4]);
	case 4: Operation = toupper(argv[3][0]);
	case 3: strcpy(InputFileName, argv[1]);
		strcpy(OutputFileName, argv[2]);
		break;
	default: printf("\n\nUsage: %s InputFilename OutputFilename [H/E] [ThrPerBlk] [NumberOfStreams:0-32]", ProgName);
		printf("\n\nExample: %s Astronaut.bmp Output.bmp", ProgName);
		printf("\n\nExample: %s Astronaut.bmp Output.bmp E 256 3 --- which means 3 streams", ProgName);
		printf("\n\n (Note: 0 means Synchronous (no streaming) ... H=Hor. flip , E=Edge detection\n");
		exit(EXIT_FAILURE);
	}
	// Operation is 'H' for Horizontal flip and 'E' for Edge Detection
	if ((Operation != 'E') && (Operation != 'H')) {
		printf("Invalid operation '%c'. Must be 'H', or 'E' ... \n", Operation);
		exit(EXIT_FAILURE);
	}
	// Parse the "Threads per block" parameter
	if ((ThrPerBlk < 32) || (ThrPerBlk > 1024)) {
		printf("Invalid ThrPerBlk option '%u'. Must be between 32 and 1024. \n", ThrPerBlk);
		exit(EXIT_FAILURE);
	}
	// Determine the number of streams
	if (NumberOfStreams > 32) {
		printf("Invalid NumberOfStreams option (%u). Must be between 0 and 32. \n", NumberOfStreams);
		printf("0 means NO STREAMING (i.e., synchronous)\n");
		exit(EXIT_FAILURE);
	}
	if (NumberOfStreams == 0) {
		// 0 means synchronous: no streams, pageable host memory suffices.
		TheImg = ReadBMPlin(InputFileName); // Read the input image into a regular memory
		if (TheImg == NULL) {
			printf("Cannot allocate memory for the input image...\n");
			exit(EXIT_FAILURE);
		}
		CopyImg = (uch *)malloc(IMAGESIZE);
		if (CopyImg == NULL) {
			printf("Cannot allocate memory for the input image...\n");
			free(TheImg);
			exit(EXIT_FAILURE);
		}
	}else{
		// Streaming: async copies require PINNED host memory
		TheImg = ReadBMPlinPINNED(InputFileName); // Read the input image into a PINNED memory
		if (TheImg == NULL){
			printf("Cannot allocate PINNED memory for the input image...\n");
			exit(EXIT_FAILURE);
		}
		// Allocate pinned memory for the CopyImg; treat ANY failure as fatal
		cudaStatus=cudaMallocHost((void**)&p, IMAGESIZE);
		if (cudaStatus != cudaSuccess){
			printf("Cannot allocate PINNED memory for the CopyImg ...\n");
			cudaFreeHost(TheImg);
			exit(EXIT_FAILURE);
		}else{
			CopyImg=(uch *)p;
		}
	}
	// Choose which GPU to run on, change this on a multi-GPU system.
	int NumGPUs = 0;
	cudaGetDeviceCount(&NumGPUs);
	if (NumGPUs == 0){
		printf("\nNo CUDA Device is available\n");
		exit(EXIT_FAILURE);
	}
	cudaStatus = cudaSetDevice(0);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
		exit(EXIT_FAILURE);
	}
	cudaGetDeviceProperties(&GPUprop, 0);
	SupportedKBlocks = (ui) GPUprop.maxGridSize[0] * (ui) GPUprop.maxGridSize[1] * (ui )GPUprop.maxGridSize[2]/1024;
	SupportedMBlocks = SupportedKBlocks / 1024;
	sprintf(SupportedBlocks, "%u %c", (SupportedMBlocks>=5) ? SupportedMBlocks : SupportedKBlocks, (SupportedMBlocks>=5) ? 'M':'K');
	MaxThrPerBlk = (ui)GPUprop.maxThreadsPerBlock;
	deviceOverlap = GPUprop.deviceOverlap; // Shows whether the device can transfer in both directions simultaneously
	SMcount = GPUprop.multiProcessorCount;
	ConstMem = (ul) GPUprop.totalConstMem;
	GlobalMem = (ul) GPUprop.totalGlobalMem;
	// CREATE EVENTS
	cudaEventCreate(&time1);
	cudaEventCreate(&time2);
	cudaEventCreate(&time3);
	cudaEventCreate(&time4);
	// CREATE STREAMS
	if(NumberOfStreams != 0){
		for (i = 0; i < NumberOfStreams; i++) {
			chkCUDAErr(cudaStreamCreate(&stream[i]));
		}
	}
	cudaEventRecord(time1, 0); // Time stamp at the start of the GPU transfer
	// Allocate ONE GPU buffer for the input/output images and the intermediate results
	GPUtotalBufferSize = 4 * sizeof(double)*IMAGEPIX + 2 * sizeof(uch)*IMAGESIZE;
	cudaStatus = cudaMalloc((void**)&GPUptr, GPUtotalBufferSize);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMalloc failed! Can't allocate GPU memory\n");
		exit(EXIT_FAILURE);
	}
	// Carve the bulk allocation into its six regions
	GPUImg = (uch *)GPUptr;
	GPUResultImg = GPUImg + IMAGESIZE;
	GPUBWImg = (double *)(GPUResultImg + IMAGESIZE);
	GPUGaussImg = GPUBWImg + IMAGEPIX;
	GPUGradient = GPUGaussImg + IMAGEPIX;
	GPUTheta = GPUGradient + IMAGEPIX;
	BlkPerRow = CEIL(IPH, ThrPerBlk);
	RowsPerStream = ((NumberOfStreams == 0) ? IPV : CEIL(IPV, NumberOfStreams));
	// 2D grids: x spans the columns of a row, y spans some number of rows
	dim3 dimGrid2D(BlkPerRow, IPV); // to process the entire image
	dim3 dimGrid2DS(BlkPerRow, RowsPerStream); // to process the rows of one stream
	dim3 dimGrid2DS1(BlkPerRow, 1);
	dim3 dimGrid2DS2(BlkPerRow, 2);
	dim3 dimGrid2DS4(BlkPerRow, 4);
	dim3 dimGrid2DS6(BlkPerRow, 6);
	dim3 dimGrid2DS10(BlkPerRow, 10);
	dim3 dimGrid2DSm1(BlkPerRow, RowsPerStream - 1);
	dim3 dimGrid2DSm2(BlkPerRow, RowsPerStream - 2);
	dim3 dimGrid2DSm3(BlkPerRow, RowsPerStream - 3);
	dim3 dimGrid2DSm4(BlkPerRow, RowsPerStream - 4);
	dim3 dimGrid2DSm5(BlkPerRow, RowsPerStream - 5);
	dim3 dimGrid2DSm6(BlkPerRow, RowsPerStream - 6);
	dim3 dimGrid2DSm10(BlkPerRow, RowsPerStream - 10);
	uch *CPUstart, *GPUstart;
	ui StartByte, StartRow;
	ui RowsThisStream;
	switch (NumberOfStreams) {
	case 0:	// Fully synchronous: one blocking copy in, all kernels, one blocking copy out
		chkCUDAErr(cudaMemcpy(GPUImg, TheImg, IMAGESIZE, cudaMemcpyHostToDevice));
		cudaEventRecord(time2, 0); // Time stamp at the beginning of kernel execution
		switch(Operation){
		case 'E': BWKernel2S <<< dimGrid2D, ThrPerBlk >>> (GPUBWImg, GPUImg, IPH, IPV, IPHB, 0);
			GaussKernel3S <<< dimGrid2D, ThrPerBlk >>> (GPUGaussImg, GPUBWImg, IPH, IPV, 0);
			SobelKernel2S <<< dimGrid2D, ThrPerBlk >>> (GPUGradient, GPUTheta, GPUGaussImg, IPH, IPV, 0);
			ThresholdKernel2S <<< dimGrid2D, ThrPerBlk >>> (GPUResultImg, GPUGradient, GPUTheta, IPH, IPV, IPHB, ThreshLo, ThreshHi,0);
			break;
		case 'H': Hflip3S <<< dimGrid2D, ThrPerBlk >>> (GPUResultImg, GPUImg, IPH, IPV, IPHB, 0);
			break;
		}
		cudaEventRecord(time3, 0); // Time stamp at the end of kernel execution
		chkCUDAErr(cudaMemcpy(CopyImg, GPUResultImg, IMAGESIZE, cudaMemcpyDeviceToHost));
		break;
	case 1:	// Single stream: same sequence, but async within stream[0]
		chkCUDAErr(cudaMemcpyAsync(GPUImg, TheImg, IMAGESIZE, cudaMemcpyHostToDevice, stream[0]));
		cudaEventRecord(time2, 0); // Time stamp at the beginning of kernel execution
		switch(Operation) {
		case 'E': BWKernel2S <<< dimGrid2D, ThrPerBlk, 0, stream[0] >>> (GPUBWImg, GPUImg, IPH, IPV, IPHB, 0);
			GaussKernel3S <<< dimGrid2D, ThrPerBlk, 0, stream[0] >>> (GPUGaussImg, GPUBWImg, IPH, IPV, 0);
			SobelKernel2S <<< dimGrid2D, ThrPerBlk, 0, stream[0] >>> (GPUGradient, GPUTheta, GPUGaussImg, IPH, IPV, 0);
			ThresholdKernel2S <<< dimGrid2D, ThrPerBlk, 0, stream[0] >>> (GPUResultImg, GPUGradient, GPUTheta, IPH, IPV, IPHB, ThreshLo, ThreshHi, 0);
			break;
		case 'H': Hflip3S <<< dimGrid2D, ThrPerBlk, 0, stream[0] >>> (GPUResultImg, GPUImg, IPH, IPV, IPHB, 0);
			break;
		}
		cudaEventRecord(time3, 0); // Time stamp at the end of kernel execution
		chkCUDAErr(cudaMemcpyAsync(CopyImg, GPUResultImg, IMAGESIZE, cudaMemcpyDeviceToHost, stream[0]));
		break;
	default: // Multiple streams. Check to see if it is horizontal flip:
		if (Operation == 'H') {
			// Flip is row-independent, so each stream can copy/flip/copy
			// its own chunk of rows with no halo exchange.
			for (i = 0; i < NumberOfStreams; i++) {
				StartRow = i*RowsPerStream;
				StartByte = StartRow*IPHB;
				CPUstart = TheImg + StartByte;
				GPUstart = GPUImg + StartByte;
				RowsThisStream = (i != (NumberOfStreams - 1)) ? RowsPerStream : (IPV - (NumberOfStreams - 1)*RowsPerStream);
				chkCUDAErr(cudaMemcpyAsync(GPUstart, CPUstart, RowsThisStream*IPHB, cudaMemcpyHostToDevice, stream[i]));
				cudaEventRecord(time2, 0); // time2 will time stamp at the end of CPU --> GPU transfer
				Hflip3S <<< dimGrid2DS, ThrPerBlk, 0, stream[i] >>> (GPUResultImg, GPUImg, IPH, IPV, IPHB, StartRow);
				cudaEventRecord(time3, 0); // time3 will time stamp at the end of kernel exec
				CPUstart = CopyImg + StartByte;
				GPUstart = GPUResultImg + StartByte;
				chkCUDAErr(cudaMemcpyAsync(CPUstart, GPUstart, RowsThisStream*IPHB, cudaMemcpyDeviceToHost, stream[i]));
			}
			break;
		}
		// If not horizontal flip, do edge detection (STREAMING).
		// The stencil kernels need rows from neighboring chunks, so first
		// pre-process a 10-row halo around every chunk boundary synchronously:
		// 10 rows of B&W, 6 rows of Gauss, 4 rows of Sobel, 2 rows of Threshold.
		for (i = 0; i < (NumberOfStreams-1); i++) {
			StartRow = (i+1)*RowsPerStream-5;
			StartByte = StartRow*IPHB;
			CPUstart = TheImg + StartByte;
			GPUstart = GPUImg + StartByte;
			// Transfer 10 rows between chunk boundaries
			chkCUDAErr(cudaMemcpy(GPUstart, CPUstart, 10*IPHB, cudaMemcpyHostToDevice));
			// Pre-process 10 rows for B&W
			BWKernel2S <<< dimGrid2DS10, ThrPerBlk >>> (GPUBWImg, GPUImg, IPH, IPV, IPHB, StartRow);
			// Calculate 6 rows of Gauss, starting @ the last 3 rows for every stream, except the very last one
			StartRow += 2;
			GaussKernel3S <<< dimGrid2DS6, ThrPerBlk >>> (GPUGaussImg, GPUBWImg, IPH, IPV, StartRow);
			// Calculate 4 rows of Sobel starting @last-1 row of every stream, except the very last one
			StartRow ++;
			SobelKernel2S <<< dimGrid2DS4, ThrPerBlk >>> (GPUGradient, GPUTheta, GPUGaussImg, IPH, IPV, StartRow);
			// Calculate 2 rows of Threshold starting @last row of every stream, except the very last one
			//StartRow++;
			//ThresholdKernel2S <<< dimGrid2DS2, ThrPerBlk >>> (GPUResultImg, GPUGradient, GPUTheta, IPH, IPV, IPHB, ThreshLo, ThreshHi, StartRow);
		}
		cudaEventRecord(time2, 0); // time2 will time stamp at the end of the pre-processing
		// Now stream the interior of every chunk: copy CPU --> GPU, then
		// B&W, Gauss, Sobel per stream (rows shrunk to skip the halo).
		for (i = 0; i < NumberOfStreams; i++) {
			if (i == 0) {
				RowsThisStream = RowsPerStream - 5;
			}else if (i == (NumberOfStreams - 1)) {
				RowsThisStream = IPV - (NumberOfStreams - 1)*RowsPerStream - 5;
			}else{
				RowsThisStream = RowsPerStream - 10;
			}
			StartRow = ((i == 0) ? 0 : i*RowsPerStream + 5);
			// printf("Stream=%u ... RowsThisStream=%u\n", i, RowsThisStream);
			StartByte = StartRow*IPHB;
			CPUstart = TheImg + StartByte;
			GPUstart = GPUImg + StartByte;
			chkCUDAErr(cudaMemcpyAsync(GPUstart, CPUstart, RowsThisStream * IPHB, cudaMemcpyHostToDevice, stream[i]));
			if (i==0){
				BWKernel2S <<< dimGrid2DSm5, ThrPerBlk, 0, stream[i] >>> (GPUBWImg, GPUImg, IPH, IPV, IPHB, StartRow);
				GaussKernel3S <<< dimGrid2DSm3, ThrPerBlk, 0, stream[i] >>> (GPUGaussImg, GPUBWImg, IPH, IPV, StartRow);
				SobelKernel2S <<< dimGrid2DSm2, ThrPerBlk, 0, stream[i] >>> (GPUGradient, GPUTheta, GPUGaussImg, IPH, IPV, StartRow);
				//ThresholdKernel2S <<< dimGrid2DSm1, ThrPerBlk, 0, stream[i] >>> (GPUResultImg, GPUGradient, GPUTheta, IPH, IPV, IPHB, ThreshLo, ThreshHi, StartRow);
			}else if (i == (NumberOfStreams - 1)) {
				BWKernel2S <<< dimGrid2DSm5, ThrPerBlk, 0, stream[i] >>> (GPUBWImg, GPUImg, IPH, IPV, IPHB, StartRow);
				StartRow -= 2;
				GaussKernel3S <<< dimGrid2DSm3, ThrPerBlk, 0, stream[i] >>> (GPUGaussImg, GPUBWImg, IPH, IPV, StartRow);
				StartRow--;
				SobelKernel2S <<< dimGrid2DSm2, ThrPerBlk, 0, stream[i] >>> (GPUGradient, GPUTheta, GPUGaussImg, IPH, IPV, StartRow);
			}else {
				BWKernel2S <<< dimGrid2DSm10, ThrPerBlk, 0, stream[i] >>> (GPUBWImg, GPUImg, IPH, IPV, IPHB, StartRow);
				StartRow -= 2;
				GaussKernel3S <<< dimGrid2DSm6, ThrPerBlk, 0, stream[i] >>> (GPUGaussImg, GPUBWImg, IPH, IPV, StartRow);
				StartRow--;
				SobelKernel2S <<< dimGrid2DSm4, ThrPerBlk, 0, stream[i] >>> (GPUGradient, GPUTheta, GPUGaussImg, IPH, IPV, StartRow);
				//ThresholdKernel2S <<< dimGrid2DSm2, ThrPerBlk, 0, stream[i] >>> (GPUResultImg, GPUGradient, GPUTheta, IPH, IPV, IPHB, ThreshLo, ThreshHi, StartRow);
			}
		}
		//for (i = 0; i < NumberOfStreams; i++) cudaStreamSynchronize(stream[i]);
		cudaEventRecord(time3, 0); // time3 will time stamp at the end of BW+Gauss+Sobel
		// Streaming Threshold (full chunks; gradients are complete by now)
		for (i = 0; i < NumberOfStreams; i++) {
			StartRow = i*RowsPerStream;
			ThresholdKernel2S <<< dimGrid2DS, ThrPerBlk, 0, stream[i] >>> (GPUResultImg, GPUGradient, GPUTheta, IPH, IPV, IPHB, ThreshLo, ThreshHi, StartRow);
		}
		//for (i = 0; i < NumberOfStreams; i++) cudaStreamSynchronize(stream[i]);
		// Stream data from GPU --> CPU
		for (i = 0; i < NumberOfStreams; i++) {
			StartRow = i*(RowsPerStream-5);
			StartByte = StartRow*IPHB;
			CPUstart = CopyImg + StartByte;
			GPUstart = GPUResultImg + StartByte;
			RowsThisStream = (i != (NumberOfStreams - 1)) ? (RowsPerStream - 5) : (IPV - (NumberOfStreams - 1)*(RowsPerStream - 5));
			chkCUDAErr(cudaMemcpyAsync(CPUstart, GPUstart, RowsThisStream*IPHB, cudaMemcpyDeviceToHost, stream[i]));
		}
	}
	cudaEventRecord(time4, 0);
	// Wait for all four timestamps (was synchronizing time2 twice and time1 never)
	cudaEventSynchronize(time1);
	cudaEventSynchronize(time2);
	cudaEventSynchronize(time3);
	cudaEventSynchronize(time4);
	cudaEventElapsedTime(&totalTime, time1, time4);
	cudaEventElapsedTime(&Time12, time1, time2);
	cudaEventElapsedTime(&Time23, time2, time3);
	cudaEventElapsedTime(&Time34, time3, time4);
	cudaStatus = cudaDeviceSynchronize();
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "\n Program failed after cudaDeviceSynchronize()!");
		// free host buffers with the API that allocated them
		if (NumberOfStreams == 0) { free(TheImg); free(CopyImg); }
		else { cudaFreeHost(TheImg); cudaFreeHost(CopyImg); }
		cudaFree(GPUptr);
		exit(EXIT_FAILURE);
	}
	WriteBMPlin(CopyImg, OutputFileName); // Write the processed image back to disk
	printf("\n\n"); PrintSep();
	printf("%s ComputeCapab=%d.%d [max %s blocks; %d thr/blk; %d SMs] \n", GPUprop.name, GPUprop.major, GPUprop.minor, SupportedBlocks, MaxThrPerBlk, SMcount);
	printf("Total Global Mem=%u MB Total Constant Mem=%u KB \n", DATAMB(GlobalMem), DATAKB(ConstMem));
	PrintSep();
	ui NumBlocks = IPV*BlkPerRow;
	ui GPUDataTransfer;
	printf("%s %s %s %c %u %u [%u BLOCKS, %u BLOCKS/ROW]\n", ProgName, InputFileName, OutputFileName, Operation, ThrPerBlk, NumberOfStreams, NumBlocks, BlkPerRow);
	PrintSep();
	// GPU-side data traffic accounting. NOTE: the 'E' and 'H' bodies were
	// swapped in the previous revision (flip was credited with the four
	// edge-detection kernels' traffic and vice versa).
	switch (Operation) {
	case 'H': GPUDataTransfer = 2 * IMAGESIZE;	// flip: read image + write image
		break;
	case 'E': ui GPUDataTfrBW = sizeof(double)*IMAGEPIX + sizeof(uch)*IMAGESIZE;
		ui GPUDataTfrGauss = 2 * sizeof(double)*IMAGEPIX;
		ui GPUDataTfrSobel = 3 * sizeof(double)*IMAGEPIX;
		ui GPUDataTfrThresh = sizeof(double)*IMAGEPIX + sizeof(uch)*IMAGESIZE;
		ui GPUDataTfrKernel = GPUDataTfrBW + GPUDataTfrGauss + GPUDataTfrSobel + GPUDataTfrThresh;
		GPUDataTransfer = GPUDataTfrKernel + 2 * IMAGESIZE;
		break;
	}
	if(NumberOfStreams==0){
		printf("Synchronous Mode. NO STREAMING\n");
		PrintSep();
		printf("CPU->GPU Transfer =%7.2f ms ... %4d MB ... %6.2f GB/s\n", Time12, DATAMB(IMAGESIZE), DATABW(IMAGESIZE, Time12));
		printf("Kernel Execution =%7.2f ms ... %4d MB ... %6.2f GB/s\n", Time23, DATAMB(GPUDataTransfer), DATABW(GPUDataTransfer, Time23));
		printf("GPU->CPU Transfer =%7.2f ms ... %4d MB ... %6.2f GB/s\n", Time34, DATAMB(IMAGESIZE), DATABW(IMAGESIZE, Time34));
		PrintSep();
		printf("Total time elapsed =%7.2f ms %4d MB ... %6.2f GB/s\n", totalTime, DATAMB((2 * IMAGESIZE + GPUDataTransfer)), DATABW((2 * IMAGESIZE + GPUDataTransfer), totalTime));
		PrintSep();
	}else{
		printf("Streaming Mode. NumberOfStreams=%u (deviceOverlap=%s)\n", NumberOfStreams, deviceOverlap ? "TRUE" : "FALSE");
		printf("This device is %s capable of simultaneous CPU-to-GPU and GPU-to-CPU data transfers\n", deviceOverlap ? "" : "NOT");
		PrintSep();
		switch (Operation) {
		case 'E': printf("Pre-processing =%7.2f ms \n", Time12);
			printf("CPU--> GPU Transfer + BW+Gauss+Sobel =%7.2f ms\n", Time23);
			printf("Threshold + GPU--> CPU Transfer =%7.2f ms\n", Time34);
			break;
		case 'H': printf("CPU--> GPU Transfer =%7.2f ms \n", Time12);
			printf("Flip kernel =%7.2f ms\n", Time23);
			printf("GPU--> CPU Transfer =%7.2f ms\n", Time34);
			break;
		}
		PrintSep();
		printf("Total time elapsed =%7.2f ms %4d MB ... %6.2f GB/s\n", totalTime, DATAMB((2 * IMAGESIZE + GPUDataTransfer)), DATABW((2 * IMAGESIZE + GPUDataTransfer), totalTime));
		PrintSep();
	}
	// Deallocate GPU memory
	cudaFree(GPUptr);
	// DESTROY EVENTS
	cudaEventDestroy(time1);
	cudaEventDestroy(time2);
	cudaEventDestroy(time3);
	cudaEventDestroy(time4);
	// DESTROY STREAMS
	if (NumberOfStreams != 0) {
		for (i = 0; i < NumberOfStreams; i++) {
			chkCUDAErr(cudaStreamDestroy(stream[i]));
		}
	}
	// cudaDeviceReset must be called before exiting in order for profiling and
	// tracing tools such as Parallel Nsight and Visual Profiler to show complete traces.
	cudaStatus = cudaDeviceReset();
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaDeviceReset failed!");
		if (NumberOfStreams == 0) { free(TheImg); free(CopyImg); }
		else { cudaFreeHost(TheImg); cudaFreeHost(CopyImg); }
		exit(EXIT_FAILURE);
	}
	// free host buffers with the API that allocated them: malloc'd in
	// synchronous mode (cudaFreeHost on them would be invalid), pinned otherwise
	if (NumberOfStreams == 0) { free(TheImg); free(CopyImg); }
	else { cudaFreeHost(TheImg); cudaFreeHost(CopyImg); }
	return(EXIT_SUCCESS);
}
|
extern "C"
{
	// Element-wise absolute value: b[i] = |a[i]| for every i in [0, n).
	// One thread per element; the guard handles grids that overshoot n.
	__global__ void vabs(const int n, const double *a, double *b)
	{
		const int idx = blockDim.x * blockIdx.x + threadIdx.x;
		if (idx >= n) return;
		b[idx] = fabs(a[idx]);
	}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.