serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
10,401 | #include "DFT.cuh"
using byte = unsigned char;
// Reports (to stderr) a failed CUDA runtime call together with a caller
// supplied tag; does not abort, so callers continue after the message.
// Fix: tag is now const char* — string literals were being passed through a
// deprecated (and in C++11 invalid) char* conversion. char* callers still bind.
inline void checkCudaErrors(cudaError err, const char* tag) //cuda error handle function
{
    if (cudaSuccess != err)
    {
        fprintf(stderr, "CUDA Runtime API error:%s. %s\n", cudaGetErrorString(err), tag);
        return;
    }
}
// One thread per output frequency sample (u, v): brute-force 2-D DFT of the
// grayscale source image. The (-1)^(x+y) factor shifts the zero frequency to
// the center of the spectrum; the magnitude is log-compressed and clamped to
// [0, 255], then written to all three channels of the output pixel.
// `pitch` is negative (bottom-up bitmap rows), so (-1)*pitch is the positive
// row stride in bytes.
// Fixes: removed the unused `ComplexNumber result` local, and replaced
// std::sqrt with the device-callable sqrt overload.
__global__ void DFT_kernel(byte *GPU_source, byte *GPU_result, int HandleWidth, int HandleHeight, int SourceWidth, int SourceHeight, int pitch, int pixelSize)
{
    // frequency-domain coordinates (u, v) map directly onto the thread grid
    int v = blockIdx.x*blockDim.x + threadIdx.x;
    int u = blockIdx.y*blockDim.y + threadIdx.y;
    if (v >= HandleWidth || u >= HandleHeight)
    {
        return;
    }
    double realpart = 0;
    double imaginepart = 0;
    double greyValue;
    for (int x = 0; x < SourceHeight; x++)
    {
        for (int y = 0; y < SourceWidth; y++)
        {
            greyValue = (double)GPU_source[x*SourceWidth + y];
            // multiply by (-1)^(x+y) to center the spectrum
            if ((x + y) & 1)
                greyValue = -1.0*greyValue;
            double factor = (double)u*x / (double)SourceHeight + (double)v * y / (double)SourceWidth;
            realpart += cos(-2 * PI*factor) * greyValue;
            imaginepart += sin(-2 * PI*factor) * greyValue;
        }
    }
    // log-scale the magnitude and clamp to the displayable byte range
    double result_norm = 15 * log(sqrt(realpart*realpart + imaginepart * imaginepart) + 1);
    result_norm = result_norm < 0.0 ? 0.0 : result_norm;
    result_norm = result_norm > 255.0 ? 255.0 : result_norm;
    // write the same value to all three channels of the bottom-up row
    GPU_result[(SourceHeight - 1 - u)*(-1)*pitch + v * pixelSize] = (byte)result_norm;
    GPU_result[(SourceHeight - 1 - u)*(-1)*pitch + v * pixelSize + 1] = (byte)result_norm;
    GPU_result[(SourceHeight - 1 - u)*(-1)*pitch + v * pixelSize + 2] = (byte)result_norm;
}
// Host wrapper: copies the grayscale source to the GPU, launches DFT_kernel
// over the HandleWidth x HandleHeight output, and copies the rendered
// spectrum into result_buf. `pitch` is the (negative, bottom-up) row stride
// of the destination bitmap, so (-1)*pitch is the positive row size in bytes.
// Fixes: deprecated cudaThreadSynchronize() replaced by
// cudaDeviceSynchronize(); launch-configuration errors are now surfaced via
// cudaGetLastError(); the pre-launch sync was dropped (cudaMemcpy is already
// synchronous).
extern "C" void DFT_host(byte* source, byte* result_buf, int HandleWidth, int HandleHeight, int SourceWidth, int SourceHeight, int pitch, int pixelSize)
{
    cudaDeviceSetLimit(cudaLimitPrintfFifoSize, (size_t)1024 * 1024 * 1024);
    // launch geometry: one thread per (u, v) output sample
    dim3 DimBlock(BlockXMaxThreadNum, BlockYMaxThreadNum);
    dim3 DimGrid(HandleWidth / BlockXMaxThreadNum + 1, HandleHeight / BlockYMaxThreadNum + 1);
    byte* result;       // device output bitmap
    byte* GPU_source;   // device copy of the source image
    checkCudaErrors(cudaMalloc((void **)&GPU_source, sizeof(byte)*SourceWidth*SourceHeight), "a");
    checkCudaErrors(cudaMalloc((void **)&result, sizeof(byte)*HandleHeight*((-1)*pitch)), "b");
    checkCudaErrors(cudaMemcpy(GPU_source, source, sizeof(byte)*SourceHeight*SourceWidth, cudaMemcpyHostToDevice), "c");
    DFT_kernel <<< DimGrid, DimBlock >>> (GPU_source, result, HandleWidth, HandleHeight, SourceWidth, SourceHeight, pitch, pixelSize);
    checkCudaErrors(cudaGetLastError(), "launch");
    checkCudaErrors(cudaDeviceSynchronize(), "sync");
    checkCudaErrors(cudaMemcpy(result_buf, result, sizeof(byte)*HandleHeight*((-1) * pitch), cudaMemcpyDeviceToHost), "d");
    cudaFree(GPU_source);
    cudaFree(result);
}
10,402 | #include <curand.h>
#include <curand_kernel.h>
#define DIM 1600
#define PI 3.14159265
// Plain copy of the three colour planes: one thread per pixel on a 2-D
// launch grid, where i_size is the row pitch in elements.
__global__ void Backup(unsigned char *R_input, unsigned char *G_input,
                       unsigned char *B_input, size_t i_size,
                       unsigned char *R_output, unsigned char *G_output,
                       unsigned char *B_output){
    const int col = threadIdx.x + blockIdx.x * blockDim.x;
    const int row = threadIdx.y + blockIdx.y * blockDim.y;
    const int idx = col + row * i_size;
    R_output[idx] = R_input[idx];
    G_output[idx] = G_input[idx];
    B_output[idx] = B_input[idx];
}
|
10,403 | #include <stdio.h>
// Trivial kernel: emits one greeting line through the device-side printf
// buffer (flushed at the next host synchronization point).
__global__ void printHello() {
printf("The Device says \"Hello World\"\n");
}
// Launches printHello on a single thread, then synchronizes so the device
// printf buffer is flushed before the process exits.
// Fix: without the sync the program can terminate before the kernel runs,
// and the message may never appear.
int main()
{
    printHello<<<1,1>>>();
    cudaDeviceSynchronize();
    return 0;
}
10,404 | #define int8 unsigned char
#include <cstdlib>
#include <iostream>
using std::cout;
// Reports (but does not abort on) a failed CUDA API call.
inline void gpuAssert(cudaError_t code) {
    if (code == cudaSuccess)
        return;
    std::cout << cudaGetErrorString(code) << "\n";
}
// Identity hue round-trip on an interleaved (HWC) RGB byte image: each
// thread converts one pixel RGB -> HSV -> RGB and writes it back.
// Fix: the bounds check used `>` so the thread with idx == height*width*3
// passed the guard and read/wrote 3 bytes past the end of both buffers;
// it is now `>=`.
__global__ void adjust_hue_hwc(const int height, const int width,
int8 * const input, int8 * const output) {
// multiply by 3 since we're dealing with contiguous RGB bytes for each pixel
const int idx = (blockDim.x * blockIdx.x + threadIdx.x) * 3;
// bounds check (>=: idx is the first of three bytes accessed)
if (idx >= height * width * 3) {
return;
}
// rgb_to_hsv
const float r = input[idx];
const float g = input[idx + 1];
const float b = input[idx + 2];
const float M = max(r, max(g, b));
const float m = min(r, min(g, b));
const float chroma = M - m;
float h = 0.0, s = 0.0, v = 0.0;
if (chroma > 0) {
if (M == r) {
h = fmod((g - b) / chroma, 6.0f);
} else if (M == g) {
h = (b - r) / chroma + 2.0;
} else {
h = (r - g) / chroma + 4.0;
}
}
if (M > 0.0) {
s = chroma / M;
}
v = M;
// hsv2rgb
const float new_chroma = v * s;
// NOTE(review): the next two lines use `chroma` rather than `new_chroma`;
// numerically identical here since new_chroma = v*s = chroma, but
// new_chroma would be clearer — confirm before refactoring.
const float x = chroma * (1.0 - fabs(fmod(h, 2.0f) - 1.0f));
const float new_m = v - chroma;
const int between_0_and_1 = h >= 0.0 && h < 1;
const int between_1_and_2 = h >= 1.0 && h < 2;
const int between_2_and_3 = h >= 2 && h < 3;
const int between_3_and_4 = h >= 3 && h < 4;
const int between_4_and_5 = h >= 4 && h < 5;
const int between_5_and_6 = h >= 5 && h < 6;
// red channel
const int red_chroma_mask = between_0_and_1 || between_5_and_6;
const int red_x_mask = between_1_and_2 || between_4_and_5;
const int8 new_r = new_chroma * red_chroma_mask + x * red_x_mask + new_m;
// green channel
const int green_chroma_mask = between_1_and_2 || between_2_and_3;
const int green_x_mask = between_0_and_1 || between_3_and_4;
const int8 new_g = new_chroma * green_chroma_mask + x * green_x_mask
+ new_m;
// blue channel
const int blue_chroma_mask = between_3_and_4 || between_4_and_5;
const int blue_x_mask = between_2_and_3 || between_5_and_6;
const int8 new_b = new_chroma * blue_chroma_mask + x * blue_x_mask + new_m;
output[idx] = new_r;
output[idx + 1] = new_g;
output[idx + 2] = new_b;
}
// Fills a random 1300x1300x3 image, runs the identity hue round-trip on the
// GPU, and counts pixels that changed by more than 1 after RGB->HSV->RGB.
// Fixes: kernel launch errors are now surfaced via cudaGetLastError(), and
// the host/device buffers are released before exit (they leaked).
int main(void) {
    srand(1);                       // deterministic input
    const int h = 1300;
    const int w = 1300;
    const int total = h * w * 3;
    const int size_bytes = h * w * 3 * sizeof(int8);
    int8 * mat_h = (int8 *) malloc(size_bytes);
    int8 * mat_h2 = (int8 *) calloc(h * w * 3, sizeof(int8));
    int8 * mat_d = NULL;
    int8 * mat_d2 = NULL;
    gpuAssert(cudaMalloc(&mat_d, size_bytes));
    gpuAssert(cudaMalloc(&mat_d2, size_bytes));
    for (int i = 0; i < total; i++) {
        mat_h[i] = abs(rand() % 256);
    }
    gpuAssert(cudaMemcpy(mat_d, mat_h, size_bytes, cudaMemcpyHostToDevice));
    const int threads_per_block = 1024;
    const int blocks = (h * w + (threads_per_block - 1)) / threads_per_block; // ceil-div
    adjust_hue_hwc<<<blocks, threads_per_block>>>(h, w, mat_d, mat_d2);
    gpuAssert(cudaGetLastError());  // catch launch-configuration errors
    gpuAssert(cudaMemcpy(mat_h2, mat_d2, size_bytes, cudaMemcpyDeviceToHost));
    int error_ctr = 0;
    int channel_ctr = 0;
    using std::cout;
    const char * lookup[3] { "red", "green", "blue" };  // used by the debug prints below
    for (int i = 0; i < total; i++) {
        channel_ctr = (channel_ctr + 1) % 3;
        if (abs(mat_h[i] - mat_h2[i]) > 1) {
            // std::cout << "BAD PIXEL: index " << i << "\n";
            // std::cout << "channel = " << lookup[channel_ctr]
            // << ", original = " << (int) mat_h[i]
            // << ", after GPU RGB->HSV->RGB = " << (int) mat_h2[i]
            // << "\n";
            // std::cout<< "h pixels before it: [" << (int) mat_h[i - 2] << ", " << (int) mat_h[i - 1] << "]\n";
            // std::cout<< "h pixels after it: [" << (int) mat_h[i + 1] << ", " << (int) mat_h[i + 2] << "]\n\n";
            error_ctr++;
        }
    }
    cout << "\nThere were " << error_ctr << " bad pixels out of " << total << "\n";
    cout << "This represents " << (100.0 * error_ctr / total) << "%\n\n";
    cudaFree(mat_d);
    cudaFree(mat_d2);
    free(mat_h);
    free(mat_h2);
    return 0;
}
|
10,405 | #include "includes.h"
// Fine-grained transpose: each block stages a TILE_DIM x TILE_DIM tile of
// idata in shared memory and writes it back transposed *within the tile*
// (tiles themselves are not swapped across the diagonal).
// Expects blockDim = (TILE_DIM, BLOCK_ROWS); each thread copies
// TILE_DIM/BLOCK_ROWS rows. The +1 column pad avoids shared-memory bank
// conflicts on the column-wise reads in the second loop.
__global__ void transposeFineGrained(float *odata, float *idata, int width, int height)
{
__shared__ float block[TILE_DIM][TILE_DIM+1];
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index = xIndex + (yIndex)*width;
// stage this block's tile into shared memory
for (int i=0; i < TILE_DIM; i += BLOCK_ROWS)
{
block[threadIdx.y+i][threadIdx.x] = idata[index+i*width];
}
__syncthreads();
// write the tile back with row/column swapped inside the tile
for (int i=0; i < TILE_DIM; i += BLOCK_ROWS)
{
odata[index+i*height] = block[threadIdx.x][threadIdx.y+i];
}
} |
10,406 | extern "C"
{
// Gradient-style update over n elements, one thread each:
// c[i] += a[i] when b[i] > -0.5, otherwise c[i] -= 0.5*a[i]/b[i].
__global__ void gradalex_32(const int n, const float *a, const float *b, float *c)
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= n)
        return;
    if (b[idx] > -0.5)
        c[idx] += a[idx];
    else
        c[idx] -= 0.5*a[idx]/b[idx];
}
} |
10,407 | #define BLOCK_SIZE 1024
#define N 1024
#define MAXELEMS 200000
#define FREQBANDWIDTH 50
#define MAXSONGS 10
__device__ char nthdigit(int x, int n);
__device__ int generate_hash_string (char* buffer, int a, int b, int c, int d, int e);
__device__ void four1(float* data, int nn);
// Per-block score accumulation: each thread owns one hash-table row of
// MAXSONGS+1 ints (slot 0 = sample hit count, slots 1..MAXSONGS = per-song
// hit counts). For rows with a nonzero sample count it adds
// min(song hits, sample hits) for each song into block-shared partial sums,
// then one atomicAdd per song index folds the block's partials into the
// global songscores_d. Requires blockDim.x >= MAXSONGS+1 so the init and
// flush phases cover every slot.
__global__ void calc_scores(int * hash_songs, int * songscores_d)
{
__shared__ int data[MAXSONGS+1];
if (threadIdx.x < MAXSONGS+1) data[threadIdx.x] = 0;
__syncthreads();
int i = threadIdx.x + blockIdx.x*blockDim.x;
int hashsongs_base_index = i*(MAXSONGS+1);
int temp;
if(i<MAXELEMS && hash_songs[hashsongs_base_index]>0)
{
int n;
for(n = 1; n <= MAXSONGS; n++)
{
// contribution = min(song hits at this hash, sample hits at this hash)
temp = (hash_songs[hashsongs_base_index+n]>=hash_songs[hashsongs_base_index]) ? hash_songs[hashsongs_base_index] : hash_songs[hashsongs_base_index+n];
atomicAdd(&(data[n]),temp);
}
}
__syncthreads();
if (threadIdx.x < MAXSONGS+1) atomicAdd(&(songscores_d[threadIdx.x]),data[threadIdx.x]);
}
// Returns the ASCII character of the 10^n decimal digit of x
// (n = 0 -> ones, 1 -> tens, 2 -> hundreds, 3 -> thousands).
__device__ char nthdigit(int x, int n)
{
    int divisor = 1;
    for (int p = 0; p < n; p++)
        divisor *= 10;
    return (char)('0' + (x / divisor) % 10);
}
// Builds a short ASCII digit string from the five band-peak indices into
// `buffer` (caller must provide at least 5 bytes) and returns the number of
// characters written, or 0 when buffer is NULL. Only feeds the djb2 hash in
// parallelhash, so it merely needs to be deterministic.
// NOTE(review): nthdigit(x, n) returns the 10^n digit, so e.g. the b >= 100
// branch emits the *ones* digit and the b < 10 branch emits the hundreds
// digit (always '0'). This looks inverted, but "fixing" it would change
// every stored hash — verify against existing fingerprint data first.
__device__ int generate_hash_string (char* buffer, int a, int b, int c, int d, int e)
{
int i = 0;
if(buffer == NULL) return 0;
//Process first int
if(a >= 10) buffer[i++] = nthdigit(a,1);
else buffer[i++] = nthdigit(a,0);
//Second int
if(b >= 100) buffer[i++] = nthdigit(b,0);
else if(b >= 10) buffer[i++] = nthdigit(b,1);
else buffer[i++] = nthdigit(b,2);
//Third int
if(c >= 100) buffer[i++] = nthdigit(c,0);
else if(c >= 10) buffer[i++] = nthdigit(c,1);
else buffer[i++] = nthdigit(c,2);
//Fourth int
if(d >= 100) buffer[i++] = nthdigit(d,0);
else if(d >= 10) buffer[i++] = nthdigit(d,1);
else buffer[i++] = nthdigit(d,2);
//Fifth int
if(e >= 100) buffer[i++] = nthdigit(e,0);
else if(e >= 10) buffer[i++] = nthdigit(e,1);
else buffer[i++] = nthdigit(e,2);
return i;
}
// Per-chunk audio fingerprinting: each thread runs an in-place FFT on its
// own N-sample chunk of `in`, finds the strongest bin in five
// FREQBANDWIDTH-wide frequency bands, hashes the five peak indices
// (digit string -> djb2 -> mod MAXELEMS) and increments
// hash_table[key][song_id]. `n` is the number of chunks.
// Fixes: the buffer-clearing loop wrote buffer[0] fifteen times instead of
// clearing each slot (harmless today since only the first j chars are read,
// but clearly not the intent); the inner loop counter no longer shadows the
// outer `k`.
__global__ void parallelhash(float* in, int n, int* hash_table, int song_id)
{
    int i, k;
    float freq1, freq2, freq3, freq4, freq5;
    float tempfreq, magnitude;
    int pt1,pt2,pt3,pt4, pt5, key;
    i = threadIdx.x + blockIdx.x*blockDim.x; //My chunk
    if(i < n){ //if my chunk ID < total number of chunks we may continue
        //Point Z to the right chunk location in the song
        float* Z = &in[N*i];
        four1(Z,N);                       // in-place FFT of this chunk
        freq1 = freq2 = freq3 = freq4 = freq5 = 0;
        pt1 = pt2 = pt3 = pt4 = pt5 = 0;
        //Filter Frequency Bands: remember the loudest bin in each band
        for(k=FREQBANDWIDTH; k<FREQBANDWIDTH*6; k++){
            tempfreq = abs(Z[2*k]);       // real component of bin k
            magnitude = log10(tempfreq+1);
            if(k>=FREQBANDWIDTH && k<FREQBANDWIDTH*2 && magnitude>freq1) {freq1 = magnitude; pt1=k;}
            else if(k>=FREQBANDWIDTH*2 && k<FREQBANDWIDTH*3 && magnitude>freq2) {freq2 = magnitude; pt2=k;}
            else if(k>=FREQBANDWIDTH*3 && k<FREQBANDWIDTH*4 && magnitude>freq3) {freq3 = magnitude; pt3=k;}
            else if(k>=FREQBANDWIDTH*4 && k<FREQBANDWIDTH*5 && magnitude>freq4) {freq4 = magnitude; pt4=k;}
            else if(k>=FREQBANDWIDTH*5 && k<FREQBANDWIDTH*6 && magnitude>freq5) {freq5 = magnitude; pt5=k;}
        }
        //Hash the result inline
        unsigned long long int hashresult = 0;
        char buffer [15];
        int ci = 0, j = 0;
        for(ci = 0; ci < 15; ci++) buffer[ci] = 0;   // fix: clear every slot
        j = generate_hash_string (buffer, pt1,pt2,pt3,pt4,pt5);
        unsigned long long int hash = 5381;
        for(ci = 0; ci < j; ci++)
            hash = ((hash << 5) + hash) + buffer[ci]; /* hash * 33 + c */
        hashresult = hash % MAXELEMS;
        //Write result to hash table
        key = (int) hashresult;
        atomicAdd(&(hash_table[(key*(MAXSONGS+1))+(song_id)]),1);
    }
}
// In-place iterative radix-2 FFT (Numerical Recipes "four1" form): `data`
// holds nn complex samples as interleaved (re, im) floats, indexed in the
// 1-based NR convention over data[0..2*nn-1]. nn must be a power of two.
// Runs entirely within the calling thread.
__device__ void four1(float* data, int nn)
{
int n, mmax, m, j, istep, i;
float wtemp, wr, wpr, wpi, wi, theta;
float tempr, tempi;
// reverse-binary reindexing
n = nn<<1;
j=1;
for (i=1; i<n; i+=2)
{
if (j>i) //data swap should be parallelizeable
{
float temp = data[j-1];
data[j-1] = data[i-1];
data[i-1] = temp;
temp = data[j];
data[j] = data[i];
data[i] = temp;
}
// advance j to the bit-reversed counterpart of the next i
m = nn;
while (m>=2 && j>m)
{
j -= m;
m >>= 1;
}
j += m;
}
// here begins the Danielson-Lanczos section
mmax=2;
while (n>mmax) {
istep = mmax<<1;
theta = -(2*M_PI/mmax); // double-precision angle; trig below promotes to double
wtemp = sin(0.5*theta);
wpr = -2.0*wtemp*wtemp;
wpi = sin(theta);
wr = 1.0;
wi = 0.0;
for (m=1; m < mmax; m += 2) {
for (i=m; i <= n; i += istep) {
// butterfly: combine the (i, j) pair with twiddle factor (wr, wi)
j=i+mmax;
tempr = wr*data[j-1] - wi*data[j];
tempi = wr * data[j] + wi*data[j-1];
data[j-1] = data[i-1] - tempr;
data[j] = data[i] - tempi;
data[i-1] += tempr;
data[i] += tempi;
}
// trigonometric recurrence advances the twiddle factor
wtemp=wr;
wr += wr*wpr - wi*wpi;
wi += wi*wpr + wtemp*wpi;
}
mmax=istep;
}
}
|
10,408 | #include "includes.h"
// 2-D convolution with weights read from the global `mask` array (declared
// elsewhere; `offset` selects which kernel inside `mask`). Pixels whose
// neighbourhood falls outside the valid region are written as 0.
// Fix: the output store is now bounds-guarded — the original wrote
// imgOutput[y*width + x] for every launched thread, which runs past the
// buffer whenever the grid overshoots the image dimensions.
__global__ void convolve(unsigned char* imgInput, int width, int height, int paddingX, int paddingY, int kWidth, int kHeight, unsigned int offset, unsigned char* imgOutput)
{
    // Calculate our pixel's location
    int x = (blockIdx.x * blockDim.x) + threadIdx.x;
    int y = (blockIdx.y * blockDim.y) + threadIdx.y;
    if (x >= width || y >= height)
        return;                 // thread is outside the image entirely
    float sum = 0.0;
    int pWidth = kWidth / 2;
    int pHeight = kHeight / 2;
    // Only pixels whose full kernel footprint lies inside the padded region
    if (x >= pWidth + paddingX && y >= pHeight + paddingY && x < (blockDim.x * gridDim.x) - pWidth - paddingX &&
        y < (blockDim.y * gridDim.y) - pHeight - paddingY)
    {
        for (int j = -pHeight; j <= pHeight; j++)
        {
            for (int i = -pWidth; i <= pWidth; i++)
            {
                // Sample the weight for this location
                int ki = (i + pWidth);
                int kj = (j + pHeight);
                float w = mask[(kj * kWidth) + ki + offset];
                sum += w * float(imgInput[((y + j) * width) + (x + i)]);
            }
        }
    }
    imgOutput[(y * width) + x] = (unsigned char)sum;
}
10,409 | #include <cuda.h>
#include <iostream>
#include <sys/time.h>
using namespace std;
// non coalesced
// Non-coalesced increment: each thread owns one contiguous chunk of `data`,
// so adjacent threads access addresses compPerThread elements apart.
// Assumes n is divisible by gridDim.x * blockDim.x.
__global__ void addOne(int n, double *data) {
    const int totalThreads = gridDim.x * blockDim.x;
    const int perThread = n / totalThreads;
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const int base = tid * perThread;
    for (int k = 0; k < perThread; k++)
        data[base + k] += 1.0;
}
// coalesced
// Coalesced increment: consecutive threads touch consecutive elements,
// striding by the whole thread grid each iteration.
// Assumes n is divisible by gridDim.x * blockDim.x.
__global__ void addOne1(int n, double *data) {
    const int totalThreads = gridDim.x * blockDim.x;
    const int perThread = n / totalThreads;
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    for (int k = 0; k < perThread; k++)
        data[tid + k * totalThreads] += 1.0;
}
// coalesced (distribute data to blocks)
// Coalesced increment with block-contiguous data: each block owns one
// contiguous region; within it, threads stride by blockDim.x.
// Assumes n is divisible by gridDim.x * blockDim.x.
__global__ void addOne2(int n, double *data) {
    const int perThread = n / (gridDim.x * blockDim.x);
    const int blockRegion = perThread * blockDim.x;
    for (int k = 0; k < perThread; k++)
        data[blockIdx.x * blockRegion + k * blockDim.x + threadIdx.x] += 1.0;
}
// Benchmarks the increment kernels: fills an 8M-element array with zeros,
// runs the selected kernel, and reports the elapsed wall time plus a sample
// element. Fixes: cudaThreadSynchronize() is deprecated and replaced with
// cudaDeviceSynchronize(); the unused sTime local was removed.
int main() {
    struct timeval tt1, tt2;
    int ms;
    double fms;
    int n = 8388608;  // 2^23 — divisible by every launch configuration below
    double *data = (double*) malloc(n * sizeof(double));
    for (int i=0; i<n; i++) {
        data[i] = 0;
    }
    double *data_dev;
    cudaMalloc((void**) &data_dev, n * sizeof(double));
    cudaMemcpy(data_dev, data, n * sizeof(double) , cudaMemcpyHostToDevice);
    cudaError_t error = cudaGetLastError();
    cout << "copy to device = " << error << " : " << cudaGetErrorString(error) << endl;
    // (*) modify execution parameters
    dim3 nBlocks(1024,1,1);
    dim3 nThreads(256,1,1);
    cudaDeviceSynchronize();
    gettimeofday( &tt1, NULL );
    // (*) call kernel for coalesced or non-coalesced versions
    //addOne <<< nBlocks, nThreads >>>(n, data_dev);
    addOne2 <<< nBlocks, nThreads >>>(n, data_dev);
    error = cudaGetLastError();
    cout << "run kernel = " << error << " : " << cudaGetErrorString(error) << endl;
    cudaDeviceSynchronize();       // wait for the kernel before stopping the clock
    gettimeofday( &tt2, NULL );
    ms = (tt2.tv_sec - tt1.tv_sec);
    ms = ms * 1000000 + (tt2.tv_usec - tt1.tv_usec);
    fms = ((double)ms)/1000000.0;
    cout << "Comp time = " << fms << endl;
    cudaMemcpy(data, data_dev, n * sizeof(double) , cudaMemcpyDeviceToHost);
    error = cudaGetLastError();
    cout << "copy from device = " << error << " : " << cudaGetErrorString(error) << endl;
    cudaFree(data_dev);
    cout << "data[n-1] = " << data[n-1] << endl;
    free(data);
}
|
10,410 | #include <iostream>
#include <fstream>
#include <cstdlib>
#include <ctime>
#include "cuda_runtime.h"
using namespace std;
// In-place LSD radix sort of one array_len-long sub-array per block.
// Dynamic shared memory must be 4*array_len*sizeof(int), laid out as:
// values | bit flags | prefix sums | scatter buffer. Requires
// blockDim.x == array_len (one thread per element).
// Fix: the prefix-sum region logically needs array_len+1 slots, so its last
// entry aliases t_array[0]. The original read s_array[array_len] *while*
// other threads were scattering into t_array — a shared-memory data race.
// All values that alias the scatter buffer are now snapshotted into
// registers, with a barrier, before any scatter writes happen.
__global__ void fast_radix_sort(int *array, int array_len) {
    extern __shared__ int tmp_array[];
    int *b_array = tmp_array + array_len;       // current bit of each value
    int *s_array = tmp_array + array_len * 2;   // exclusive scan of b_array
    int *t_array = tmp_array + array_len * 3;   // scatter destination
    tmp_array[threadIdx.x] = array[threadIdx.x + array_len * blockIdx.x];
    __syncthreads();
    for (int bit = 0; bit < (int)(sizeof(int) * 8); bit++) {
        b_array[threadIdx.x] = (tmp_array[threadIdx.x] >> bit) & 1;
        __syncthreads();
        if (threadIdx.x == 0) {
            // serial exclusive scan of the bit flags
            s_array[0] = 0;
            for (int j = 1; j < array_len + 1; j++) {
                s_array[j] = s_array[j - 1] + b_array[j - 1];
            }
        }
        __syncthreads();
        // snapshot everything needed before t_array (which overlaps
        // s_array[array_len]) is overwritten by the scatter below
        const int my_rank = s_array[threadIdx.x];
        const int zeros_total = array_len - s_array[array_len];
        const int my_bit = b_array[threadIdx.x];
        const int my_val = tmp_array[threadIdx.x];
        __syncthreads();
        // stable scatter: zeros keep relative order up front, ones follow
        if (my_bit == 0) {
            t_array[threadIdx.x - my_rank] = my_val;
        } else {
            t_array[my_rank + zeros_total] = my_val;
        }
        __syncthreads();
        tmp_array[threadIdx.x] = t_array[threadIdx.x];
        __syncthreads();
    }
    array[threadIdx.x + array_len * blockIdx.x] = tmp_array[threadIdx.x];
}
// Stable two-way merge: merges the sorted ranges array1[0..array1_len) and
// array2[0..array2_len) and writes the combined sorted sequence back into
// array1 (which must have room for array1_len + array2_len elements).
void merge(int *array1, int *array2, int array1_len, int array2_len) {
    const int total = array1_len + array2_len;
    int *merged = new int[total];
    int a = 0, b = 0;
    for (int k = 0; k < total; k++) {
        bool takeFirst;
        if (a == array1_len)
            takeFirst = false;          // first range exhausted
        else if (b == array2_len)
            takeFirst = true;           // second range exhausted
        else
            takeFirst = array1[a] < array2[b];
        merged[k] = takeFirst ? array1[a++] : array2[b++];
    }
    memcpy(array1, merged, sizeof(int) * total);
    delete[] merged;
}
// CPU reference LSD radix sort: for each of the 32 bit positions, performs a
// stable partition (zeros first, ones after) driven by an exclusive prefix
// sum of the bit flags. Sorts array[0..array_len) in place.
void cpu_radix_sort(int* array, int array_len) {
    bool *bits = new bool[array_len];
    int *prefix = new int[array_len + 1];
    int *scratch = new int[array_len];
    for (int bit = 0; bit < (int)(sizeof(int) * 8); bit++) {
        for (int i = 0; i < array_len; i++)
            bits[i] = (array[i] >> bit) & 1;
        // exclusive scan: prefix[i] = number of set bits before index i
        prefix[0] = 0;
        for (int i = 0; i < array_len; i++)
            prefix[i + 1] = prefix[i] + bits[i];
        const int zeros_total = array_len - prefix[array_len];
        // stable scatter into scratch, then copy back
        for (int i = 0; i < array_len; i++) {
            const int dest = bits[i] ? zeros_total + prefix[i] : i - prefix[i];
            scratch[dest] = array[i];
        }
        for (int i = 0; i < array_len; i++)
            array[i] = scratch[i];
    }
    delete[] bits;
    delete[] prefix;
    delete[] scratch;
}
// Prints `message` followed by the array contents in "[ a b c ]" form.
void array_print(int* array, int array_len, const char* message) {
    cout << " " << message << ":\n [ ";
    for (int idx = 0; idx < array_len; idx++)
        cout << array[idx] << " ";
    cout << "]" << endl;
}
// Driver: builds a random array, sorts per-block pieces of it on the GPU
// with fast_radix_sort, merges the pieces on the CPU, then runs the CPU
// radix sort for comparison, printing and logging both timings.
// The `goto cuda_error` pattern funnels all failure paths through the
// shared cleanup at the bottom.
// Fix: the CPU-sorting-time report tested cpu_merge_time == 0 instead of
// cpu_time == 0, so a fast merge made it print "less then 0.001 s"
// regardless of the actual CPU sort time.
int main(int argc, char** argv) {
int const PRINTING_LIMIT = 101;
int array_len = 50, start = 0, stop = 101;
// Obtaining command line arguments
switch (argc) {
case 1:
cout << " #Warning# Default array size: " << array_len << endl;
cout << " #Warning# Default random start: " << start << endl;
cout << " #Warning# Default random stop: " << stop << endl;
break;
case 2:
array_len = atoi(argv[1]);
cout << " #Warning# Default random start: " << start << endl;
cout << " #Warning# Default random stop: " << stop << endl;
break;
case 4:
array_len = atoi(argv[1]);
start = atoi(argv[2]);
stop = atoi(argv[3]);
break;
default:
cout << " #Error# Wrong input! Default settings applied." << endl;
cout << " #Warning# Default array size: " << array_len << endl;
cout << " #Warning# Default random start: " << start << endl;
cout << " #Warning# Default random stop: " << stop << endl;
}
cout << endl;
if(array_len < 2) {
cout << " #Error# Array length is too small. at least 2!" << endl;
return 0;
}
int *init_array = new int[array_len];
int *gpu_array = new int[array_len];
clock_t c_start, c_end;
ofstream file_out("res.csv", ios_base::app);
// Randomizing array
srand(time(NULL));
for (int i = 0; i < array_len; i++) {
// NOTE(review): uses (stop - 10), not (stop - start) — confirm intent
init_array[i] = start + rand() % (stop - 10);
}
if(array_len < PRINTING_LIMIT) {
array_print(init_array, array_len, "Initial array");
}
// GPU radix sort
int *d_array;
float working_time;
double gpu_time, cpu_time, cpu_merge_time;
int block_num = -1, thread_num = -1, subarray_len = -1;
// Splitting data to blocks: largest divisor of array_len that is <= 1024
for (int f = 1024; f > 0; f--) {
if (array_len % f == 0) {
block_num = array_len / f;
thread_num = subarray_len = f;
break;
}
}
// Checking ability to split data
if (block_num == -1 || thread_num == -1 || subarray_len == -1) {
cout << "#Error# Can not split data!" << endl;
goto cuda_error;
}
cudaEvent_t e_start, e_stop;
cudaError_t cuda_status;
cudaEventCreate(&e_start);
cudaEventCreate(&e_stop);
cudaMalloc((void**)&d_array, sizeof(int) * array_len);
cudaMemcpy(d_array, init_array, sizeof(int) * array_len, cudaMemcpyHostToDevice);
cudaEventRecord(e_start);
// shared memory: 4 regions of subarray_len ints (see fast_radix_sort)
fast_radix_sort<<<block_num, thread_num, (subarray_len * sizeof(int)) * 4>>>(d_array, subarray_len);
cudaEventRecord(e_stop);
cuda_status = cudaGetLastError();
if(cuda_status != cudaSuccess) {
cout << " #Error# CUDA kernel error!" << endl;
goto cuda_error;
}
cudaDeviceSynchronize();
cudaEventSynchronize(e_stop);
cudaEventElapsedTime(&working_time, e_start, e_stop);
cudaMemcpy(gpu_array, d_array, sizeof(int) * array_len, cudaMemcpyDeviceToHost);
// Merging sorted parts of array
c_start = clock();
for (int i = 0; i < block_num - 1; i++) {
merge(gpu_array, gpu_array + subarray_len * (i + 1), subarray_len * (i + 1), subarray_len);
}
c_end = clock();
// Printing if allowed
if(array_len < PRINTING_LIMIT) {
array_print(gpu_array, array_len, "After GPU sort");
}
gpu_time = working_time / 1000;
cout << " GPU sorting time: " << gpu_time << " s" << endl;
cpu_merge_time = (double)(c_end - c_start) / CLOCKS_PER_SEC;
cout << " Merging time: ";
if (cpu_merge_time == 0) {
cout << "less then 0.001 s" << endl;
}
else {
cout << cpu_merge_time << " s" << endl;
}
// CPU radix sort
c_start = clock();
cpu_radix_sort(init_array, array_len);
c_end = clock();
if(array_len < PRINTING_LIMIT) {
array_print(init_array, array_len, "After CPU sort");
}
cpu_time = (double)(c_end - c_start) / CLOCKS_PER_SEC;
cout << " CPU sorting time: ";
if (cpu_time == 0) {
cout << "less then 0.001 s" << endl;
}
else {
cout << cpu_time << " s" << endl;
}
// logging
file_out << array_len << ';' << gpu_time << ';' << cpu_merge_time << ';' << cpu_time << ';' << endl;
cuda_error:
file_out.close();
delete[] init_array;
delete[] gpu_array;
cudaEventDestroy(e_start);
cudaEventDestroy(e_stop);
cudaFree(d_array);
cudaDeviceReset();
return 0;
}
|
10,411 | #include <cuda.h>
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#define CUDA_CHECK_RETURN(value) {\
cudaError_t _m_cudaStat = value;\
if(_m_cudaStat != cudaSuccess) {\
fprintf(stderr, "Error %s at line %d in file %s\n",\
cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__);\
exit(1);\
}}
// Wall-clock time in seconds (microsecond resolution) via gettimeofday.
double wtime() {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return (double)tv.tv_sec + (double)tv.tv_usec * 1E-6;
}
// Elementwise c = a + b. No bounds guard: the caller must launch exactly
// one thread per element.
__global__ void gpuSum(float *a, float *b, float *c) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    c[idx] = a[idx] + b[idx];
}
// Element-wise vector-add benchmark timed with CUDA events.
// Fix: threads_per_block was 1, launching 2^23 single-thread blocks and
// wasting 31 of every 32 warp lanes; 256 threads/block computes exactly the
// same result (N = 2^23 is divisible by 256, and gpuSum has no tail logic).
int main() {
    cudaEvent_t start,stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float *h_a, *h_b, *h_c, *d_a, *d_b, *d_c, t;
    int threads_per_block = 256;   // was 1; see header comment
    int N = pow(2,23);
    int num_of_blocks = N / threads_per_block;  // exact division
    h_a = (float*)calloc(N, sizeof(float));
    h_b = (float*)calloc(N, sizeof(float));
    h_c = (float*)calloc(N, sizeof(float));
    for(int i = 0; i < N; i++) {
        h_a[i] = i * 2;
        h_b[i] = i * 3;
    }
    CUDA_CHECK_RETURN(cudaMalloc((void**)&d_a, N*sizeof(float)));
    CUDA_CHECK_RETURN(cudaMalloc((void**)&d_b, N*sizeof(float)));
    CUDA_CHECK_RETURN(cudaMalloc((void**)&d_c, N*sizeof(float)));
    CUDA_CHECK_RETURN(cudaMemcpy(d_a, h_a, N*sizeof(float), cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(d_b, h_b, N*sizeof(float), cudaMemcpyHostToDevice));
    cudaEventRecord(start,0);
    gpuSum<<<dim3(num_of_blocks),dim3(threads_per_block)>>>(d_a, d_b, d_c);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    CUDA_CHECK_RETURN(cudaGetLastError());   // surface launch/exec errors
    cudaEventElapsedTime(&t, start, stop);
    CUDA_CHECK_RETURN(cudaMemcpy(h_c, d_c, N*sizeof(float), cudaMemcpyDeviceToHost));
    fprintf(stderr, "Elapsed Time %g\n", t);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    free(h_a);
    free(h_b);
    free(h_c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
|
10,412 | #include <stdio.h>
#define N 10 //Rows and columns of the matrix
#define SmallCols 2 // The col size of the tiny window
#define SmallRows 2 // The row size of the tiny window
#define ThreadCols 5 // The col size of a single window
#define ThreadRows 5 // The row size of a single window
/*
This was the code I used to run a matrix multiply in CUDA.
I used the idea of the SUMMA algorithm, where the psuedocode
for a serialized version is as follows:
C[N][N]; // Initialize the C matrix to 0
for (int k = 0; k < N; ++k) {
for (int i = 0; i < N; ++i) {
for (int j = 0; j < N; ++j) {
C[i][j] += A[i][k]*B[k][j];
}
}
}
There were three versions of the matrix mutiply that I decided
to test out, mostly to gain familiarity with how to access
a particular element of the matrix given the grid size
and the distribution of threads in each block:
1) SingleBlock: Here, each block represented a single element
of the N x N matrix.
2) SmallBlockMultiply: Here, each block had only one thread
and the size of the block matrix was smaller than the original
matrix.
3) ThreadBlockMatrixMultiply: Here, each block had TreadRows x ThreadCols
submatrices each and they represented a single element in the overall
matrix (the number of blocks was determined through division).
*/
/*
Returns M[i][j], since I used an array to store the matrix values.
*/
__host__ __device__ int GetMatrixEntry(int *M, int i, int j);
/*
Returns the index of the linear array corresponding to row i and column j.
*/
__host__ __device__ int GetLinearIndex(int i, int j);
/*
Displays the matrixM with an optional description attached.
*/
void DisplayMatrix(const char* descp, int *M);
/*
Generates the Iota matrix 1 to N and returns a pointer to it.
*/
int* GenerateIotaMatrix();
/*
Generates the zero matrix and returns a pointer to it.
*/
int* GenerateZeroMatrix();
/*
Returns a pointer to a device copy of the matrix M
*/
int* CreateDeviceMatrixCopy(int *M);
/*
Copies the contents of the device matrix, dev_M, to
the host matrix M.
*/
void CopyMatrixDeviceToHost(int *M, int *dev_M);
/*
Kernels for the three versions of matrix multiply I tried out (see long comment
above).
Note that k corresponds to the "k" value outlined in the outer loop in the
psuedocode above.
*/
__global__ void SingleBlockMatrixMultiply(int *A, int *B, int *C, int k);
__global__ void SmallBlockMatrixMultiply(int *A, int *B, int *C, int k);
__global__ void ThreadBlockMatrixMultiply(int *A, int *B, int *C, int k);
/*
Executes the memory allocation to host and device parts of the CUDA process
(to avoid clutting the calling function with repetitive code)
*/
void ExecuteInitialSteps(int* *A, int* *B, int* *C, int* *dev_A, int* *dev_B, int* *dev_C);
/*
Displays matrices A, B and C (to avoid cluttering with repetitive code)
*/
void DisplayMatrices(int *A, int *B, int *C);
/*
Frees all of of the memories. A, B and C are host matrices,
while their "dev_" prefixed versions are their device versions.
*/
void FreeMemories(int *A, int *B, int *C, int *dev_A, int *dev_B, int *dev_C);
/*
These functions run the procedures that end up calling their corresponding kernel functions.
*/
void RunSingleBlockMatrixMult();
void RunSmallBlockMatrixMult();
void RunThreadBlockMatrixMult();
// Entry point: only the one-thread-per-element variant is enabled; the other
// two runners are kept (commented out) for experimentation.
int main(int argc, char* argv[]) {
//RunSingleBlockMatrixMult();
//RunSmallBlockMatrixMult();
RunThreadBlockMatrixMult();
return 0;
}
//Kernel functions
// One block per C element: accumulates the k-th outer-product term
// A[row][k] * B[k][col] into C[row][col]. Expects an N x N grid of
// single-thread blocks.
__global__ void SingleBlockMatrixMultiply(int *A, int *B, int *C, int k) {
    const int row = blockIdx.y;
    const int col = blockIdx.x;
    C[GetLinearIndex(row, col)] += A[GetLinearIndex(row, k)] * B[GetLinearIndex(k, col)];
}
// Window-walk SUMMA step: the grid is a tiny SmallCols x SmallRows array of
// single-thread blocks. Each block starts at its grid position and, per
// diagonal step, updates its current element, every element below it in the
// same column, and every element to its right in the same row, then slides
// down-and-right by one window. Each C element is updated exactly once per
// call (at diagonal step min(row_offset, col_offset) of the owning block),
// accumulating the k-th outer-product term A[i][k]*B[k][j].
__global__ void SmallBlockMatrixMultiply(int *A, int *B, int *C, int k) {
int i = blockIdx.y;
int j = blockIdx.x;
while (i < N && j < N) { // Keep multiplying while we are within the main matrices' bounds
C[GetLinearIndex(i,j)] += A[GetLinearIndex(i,k)]*B[GetLinearIndex(k,j)];
int temp_i = i+SmallRows; //Move the window down
while (temp_i < N) {
C[GetLinearIndex(temp_i,j)] += A[GetLinearIndex(temp_i,k)]*B[GetLinearIndex(k,j)];
temp_i += SmallRows;
}
int temp_j = j+SmallCols; //Move the window to the right
while (temp_j < N) {
C[GetLinearIndex(i,temp_j)] += A[GetLinearIndex(i,k)]*B[GetLinearIndex(k,temp_j)];
temp_j += SmallCols;
}
i += SmallRows; //Now move the window down along the diagonal
j += SmallCols;
}
}
// One thread per C element, threads tiled across blocks; bounds-guarded so
// the ceil-divided grid's overhang threads do nothing.
__global__ void ThreadBlockMatrixMultiply(int *A, int *B, int *C, int k) {
    const int row = blockDim.y * blockIdx.y + threadIdx.y;
    const int col = blockDim.x * blockIdx.x + threadIdx.x;
    if (row >= N || col >= N)
        return;
    C[GetLinearIndex(row, col)] += A[GetLinearIndex(row, k)] * B[GetLinearIndex(k, col)];
}
// Allocates the host matrices (A = B = iota, C = zero) and mirrors each one
// onto the device, returning all six pointers through the out-parameters.
void ExecuteInitialSteps(int* *A, int* *B, int* *C, int* *dev_A, int* *dev_B, int* *dev_C) {
    *A = GenerateIotaMatrix();
    *B = GenerateIotaMatrix();
    *C = GenerateZeroMatrix();   // accumulator starts at zero
    *dev_A = CreateDeviceMatrixCopy(*A);
    *dev_B = CreateDeviceMatrixCopy(*B);
    *dev_C = CreateDeviceMatrixCopy(*C);
}
// Prints A, B and C in order, each followed by a blank line.
void DisplayMatrices(int *A, int *B, int *C) {
    const char* labels[3] = {"A:", "B:", "C:"};
    int* mats[3] = {A, B, C};
    for (int m = 0; m < 3; ++m) {
        DisplayMatrix(labels[m], mats[m]);
        printf("\n");
    }
}
// Releases the host (free) and device (cudaFree) copies of all matrices.
void FreeMemories(int *A, int *B, int *C, int *dev_A, int *dev_B, int *dev_C) {
    free(A);
    free(B);
    free(C);
    cudaFree(dev_A);
    cudaFree(dev_B);
    cudaFree(dev_C);
}
// Driver for the one-block-per-element variant: an N x N grid of
// single-thread blocks, one launch per SUMMA step k.
void RunSingleBlockMatrixMult() {
    int *hostA, *hostB, *hostC;
    int *devA, *devB, *devC;
    ExecuteInitialSteps(&hostA, &hostB, &hostC, &devA, &devB, &devC);
    dim3 grid(N, N); // one block per matrix element
    for (int step = 0; step < N; ++step) {
        SingleBlockMatrixMultiply<<<grid, 1>>>(devA, devB, devC, step);
    }
    CopyMatrixDeviceToHost(hostC, devC);
    DisplayMatrices(hostA, hostB, hostC);
    FreeMemories(hostA, hostB, hostC, devA, devB, devC);
}
// Driver for the sliding-window variant: a SmallCols x SmallRows grid of
// single-thread blocks sweeps the whole matrix, one launch per SUMMA step.
void RunSmallBlockMatrixMult() {
    int *hostA, *hostB, *hostC;
    int *devA, *devB, *devC;
    ExecuteInitialSteps(&hostA, &hostB, &hostC, &devA, &devB, &devC);
    dim3 grid(SmallCols,SmallRows); // tiny window walked across the matrix
    for (int step = 0; step < N; ++step) {
        SmallBlockMatrixMultiply<<<grid, 1>>>(devA, devB, devC, step);
    }
    CopyMatrixDeviceToHost(hostC, devC);
    DisplayMatrices(hostA, hostB, hostC);
    FreeMemories(hostA, hostB, hostC, devA, devB, devC);
}
// Driver for the one-thread-per-element variant: ceil-divided grid of
// ThreadCols x ThreadRows blocks, one launch per SUMMA step.
void RunThreadBlockMatrixMult() {
    int *hostA, *hostB, *hostC;
    int *devA, *devB, *devC;
    ExecuteInitialSteps(&hostA, &hostB, &hostC, &devA, &devB, &devC);
    dim3 grid((N+ThreadCols-1)/ThreadCols,(N+ThreadRows-1)/ThreadRows); // ceil-div block count
    dim3 threads(ThreadCols, ThreadRows); // submatrix handled per block
    for (int step = 0; step < N; ++step) {
        ThreadBlockMatrixMultiply<<<grid, threads>>>(devA, devB, devC, step);
    }
    CopyMatrixDeviceToHost(hostC, devC);
    DisplayMatrices(hostA, hostB, hostC);
    FreeMemories(hostA, hostB, hostC, devA, devB, devC);
}
// Row-major accessor: returns M[i][j] for an N x N matrix stored linearly.
__host__ __device__ int GetMatrixEntry(int *M, int i, int j) {
    return M[i*N + j];
}
// Maps (row i, column j) to the row-major linear index.
__host__ __device__ int GetLinearIndex(int i, int j) {
    return i * N + j;
}
// Prints `descp`, then the N x N matrix M row by row.
void DisplayMatrix(const char* descp, int *M) {
    printf("%s\n", descp);
    for (int r = 0; r < N; ++r) {
        for (int c = 0; c < N; ++c)
            printf("%d ", GetMatrixEntry(M, r, c));
        printf("\n");
    }
}
// Allocates an N x N matrix filled with 1..N*N in row-major order.
int* GenerateIotaMatrix() {
    int* M = (int*)malloc(N*N*sizeof(int));
    for (int idx = 0; idx < N*N; ++idx)
        M[idx] = idx + 1;
    return M;
}
// Allocates an N x N matrix of zeros (calloc zero-fills for us).
int* GenerateZeroMatrix() {
    return (int*)calloc(N*N, sizeof(int));
}
// Allocates a device buffer and copies the host matrix M into it.
int* CreateDeviceMatrixCopy(int *M) {
    int *dev_M;
    const size_t bytes = N*N*sizeof(int);
    cudaMalloc((void**)&dev_M, bytes);
    cudaMemcpy(dev_M, M, bytes, cudaMemcpyHostToDevice);
    return dev_M;
}
// Copies a device matrix back into the host matrix M.
void CopyMatrixDeviceToHost(int *M, int *dev_M) {
    cudaMemcpy(M, dev_M, sizeof(int)*N*N, cudaMemcpyDeviceToHost);
}
|
10,413 | // Matrix addition, CPU version
// gcc matrix_cpu.c -o matrix_cpu -std=c99
#include <stdio.h>
#define GRID_SIZE 64
#define BLOCK_SIZE 16
#define N 1024
// Elementwise add of two N x N matrices stored linearly.
// Launch geometry: GRID_SIZE x GRID_SIZE blocks of BLOCK_SIZE x BLOCK_SIZE
// threads; uses blockDim.x/gridDim.x for both axes, so blocks and grid must
// be square.
__global__
void add_matrix(float *a, float *b, float *c)
{
    const int rowsize = gridDim.x * blockDim.x;   // elements per matrix row
    const int row = blockIdx.y * blockDim.x + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int idx = row * rowsize + col;
    c[idx] = a[idx] + b[idx];
}
// Allocates two N x N matrices, adds them on the GPU, reports the kernel
// time measured with CUDA events, and prints the result matrix.
// Fixes: deprecated cudaThreadSynchronize() -> cudaDeviceSynchronize();
// the new[] arrays and the CUDA events are now released; returns 0 instead
// of EXIT_SUCCESS, which was used without including <cstdlib>.
int main()
{
    float *a = new float[N*N];
    float *b = new float[N*N];
    float *c = new float[N*N];
    float* ad;
    float* bd;
    float* cd;
    cudaEvent_t beforeEvent;
    cudaEvent_t afterEvent;
    // a[i][j] = 10 + i, b[i][j] = j/N (filled with index i + j*N)
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            a[i+j*N] = 10 + i;
            b[i+j*N] = (float)j / N;
        }
    }
    int size = N * N * sizeof(float);
    cudaMalloc((void**)&ad, size);
    cudaMalloc((void**)&bd, size);
    cudaMalloc((void**)&cd, size);
    dim3 dimBlock( BLOCK_SIZE, BLOCK_SIZE );
    dim3 dimGrid( GRID_SIZE, GRID_SIZE );
    cudaEventCreate(&beforeEvent);
    cudaEventCreate(&afterEvent);
    cudaMemcpy(ad, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(bd, b, size, cudaMemcpyHostToDevice);
    cudaEventRecord(beforeEvent, 0);
    add_matrix<<<dimGrid, dimBlock>>>(ad, bd, cd);
    cudaDeviceSynchronize();   // wait for the kernel before the stop event
    cudaEventRecord(afterEvent, 0);
    cudaEventSynchronize(afterEvent);
    cudaMemcpy( c, cd, size, cudaMemcpyDeviceToHost );
    float theTime;
    cudaEventElapsedTime(&theTime, beforeEvent, afterEvent);
    cudaEventDestroy(beforeEvent);
    cudaEventDestroy(afterEvent);
    cudaFree( ad );
    cudaFree( bd );
    cudaFree( cd );
    printf("Total time in ms: %f\n", theTime);
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
        {
            printf("%0.2f ", c[i+j*N]);
        }
        printf("\n");
    }
    delete[] a;
    delete[] b;
    delete[] c;
    return 0;
}
|
10,414 | /*
* Written by : Amirul
* email : amirul.abdullah89@gmail.com
*/
#include <cuda.h>
#include <iostream>
#include <array>
#include <assert.h>
#include <math.h>
#ifndef SIZE
#define SIZE 10000
#endif
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 1000
#endif
#define STENCIL_SIZE 3
// Cuda error checker
// Pass-through wrapper for CUDA runtime results. In DEBUG builds it reports
// the error string on stderr and asserts; otherwise it just forwards the code.
inline
cudaError_t checkCuda(cudaError_t result)
{
#if defined(DEBUG)
  if (cudaSuccess != result) {
    std::cerr << "CUDA Runtime Error: " << cudaGetErrorString(result) << "\n";
    assert(cudaSuccess == result);
  }
#endif
  return result;
}
// One sweep of the 5-point weighted stencil over the SIZE x SIZE grid `a`,
// writing interior points of rows 1..SIZE-2 into `b`. Each block stages three
// consecutive rows of its column range in shared memory and slides that
// window down the grid. Neighboring blocks must overlap by STENCIL_SIZE-1
// columns so the columns skipped at each block's edges (i == 0 and
// i == lastThread) are computed by a neighbor.
__global__ void fd_kernel(double *a, double *b)
{
  // Reusable shared memory: rows row, row+1, row+2 of this block's columns.
  __shared__ double df[STENCIL_SIZE][BLOCK_SIZE];
  const int i = threadIdx.x;
  // FIX: the overlap between adjacent blocks was 1 column (offset ==
  // blockIdx.x), so each seam column was the edge thread of BOTH blocks and
  // was never computed. The host grid size ceil(SIZE/(BLOCK_SIZE-STENCIL_SIZE+1))
  // implies the intended per-block stride of blockDim.x - (STENCIL_SIZE-1).
  const int offset = blockIdx.x * (STENCIL_SIZE - 1);
  const int globalInd = (i + (blockIdx.x * blockDim.x) - offset);
  const size_t rowOffset = SIZE;
  int lastThread = blockDim.x - 1;
  size_t row = 0;
  // Last block: clamp the rightmost active thread so it maps to column SIZE-1.
  if( blockIdx.x == (gridDim.x -1) ){
    lastThread = rowOffset - ((blockIdx.x * blockDim.x) - offset) - 1;
  }
  // Copy the first two rows to shared memory from global memory.
  if( globalInd < rowOffset ){
    df[0][i] = a[globalInd + (rowOffset * row)];
    df[1][i] = a[globalInd + (rowOffset * (row+1))] ;
  }
  for(; row < SIZE-2; ++row)
  {
    // Load the third row of the current window.
    if( globalInd < rowOffset ){
      df[2][i] = a[globalInd + (rowOffset * (row+2))];
    }
    __syncthreads();
    if( globalInd < rowOffset ){
      int id = globalInd + (rowOffset * (row+1));
      if(i != 0 && i != lastThread) // edge columns handled by the neighboring block
      {
        b[id]= df[1][i]/2.0 + df[1][i+1]/8.0 + df[1][i-1]/8.0 + df[0][i]/8.0 + df[2][i]/8.0;
      }
    }
    // FIX: all threads must finish READING df[1] (including neighbors'
    // df[1][i-1]/df[1][i+1]) before any thread overwrites it below;
    // without this barrier the row shift races with the stencil reads.
    __syncthreads();
    if( globalInd < rowOffset ){
      // Slide the shared window down one row.
      df[0][i] = df[1][i];
      df[1][i] = df[2][i];
    }
  }
}
// Runs ITER_SIZE ping-pong stencil sweeps on a SIZE x SIZE grid with fixed
// boundary values, then prints three sample interior points.
void fd()
{
  double *a_host = new double[SIZE*SIZE];
  double *a_dev, *b_dev;
  memset(a_host,0,sizeof(double)*SIZE*SIZE);
  size_t ind(0);
  const size_t ITER_SIZE = 10;
  // Fixed boundary values on the four edges (FIX: comments previously
  // labeled all four edges "Top boundary").
  for(; ind < SIZE; ++ind)
  {
    a_host[ind] = 1.0;                   // top boundary
    a_host[ind*SIZE] = 3.0;              // left boundary
    a_host[ind*SIZE + SIZE - 1] = 2.0;   // right boundary
    a_host[SIZE * (SIZE-1) + ind] = 4.0; // bottom boundary
  }
  // b starts as a copy of a so the boundary cells (never written by the
  // kernel) stay valid in both buffers.
  checkCuda( cudaMalloc( (void**)&a_dev, sizeof(double) * SIZE * SIZE ));
  checkCuda( cudaMalloc( (void**)&b_dev, sizeof(double) * SIZE * SIZE ));
  checkCuda( cudaMemcpy(a_dev, a_host, sizeof(double) * SIZE * SIZE, cudaMemcpyHostToDevice) );
  checkCuda( cudaMemcpy(b_dev, a_host, sizeof(double) * SIZE * SIZE, cudaMemcpyHostToDevice) );
  dim3 grid( ceil(SIZE/(float)(BLOCK_SIZE-STENCIL_SIZE+1)) ), block(BLOCK_SIZE);
  for (size_t iter = 0; iter < ITER_SIZE; ++iter)
    if( iter % 2 == 0) // ping-pong between buffers to avoid a device-to-device copy
      fd_kernel<<<grid,block>>>(a_dev,b_dev);
    else
      fd_kernel<<<grid,block>>>(b_dev,a_dev);
  checkCuda( cudaPeekAtLastError() );
  // The buffer holding the newest data depends on the parity of the last iteration.
  if((ITER_SIZE-1) % 2 == 0)
    checkCuda( cudaMemcpy(a_host, b_dev, sizeof(double) * SIZE * SIZE, cudaMemcpyDeviceToHost) );
  else
    checkCuda( cudaMemcpy(a_host, a_dev, sizeof(double) * SIZE * SIZE, cudaMemcpyDeviceToHost) );
  std::cout <<a_host[4 + (4*SIZE)] << " " <<
    a_host[999 + (999*SIZE)] << " " <<
    a_host[9994 + (9994*SIZE)] << "\n";
  // FIX: release host and device buffers (previously leaked).
  delete[] a_host;
  cudaFree(a_dev);
  cudaFree(b_dev);
}
// Prints the identity of device 0, then runs the finite-difference demo.
int main()
{
  cudaDeviceProp deviceProp;
  checkCuda( cudaGetDeviceProperties(&deviceProp, 0) );
  std::cout << "\nDevice Name: \n" << deviceProp.name;
  std::cout << "Compute Capability: " << deviceProp.major << "." << deviceProp.minor << "\n";
  fd();
  return 0;
}
10,415 | #include <cstdlib>
#include <sys/time.h>
#include <math.h>
#include <stdio.h>
#include <assert.h>
// Writes an 8-bit palette-indexed BMP of x by y pixels to `name`.
// `bmp` holds one byte per pixel; the palette maps index i to pure blue of
// intensity i. Rows are padded to a multiple of 4 bytes as the format requires.
void writeBMP(const int x, const int y, const unsigned char* const bmp, const char* const name) {
  // Header template; the 255-filled fields are patched with real sizes below.
  const unsigned char bmphdr[54] = {66, 77, 255, 255, 255, 255, 0, 0, 0, 0, 54, 4, 0, 0, 40, 0, 0, 0, 255, 255, 255, 255, 255, 255, 255, 255, 1, 0, 8, 0, 0, 0, 0, 0, 255, 255, 255, 255, 196, 14, 0, 0, 196, 14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
  unsigned char hdr[1078];  // 54-byte file/info header + 256*4-byte color table
  int i, j, c, xcorr, diff;
  FILE* f;
  xcorr = (x + 3) >> 2 << 2; // BMPs have to be a multiple of 4 pixels wide
  diff = xcorr - x;          // pad bytes appended to each row
  for (i = 0; i < 54; i++) hdr[i] = bmphdr[i];
  // NOTE(review): these pointer-punned int stores assume a little-endian CPU
  // that tolerates unaligned access (true on x86/x86-64) -- not portable C.
  *((int*)(&hdr[18])) = xcorr;            // biWidth
  *((int*)(&hdr[22])) = y;                // biHeight
  *((int*)(&hdr[34])) = xcorr * y;        // biSizeImage
  *((int*)(&hdr[2])) = xcorr * y + 1078;  // bfSize (pixel data + headers)
  // Grayscale-in-blue palette: entry i = (blue=i, green=0, red=0).
  for (i = 0; i < 256; i++) {
    j = i * 4 + 54;
    hdr[j+0] = i; // blue ColorTable
    hdr[j+1] = 0; // green
    hdr[j+2] = 0; // red
    hdr[j+3] = 0; // dummy
  }
  f = fopen(name, "wb"); assert(f != NULL);
  c = fwrite(hdr, 1, 1078, f); assert(c == 1078);
  if (diff == 0) {
    // Rows already 4-byte aligned: dump the whole buffer in one call.
    c = fwrite(bmp, 1, x * y, f); assert(c == x * y);
  } else {
    // Reuse the start of hdr as a source of padding zero bytes.
    *((int*)(&hdr[0])) = 0; // need up to three zero bytes
    for (j = 0; j < y; j++) {
      c = fwrite(&bmp[j * x], 1, x, f); assert(c == x);
      c = fwrite(hdr, 1, diff, f); assert(c == diff);
    }
  }
  fclose(f);
}
// Renders `frames` frames of a radial cosine "wave" pattern into `pic`
// (frames stored back to back, one byte per pixel).
// NOTE(review): this kernel treats blockIdx.x as the pixel row and
// blockDim.x as the frame width, but main() launches
// width*(width/blockSize) blocks of blockSize=128 threads. The two layouts
// only agree when the frame width is exactly 128; for any other width the
// kernel indexes the buffer with width==128 and rows that run past the
// frame. Confirm the intended launch geometry (one block per row of
// `width` threads) before reusing this kernel.
__global__ void buildPicture(int frames, unsigned char *pic) {
  int row = blockIdx.x;
  int col = threadIdx.x;
  int width = blockDim.x;
  for (int frame = 0; frame < frames; frame++) {
    // Distance from a hard-coded 1024x1024 frame center -- presumably meant
    // to be width/2; TODO confirm.
    float fx = col - 1024 / 2;
    float fy = row - 1024 / 2;
    float d = sqrtf(fx * fx + fy * fy);
    // Radial cosine wave animated by `frame`, amplitude damped with distance.
    // `cos` here is the double-precision overload (argument is promoted).
    unsigned char color = (unsigned char)(160.0f + 127.0f *
                                          cos(d / 10.0f - frame / 7.0f) /
                                          (d / 50.0f + 1.0f));
    pic[frame * width * width + row * width + col] = (unsigned char)color;
  }
}
// Parses frame_width / num_frames, renders the frames on the GPU into
// unified memory, and (for small runs) writes each frame out as a BMP.
int main(int argc, char *argv[]) {
  // check command line
  if (argc != 3) {
    fprintf(stderr, "usage: %s frame_width num_frames\n", argv[0]);
    exit(-1);
  }
  int width = atoi(argv[1]);
  if (width < 100) {
    fprintf(stderr, "error: frame_width must be at least 100\n");
    exit(-1);
  }
  int frames = atoi(argv[2]);
  if (frames < 1) {
    fprintf(stderr, "error: num_frames must be at least 1\n");
    exit(-1);
  }
  printf("computing %d frames of %d by %d picture\n", frames, width, width);
  // allocate picture array in unified memory (host- and device-visible)
  int N = frames * width * width;
  // FIX: was `unsigned char *pic = new unsigned char[N];` whose allocation
  // was leaked the moment cudaMallocManaged overwrote the pointer.
  unsigned char *pic;
  cudaMallocManaged(&pic, N*sizeof(unsigned char));
  // static value for blockSize, so we avoid problems if the width gets high
  int blockSize = 128;
  /**
   * each line has (width / blockSize) blocks. We have 'width' lines. So,
   * the total number of blocks is given by: width * (width / blockSize)
   * NOTE(review): the integer division truncates, so widths that are not a
   * multiple of blockSize launch too few blocks (width=100 launches none),
   * and the kernel assumes blockDim.x equals the frame width -- both only
   * hold for width == 128. Confirm the intended launch geometry.
   */
  int numBlocks = width * (width / blockSize);
  buildPicture<<<numBlocks, blockSize>>>(frames, pic);
  // Wait for GPU to finish before accessing on host
  cudaDeviceSynchronize();
  // verify result by writing frames to BMP files
  if ((width <= 256) && (frames <= 100)) {  // logical && (was bitwise &)
    for (int frame = 0; frame < frames; frame++) {
      char name[32];
      sprintf(name, "wave%d.bmp", frame + 1000);
      writeBMP(width, width, &pic[frame * width * width], name);
    }
  }
  cudaFree(pic);
  return 0;
}
|
10,416 | #include "window.cuh"
// counts the number of pixels in the window
// Number of pixels in the (2*window_radius+1)^2 window centered at linear
// index `index`, after clipping the window to the image borders.
__device__ unsigned int count_window_pixels(unsigned int index, unsigned int height, unsigned int width, int window_radius) {
    int cy = index / width;
    int cx = index % width;
    unsigned int top = MAX(0, cy - window_radius);
    unsigned int left = MAX(0, cx - window_radius);
    unsigned int bottom = MIN(height, cy + window_radius + 1);  // exclusive
    unsigned int right = MIN(width, cx + window_radius + 1);    // exclusive
    return (bottom - top) * (right - left);
}
// returns the min value across the rgb channels for all pixels in the window
// Smallest value across the r, g, b channels of every pixel inside the
// window around `index` (window clipped to the image borders).
__device__ float find_window_min(unsigned int index, pixel_t *image_pixels, unsigned int height, unsigned int width, int window_radius) {
    int cy = index / width;
    int cx = index % width;
    unsigned int top = MAX(0, cy - window_radius);
    unsigned int left = MAX(0, cx - window_radius);
    unsigned int bottom = MIN(height, cy + window_radius + 1);  // exclusive
    unsigned int right = MIN(width, cx + window_radius + 1);    // exclusive
    // Seed with the first pixel's red channel; the loop still visits that
    // pixel, so its green and blue channels are considered too.
    float best = image_pixels[top * width + left].r;
    for (unsigned int row = top; row < bottom; row++) {
        for (unsigned int col = left; col < right; col++) {
            pixel_t p = image_pixels[row * width + col];
            best = MIN(best, MIN(p.r, MIN(p.g, p.b)));
        }
    }
    return best;
}
// computes the mean for the rgb color channels across the pixels in the window
__device__ void compute_window_mean(pixel_t *mean, unsigned int index, pixel_t *pixels, unsigned int height, unsigned int width, int window_radius) {
int y, x;
unsigned int y_min, y_max, x_min, x_max;
unsigned int i, j;
unsigned int num_pixels;
pixel_t pixel;
y = index / width;
x = index % width;
y_min = MAX(0, y - window_radius);
x_min = MAX(0, x - window_radius);
y_max = MIN(height, y + window_radius + 1);
x_max = MIN(width, x + window_radius + 1);
mean->r = 0;
mean->g = 0;
mean->b = 0;
for (i = y_min; i < y_max; i++) {
for (j = x_min; j < x_max; j++) {
pixel = pixels[i * width + j];
mean->r += pixel.r;
mean->g += pixel.g;
mean->b += pixel.b;
mean->a = PIXEL_MAX_VALUE;
}
}
num_pixels = count_window_pixels(index, height, width, window_radius);
mean->r /= num_pixels;
mean->g /= num_pixels;
mean->b /= num_pixels;
}
// computes the variance for the rgb color channels across the pixels in the window
__device__ void compute_window_variance(pixel_t *variance, pixel_t *mean, unsigned int index, pixel_t *pixels, unsigned int height, unsigned int width, int window_radius) {
int y, x;
unsigned int y_min, y_max, x_min, x_max;
unsigned int i, j;
unsigned int num_pixels;
pixel_t pixel;
y = index / width;
x = index % width;
y_min = MAX(0, y - window_radius);
x_min = MAX(0, x - window_radius);
y_max = MIN(height, y + window_radius + 1);
x_max = MIN(width, x + window_radius + 1);
variance->r = 0;
variance->g = 0;
variance->b = 0;
for (i = y_min; i < y_max; i++) {
for (j = x_min; j < x_max; j++) {
pixel = pixels[i * width + j];
variance->r += (pixel.r - mean->r) * (pixel.r - mean->r);
variance->g += (pixel.g - mean->g) * (pixel.g - mean->g);
variance->b += (pixel.b - mean->b) * (pixel.b - mean->b);
variance->a = PIXEL_MAX_VALUE;
}
}
num_pixels = count_window_pixels(index, height, width, window_radius);
variance->r /= (num_pixels - 1);
variance->g /= (num_pixels - 1);
variance->b /= (num_pixels - 1);
}
// computes the dot product for a window across two sets of pixels
__device__ void compute_window_dot_product(pixel_t *dot_product, unsigned int index, pixel_t *pixels_X, pixel_t *pixels_Y, unsigned int height, unsigned int width, int window_radius) {
int y, x;
unsigned int y_min, y_max, x_min, x_max;
unsigned int i, j;
pixel_t pixel_x, pixel_y;
y = index / width;
x = index % width;
y_min = MAX(0, y - window_radius);
x_min = MAX(0, x - window_radius);
y_max = MIN(height, y + window_radius + 1);
x_max = MIN(width, x + window_radius + 1);
dot_product->r = 0;
dot_product->g = 0;
dot_product->b = 0;
for (i = y_min; i < y_max; i++) {
for (j = x_min; j < x_max; j++) {
pixel_x = pixels_X[i * width + j];
pixel_y = pixels_Y[i * width + j];
dot_product->r += pixel_x.r * pixel_y.r;
dot_product->g += pixel_x.g * pixel_y.g;
dot_product->b += pixel_x.b * pixel_y.b;
dot_product->a = PIXEL_MAX_VALUE;
}
}
}
|
10,417 | #include "includes.h"
#ifdef __cplusplus
extern "C" {
#endif
#ifdef __cplusplus
}
#endif
// Computes Mandelbrot iteration counts for the horizontal band of rows
// [start_hauteur, end_hauteur) of a largeur x hauteur image, one thread per
// pixel. A is the band's row-major output (largeur columns per row); N is
// the iteration cap; the view is mapped to [-2, 2] x [-2, 2].
__global__ void mandelbrot(int* A, const int N, const int largeur, const int hauteur, const int start_hauteur, const int end_hauteur){
	int idx = blockDim.x * blockIdx.x + threadIdx.x;
	// FIX: the row index of a row-major image with `largeur` columns is
	// idx / largeur (was idx / hauteur, wrong whenever largeur != hauteur).
	int y = idx / largeur;
	int x = idx - (y * largeur);
	if (y < (end_hauteur-start_hauteur) && x < largeur)
	{
		int cpt = 0;
		float x1 = 0.;
		float y1 = 0.;
		float x2 = 0.;
		float y2 = 0.;
		// Map the pixel to the complex plane; the band offset restores the
		// pixel's absolute row.
		float a = 4. * x / largeur - 2.;
		float b = 4. * (y+start_hauteur) / hauteur - 2.;
		float val = x1* x1 + y1 * y1;
		// Iterate z <- z^2 + c until escape (|z|^2 > 4) or the cap N.
		while (cpt < N && val <= 4.)
		{
			cpt ++;
			x2 = x1* x1 - y1 * y1 + a;
			y2 = 2. * x1 * y1 + b;
			x1 = x2;
			y1 = y2;
			val = x1* x1 + y1 * y1;
		}
		// FIX: row-major stride is largeur, not hauteur.
		A[y*largeur+x] = cpt;
	}
}
10,418 | #include "includes.h"
// Blelloch (work-efficient) exclusive prefix sum of d_in into d_out.
// Assumes a single block whose blockDim.x is a power of two (set up for
// 1 block of 1024 threads), one element per thread.
__global__ void exclusivePrefixAdd(unsigned int* d_in, unsigned int* d_out)
{
	int thread_x = threadIdx.x;
	d_out[thread_x] = d_in[thread_x];
	__syncthreads();
	// Up-sweep (reduce): build partial sums at doubling strides.
	for (unsigned int i = 2; i <= blockDim.x; i <<= 1)
	{
		if ((thread_x + 1) % i == 0)
		{
			d_out[thread_x] = d_out[thread_x] + d_out[thread_x - i / 2];
		}
		__syncthreads();
	}
	// Down-sweep: clear the root, then swap/accumulate partials back down.
	if (thread_x == blockDim.x - 1)
	{
		d_out[thread_x] = 0;
	}
	// FIX: barrier so the root's zero (and the final reduce results) are
	// visible to every thread before the down-sweep begins -- this answers
	// the original author's "maybe need a syncthreads() here" question.
	__syncthreads();
	for (unsigned int i = blockDim.x; i >= 2; i >>= 1)
	{
		if ((thread_x + 1) % i == 0)
		{
			unsigned int temp = d_out[thread_x - (i / 2)];
			// the "left" copy
			d_out[thread_x - (i / 2)] = d_out[thread_x];
			// and the "right" operation
			d_out[thread_x] = temp + d_out[thread_x];
		}
		__syncthreads();
	}
}
// Backward pass of the sigmoid via the chain rule:
// d(sigma)/dx = sigma * (1 - sigma), where `forward` is the stored sigma(x)
// and `chain` is the upstream gradient.
__device__ float backwardSigmoid (float forward, float chain)
{
	// FIX: 1.0f keeps the arithmetic in single precision; the double
	// literal 1.0 forced a float->double->float round trip on every call.
	return forward * (1.0f - forward) * chain;
}
// Applies the sigmoid backward pass element-wise: one thread per entry,
// guarded so surplus threads in the final block do nothing.
extern "C"
__global__ void backwardSigmoidKernel (int length, float *forward, float *chain, float *destination)
{
	const int i = blockDim.x * blockIdx.x + threadIdx.x;
	if (i >= length)
		return;
	destination[i] = backwardSigmoid(forward[i], chain[i]);
}
10,420 | // #include <stdio.h>
// // #include <string.h>
// // #include <stdlib.h>
// #include <thrust/host_vector.h>
// #include <thrust/device_vector.h>
// #include <cuda.h>
// #include <cuda_runtime.h>
// // (char* T, char* BWT, int* SA, int n) {
// __global__ void test(char *A, char *B, int *C,int n) {
// int i = blockIdx.x * blockDim.x + threadIdx.x;
// if (i >= n) return;
// B[i] = C[i] == 0 ? '$' : A[C[i] - 1];
// }
// int main () {
// const int n = 12;
// char A[] = "mississippi$";
// thrust::host_vector<char> h_B(n);
// thrust::device_vector<char> d_B = h_B;
// // char *T = (char *)malloc((n + 1) * sizeof(char));
// // for (int i = 0; i < n; i++) {
// // T[i] = "mississippi$"[i];
// // }
// // char T[] = "mississippi$";
// // int SA_tmp[] = {11,10,7,4,1,0,9,8,6,3,5,2};
// // int *SA = (int *)malloc(n * sizeof(int));
// // for (int i = 0; i < n; i++) {
// // SA[i] = SA_tmp[i];
// // }
// // int SA[] = {11,10,7,4,1,0,9,8,6,3,5,2};
// int C[] = {11,10,7,4,1,0,9,8,6,3,5,2};
// // d_B = h_B;
// dim3 block(8, 1);
// dim3 grid((n + block.x - 1) / block.x, 1);
// char *pd_B = thrust::raw_pointer_cast(&d_B[0]);
// // test<<< grid, block >>>(T, pd_B, SA, n);
// test<<< grid, block >>>(A, pd_B, C, n);
// h_B = d_B;
// for (int i = 0; i < n; i++) {
// printf("%c ", h_B[i]);
// }
// }
#include <stdio.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cuda.h>
#include <cuda_runtime.h>
// For each i < n, B[i] becomes the character of A preceding position C[i],
// or '$' when C[i] == 0 (the commented-out history above suggests this is a
// BWT-from-suffix-array step).
__global__ void test(char* A, char* B, int* C, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        B[i] = (C[i] == 0) ? '$' : A[C[i] - 1];
    }
}
// Builds the device-side text, output and index buffers, runs the kernel
// over "mississippi$", and prints the resulting characters.
int main() {
    const int n = 12;
    char A[] = "mississippi$";
    int C[] = { 11,10,7,4,1,0,9,8,6,3,5,2 };
    thrust::device_vector<char> d_A(A, A+n);
    thrust::device_vector<char> d_B(n);
    thrust::device_vector<int> d_C(C, C+n);
    const dim3 block(8, 1);
    const dim3 grid((n + block.x - 1) / block.x, 1);
    char* raw_A = thrust::raw_pointer_cast(&d_A[0]);
    char* raw_B = thrust::raw_pointer_cast(&d_B[0]);
    int*  raw_C = thrust::raw_pointer_cast(&d_C[0]);
    test<<<grid, block>>>(raw_A, raw_B, raw_C, n);
    // Copying back through thrust synchronizes with the kernel.
    thrust::host_vector<char> h_B = d_B;
    for (int i = 0; i < n; i++) {
        printf("%c ", h_B[i]);
    }
}
|
10,421 | #include <stdio.h>
#include <time.h>
// by lectures and "CUDA by Example" book
#define ind(i, j, cols) (i * cols + j)
// Dimensions of a row-major 2-D matrix.
struct dim2 {
    int rows;  // number of rows
    int cols;  // number of columns
};
// Device code: m3 = m1 * m2 for row-major int matrices. 2-D grid-stride
// loops let any launch configuration cover the full rows x cols result;
// inner_dim is the shared dimension (m1 cols == m2 rows).
__global__ void mult_matrices_kernel(int* m1, int* m2, int* m3, dim2 m3_dims, int inner_dim) {
    const int n_rows = m3_dims.rows;
    const int n_cols = m3_dims.cols;
    const int row0 = blockIdx.y * blockDim.y + threadIdx.y;
    const int col0 = blockIdx.x * blockDim.x + threadIdx.x;
    const int row_step = blockDim.y * gridDim.y;
    const int col_step = blockDim.x * gridDim.x;
    for (int r = row0; r < n_rows; r += row_step) {
        for (int c = col0; c < n_cols; c += col_step) {
            // Accumulate the dot product of row r of m1 and column c of m2.
            int acc = 0;
            for (int k = 0; k < inner_dim; k++) {
                acc += m1[ind(r, k, inner_dim)] * m2[ind(k, c, n_cols)];
            }
            m3[ind(r, c, n_cols)] = acc;
        }
    }
}
// Allocates a device matrix of the given dimensions, copies host_m into it,
// and returns the device pointer (caller frees with cudaFree).
int* cuda_copy_mat(int* host_m, dim2 m_dims) {
    const long bytes = m_dims.rows * m_dims.cols * sizeof(int);
    int* dev_m;
    cudaMalloc((void**) &dev_m, bytes);
    cudaMemcpy(dev_m, host_m, bytes, cudaMemcpyHostToDevice);
    return dev_m;
}
// Host code: copies the operands to the device, runs the multiplication
// kernel, copies m3 back, and returns the kernel time in seconds (measured
// with clock() around the launch + synchronize, i.e. host CPU time).
float mult_matrices_gpu(int* host_m1, dim2 m1_dims,
                        int* host_m2, dim2 m2_dims,
                        int* host_m3, dim2 m3_dims) {
    // Step 1: moving data on device
    int* dev_m1 = cuda_copy_mat(host_m1, m1_dims);
    // FIX: m2 was copied with m3_dims -- wrong allocation/copy size
    // whenever m2's shape differs from the result's.
    int* dev_m2 = cuda_copy_mat(host_m2, m2_dims);
    int* dev_m3 = cuda_copy_mat(host_m3, m3_dims);
    // Step 2: launch. The kernel uses grid-stride loops, so this fixed
    // configuration covers any result size.
    dim3 grid_dim(128, 128, 1);   // grid (of blocks) dimensions
    dim3 block_dim(32, 32, 1);    // block (of threads) dimensions
    clock_t start = clock();
    mult_matrices_kernel<<<grid_dim, block_dim>>>(dev_m1, dev_m2, dev_m3, m3_dims, m1_dims.cols);
    cudaDeviceSynchronize();      // launch is async; include execution in the timing
    clock_t end = clock();
    float time = (float)(end - start) / CLOCKS_PER_SEC;
    // Step 3: copy the result back and release device memory.
    cudaMemcpy(host_m3, dev_m3, m3_dims.rows * m3_dims.cols * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(dev_m1);
    cudaFree(dev_m2);
    cudaFree(dev_m3);
    return time;
}
// Reference CPU multiplication m3 = m1 * m2 (row-major int matrices).
// Returns the elapsed CPU time in seconds.
float mult_matrices_cpu(int* m1, dim2 m1_dims,
                        int* m2, dim2 m2_dims,
                        int* m3, dim2 m3_dims) {
    clock_t t_begin = clock();
    for (int r = 0; r < m1_dims.rows; r++) {
        for (int c = 0; c < m2_dims.cols; c++) {
            // Dot product of row r of m1 and column c of m2.
            int acc = 0;
            for (int k = 0; k < m1_dims.cols; k++) {
                acc += m1[ind(r, k, m1_dims.cols)] * m2[ind(k, c, m2_dims.cols)];
            }
            m3[ind(r, c, m3_dims.cols)] = acc;
        }
    }
    clock_t t_end = clock();
    return (float)(t_end - t_begin) / CLOCKS_PER_SEC;
}
// Allocates a rows x cols matrix (array representation) whose entry at
// linear index p equals k * (p + 1); k = 0 yields an all-zero matrix.
int* create_mat(dim2 dims, int k) {
    const int rows = dims.rows;
    const int cols = dims.cols;
    int* mat = (int*)malloc(rows * cols * sizeof(int));
    for (int r = 0; r < rows; r++) {
        for (int c = 0; c < cols; c++) {
            mat[ind(r, c, cols)] = k * (ind(r, c, cols) + 1);
        }
    }
    return mat;
}
// Benchmarks CPU vs GPU multiplication for square matrices of doubling size.
int main() {
    for (double dim = 1e2; dim <= 1e4; dim *= 2) {
        // first matrix
        int m1_rows = int(dim);
        int inner_dim = int(dim);
        struct dim2 m1_dims = {m1_rows, inner_dim};
        int* host_m1 = create_mat(m1_dims, 1);
        // second matrix
        int m2_cols = int(dim);
        struct dim2 m2_dims = {inner_dim, m2_cols};
        int* host_m2 = create_mat(m2_dims, 2);
        // result matrix
        struct dim2 m3_dims = {m1_dims.rows, m2_dims.cols};
        int* host_m3 = create_mat(m3_dims, 0);
        // multiplication
        float gpu_time = mult_matrices_gpu(host_m1, m1_dims,
                                           host_m2, m2_dims,
                                           host_m3, m3_dims);
        float cpu_time = mult_matrices_cpu(host_m1, m1_dims,
                                           host_m2, m2_dims,
                                           host_m3, m3_dims);
        printf("Matrix shapes: (%.1e, %.1e) and (%.1e, %.1e)\n",
               double(m1_dims.rows), double(m1_dims.cols),
               double(m2_dims.rows), double(m2_dims.cols));
        printf("CPU time: %.10f\n", cpu_time);
        printf("GPU time: %.10f\n", gpu_time);
        printf("CPU / GPU time: %.10f\n", cpu_time / gpu_time);
        printf("--------------------------\n");
        // FIX: free the host matrices each iteration -- previously three
        // dim x dim int buffers leaked on every pass of the loop.
        free(host_m1);
        free(host_m2);
        free(host_m3);
    }
    return 0;
}
10,422 | // Copyright 2020 Marcel Wagenländer
#include <iostream>
#define SIZE 256
// In-place element-wise addition a[i] += b[i] for i < N, one thread per
// element (single block: only threadIdx.x is used).
__global__ void add(float *a, float *b, int N) {
    int i = threadIdx.x;
    // FIX: the N parameter was accepted but never checked; guard so launches
    // with more threads than elements stay in bounds.
    if (i < N) {
        a[i] = a[i] + b[i];
    }
}
// Writes each of the N elements of a to stdout, one per line.
void print_array(float *a, int N) {
    for (int idx = 0; idx < N; ++idx) {
        std::cout << a[idx] << "\n";
    }
}
// Fills a[0..N) with value * index (a[0] = 0, a[1] = value, ...).
void init_array(float *a, int N, float value) {
    for (int idx = 0; idx < N; ++idx) {
        a[idx] = value * idx;
    }
}
// Unified-memory demo: computes a[i] = 1*i + 2*i for SIZE elements on the GPU
// and prints the result on the host.
int main() {
    float *a = NULL;
    float *b = NULL;
    cudaMallocManaged(&a, SIZE * sizeof(float));
    init_array(a, SIZE, 1);
    cudaMallocManaged(&b, SIZE * sizeof(float));
    init_array(b, SIZE, 2);
    add<<<1, SIZE>>>(a, b, SIZE);
    // Managed memory must not be read on the host until the kernel finishes.
    cudaDeviceSynchronize();
    print_array(a, SIZE);
    cudaFree(a);
    cudaFree(b);
    return 0;
}
|
10,423 |
#include <iostream>
#include <fstream>
#include <string>
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
// c = a + b element-wise, one thread per element.
__global__ void vector_add_kernel(const int* a, const int* b, int *c, int N){
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    // FIX: N was accepted but never checked; guard so grids larger than N
    // stay in bounds.
    if (idx < N) {
        c[idx] = a[idx] + b[idx];
    }
}
// Writes N lines of "a[i] b[i] c[i]" (space-separated) to file_name;
// reports on stdout if the file cannot be opened.
void print_to_file(const char* file_name, const int* a, const int *b, const int *c, int N){
    std::ofstream fout(file_name);
    if (!fout.is_open()) {
        std::cout << "Unable to open file\n";
        return;
    }
    for (int i = 0; i < N; i++) {
        fout << a[i] << " " << b[i] << " " << c[i] << "\n";
    }
    fout.close();
}
// Fills two random vectors, adds them on the GPU (32 blocks x 512 threads,
// exactly one thread per element), and logs inputs/outputs to text files.
int main(){
    const int N = 512 * 32;
    const size_t size = N * sizeof(int);
    int *a = new int[N];
    int *b = new int[N];
    int *c = new int[N];
    // Operands in [1, 10]; the result vector starts zeroed.
    for (int i = 0; i < N; i++) {
        a[i] = rand() % 10 + 1;
        b[i] = rand() % 10 + 1;
        c[i] = 0;
    }
    print_to_file("input.txt", a, b, c, N);
    int *dev_a, *dev_b, *dev_c;
    cudaMalloc((void **)&dev_a, size);
    cudaMalloc((void **)&dev_b, size);
    cudaMalloc((void **)&dev_c, size);
    cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_c, c, size, cudaMemcpyHostToDevice);
    const dim3 block(512, 1, 1);
    const dim3 grid(32, 1, 1);
    vector_add_kernel<<<grid, block>>> (dev_a, dev_b, dev_c, N);
    // The blocking copies below also synchronize with the kernel.
    cudaMemcpy(a, dev_a, size, cudaMemcpyDeviceToHost);
    cudaMemcpy(b, dev_b, size, cudaMemcpyDeviceToHost);
    cudaMemcpy(c, dev_c, size, cudaMemcpyDeviceToHost);
    print_to_file("output.txt", a, b, c, N);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    delete[] a;
    delete[] b;
    delete[] c;
    return 0;
}
10,424 | #include <iostream>
#include <fstream>
#include <string>
#include <stdio.h>
#include <stdlib.h>
using namespace std;
#include <iterator>
#include <algorithm>
#include <random>
#include <math.h>
// One thread per point of the 8192x8192 grid: accumulates the charge 1/r
// contributed by each of the `cantidad` ions (a point sitting exactly on an
// ion contributes 1 instead of dividing by zero).
__global__ void calcular_carga(float* iones_x, float* iones_y, float* cargas, int cantidad) {
    int tId = threadIdx.x + blockIdx.x * blockDim.x;
    if (tId < 8192*8192) {
        float px = tId % 8192;   // column of this grid point
        float py = tId / 8192;   // row of this grid point
        float acumulado = 0;
        for (int k = 0; k < cantidad; k++) {
            float dx2 = (px - iones_x[k]) * (px - iones_x[k]);
            float dy2 = (py - iones_y[k]) * (py - iones_y[k]);
            float r = sqrt(dx2 + dy2);
            acumulado += r != 0 ? 1.0 / r : 1;
        }
        cargas[tId] = acumulado;
    }
}
// One thread per row: finds that row's minimum charge in `cargas` and stores
// it as an [Q, x, y] triple in cargas_menores. (The ion arrays and `cantidad`
// are unused here; they are kept for a uniform kernel signature.)
__global__ void calcular_carga_fila(float* iones_x, float* iones_y, float* cargas, float*cargas_menores, int cantidad) {
    int tId = threadIdx.x + blockIdx.x * blockDim.x;
    if(tId < 8192) {
        float Q_menor = cargas[tId*8192];
        float y = tId;
        // FIX: x was uninitialized; when the row minimum is the first element
        // the strict `<` below never assigns it, and garbage was reported as
        // the minimum's column. The first element's column is 0.
        float x = 0;
        for (int i = tId*8192; i < tId*8192+8192; i++) {
            if(cargas[i] < Q_menor){
                Q_menor = cargas[i];
                x = i%8192;
            }
        }
        cargas_menores[tId*3] = Q_menor;
        cargas_menores[tId*3+1] = x;
        cargas_menores[tId*3+2] = y;
    }
}
// Single-thread kernel: scans the 8192 per-row [Q, x, y] minima and appends
// the coordinates of the global minimum as ion number `cantidad`.
__global__ void posicionar_ion(float* iones_x, float* iones_y, float*cargas_menores, int cantidad) {
    int tId = threadIdx.x + blockIdx.x * blockDim.x;
    if (tId < 1) {
        float mejor_Q = cargas_menores[0];
        float mejor_x = cargas_menores[1];
        float mejor_y = cargas_menores[2];
        // Triples are laid out flat: [Q0, x0, y0, Q1, x1, y1, ...].
        for (int j = 3; j < 8192 * 3; j += 3) {
            if (cargas_menores[j] < mejor_Q) {
                mejor_Q = cargas_menores[j];
                mejor_x = cargas_menores[j + 1];
                mejor_y = cargas_menores[j + 2];
            }
        }
        iones_x[cantidad] = mejor_x;
        iones_y[cantidad] = mejor_y;
    }
}
// Loads 5000 ion positions from "dataset", then iteratively places 10 more
// ions (indices 5000..5009), each at the point of minimum accumulated charge
// on the 8192x8192 grid, printing every placement and the total time.
int main(int argc, char const *argv[])
{
    float *gpu_cargas, *cargas_menores, *gpu_iones_x, *gpu_iones_y, *iones_x, *iones_y;
    cudaEvent_t ct1, ct2;
    float dt;
    int cantidad;
    iones_x = new float[6000];
    iones_y = new float[6000];
    int block_size = 256;
    int grid_size = (int) ceil( (float) 8192*8192 / block_size);  // one thread per grid point
    int grid_size_b = (int) ceil( (float) 8192 / block_size);     // one thread per row
    int grid_size_c = (int) ceil( (float) 1 / block_size);        // single worker thread
    FILE *in = fopen("dataset", "r");
    if (in == NULL) {  // FIX: fscanf on a NULL stream crashed when the file was missing
        cout << "Could not open file: dataset" << endl;
        delete[] iones_x;
        delete[] iones_y;
        return 1;
    }
    for (int i = 0; i < 5000; i++)
    {
        fscanf(in, "%f %f", &iones_x[i], &iones_y[i]);
    }
    fclose(in);  // FIX: the input file was never closed
    cudaMalloc(&gpu_iones_x, sizeof(float) * 6000);
    cudaMalloc(&gpu_iones_y, sizeof(float) * 6000);
    cudaMalloc(&gpu_cargas, sizeof(float) * 8192 * 8192);
    cudaMalloc(&cargas_menores, sizeof(float) * 8192*3);
    cudaMemcpy(gpu_iones_x, iones_x ,sizeof(float) * 6000, cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_iones_y, iones_y ,sizeof(float) * 6000, cudaMemcpyHostToDevice);
    cudaEventCreate(&ct1);
    cudaEventCreate(&ct2);
    cudaEventRecord(ct1);
    for (cantidad = 5000; cantidad < 5010; cantidad++)
    {
        // 1) charge field from the current ions, 2) per-row minima,
        // 3) global minimum becomes the next ion.
        calcular_carga<<<grid_size, block_size>>>(gpu_iones_x, gpu_iones_y, gpu_cargas, cantidad);
        cudaDeviceSynchronize();
        calcular_carga_fila<<<grid_size_b, block_size>>>(gpu_iones_x, gpu_iones_y, gpu_cargas, cargas_menores, cantidad);
        cudaDeviceSynchronize();
        posicionar_ion<<<grid_size_c, block_size>>>(gpu_iones_x, gpu_iones_y, cargas_menores, cantidad);
        cudaDeviceSynchronize();
        cudaMemcpy(iones_x, gpu_iones_x,sizeof(float) * 6000, cudaMemcpyDeviceToHost);
        cudaMemcpy(iones_y, gpu_iones_y,sizeof(float) * 6000, cudaMemcpyDeviceToHost);
        cout << iones_x[cantidad] << " " << iones_y[cantidad] << endl;
    }
    cudaEventRecord(ct2);
    cudaEventSynchronize(ct2);
    cudaEventElapsedTime(&dt, ct1, ct2);
    cout << "Tiempo: " << dt << "[ms]" << '\n';
    // FIX: gpu_iones_x / gpu_iones_y were previously cudaFree'd twice.
    cudaFree(gpu_iones_x);
    cudaFree(gpu_iones_y);
    cudaFree(gpu_cargas);
    cudaFree(cargas_menores);
    // FIX: new[] must be paired with delete[] (was plain delete).
    delete[] iones_x;
    delete[] iones_y;
    return 0;
}
10,425 | #include <stdio.h>
// c = a + b, one thread per element. There is no length parameter, so the
// launch must supply exactly one thread per array element.
__global__ void add( int* a,int* b, int*c )
{
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    c[gid] = a[gid] + b[gid];
}
// Adds two small integer vectors on the GPU (one block of 32 threads, one
// thread per element) and prints the result.
int main()
{
    const int thread_per_block = 32; //Max 512 per block
    const int number_of_block = 1; //Max 65,535 per grid
    const int size = thread_per_block * number_of_block;
    // Host buffers: a[i] = b[i] = i, c zeroed.
    int *host_a = (int*)malloc(sizeof(int) * size);
    int *host_b = (int*)malloc(sizeof(int) * size);
    int *host_c = (int*)malloc(sizeof(int) * size);
    for (int i = 0; i < size; i++)
    {
        host_a[i] = i;
        host_b[i] = i;
        host_c[i] = 0;
    }
    int *device_a;
    int *device_b;
    int *device_c;
    cudaMalloc((void**)&device_a, size * sizeof(int));
    cudaMalloc((void**)&device_b, size * sizeof(int));
    cudaMalloc((void**)&device_c, size * sizeof(int));
    cudaMemcpy(device_a, host_a, size * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(device_b, host_b, size * sizeof(int), cudaMemcpyHostToDevice);
    add<<<number_of_block,thread_per_block >>>(device_a,device_b,device_c);
    // The blocking copy also synchronizes with the kernel.
    cudaMemcpy(host_c, device_c, size * sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < size; i++)
    {
        printf("ARRAY[%d] = %d\n",i,host_c[i]);
    }
    cudaFree(device_a);
    cudaFree(device_b);
    cudaFree(device_c);
    free(host_a);
    free(host_b);
    free(host_c);
    return 0;
}
|
10,426 | #include <cuda.h>
#include <stdio.h>
#include <sys/time.h>
// Wall-clock time in seconds (microsecond resolution) via gettimeofday.
double wtime()
{
	struct timeval now;
	gettimeofday(&now, NULL);
	return (double)now.tv_sec + (double)now.tv_usec * 1E-6;
}
// Fills a[] with each element's own global thread index, as a float.
__global__ void gTest(float* a)
{
	const int gid = threadIdx.x + blockDim.x * blockIdx.x;
	a[gid] = (float)gid;
}
// Smoke test / warm-up: fills a small device array with thread indices and
// prints all 64 values on the host.
void firstStart()
{
	const int num_of_blocks = 2, threads_per_block = 32;
	const int N = num_of_blocks * threads_per_block;
	float* ha = (float*) calloc(N, sizeof(float));
	float* da;
	cudaMalloc((void**) &da, N * sizeof(float));
	gTest<<<dim3(num_of_blocks), dim3(threads_per_block)>>>(da);
	cudaDeviceSynchronize();
	cudaMemcpy(ha, da, N * sizeof(float), cudaMemcpyDeviceToHost);
	for(int i = 0; i < N; i++)
		printf("%g\n", ha[i]);
	free(ha);
	cudaFree(da);
}
// c = a + b, one thread per element; the launch must cover the arrays exactly.
__global__ void gSGEVV(float* a, float* b, float* c)
{
	const int gid = threadIdx.x + blockDim.x * blockIdx.x;
	c[gid] = a[gid] + b[gid];
}
// Sets each covered element of a to x, one thread per element.
__global__ void gInitArray(float* a, float x)
{
	const int gid = threadIdx.x + blockDim.x * blockIdx.x;
	a[gid] = x;
}
// c = a + b for n elements using a stride-N loop, so a launch of N total
// threads can process arrays of arbitrary length n.
__global__ void gSGEVV_iter(float* a, float* b, float* c, int n, int N)
{
	int i = threadIdx.x + blockDim.x * blockIdx.x;
	while (i < n) {
		c[i] = a[i] + b[i];
		i += N;
	}
}
// Sets all n elements of a to x using a stride-N loop (N = total threads
// in the launch), so the grid size is independent of n.
__global__ void gInitArray_iter(float* a, float x, int n, int N)
{
	int i = threadIdx.x + blockDim.x * blockIdx.x;
	while (i < n) {
		a[i] = x;
		i += N;
	}
}
// Checks a CUDA runtime call and aborts with a diagnostic on failure.
// FIX: wrapped in do { } while (0) so the macro behaves as a single
// statement (safe in unbraced if/else bodies); expansion is otherwise
// unchanged and every existing call site already ends with ';'.
#define CUDA_CHECK_RETURN(value)\
do {\
	cudaError_t _m_cudaStat = value;\
	if (_m_cudaStat != cudaSuccess) {\
		fprintf(stderr, "Error %s at line %d in file %s\n",\
				cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__);\
		exit(1);\
	}\
} while (0)
// Vector-addition benchmark driver. argv[1] = number of blocks (default
// 1024); argv[2] == 1 prints the result vector; argv[3] selects the vector
// size exponent in the (currently disabled) sweeps. The large #if 0
// sections preserve earlier experiments; only the #if 1 path runs.
int main(int argc, char *argv[])
{
	float *a, *b, *c, *d;   // a, b, c are device arrays; d is the host copy of c
	int num_of_blocks = (argc > 1) ? atoi(argv[1]) : 1024;
	// int threads_per_block = (argc > 2) ? atoi(argv[2]) : 32;
	int threads_per_block = 1;
	int N = num_of_blocks * threads_per_block;  // total threads per launch
	int size_array;
	double time;            // wall-clock timing via wtime()
	float elapsedTime;      // event-based timing, in milliseconds
	cudaEvent_t stop, start;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
#if 0
	// Earlier experiment: sweep block/thread combinations with the
	// non-strided kernels (one thread per element).
	size_array = N;
	cudaMalloc((void**) &a, size_array * sizeof(float));
	cudaMalloc((void**) &b, size_array * sizeof(float));
	cudaMalloc((void**) &c, size_array * sizeof(float));
	d = (float*) calloc(size_array, sizeof(float));
	for (int i = num_of_blocks; threads_per_block <= 128; i /= 2, threads_per_block *= 2) {
		gInitArray<<<dim3(i), dim3(threads_per_block)>>>(a, 1.0);
		gInitArray<<<dim3(i), dim3(threads_per_block)>>>(b, 2.0);
		time = wtime();
		gSGEVV<<<dim3(i), dim3(threads_per_block)>>>(a, b, c);
		cudaDeviceSynchronize();
		time = wtime() - time;
		cudaMemcpy(d, c, size_array * sizeof(float), cudaMemcpyDeviceToHost);
		if (argc > 2 && atoi(argv[2]) == 1) {
			for (int j = 0; j < N; j++)
				printf("%g ", d[j]);
			printf("\n");
		}
		printf("Blocks: %d,\tThreads: %d,\t", i, threads_per_block);
		printf("Time: %.8f sec.\n", time);
		gInitArray<<<dim3(i), dim3(threads_per_block)>>>(c, 0.0);
	}
	free(d);
	cudaFree(a);
	cudaFree(b);
	cudaFree(c);
#endif
#if 1
	// Active path: a single timed launch of the strided kernels on a
	// 2^10-element vector.
	int rank = (argc > 3) ? atoi(argv[3]) : 10;
	// size_array = 1 << rank;
	size_array = 1 << 10;
	CUDA_CHECK_RETURN(cudaMalloc((void**) &a, size_array * sizeof(float)));
	CUDA_CHECK_RETURN(cudaMalloc((void**) &b, size_array * sizeof(float)));
	CUDA_CHECK_RETURN(cudaMalloc((void**) &c, size_array * sizeof(float)));
	d = (float*) calloc(size_array, sizeof(float));
	printf("Size vector: %d(10^%d)\n", size_array, rank);
	// int blocks = num_of_blocks;
	// int threads = threads_per_block;
	int blocks = 1;
	// NOTE(review): 1025 exceeds the 1024 threads-per-block hardware limit,
	// so the launches below fail with an invalid-configuration error that
	// CUDA_CHECK_RETURN(cudaGetLastError()) turns into exit(1) --
	// presumably a deliberate experiment; confirm before reuse.
	int threads = 1025;
	// for (; threads <= 128; blocks /= 2, threads *= 2) {
	gInitArray_iter<<<dim3(blocks), dim3(threads)>>>(a, 1.0, size_array, N);
	gInitArray_iter<<<dim3(blocks), dim3(threads)>>>(b, 2.0, size_array, N);
	time = wtime();
	cudaEventRecord(start, 0);
	gSGEVV_iter<<<dim3(blocks), dim3(threads)>>>(a, b, c, size_array, N);
	CUDA_CHECK_RETURN(cudaDeviceSynchronize());
	CUDA_CHECK_RETURN(cudaGetLastError());
	time = wtime() - time;
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&elapsedTime, start, stop);
	CUDA_CHECK_RETURN(cudaMemcpy(d, c, size_array * sizeof(float), cudaMemcpyDeviceToHost));
	if (argc > 2 && atoi(argv[2]) == 1) {
		for (int j = 0; j < size_array; j++)
			printf("%g ", d[j]);
		printf("\n");
	}
	printf("Blocks: %d\tThreads: %d\t", blocks, threads);
	// printf("Time: %.8f sec.\t", time);
	// NOTE(review): elapsedTime is reported by cudaEventElapsedTime in
	// milliseconds, although the format string says "sec.".
	printf("Time(2): %.8f sec.\n", elapsedTime);
	gInitArray_iter<<<dim3(blocks), dim3(threads)>>>(c, 0.0, size_array, N);
	// }
	free(d);
	cudaFree(a);
	cudaFree(b);
	cudaFree(c);
#endif
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
#if 0
	// Disabled: full size/configuration sweep writing per-configuration
	// timings into one file per block/thread combination.
	FILE *out_1024_1 = fopen("1024_1.txt", "w");
	FILE *out_512_2 = fopen("512_2.txt", "w");
	FILE *out_128_8 = fopen("128_8.txt", "w");
	FILE *out_32_32 = fopen("32_32.txt", "w");
	FILE *out_8_128 = fopen("8_128.txt", "w");
	for (int rank = 10; rank <= 23; rank++) {
		size_array = 1 << rank;
		cudaMalloc((void**) &a, size_array * sizeof(float));
		cudaMalloc((void**) &b, size_array * sizeof(float));
		cudaMalloc((void**) &c, size_array * sizeof(float));
		d = (float*) calloc(size_array, sizeof(float));
		printf("Size vector: %d(10^%d)\n", size_array, rank);
		int blocks = num_of_blocks;
		int threads = threads_per_block;
		for (; threads <= 128; blocks /= 2, threads *= 2) {
			gInitArray_iter<<<dim3(blocks), dim3(threads)>>>(a, 1.0, size_array, N);
			gInitArray_iter<<<dim3(blocks), dim3(threads)>>>(b, 2.0, size_array, N);
			time = wtime();
			gSGEVV_iter<<<dim3(blocks), dim3(threads)>>>(a, b, c, size_array, N);
			cudaDeviceSynchronize();
			time = wtime() - time;
			cudaMemcpy(d, c, size_array * sizeof(float), cudaMemcpyDeviceToHost);
			if (argc > 2 && atoi(argv[2]) == 1) {
				for (int j = 0; j < size_array; j++)
					printf("%g ", d[j]);
				printf("\n");
			}
			printf("Blocks: %d\tThreads: %d\t", blocks, threads);
			printf("Time: %.8f sec.\n", time);
			switch (threads) {
			case 1:
				fprintf(out_1024_1, "%d %.8f\n", size_array, time);
				break;
			case 2:
				fprintf(out_512_2, "%d %.8f\n", size_array, time);
				break;
			case 8:
				fprintf(out_128_8, "%d %.8f\n", size_array, time);
				break;
			case 32:
				fprintf(out_32_32, "%d %.8f\n", size_array, time);
				break;
			case 128:
				fprintf(out_8_128, "%d %.8f\n", size_array, time);
				break;
			default:
				break;
			}
			gInitArray_iter<<<dim3(blocks), dim3(threads)>>>(c, 0.0, size_array, N);
		}
		free(d);
		cudaFree(a);
		cudaFree(b);
		cudaFree(c);
	}
	fclose(out_1024_1);
	fclose(out_512_2);
	fclose(out_128_8);
	fclose(out_32_32);
	fclose(out_8_128);
#endif
#if 0
	// Disabled: single non-strided launch.
	time = wtime();
	gSGEVV<<<dim3(num_of_blocks), dim3(threads_per_block)>>>(a, b, c);
	cudaDeviceSynchronize();
	time = wtime() - time;
	cudaMemcpy(d, c, size_array * sizeof(float), cudaMemcpyDeviceToHost);
	for (int i = 0; i < N; i++)
		printf("%g\n", d[i]);
	printf("Blocks: %d, Threads: %d, ", num_of_blocks, threads_per_block);
	printf("Time: %.6f sec.\n", time);
#endif
#if 0
	// Disabled: warm-up demo.
	firstStart();
	free(d);
	cudaFree(a);
	cudaFree(b);
	cudaFree(c);
#endif
	return 0;
}
|
10,427 | #include "includes.h"
__device__ void recover_results(short *results, const int search_depth, const int total_dl_matrix_row_num) {
    // Strided sweep over the rows: each thread clears every entry it owns
    // whose recorded depth equals the depth currently being undone.
    for (int row = threadIdx.x; row < total_dl_matrix_row_num; row += blockDim.x) {
        if (results[row] == search_depth)
            results[row] = 0;
    }
}
__global__ void recover_results(int *results, const int search_depth, const int total_dl_matrix_row_num) {
    // Strided sweep over the rows: each thread clears every entry it owns
    // whose recorded depth equals the depth currently being undone.
    for (int row = threadIdx.x; row < total_dl_matrix_row_num; row += blockDim.x) {
        if (results[row] == search_depth)
            results[row] = 0;
    }
} |
10,428 | #define t_max 1
#define t 1
/*
(ux[0][0][0][0][1]=(alpha*(u[1][0][0][0][0]+u[-1][0][0][0][0])))
(uy[0][0][0][0][2]=(beta*(u[0][1][0][0][0]+u[0][-1][0][0][0])))
(uz[0][0][0][0][3]=(gamma*(u[0][0][1][0][0]+u[0][0][-1][0][0])))
*/
/*
 * Auto-generated stencil kernel: computes the three gradient components
 *   ux = alpha * (u[x+1] + u[x-1]),  uy = beta * (u[y+1] + u[y-1]),
 *   uz = gamma * (u[z+1] + u[z-1])
 * over a padded input volume u_0_0 (padding derived from t; here t == 1).
 * Each thread handles a cbx-long run of x for one (y, z) point; the grid's
 * y dimension is folded to carry both the y and z block coordinates.
 * NOTE(review): all *_idxN expressions are machine-derived linearizations of
 * the multi-dim indices shown in the adjacent comments — do not hand-edit.
 * The *_out parameters appear unused in this body — presumably retained for
 * the generator's double-buffering interface; verify against callers.
 */
__global__ void gradient(float * * ux_1_0_out, float * * uy_2_0_out, float * * uz_3_0_out, float * u_0_0, float * ux_1_0, float * uy_2_0, float * uz_3_0, float alpha, float beta, float gamma, int x_max, int y_max, int z_max, int cbx)
{
/*
float * const u__u_0[16] = { u_0_0 } ;
float * const u__ux_1[16] = { ux_1_0 } ;
float * const u__uy_2[16] = { uy_2_0 } ;
float * const u__uz_3[16] = { uz_3_0 } ;
*/
int _idx0;
int _idx1;
int _idx2;
int _idx3;
int _idx4;
int _idx5;
int _idx6;
int idx_1_2;
int pt_idx_x;
int pt_idx_y;
int pt_idx_z;
int size_1_1;
int size_1_2;
//int t;
int tmp;
int v_idx_x;
int v_idx_x_max;
int v_idx_y;
int v_idx_y_max;
int v_idx_z;
int v_idx_z_max;
/*
Initializations
*/
// blockIdx.y encodes (y-block, z-block); unfold it into tmp / idx_1_2.
size_1_1=(y_max/blockDim.y);
size_1_2=(z_max/blockDim.z);
idx_1_2=(blockIdx.y/size_1_2);
tmp=(blockIdx.y-(idx_1_2*size_1_2));
// Each thread owns cbx consecutive x positions starting at v_idx_x.
v_idx_x=(cbx*(threadIdx.x+(blockDim.x*blockIdx.x)));
v_idx_x_max=(v_idx_x+cbx);
v_idx_y=(threadIdx.y+(tmp*blockDim.y));
v_idx_y_max=(v_idx_y+1);
v_idx_z=(threadIdx.z+(idx_1_2*blockDim.z));
v_idx_z_max=(v_idx_z+1);
/*
Implementation
*/
/*
for t = 1..t_max by 1 parallel 1 <level 0> schedule { ... }
*/
//for (t=1; t<=t_max; t+=1)
{
/* Index bounds calculations for iterators in v[t=t, s=(cbx, 1, 1)][0] */
/*
for POINT pt[t=t, s=(1, 1, 1)][0] of size [1, 1, 1] in v[t=t, s=(:, :, :)][0] parallel 1 <level 1> schedule default { ... }
*/
{
/* Index bounds calculations for iterators in pt[t=t, s=(1, 1, 1)][0] */
pt_idx_z=v_idx_z;
pt_idx_y=v_idx_y;
for (pt_idx_x=v_idx_x; pt_idx_x<(v_idx_x_max-0); pt_idx_x+=1)
{
/* Index bounds calculations for iterators in pt[t=t, s=(1, 1, 1)][0] */
/*
v[t=(t+1), s=pt[t=?, s=?][0]][0]=stencil(v[t=t, s=pt[t=?, s=?][0]][0])
*/
/* _idx0 = (((((((((pt_idx_z+1)*x_max)+(((2*pt_idx_z)+2)*t))*y_max)+((((((2*pt_idx_z)+2)*t)+pt_idx_y)+1)*x_max))+(((4*pt_idx_z)+4)*(t*t)))+(((2*pt_idx_y)+2)*t))+pt_idx_x)+2) */
_idx0=(((((((((pt_idx_z+1)*x_max)+(((2*pt_idx_z)+2)*t))*y_max)+((((((2*pt_idx_z)+2)*t)+pt_idx_y)+1)*x_max))+(((4*pt_idx_z)+4)*(t*t)))+(((2*pt_idx_y)+2)*t))+pt_idx_x)+2);
/* _idx1 = ((((((((pt_idx_z+1)*x_max)+(((2*pt_idx_z)+2)*t))*y_max)+((((((2*pt_idx_z)+2)*t)+pt_idx_y)+1)*x_max))+(((4*pt_idx_z)+4)*(t*t)))+(((2*pt_idx_y)+2)*t))+pt_idx_x) */
_idx1=(_idx0-2);
/* _idx2 = ((((pt_idx_z*x_max)*y_max)+(pt_idx_y*x_max))+pt_idx_x) */
_idx2=((((_idx1+(((((-2*pt_idx_z)-2)*t)-x_max)*y_max))+(((((-2*pt_idx_z)-2)*t)-1)*x_max))+(((-4*pt_idx_z)-4)*(t*t)))+(((-2*pt_idx_y)-2)*t));
// x-derivative: neighbors at x+1 (_idx0) and x-1 (_idx1) in padded input.
ux_1_0[_idx2]=(alpha*(u_0_0[_idx0]+u_0_0[_idx1]));
/* _idx3 = (((((((((pt_idx_z+1)*x_max)+(((2*pt_idx_z)+2)*t))*y_max)+((((((2*pt_idx_z)+2)*t)+pt_idx_y)+2)*x_max))+(((4*pt_idx_z)+4)*(t*t)))+(((2*pt_idx_y)+4)*t))+pt_idx_x)+1) */
_idx3=(((_idx1+x_max)+(2*t))+1);
/* _idx4 = (((((((((pt_idx_z+1)*x_max)+(((2*pt_idx_z)+2)*t))*y_max)+(((((2*pt_idx_z)+2)*t)+pt_idx_y)*x_max))+(((4*pt_idx_z)+4)*(t*t)))+((2*pt_idx_y)*t))+pt_idx_x)+1) */
_idx4=((_idx3-(2*x_max))-(4*t));
// y-derivative: neighbors at y+1 (_idx3) and y-1 (_idx4).
uy_2_0[_idx2]=(beta*(u_0_0[_idx3]+u_0_0[_idx4]));
/* _idx5 = (((((((((pt_idx_z+2)*x_max)+(((2*pt_idx_z)+4)*t))*y_max)+((((((2*pt_idx_z)+4)*t)+pt_idx_y)+1)*x_max))+(((4*pt_idx_z)+8)*(t*t)))+(((2*pt_idx_y)+2)*t))+pt_idx_x)+1) */
_idx5=((((_idx3+((x_max+(2*t))*y_max))+(((2*t)-1)*x_max))+(4*(t*t)))-(2*t));
/* _idx6 = ((((((((pt_idx_z*x_max)+((2*pt_idx_z)*t))*y_max)+(((((2*pt_idx_z)*t)+pt_idx_y)+1)*x_max))+((4*pt_idx_z)*(t*t)))+(((2*pt_idx_y)+2)*t))+pt_idx_x)+1) */
_idx6=((((_idx1+((( - x_max)-(2*t))*y_max))-((2*t)*x_max))-(4*(t*t)))+1);
// z-derivative: neighbors at z+1 (_idx5) and z-1 (_idx6).
uz_3_0[_idx2]=(gamma*(u_0_0[_idx5]+u_0_0[_idx6]));
}
}
}
}
/*
 * Auto-generated companion of gradient(): seeds the padded input volume
 * u_0_0 with 0.1 at a point and its six axis neighbors, and the three
 * output volumes with 0.2 / 0.3 / 0.4 at the unpadded point _idx2.
 * Same thread/grid decomposition as gradient(): one thread covers a
 * cbx-long x run; blockIdx.y is folded to carry both y and z block indices.
 * NOTE(review): all *_idxN expressions are machine-derived — do not hand-edit.
 */
__global__ void initialize(float * u_0_0, float * ux_1_0, float * uy_2_0, float * uz_3_0, float alpha, float beta, float gamma, int x_max, int y_max, int z_max, int cbx)
{
float * const u__u_0[16] = { u_0_0 } ;
float * const u__ux_1[16] = { ux_1_0 } ;
float * const u__uy_2[16] = { uy_2_0 } ;
float * const u__uz_3[16] = { uz_3_0 } ;
int _idx0;
int _idx1;
int _idx2;
int _idx3;
int _idx4;
int _idx5;
int _idx6;
int idx_1_2;
int pt_idx_x;
int pt_idx_y;
int pt_idx_z;
int size_1_1;
int size_1_2;
//int t;
int tmp;
int v_idx_x;
int v_idx_x_max;
int v_idx_y;
int v_idx_y_max;
int v_idx_z;
int v_idx_z_max;
/*
Initializations
*/
// blockIdx.y encodes (y-block, z-block); unfold it into tmp / idx_1_2.
size_1_1=(y_max/blockDim.y);
size_1_2=(z_max/blockDim.z);
idx_1_2=(blockIdx.y/size_1_2);
tmp=(blockIdx.y-(idx_1_2*size_1_2));
// Each thread owns cbx consecutive x positions starting at v_idx_x.
v_idx_x=(cbx*(threadIdx.x+(blockDim.x*blockIdx.x)));
v_idx_x_max=(v_idx_x+cbx);
v_idx_y=(threadIdx.y+(tmp*blockDim.y));
v_idx_y_max=(v_idx_y+1);
v_idx_z=(threadIdx.z+(idx_1_2*blockDim.z));
v_idx_z_max=(v_idx_z+1);
/*
Implementation
*/
/*
for t = 1..t_max by 1 parallel 1 <level 0> schedule { ... }
*/
//for (t=1; t<=t_max; t+=1)
{
/* Index bounds calculations for iterators in v[t=t, s=(cbx, 1, 1)][0] */
/*
for POINT pt[t=t, s=(1, 1, 1)][0] of size [1, 1, 1] in v[t=t, s=(:, :, :)][0] parallel 1 <level 1> schedule default { ... }
*/
{
/* Index bounds calculations for iterators in pt[t=t, s=(1, 1, 1)][0] */
pt_idx_z=v_idx_z;
pt_idx_y=v_idx_y;
for (pt_idx_x=v_idx_x; pt_idx_x<(v_idx_x_max-0); pt_idx_x+=1)
{
/* Index bounds calculations for iterators in pt[t=t, s=(1, 1, 1)][0] */
/*
v[t=(t+1), s=pt[t=?, s=?][0]][0]=stencil(v[t=t, s=pt[t=?, s=?][0]][0])
*/
/* _idx0 = ((((((((pt_idx_z+1)*x_max)+(((2*pt_idx_z)+2)*t))*y_max)+((((((2*pt_idx_z)+2)*t)+pt_idx_y)+1)*x_max))+(((4*pt_idx_z)+4)*(t*t)))+(((2*pt_idx_y)+2)*t))+pt_idx_x) */
_idx0=((((((((pt_idx_z+1)*x_max)+(((2*pt_idx_z)+2)*t))*y_max)+((((((2*pt_idx_z)+2)*t)+pt_idx_y)+1)*x_max))+(((4*pt_idx_z)+4)*(t*t)))+(((2*pt_idx_y)+2)*t))+pt_idx_x);
// Seed the point itself and its x+1 neighbor in the padded volume.
u_0_0[_idx0]=0.1;
/* _idx1 = (((((((((pt_idx_z+1)*x_max)+(((2*pt_idx_z)+2)*t))*y_max)+((((((2*pt_idx_z)+2)*t)+pt_idx_y)+1)*x_max))+(((4*pt_idx_z)+4)*(t*t)))+(((2*pt_idx_y)+2)*t))+pt_idx_x)+2) */
_idx1=(_idx0+2);
u_0_0[_idx1]=0.1;
/* _idx2 = ((((pt_idx_z*x_max)*y_max)+(pt_idx_y*x_max))+pt_idx_x) */
_idx2=((((_idx0+(((((-2*pt_idx_z)-2)*t)-x_max)*y_max))+(((((-2*pt_idx_z)-2)*t)-1)*x_max))+(((-4*pt_idx_z)-4)*(t*t)))+(((-2*pt_idx_y)-2)*t));
// _idx2 is the unpadded output position.
ux_1_0[_idx2]=0.2;
/* _idx3 = (((((((((pt_idx_z+1)*x_max)+(((2*pt_idx_z)+2)*t))*y_max)+(((((2*pt_idx_z)+2)*t)+pt_idx_y)*x_max))+(((4*pt_idx_z)+4)*(t*t)))+((2*pt_idx_y)*t))+pt_idx_x)+1) */
_idx3=(((_idx0-x_max)-(2*t))+1);
u_0_0[_idx3]=0.1;
/* _idx4 = (((((((((pt_idx_z+1)*x_max)+(((2*pt_idx_z)+2)*t))*y_max)+((((((2*pt_idx_z)+2)*t)+pt_idx_y)+2)*x_max))+(((4*pt_idx_z)+4)*(t*t)))+(((2*pt_idx_y)+4)*t))+pt_idx_x)+1) */
_idx4=((_idx3+(2*x_max))+(4*t));
u_0_0[_idx4]=0.1;
uy_2_0[_idx2]=0.30000000000000004;
/* _idx5 = ((((((((pt_idx_z*x_max)+((2*pt_idx_z)*t))*y_max)+(((((2*pt_idx_z)*t)+pt_idx_y)+1)*x_max))+((4*pt_idx_z)*(t*t)))+(((2*pt_idx_y)+2)*t))+pt_idx_x)+1) */
_idx5=((((_idx0+((( - x_max)-(2*t))*y_max))-((2*t)*x_max))-(4*(t*t)))+1);
u_0_0[_idx5]=0.1;
/* _idx6 = (((((((((pt_idx_z+2)*x_max)+(((2*pt_idx_z)+4)*t))*y_max)+((((((2*pt_idx_z)+4)*t)+pt_idx_y)+1)*x_max))+(((4*pt_idx_z)+8)*(t*t)))+(((2*pt_idx_y)+2)*t))+pt_idx_x)+1) */
_idx6=((((_idx4+((x_max+(2*t))*y_max))+(((2*t)-1)*x_max))+(4*(t*t)))-(2*t));
u_0_0[_idx6]=0.1;
uz_3_0[_idx2]=0.4;
}
}
}
}
|
10,429 | __global__ void actiune_thread(float* a_d, float* b_d,float *r_d,int N);
// Kernelul ce se executa pe device-ul CUDA
// Kernel: writes r_d[i] = a_d[i] + 1 for each of the N elements.
// b_d is accepted for interface compatibility but unused here.
__global__ void actiune_thread(float* a_d, float* b_d, float *r_d, int N)
{
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	// Fix: the bounds check was commented out. Grids rarely divide N
	// exactly, so threads past the end were reading/writing out of range.
	if (idx < N)
		r_d[idx] = a_d[idx] + 1.f;
}
extern "C"
cudaError_t launch_actiune_thread(float* a_d, float* b_d,float *r_d,int N,dim3 DIM_GRID, dim3 DIM_BLOCK)
{
	// Launch the kernel with the caller-chosen configuration and surface
	// any launch-time error to the caller.
	actiune_thread<<<DIM_GRID, DIM_BLOCK>>>(a_d, b_d, r_d, N);
	cudaError_t status = cudaGetLastError();
	return status;
} |
10,430 | #include <cuda_runtime.h>
#include <cuda.h>
#include <iostream>
#include <assert.h>
//#include <ctime>
using namespace std;
#define WIDTH 5
#define MSIZE 25
#define numBlocks 1
dim3 threadsPerBlock(WIDTH, WIDTH);
__global__ void matAdd(float* Ad, float *Bd, float *Pd)
{
	// One thread per element of a WIDTH x WIDTH matrix; single block,
	// so threadIdx alone addresses the element.
	int idx = threadIdx.y * WIDTH + threadIdx.x;
	Pd[idx] = Ad[idx] + Bd[idx];
}
__global__ void matSub(float* Ad, float *Bd, float *Pd)
{
	// Element-wise difference of two WIDTH x WIDTH matrices (single block).
	int idx = threadIdx.y * WIDTH + threadIdx.x;
	Pd[idx] = Ad[idx] - Bd[idx];
}
__global__ void matMul(float* Ad, float *Bd, float *Pd)
{
	// Naive matrix product: each thread computes one output element
	// as the dot product of a row of Ad with a column of Bd.
	int col = threadIdx.x;
	int row = threadIdx.y;
	float acc = 0.0f;
	for (int k = 0; k < WIDTH; ++k)
		acc += Ad[row * WIDTH + k] * Bd[k * WIDTH + col];
	Pd[row * WIDTH + col] = acc;
}
// CPU reference: element-wise sum of two WIDTH x WIDTH matrices.
void matSerialAdd(float *A, float *B, float *P)
{
	for (int i = 0; i < WIDTH * WIDTH; i++)
		P[i] = A[i] + B[i];
}
// CPU reference: element-wise difference of two WIDTH x WIDTH matrices.
void matSerialSub(float *A, float *B, float *P)
{
	for (int i = 0; i < WIDTH * WIDTH; i++)
		P[i] = A[i] - B[i];
}
// CPU reference: naive WIDTH x WIDTH matrix product, P = A * B.
void matSerialMul(float *A, float *B, float *P)
{
	for (int row = 0; row < WIDTH; row++)
	{
		for (int col = 0; col < WIDTH; col++)
		{
			float acc = 0.0f;
			for (int k = 0; k < WIDTH; k++)
				acc += A[row * WIDTH + k] * B[k * WIDTH + col];
			P[row * WIDTH + col] = acc;
		}
	}
}
// Driver: runs add / sub / mul on the GPU and validates each result
// against a CPU reference computed with the matSerial* functions.
int main()
{
	float *A = new float[MSIZE];
	float *B = new float[MSIZE];
	float *P = new float[MSIZE];
	float *serialP = new float[MSIZE];
	for (int i = 0; i < MSIZE; i++)
	{
		A[i] = i;
		B[i] = i;
	}
	// Load A, B to device memory.
	int size = MSIZE * sizeof(float);
	float *Ad, *Bd, *Pd;
	cudaMalloc((void**)&Ad, size);
	cudaMemcpy(Ad, A, size, cudaMemcpyHostToDevice);
	cudaMalloc((void**)&Bd, size);
	cudaMemcpy(Bd, B, size, cudaMemcpyHostToDevice);
	cudaMalloc((void**)&Pd, size);
	// Addition: the blocking cudaMemcpy also synchronizes with the kernel.
	matAdd<<< numBlocks, threadsPerBlock >>>(Ad, Bd, Pd);
	cudaMemcpy(P, Pd, size, cudaMemcpyDeviceToHost);
	matSerialAdd(A, B, serialP);
	for (int i = 0; i < MSIZE; i++)
		assert(P[i] == serialP[i]);
	cout << "Matrix Addition Success!" << endl;
	// Subtraction.
	matSub<<< numBlocks, threadsPerBlock >>>(Ad, Bd, Pd);
	cudaMemcpy(P, Pd, size, cudaMemcpyDeviceToHost);
	matSerialSub(A, B, serialP);
	for (int i = 0; i < MSIZE; i++)
		assert(P[i] == serialP[i]);
	std::cout << "Matrix Subtraction Success!" << std::endl;
	// Multiplication.
	matMul<<< numBlocks, threadsPerBlock >>>(Ad, Bd, Pd);
	cudaMemcpy(P, Pd, size, cudaMemcpyDeviceToHost);
	matSerialMul(A, B, serialP);
	for (int i = 0; i < MSIZE; i++)
		assert(P[i] == serialP[i]);
	std::cout << "Matrix Dot Multiplication Success!" << std::endl;
	// Free device memory.
	cudaFree(Ad);
	cudaFree(Bd);
	cudaFree(Pd);
	// Fix: the host buffers were allocated with new[] but never released.
	delete[] A;
	delete[] B;
	delete[] P;
	delete[] serialP;
	return 0;
} |
10,431 | #include <math.h>
#include <stdio.h>
#include <sys/time.h>
#include <sys/resource.h>
# define N 8192
# define N_THRDS 256
// Build the row-pointer table: A[i] points at row i of the flat buffer.
__global__ void init_mat (size_t *A_array, size_t **A, const size_t cols){
	for (size_t row = threadIdx.x; row < cols; row += blockDim.x)
		A[row] = A_array + row * cols;
}
// Naive transpose: block y handles row `blockIdx.x` of A; threads stride
// across the columns and scatter into B.
__global__ void transpose (size_t **A, size_t **B, const size_t n){
	size_t row = blockIdx.x;
	for (size_t col = threadIdx.x; col < n; col += blockDim.x)
		B[col][row] = A[row][col];
}
// Tiled transpose: each dim x dim block stages its tile in shared memory,
// then writes it back with block coordinates swapped and the tile itself
// transposed via the swapped a_block index.
// Assumes blockDim == (dim, dim) and dim*dim == N_THRDS (a_block capacity);
// the single barrier orders the tile's write phase before its read phase.
// NOTE(review): a_block rows are not padded, so same-bank column reads are
// likely serialized — consider [dim][dim+1] padding; verify with profiling.
__global__ void fast_transpose(size_t **A, size_t **B, const size_t dim){
__shared__ size_t a_block[N_THRDS];
a_block[threadIdx.y*dim + threadIdx.x] = A[dim*blockIdx.y + threadIdx.y][dim*blockIdx.x + threadIdx.x];
__syncthreads();
B[dim*blockIdx.x + threadIdx.y][dim*blockIdx.y + threadIdx.x]= a_block[dim*threadIdx.x + threadIdx.y];
}
void fill_mat(size_t *mat, const size_t rows, const size_t cols);
int is_transpose(size_t *mat, size_t *transp, const size_t n);
void print_is_transpose(size_t *mat, size_t *transp, const size_t n);
double seconds();
// Driver: times the naive and tiled transposes of an N x N matrix and
// verifies each result on the host.
int main() {
	size_t* mat_array = (size_t*) malloc(N*N*sizeof(size_t));
	size_t* transp_array = (size_t*) malloc(N*N*sizeof(size_t));
	fill_mat(mat_array, N, N);
	size_t *dev_mat_array, *dev_transp_array;
	size_t **dev_mat, **dev_transp;
	cudaMalloc( (void**)&dev_mat_array, N*N*sizeof(size_t) );
	cudaMalloc( (void**)&dev_transp_array, N*N*sizeof(size_t) );
	/* Fix: these arrays hold N row POINTERS, so size them with
	   sizeof(size_t*). The old sizeof(size_t) happens to match on LP64
	   platforms but is wrong in general (e.g. 32-bit hosts). */
	cudaMalloc( (void***)&dev_mat, N*sizeof(size_t*) );
	cudaMalloc( (void***)&dev_transp, N*sizeof(size_t*) );
	cudaMemcpy( dev_mat_array, mat_array, N*N*sizeof(size_t), cudaMemcpyHostToDevice );
	init_mat<<< 1, 1024 >>>(dev_mat_array, dev_mat,N);
	init_mat<<< 1, 1024 >>>(dev_transp_array, dev_transp,N);
	double start, elapsed;
	start = seconds();
	transpose<<<N, 1024>>>(dev_mat, dev_transp, N);
	cudaDeviceSynchronize();
	elapsed = seconds() - start;
	cudaMemcpy( transp_array, dev_transp_array, N*N*sizeof(size_t), cudaMemcpyDeviceToHost );
	printf("Transpose result is: %d (%lf seconds)\n", is_transpose(mat_array, transp_array, N), elapsed);
	// Tiled version: square blocks of dim x dim threads (dim^2 == N_THRDS).
	size_t dim= (size_t)sqrt(N_THRDS);
	dim3 grid,block;
	grid.x=N/dim;
	grid.y=N/dim;
	block.x=dim;
	block.y=dim;
	start = seconds();
	fast_transpose<<< grid, block >>>(dev_mat, dev_transp,dim);
	cudaDeviceSynchronize();
	elapsed = seconds() - start;
	cudaMemcpy( transp_array, dev_transp_array, N*N*sizeof(size_t), cudaMemcpyDeviceToHost );
	printf("Fast transpose result is: %d (%lf seconds)\n", is_transpose(mat_array, transp_array, N), elapsed);
	free(mat_array); free(transp_array);
	cudaFree( dev_mat_array ); cudaFree( dev_transp_array ); cudaFree(dev_mat);cudaFree(dev_transp);
	return 0;
}
// Fill the rows*cols buffer with pseudo-random values in [0, 100).
void fill_mat(size_t *mat, const size_t rows, const size_t cols){
	const size_t total = rows * cols;
	for (size_t k = 0; k < total; ++k)
		mat[k] = rand() % 100;
}
// Returns 1 if transp looks like the transpose of mat, else 0.
// Only the strict upper triangle of mat is compared against the
// corresponding entries of transp (diagonal is trivially symmetric).
int is_transpose(size_t *mat, size_t *transp, const size_t n){
	for (size_t r = 0; r < n; ++r)
		for (size_t c = r + 1; c < n; ++c)
			if (mat[r*n + c] != transp[c*n + r])
				return 0;
	return 1;
}
// Debug helper: prints an n x n grid of flags, 1 where transp holds the
// transposed value of mat at that position, 0 where it does not.
void print_is_transpose(size_t *mat, size_t *transp, const size_t n){
	for (size_t r = 0; r < n; ++r){
		for (size_t c = 0; c < n; ++c)
			printf("%d", (mat[r*n + c] != transp[c*n + r]) ? 0 : 1);
		putchar('\n');
	}
}
double seconds()
/* Return the second elapsed since Epoch (00:00:00 UTC, January 1, 1970)
*/
{
	struct timeval now;
	gettimeofday( &now, (struct timezone *)0 );
	return (double)now.tv_sec + ((double)now.tv_usec) / 1000000.0;
}
|
10,432 | #include "includes.h"
// Each thread insertion-sorts one length-n slice of `dist` (slice `col`
// starts at dist[col * n]) in ascending order, recording the original
// positions of the values in the parallel slice of `idMat`.
// `k` is accepted for interface compatibility but unused here.
__global__ void InsertionSortDistances(float* dist, int* idMat, int n, int k)
{
	int col = blockIdx.x * blockDim.x + threadIdx.x;
	if (col >= n)
		return;
	int* ids = &idMat[col * n];
	float* keys = &dist[col * n];
	ids[0] = 0;
	for (int i = 1; i < n; ++i)
	{
		float key = keys[i];
		int j = i - 1;
		// Shift larger entries (and their ids) right to open a slot.
		while (j >= 0 && keys[j] > key)
		{
			keys[j + 1] = keys[j];
			ids[j + 1] = ids[j];
			--j;
		}
		keys[j + 1] = key;
		ids[j + 1] = i;
	}
} |
10,433 | #include "includes.h"
// Zeroes the buffer; note every launched thread walks the full range
// (the loop does not partition work across threads).
__global__ void mk_kernel(char* keep_mem, size_t bytes)
{
	size_t i = 0;
	while (i < bytes)
	{
		keep_mem[i] = 0;
		++i;
	}
} |
10,434 | #include<iostream>
#include<string>
#include<fstream>
#include<cstdlib>
#include<chrono>
#include<thrust/device_vector.h>
using namespace std;
// Counts, per block, how many of this block's starting positions in text
// `v` (length *max) begin an occurrence of pattern `s` (length *d).
// Result for block b is written to count[b].
__global__ void brute(const char *v,const char *s,int *d,int *max,int *count){
	int i=threadIdx.x+blockIdx.x*blockDim.x;
	__shared__ int countl[1024];
	// Character-by-character match starting at position i.
	int j=0;
	while((j<(*d))&&(i<(*max))&&(v[i]==s[j]))
	{
		i++;
		j++;
	}
	countl[threadIdx.x] = (j==(*d)) ? 1 : 0;
	__syncthreads();
	// Fix: previously EVERY thread zeroed count[blockIdx.x] and then ran
	// the whole reduction loop concurrently — an unsynchronized data race
	// on global memory. Let a single thread per block do the reduction.
	if (threadIdx.x == 0) {
		int total = 0;
		for (int t = 0; t < blockDim.x; t++)
			total += countl[t];
		count[blockIdx.x] = total;
	}
}
// Host driver: loads inp.txt into a flat character buffer, launches the
// brute-force substring counter, and sums the per-block counts.
int main(int argc,char **argv){
	int max,*d_max;
	max=5000000;
	vector<string> v(1000);
	int *d_f,i,l1,j=0,num_blocks=max/1024+1,tot=0;
	char *d_str,*str;
	str=new char[max];
	char *d_cmp;
	int *d_count,*count;
	count=new int[num_blocks];
	for(i=0;i<num_blocks;i++)
		count[i]=0;
	char cmp[]="the";
	int f=sizeof(cmp)/sizeof(cmp[0])-1;   // pattern length minus the NUL
	ifstream fil("inp.txt");
	string line;
	while(getline(fil,line)&&j<max)
	{
		v.push_back(line);
		l1=line.length();
		for(i=0;i<l1&&j<max;i++)
			str[j++]=line[i];
	}
	cudaMalloc((void**)&d_str,sizeof(char)*max);
	cudaMalloc((void**)&d_cmp,sizeof(char)*f);
	cudaMalloc((void**)&d_f,sizeof(int));
	cudaMalloc((void**)&d_max,sizeof(int));
	cudaMalloc((void**)&d_count,sizeof(int)*num_blocks);
	cudaMemcpy(d_count,count,sizeof(int)*num_blocks,cudaMemcpyHostToDevice);
	cudaMemcpy(d_f,&f,sizeof(int),cudaMemcpyHostToDevice);
	cudaMemcpy(d_cmp,cmp,sizeof(char)*f,cudaMemcpyHostToDevice);
	cudaMemcpy(d_max,&max,sizeof(int),cudaMemcpyHostToDevice);
	cudaMemcpy(d_str,str,max*sizeof(char),cudaMemcpyHostToDevice);
	brute<<<num_blocks,1024>>>(d_str,d_cmp,d_f,d_max,d_count);
	cudaDeviceSynchronize();   // fix: cudaThreadSynchronize is deprecated
	cudaFree(d_str);
	cudaFree(d_cmp);
	cudaFree(d_max);
	cudaFree(d_f);             // fix: d_f was leaked
	cudaMemcpy(count,d_count,sizeof(int)*num_blocks,cudaMemcpyDeviceToHost);
	for(i=0;i<num_blocks;i++){
		tot+=count[i];
	}
	cudaFree(d_count);
	cout<<tot<<endl;
	delete[] count;
	delete[] str;
	return 0;
} |
10,435 | #include "includes.h"
// Device input vectors
int *d_a;
//Device output vector
int *d_b;
// One Hillis-Steele-style step of a naive inclusive prefix sum:
// A[i] = B[i - 2^(iteration-1)] + B[i] where the offset is in range,
// otherwise A[i] = B[i]. A and B must be distinct buffers.
__global__ void naivePrefixSum(int *A, int *B, int size, int iteration) {
	const int idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx >= size)
		return;
	const int offset = 1 << (iteration - 1);
	A[idx] = (idx >= offset) ? B[idx - offset] + B[idx] : B[idx];
} |
10,436 | #include "includes.h"
// Copyright 2019, Dimitra S. Kaitalidou, All rights reserved
#define N 256
#define THR_PER_BL 8
#define BL_PER_GR 32
// Copies the output array Q back into the input array D, one element
// per thread.
__global__ void kernel2(int* D, int* Q){
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	// Robustness: guard against launches whose thread count exceeds the
	// N-element arrays (the file's BL_PER_GR * THR_PER_BL covers N exactly,
	// but the kernel should not depend on that).
	if (i < N)
		D[i] = Q[i];
} |
10,437 | #include "includes.h"
#define rows 1000
#define cols 1000
// CUDA kernel. Each thread takes care of one element of c
// CUDA kernel: dense matrix product m3 = m1 * m2 for rows x cols matrices.
// Each thread produces one output element.
__global__ void matricesMul(double *m1, double *m2, double *m3)
{
	// Global row/column of the output element this thread owns.
	int row = blockIdx.y*blockDim.y + threadIdx.y;
	int col = blockIdx.x*blockDim.x + threadIdx.x;
	// Threads past the matrix edge do nothing.
	if (row >= rows || col >= cols)
		return;
	double acc = 0.0;
	for (int k = 0; k < rows; k++)
		acc += m1[row*rows + k] * m2[k*cols + col];
	m3[row*rows + col] = acc;
} |
10,438 | #include "includes.h"
// Privatized histogram: each block accumulates into a shared-memory copy
// of the bins, then merges it into the global bins with one atomic per bin.
// Assumes NUM_BINS is a multiple of BLOCK_SIZE and blockDim.x == BLOCK_SIZE
// (macros defined elsewhere in the project) — TODO confirm at call sites.
// NOTE(review): input values are used directly as bin indices; values
// >= NUM_BINS would corrupt shared memory — presumably guaranteed upstream.
__global__ void histogram(unsigned int *input, unsigned int *bins, unsigned int num_elements, unsigned int num_bins) {
//@@ Using privitization technique
__shared__ unsigned int hist[NUM_BINS];
// Each thread zeroes its strided share of the private bins.
int numOfElementsPerThread = NUM_BINS / BLOCK_SIZE;
int i = blockDim.x * blockIdx.x + threadIdx.x;
for (int j = 0; j < numOfElementsPerThread; ++j)
hist[threadIdx.x + blockDim.x*j] = 0;
__syncthreads();
// Accumulate this block's elements into the shared copy.
if (i < num_elements)
atomicAdd(&hist[input[i]], 1);
__syncthreads();
// Merge the block-private histogram into the global bins.
for (int k = 0; k < numOfElementsPerThread; ++k)
atomicAdd(&bins[threadIdx.x + blockDim.x*k], hist[threadIdx.x+blockDim.x*k]);
} |
10,439 | #include <stdio.h>
#include "cuda.h"
const int N=10000000;
// Computes R[i] = A[i] * B[i] * i for each of the N elements.
// N is the file-scope const element count (compile-time constant).
__global__
void add(int *A, int *B,int *R)
{
	int id = blockIdx.x*blockDim.x+threadIdx.x;
	// Fix: guard against grids that launch more threads than N, which
	// previously wrote out of bounds for over-sized launches.
	if (id < N)
		R[id] = A[id]*B[id]*id;
}
// Host driver: fills two N-element vectors, computes R = A*B*index on the
// GPU, and prints the first few results.
int main()
{
	int *h_A,*h_B,*h_R;
	h_A=(int*)malloc(sizeof(int)*N);
	h_B=(int*)malloc(sizeof(int)*N);
	h_R=(int*)malloc(sizeof(int)*N);
	int i;
	for(i=0;i<N;i++){
		h_A[i]=i;
		h_B[i]=i;
		h_R[i]=88;
	}
	int *d_A,*d_B,*d_R;
	cudaMalloc(&d_A,N*sizeof(int));
	cudaMalloc(&d_B,N*sizeof(int));
	cudaMalloc(&d_R,N*sizeof(int));
	cudaMemcpy(d_A,h_A,N*sizeof(int), cudaMemcpyHostToDevice );
	cudaMemcpy(d_B,h_B,N*sizeof(int), cudaMemcpyHostToDevice );
	cudaMemcpy(d_R,h_R,N*sizeof(int), cudaMemcpyHostToDevice );
	/* Fix: the old dimGrid(1000,1000) launch was ignored by the kernel
	   (it only reads blockIdx.x), so just 512000 of the 10M elements were
	   ever computed. Launch a 1-D grid that covers N exactly:
	   N = 10,000,000 = 500 * 20,000. */
	dim3 dimBlock( 500, 1, 1 );
	dim3 dimGrid( N / 500, 1, 1 );
	add<<<dimGrid, dimBlock>>>(d_A, d_B,d_R);
	cudaDeviceSynchronize();   /* fix: cudaThreadSynchronize is deprecated */
	cudaMemcpy(h_R,d_R,N*sizeof(int), cudaMemcpyDeviceToHost );
	cudaFree( d_R );
	cudaFree( d_A );
	cudaFree( d_B );           /* fix: d_A was freed twice and d_B leaked */
	for(i=0;i<10;i++)
		printf("%d\n",h_R[i]);
	free(h_A);
	free(h_B);
	free(h_R);
	return EXIT_SUCCESS;
}
|
10,440 | #include <iostream>
#include <cstdlib>
#include <fstream>
#include <sstream>
#include <ctime>
#include <cfloat>
#include <cmath>
#include "cuda.h"
using namespace std;
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
// Helper behind CUDA_CHECK_RETURN: on failure, reports the failing
// statement, the CUDA error string, and the source location, then exits.
static void CheckCudaErrorAux(const char *file, unsigned line, const char *statement, cudaError_t err) {
	if (err != cudaSuccess) {
		cerr << statement << " returned " << cudaGetErrorString(err) << "(" << err
				<< ") at " << file << ":" << line << endl;
		exit(1);
	}
}
// Reads up to `size` newline-separated floats from fileName into `data`.
// Exits the process if the file cannot be opened.
void loadArray(const char * fileName, float * data, int size) {
	std::ifstream in(fileName);
	if (!in) {
		std::cout << "Error: unable to open file" << std::endl;
		exit(1);
	}
	std::string line;
	for (int i = 0; in.good() && i < size; ++i) {
		std::getline(in, line);
		data[i] = strtof(line.c_str(), NULL);
	}
	in.close();
}
// Writes `size` ints to fileName, one per line.
// Exits the process if the file cannot be opened.
void storeArray(const char * fileName, int * data, int size) {
	std::ofstream out(fileName);
	if (!out) {
		std::cout << "Error: unable to open file" << std::endl;
		exit(1);
	}
	for (int i = 0; i < size; ++i)
		out << data[i] << std::endl;
	out.close();
}
// Returns true when every centroid coordinate moved by no more than
// `tolerance` between two iterations (element-wise convergence test).
bool stoppingCriterion(float *oldClusters, float *newClusters, int length, float tolerance) {
	for (int i = 0; i < length; i++) {
		float difference = oldClusters[i] - newClusters[i];
		// Fix: use fabsf explicitly. A bare `abs` only resolves to the
		// float overload when <cmath>'s std overloads happen to be in
		// scope; otherwise it silently truncates the float to int.
		if (fabsf(difference) > tolerance)
			return false;
	}
	return true;
}
// CPU reference k-means over N points with P features each, K clusters,
// running ITERATIONS+1 passes (no tolerance-based early exit here).
// Centroids are seeded from the first K points. `assignments` receives
// each point's cluster id; `counters` the per-cluster population.
// NOTE(review): oldCentroids is filled but never compared — the
// tolerance-based stop appears unused; verify intent.
void sequentialKMeans(float *points, int *assignments, float *centroids, float *oldCentroids, int *counters, const int N, const int P, const int K, const int ITERATIONS) {
// Initialize centroids
for (int i = 0; i < K; i++)
for (int j = 0; j < P; j++)
centroids[i * P + j] = points[i * P + j];
bool converged = false;
int count = 0;
while (!converged) {
// Reset counters
for (int i = 0; i < K; i++)
counters[i] = 0;
// Compute nearest cluster
for (int i = 0; i < N; i++) {
float minDistance = FLT_MAX;
short int minIndex = -1;
for (int j = 0; j < K; j++) {
float distance = 0.0;
for (int l = 0; l < P; l++)
// The square root has not influence for the purpose of the results
distance += pow(points[i * P + l] - centroids[j * P + l], 2);
if (distance < minDistance) {
minDistance = distance;
minIndex = j;
}
}
assignments[i] = minIndex;
counters[minIndex]++;
}
// Store old centroids
for (int i = 0; i < K * P; i++)
oldCentroids[i] = centroids[i];
// Reset centroids
for (int i = 0; i < K * P; i++) {
centroids[i] = 0;
}
// Update centroids: accumulate each point scaled by its cluster's size
for (int i = 0; i < N; i++) {
int clusterId = assignments[i];
for (int j = 0; j < P; j++)
centroids[clusterId * P + j] += points[i * P + j] / counters[clusterId];
}
// Stopping criterion: fixed iteration budget
if (count == ITERATIONS)
converged = true;
count++;
}
}
// One thread per point: finds the nearest of k centroids (squared
// Euclidean distance), records the assignment, and bumps that cluster's
// population counter atomically.
__global__ void computeNearestCluster(float* points, float *centroids,
		int* assignments, int* counter, int n, int p, int k) {
	// Fix: the global thread index was stored in a `short int`, which
	// overflows (undefined behavior) for n > 32767 — main() uses n=200000.
	int index = blockIdx.x * blockDim.x + threadIdx.x;
	if (index < n) {
		float minDistance = FLT_MAX;
		int minIndex = -1;
		for (int i = 0; i < k; i++) {
			float distance = 0.0f;
			for (int j = 0; j < p; j++) {
				// The square root has no influence on the argmin.
				float diff = points[index * p + j] - centroids[i * p + j];
				distance += diff * diff;
			}
			// Branchless select of the running minimum.
			bool compare = (minDistance <= distance);
			minDistance = compare * minDistance + (1 - compare) * distance;
			minIndex = compare * minIndex + (1 - compare) * i;
		}
		assignments[index] = minIndex;
		atomicAdd(&(counter[minIndex]), 1);
	}
}
// One thread per point: adds the point's contribution (value / cluster
// population) into its assigned centroid via atomics. Centroids must be
// zeroed and counters populated before this kernel runs.
__global__ void computeMean(float* points, float* devCentroids,
		int* devAssignments, int* counter, int n, int p) {
	// Fix: the global thread index was stored in a `short int`, which
	// overflows (undefined behavior) for n > 32767 — main() uses n=200000.
	int index = blockIdx.x * blockDim.x + threadIdx.x;
	if (index < n) {
		int clusterIndex = devAssignments[index];
		for (int i = 0; i < p; i++)
			atomicAdd(&(devCentroids[clusterIndex * p + i]),
					points[index * p + i] / counter[clusterIndex]);
	}
}
// GPU k-means driver: allocates device buffers, seeds centroids from the
// first K points, then alternates computeNearestCluster / computeMean for
// ITERATIONS+1 passes and copies assignments and centroids back.
// hostOldCentroids and `counter` are part of the shared interface with the
// sequential version but are not read on the host here.
void parallelKMeans(float *hostData, int *hostAssignments, float *hostCentroids, float *hostOldCentroids, int *counter, const int N, const int P, const int K, const int ITERATIONS) {
// Allocate device (GPU) memories
float *devData, *devCentroids, *devOldCentroids;
int *devAssignments, *devCounter;
CUDA_CHECK_RETURN(cudaMalloc((void** )&devData, N * P * sizeof(float)));
CUDA_CHECK_RETURN(cudaMalloc((void** )&devCentroids, P * K * sizeof(float)));
CUDA_CHECK_RETURN(cudaMalloc((void** )&devOldCentroids, P * K * sizeof(float)));
CUDA_CHECK_RETURN(cudaMalloc((void** )&devAssignments, N * sizeof(int)));
CUDA_CHECK_RETURN(cudaMalloc((void** )&devCounter, K * sizeof(int)));
// Copy data from CPU memory to GPU memory
CUDA_CHECK_RETURN(cudaMemcpy(devData, hostData, N * P * sizeof(float), cudaMemcpyHostToDevice));
// Centroids are seeded from the first K points of the data set.
CUDA_CHECK_RETURN(cudaMemcpy(devCentroids, hostData, K * P * sizeof(float), cudaMemcpyHostToDevice));
CUDA_CHECK_RETURN(cudaMemset(devAssignments, 0, N * sizeof(int)));
cudaDeviceSynchronize();
// Invoke the kernels: one thread per point, 1024 threads per block.
dim3 DimBlock(1024);
dim3 DimGrid(N / 1024 + 1);
int count = 0;
bool converged = false;
while (!converged) {
CUDA_CHECK_RETURN(cudaMemcpy(devOldCentroids, devCentroids, P * K * sizeof(float), cudaMemcpyDeviceToDevice));
CUDA_CHECK_RETURN(cudaMemset(devCounter, 0, K * sizeof(int)));
cudaDeviceSynchronize();
computeNearestCluster<<<DimGrid, DimBlock>>>(devData, devCentroids, devAssignments, devCounter, N, P, K);
cudaDeviceSynchronize();
// Centroids are rebuilt from scratch each pass.
CUDA_CHECK_RETURN(cudaMemset(devCentroids, 0, P * K * sizeof(float)));
cudaDeviceSynchronize();
computeMean<<<DimGrid, DimBlock>>>(devData, devCentroids, devAssignments, devCounter, N, P);
cudaDeviceSynchronize();
// Fixed iteration budget; no tolerance-based early exit.
if (count == ITERATIONS)
converged = true;
count++;
}
// Copy data back from GPU memory to CPU memory
CUDA_CHECK_RETURN(cudaMemcpy(hostAssignments, devAssignments, N * sizeof(int), cudaMemcpyDeviceToHost));
CUDA_CHECK_RETURN(cudaMemcpy(hostCentroids, devCentroids, P * K * sizeof(float), cudaMemcpyDeviceToHost));
// Destroy GPU memories
CUDA_CHECK_RETURN(cudaFree(devAssignments));
CUDA_CHECK_RETURN(cudaFree(devCentroids));
CUDA_CHECK_RETURN(cudaFree(devOldCentroids));
CUDA_CHECK_RETURN(cudaFree(devData));
CUDA_CHECK_RETURN(cudaFree(devCounter));
/*
// Store assignments
storeArray("assignments.txt", hostAssignments, N);
// Round centroids (the function storeArray takes int)
int *intCentroids = (int*)malloc(P * K * sizeof(int));
for(int i = 0; i < K * P; i++)
intCentroids[i] = int(hostCentroids[i]);
// Store centroids
storeArray("centroids.txt", intCentroids, K * P);
*/
}
// Benchmark driver: runs the sequential and parallel k-means on the same
// random data and reports average runtimes and speed-up.
int main() {
	const int N = 200000; // No. of points
	const int P = 2; // Features per point
	const int K = 1024; // Number of clusters
	const int MAX_ITERATIONS = 200; // Used as stopping criterion
	float *points = (float*) malloc(N * P * sizeof(float));
	// Generate points
	for (int i = 0; i < N * P; i++)
		points[i] = rand();
	int *assignments = (int*) malloc(N * sizeof(int));
	float *centroids = (float*) malloc(K * P * sizeof(float));
	float *oldCentroids = (float*) malloc(K * P * sizeof(float));
	int *counter = (int*) malloc(K * sizeof(int));
	clock_t start, end;
	double cpu_clocks;
	// Fix: these accumulators were read via `+=` while uninitialized
	// (undefined behavior); start them at zero.
	double seqTime = 0.0, parTime = 0.0;
	cout << "Running k-means..." << endl;
	// Averaging results over "iterations" runs
	const int iterations = 1;
	for (int i = 0; i < iterations; i++) {
		cout << "Iteration " << i + 1 << " of " << iterations << endl;
		// Sequential version
		start = clock();
		sequentialKMeans(points, assignments, centroids, oldCentroids, counter, N, P, K, MAX_ITERATIONS);
		end = clock();
		cpu_clocks = end - start;
		seqTime += cpu_clocks / CLOCKS_PER_SEC;
		// Parallel version
		start = clock();
		parallelKMeans(points, assignments, centroids, oldCentroids, counter, N, P, K, MAX_ITERATIONS);
		end = clock();
		cpu_clocks = end - start;
		parTime += cpu_clocks / CLOCKS_PER_SEC;
	}
	/*
	// Store results
	char fileName[50];
	cout << "File name: ";
	cin >> fileName;
	storeArray(fileName, assignments, N);
	*/
	free(points);
	free(centroids);
	free(oldCentroids);
	free(assignments);
	free(counter);
	cout << "Average sequential execution time: " << seqTime / iterations << "s" << endl;
	cout << "Average parallel execution time: " << parTime / iterations << "s" << endl;
	cout << "Speed-up: " << seqTime / parTime << endl;
	return 0;
}
|
10,441 | #include <stdio.h>
// Device-side greeting: each thread prints its index within the block.
__global__ void helloFromGPU() {
    const int tid = threadIdx.x;
    printf("Hello World from GPU (Thread #%d)!\n", tid);
}
int main(void){
    printf("Hello World from CPU!\n");
    // Launch ten device threads in one block.
    helloFromGPU<<<1, 10>>>();
    // Block until the kernel finishes so its printf output is flushed.
    cudaDeviceSynchronize();
    printf("Goodbye World from CPU!\n");
    cudaDeviceReset();
    return 0;
}
|
10,442 | #include <stdio.h>
#include <stdlib.h>
// Fills d_array[i] = i + 0.1 and accumulates the values into *d_sum.
// The loop strides by blockDim.x * gridDim.x so any launch size covers
// all 2560*2560 elements.
__global__ void kernel(double* d_array, double* d_sum){
	for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < 2560*2560; i += blockDim.x*gridDim.x){
		d_array[i] = i + 0.1;
		printf("d_array[%d] = %.2f\n", i ,d_array[i] );
		// Fix: `*d_sum += ...` was an unsynchronized read-modify-write
		// raced by every thread; use an atomic add instead.
		// (atomicAdd on double requires compute capability 6.0+.)
		atomicAdd(d_sum, d_array[i]);
	}
}
// Host driver: repeatedly launches the fill/sum kernel and prints the
// accumulated total after each round.
int main(int argc, char** argv){
	int N = 2560*2560;
	double* array = (double*) calloc(N,sizeof(double));
	double sum = 2.03;
	double* d_array, *d_sum;
	size_t size = N*sizeof(double);
	cudaMalloc((void**) &d_array, size);
	cudaMalloc((void**) &d_sum, sizeof(double));
	cudaMemcpy(d_array,array,size,cudaMemcpyHostToDevice);
	cudaMemcpy(d_sum, &sum, sizeof(double), cudaMemcpyHostToDevice);
	for (int k = 0; k < 100000; k++){
		kernel<<<1,1024>>>(d_array,d_sum);
		// These blocking copies also synchronize with the kernel.
		cudaMemcpy(array, d_array, size, cudaMemcpyDeviceToHost);
		cudaMemcpy(&sum, d_sum, sizeof(double), cudaMemcpyDeviceToHost);
		printf("sum = %.2f\n", sum );
	}
	cudaFree(d_array);
	cudaFree(d_sum);
	free(array);   // fix: the calloc'd host buffer was leaked
	return 0;
} |
10,443 | /*
* CUDA Vector Addition Kernel
* J. Overbey (Spring 2016), based on NVIDIA_CUDA-6.0_Samples/0_Simple/vectorAdd
*
* Compile this with:
* nvcc -o vecAdd-CUDA vecAdd-CUDA.cu
*
* This program generates two random vectors (A and B) with NUM_ELTS elements
* each, computes their sum on the device (GPU) using CUDA, and then verifies
* the result by adding the vectors again on the CPU and comparing the results.
*/
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/* Number of elements in the vectors to be added */
#define NUM_ELTS 10000
/* Number of CUDA threads per block */
#define THREADS_PER_BLOCK 256
/* Maximum difference allowed between the GPU and CPU result vectors */
#define EPSILON 1e-5
/* If a CUDA call fails, display an error message and exit */
#define CUDA_CHECK(e) { \
cudaError_t err = (e); \
if (err != cudaSuccess) \
{ \
fprintf(stderr, "CUDA error: %s, line %d, %s: %s\n", \
__FILE__, __LINE__, #e, cudaGetErrorString(err)); \
exit(EXIT_FAILURE); \
} \
}
/*
* Vector addition kernel. Takes as input two arrays A and B, each with
* NUM_ELTS elements, and stores their sum in C.
*/
__global__ static void vecAdd(const float *A, const float *B, float *C);
/*
 * Host driver: builds two random vectors, adds them on the GPU, and checks
 * the result element-by-element against a CPU-side sum.
 */
int main(void)
{
    printf("Adding %d-element vectors\n", NUM_ELTS);

    /* Host buffers: A and B hold random inputs, C receives the GPU result */
    size_t bytes = NUM_ELTS * sizeof(float);
    float *h_A = (float *)malloc(bytes);
    float *h_B = (float *)malloc(bytes);
    float *h_C = (float *)malloc(bytes);
    if (!h_A || !h_B || !h_C)
    {
        fprintf(stderr, "Unable to allocate host memory\n");
        exit(EXIT_FAILURE);
    }
    for (int i = 0; i < NUM_ELTS; ++i)
    {
        h_A[i] = rand()/(float)RAND_MAX;
        h_B[i] = rand()/(float)RAND_MAX;
    }

    /* Device buffers */
    float *d_A, *d_B, *d_C;
    CUDA_CHECK(cudaMalloc((void **)&d_A, bytes));
    CUDA_CHECK(cudaMalloc((void **)&d_B, bytes));
    CUDA_CHECK(cudaMalloc((void **)&d_C, bytes));

    /* Push the inputs to the device */
    CUDA_CHECK(cudaMemcpy(d_A, h_A, bytes, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(d_B, h_B, bytes, cudaMemcpyHostToDevice));

    /* Launch with a ceil-div grid so every element is covered */
    int numBlocks = (NUM_ELTS + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    printf("Launching CUDA Kernel (%d blocks, %d threads/block)\n",
        numBlocks, THREADS_PER_BLOCK);
    vecAdd<<<numBlocks, THREADS_PER_BLOCK>>>(d_A, d_B, d_C);
    CUDA_CHECK(cudaGetLastError());
    CUDA_CHECK(cudaDeviceSynchronize());

    /* Pull the result back */
    CUDA_CHECK(cudaMemcpy(h_C, d_C, bytes, cudaMemcpyDeviceToHost));

    /* Verify against the CPU sum */
    for (int i = 0; i < NUM_ELTS; ++i)
    {
        float diff = fabsf(h_A[i] + h_B[i] - h_C[i]);
        if (diff > EPSILON)
        {
            fprintf(stderr, "Result element %d is incorrect\n", i);
            fprintf(stderr, "  h_A[i] + h_B[i] = %f\n", h_A[i] + h_B[i]);
            fprintf(stderr, "  h_C[i] = %f\n", h_C[i]);
            fprintf(stderr, "  Difference is %f\n", fabsf(h_A[i] + h_B[i] - h_C[i]));
            fprintf(stderr, "  EPSILON is %f\n", EPSILON);
            exit(EXIT_FAILURE);
        }
    }

    /* Release device memory */
    CUDA_CHECK(cudaFree(d_A));
    CUDA_CHECK(cudaFree(d_B));
    CUDA_CHECK(cudaFree(d_C));

    /* Release host memory */
    free(h_A);
    free(h_B);
    free(h_C);

    /* Reset the device (unnecessary if not profiling, but good practice) */
    CUDA_CHECK(cudaDeviceReset());
    printf("Done\n");
    return 0;
}
/* Elementwise sum: C[i] = A[i] + B[i] for the NUM_ELTS valid indices;
 * threads in the tail block past the end do nothing. */
__global__ static void vecAdd(const float *A, const float *B, float *C)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= NUM_ELTS)
        return;
    C[idx] = A[idx] + B[idx];
}
|
10,444 | #include "includes.h"
// Accumulates C += A^T * A for a size x size row-major matrix: thread (Row,
// Col) adds sum_k A[k][Row] * A[k][Col] into C[Row][Col].
// Fix: the original had no bounds guard, so any launch whose grid overshoots
// `size` (the usual ceil-div configuration) read and wrote out of bounds.
// In-range behavior is unchanged (the product is still accumulated into C);
// the dot product is now built in a register and written once.
__global__ void matrixMultiply1(float *A, float *C, int size) {
    int Col = blockDim.y * blockIdx.y + threadIdx.y;
    int Row = blockDim.x * blockIdx.x + threadIdx.x;
    if (Row >= size || Col >= size) return;
    float acc = 0.0f;
    for (int k = 0; k < size; k++)
        acc += A[k * size + Row] * A[k * size + Col];
    C[Row * size + Col] += acc;
} |
10,445 | #include <stdio.h>
// Writes z[0] = 1 / (x[0] + y[0]); only element 0 of each array is touched.
// Threads with index >= n do nothing (the launch below uses a single thread).
__global__
void inverse(int n, double *x, double *y, double *z) {
    const int tid = threadIdx.x;
    if (tid >= n)
        return;
    z[0] = 1.0 / (x[0] + y[0]);
}
// Host driver: times a trivial 1/(x+y) kernel with CUDA events and compares
// the device result against the same expression computed on the CPU.
int main(void) {
    const int N = 333;
    int bytes = N*sizeof(double);

    // host buffers (only element 0 is actually used)
    double *x = (double*)malloc(bytes);
    double *y = (double*)malloc(bytes);
    double *z = (double*)malloc(bytes);

    // device buffers
    double *d_x, *d_y, *d_z;
    cudaMalloc(&d_x, bytes);
    cudaMalloc(&d_y, bytes);
    cudaMalloc(&d_z, bytes);

    x[0] = 1.0f;
    y[0] = 2.0f;
    cudaMemcpy(d_x, x, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, y, bytes, cudaMemcpyHostToDevice);

    // time the kernel launch with CUDA events
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    inverse<<<1,1>>>(N, d_x, d_y, d_z);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float milli = 0;
    cudaEventElapsedTime(&milli, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    float micro = milli * 1000;   // ms -> us

    cudaMemcpy(z, d_z, bytes, cudaMemcpyDeviceToHost);

    // CPU reference for the same expression
    double answer = 1/(x[0]+y[0]);
    printf("answer = %f\n", answer);
    printf("z = %f\n", z[0]);
    printf("time = %f us\n",micro);

    free(x);
    free(y);
    free(z);
    cudaFree(d_x);
    cudaFree(d_y);
    cudaFree(d_z);
    return 0;
}
|
10,446 | #include "Data_kernel.cuh"
#define BLOCK_SIZE 32
// Admissible push between two neighbouring grid cells: moves
// value = min(*weightFrom, *capacityFrom) of flow from "from" to "to", but
// only when *heightFrom == *heightTo + 1 (the push-relabel admissibility
// condition). Returns true when a positive amount was moved, i.e. the edge
// was active.
// NOTE(review): *capacityTo grows by the pushed value and *heightTo is then
// overwritten with (capacityTo > 0 ? 1 : 0) — this differs from textbook
// push-relabel bookkeeping; confirm against the host-side driver before
// changing anything here.
__device__
bool Data_Push(
    int *weightFrom,
    int *weightTo,
    int *capacityFrom,
    int *capacityTo,
    int *heightFrom,
    int *heightTo
)
{
    if (*heightFrom != *heightTo + 1) {
        return false;
    }
    int value = min(*weightFrom, *capacityFrom);
    *weightFrom -= value;
    *capacityFrom -= value;
    *weightTo += value;
    *capacityTo += value;
    *heightTo = (*capacityTo) > 0 ? 1 : 0;
    return value > 0; // active
}
// Push flow from cell (x, y) into its left neighbour (x - 1, y).
// Returns false on the left boundary or when nothing could be pushed.
__device__
bool Data_PushLeft(
    int *device_weightLeft,
    int *device_weightRight,
    int *device_height,
    int *device_capacity,
    int columnSize,
    // parameters
    int x,
    int y
)
{
    if (x == 0) {
        return false;   // no neighbour to the left
    }
    const size_t from = (size_t)x * columnSize + y;
    const size_t to = from - columnSize;   // cell (x - 1, y)
    return Data_Push(
        &device_weightLeft[from],
        &device_weightRight[to],
        &device_capacity[from],
        &device_capacity[to],
        &device_height[from],
        &device_height[to]
    );
}
// Push flow from cell (x, y) into its right neighbour (x + 1, y).
// Returns false on the right boundary or when nothing could be pushed.
__device__
bool Data_PushRight(
    int *device_weightLeft,
    int *device_weightRight,
    int *device_height,
    int *device_capacity,
    int rowSize,
    int columnSize,
    // parameters
    int x,
    int y
)
{
    if (x == rowSize - 1) {
        return false;   // no neighbour to the right
    }
    const size_t from = (size_t)x * columnSize + y;
    const size_t to = from + columnSize;   // cell (x + 1, y)
    return Data_Push(
        &device_weightRight[from],
        &device_weightLeft[to],
        &device_capacity[from],
        &device_capacity[to],
        &device_height[from],
        &device_height[to]
    );
}
// Push flow from cell (x, y) into its "up" neighbour (x, y + 1).
// Returns false on that boundary or when nothing could be pushed.
__device__
bool Data_PushUp(
    int *device_weightUp,
    int *device_weightDown,
    int *device_height,
    int *device_capacity,
    int columnSize,
    // parameters
    int x,
    int y
)
{
    if (y == columnSize - 1) {
        return false;   // no neighbour above
    }
    const size_t from = (size_t)x * columnSize + y;
    const size_t to = from + 1;   // cell (x, y + 1)
    return Data_Push(
        &device_weightUp[from],
        &device_weightDown[to],
        &device_capacity[from],
        &device_capacity[to],
        &device_height[from],
        &device_height[to]
    );
}
// Push flow from cell (x, y) into its "down" neighbour (x, y - 1).
// Returns false on that boundary or when nothing could be pushed.
__device__
bool Data_PushDown(
    int *device_weightUp,
    int *device_weightDown,
    int *device_height,
    int *device_capacity,
    int columnSize,
    // parameters
    int x,
    int y
)
{
    if (y == 0) {
        return false;   // no neighbour below
    }
    const size_t from = (size_t)x * columnSize + y;
    const size_t to = from - 1;   // cell (x, y - 1)
    return Data_Push(
        &device_weightDown[from],
        &device_weightUp[to],
        &device_capacity[from],
        &device_capacity[to],
        &device_height[from],
        &device_height[to]
    );
}
// Inject source flow into cell (x, y): a positive source weight marks the
// cell with height 1, and its capacity grows by that weight.
__device__
void Data_PushFromS(
    int *device_weightS,
    int *device_height,
    int *device_capacity,
    int columnSize,
    int x,
    int y
)
{
    const size_t cell = (size_t)x * columnSize + y;
    const int w = device_weightS[cell];
    if (w > 0) {
        device_height[cell] = 1;
    }
    device_capacity[cell] += w;
}
// Drain flow from cell (x, y) toward the sink: removes
// min(capacity, sink weight) from the cell's capacity.
// (device_height is unused here but kept for a uniform call signature.)
__device__
void Data_PushToT(
    int *device_weightT,
    int *device_height,
    int *device_capacity,
    int columnSize,
    int x,
    int y
)
{
    const size_t cell = (size_t)x * columnSize + y;
    const int drained = min(device_capacity[cell], device_weightT[cell]);
    device_capacity[cell] -= drained;
}
// One leftward push sweep over a BLOCK_SIZE x BLOCK_SIZE tile. Each thread
// owns one column (threadIdx.x) of its block's tile and visits rows
// i = BLOCK_SIZE-1 down to 1; every visited cell first absorbs source flow
// (Data_PushFromS), then pushes left, then drains toward the sink
// (Data_PushToT). Walking rows high-to-low lets pushed flow cascade leftward
// within the tile in a single launch. Row i = 0 pushes into the neighbouring
// tile, so it runs only after the block-wide barrier, keeping cross-tile
// writes ordered after the in-tile sweep. *device_active is set when any
// push moved a positive amount.
__global__
void Data_PushLeftForLine(
    bool *device_active,
    int *device_weightLeft,
    int *device_weightRight,
    int *device_weightS,
    int *device_weightT,
    int *device_height,
    int *device_capacity,
    int rowSize,
    int columnSize
)
{
    bool local_active = false;
    for (int i = BLOCK_SIZE - 1; i >= 1; i --) {
        int x = blockIdx.x * BLOCK_SIZE + i;
        int y = blockIdx.y * BLOCK_SIZE + threadIdx.x;
        if (x < rowSize && y < columnSize) {
            Data_PushFromS(
                device_weightS,
                device_height,
                device_capacity,
                columnSize,
                x,
                y
            );
            bool active = Data_PushLeft(
                device_weightLeft,
                device_weightRight,
                device_height,
                device_capacity,
                columnSize,
                // parameters
                x,
                y
            );
            local_active = local_active || active;
            Data_PushToT(
                device_weightT,
                device_height,
                device_capacity,
                columnSize,
                x,
                y
            );
        }
    }
    __syncthreads();
    { // i = 0: this row pushes into the neighbouring tile
        int x = blockIdx.x * BLOCK_SIZE;
        int y = blockIdx.y * BLOCK_SIZE + threadIdx.x;
        if (x < rowSize && y < columnSize) {
            Data_PushFromS(
                device_weightS,
                device_height,
                device_capacity,
                columnSize,
                x,
                y
            );
            bool active = Data_PushLeft(
                device_weightLeft,
                device_weightRight,
                device_height,
                device_capacity,
                columnSize,
                // parameters
                x,
                y
            );
            local_active = local_active || active;
            Data_PushToT(
                device_weightT,
                device_height,
                device_capacity,
                columnSize,
                x,
                y
            );
        }
    }
    if (local_active) {
        *device_active = true;
    }
}
// One rightward push sweep over a BLOCK_SIZE x BLOCK_SIZE tile (mirror of
// Data_PushLeftForLine). Each thread owns one column (threadIdx.x) and
// visits rows i = 0 up to BLOCK_SIZE-2, absorbing source flow, pushing
// right, then draining toward the sink at each cell; walking rows low-to-high
// lets flow cascade rightward within the tile. The last row
// (i = BLOCK_SIZE-1) pushes into the neighbouring tile and runs only after
// the block-wide barrier. *device_active is set when any push moved flow.
__global__
void Data_PushRightForLine(
    bool *device_active,
    int *device_weightLeft,
    int *device_weightRight,
    int *device_weightS,
    int *device_weightT,
    int *device_height,
    int *device_capacity,
    int rowSize,
    int columnSize
)
{
    bool local_active = false;
    for (int i = 0; i < BLOCK_SIZE - 1; i ++) {
        int x = blockIdx.x * BLOCK_SIZE + i;
        int y = blockIdx.y * BLOCK_SIZE + threadIdx.x;
        if (x < rowSize && y < columnSize) {
            Data_PushFromS(
                device_weightS,
                device_height,
                device_capacity,
                columnSize,
                x,
                y
            );
            bool active = Data_PushRight(
                device_weightLeft,
                device_weightRight,
                device_height,
                device_capacity,
                rowSize,
                columnSize,
                // parameters
                x,
                y
            );
            local_active = local_active || active;
            Data_PushToT(
                device_weightT,
                device_height,
                device_capacity,
                columnSize,
                x,
                y
            );
        }
    }
    __syncthreads();
    { // i = BLOCK_SIZE - 1: this row pushes into the neighbouring tile
        int x = blockIdx.x * BLOCK_SIZE + (BLOCK_SIZE - 1);
        int y = blockIdx.y * BLOCK_SIZE + threadIdx.x;
        if (x < rowSize && y < columnSize) {
            Data_PushFromS(
                device_weightS,
                device_height,
                device_capacity,
                columnSize,
                x,
                y
            );
            bool active = Data_PushRight(
                device_weightLeft,
                device_weightRight,
                device_height,
                device_capacity,
                rowSize,
                columnSize,
                // parameters
                x,
                y
            );
            local_active = local_active || active;
            Data_PushToT(
                device_weightT,
                device_height,
                device_capacity,
                columnSize,
                x,
                y
            );
        }
    }
    if (local_active) {
        *device_active = true;
    }
}
// One upward (y-increasing) push sweep over a BLOCK_SIZE x BLOCK_SIZE tile.
// Each thread owns one row (threadIdx.x) and visits columns i = 0 up to
// BLOCK_SIZE-2, absorbing source flow, pushing up, then draining toward the
// sink at each cell; walking columns low-to-high lets flow cascade upward
// within the tile. The last column (i = BLOCK_SIZE-1) pushes into the
// neighbouring tile and runs only after the block-wide barrier.
// *device_active is set when any push moved flow.
__global__
void Data_PushUpForLine(
    bool *device_active,
    int *device_weightUp,
    int *device_weightDown,
    int *device_weightS,
    int *device_weightT,
    int *device_height,
    int *device_capacity,
    int rowSize,
    int columnSize
)
{
    bool local_active = false;
    for (int i = 0; i < BLOCK_SIZE - 1; i ++) {
        int x = blockIdx.x * BLOCK_SIZE + threadIdx.x;
        int y = blockIdx.y * BLOCK_SIZE + i;
        if (x < rowSize && y < columnSize) {
            Data_PushFromS(
                device_weightS,
                device_height,
                device_capacity,
                columnSize,
                x,
                y
            );
            bool active = Data_PushUp(
                device_weightUp,
                device_weightDown,
                device_height,
                device_capacity,
                columnSize,
                // parameters
                x,
                y
            );
            local_active = local_active || active;
            Data_PushToT(
                device_weightT,
                device_height,
                device_capacity,
                columnSize,
                x,
                y
            );
        }
    }
    __syncthreads();
    { // i = BLOCK_SIZE - 1: this column pushes into the neighbouring tile
        int x = blockIdx.x * BLOCK_SIZE + threadIdx.x;
        int y = blockIdx.y * BLOCK_SIZE + (BLOCK_SIZE - 1);
        if (x < rowSize && y < columnSize) {
            Data_PushFromS(
                device_weightS,
                device_height,
                device_capacity,
                columnSize,
                x,
                y
            );
            bool active = Data_PushUp(
                device_weightUp,
                device_weightDown,
                device_height,
                device_capacity,
                columnSize,
                // parameters
                x,
                y
            );
            local_active = local_active || active;
            Data_PushToT(
                device_weightT,
                device_height,
                device_capacity,
                columnSize,
                x,
                y
            );
        }
    }
    if (local_active) {
        *device_active = true;
    }
}
// One downward (y-decreasing) push sweep over a BLOCK_SIZE x BLOCK_SIZE
// tile (mirror of Data_PushUpForLine). Each thread owns one row
// (threadIdx.x) and visits columns i = BLOCK_SIZE-1 down to 1, absorbing
// source flow, pushing down, then draining toward the sink at each cell;
// walking columns high-to-low lets flow cascade downward within the tile.
// Column i = 0 pushes into the neighbouring tile and runs only after the
// block-wide barrier. *device_active is set when any push moved flow.
__global__
void Data_PushDownForLine(
    bool *device_active,
    int *device_weightUp,
    int *device_weightDown,
    int *device_weightS,
    int *device_weightT,
    int *device_height,
    int *device_capacity,
    int rowSize,
    int columnSize
)
{
    bool local_active = false;
    for (int i = BLOCK_SIZE - 1; i >= 1; i --) {
        int x = blockIdx.x * BLOCK_SIZE + threadIdx.x;
        int y = blockIdx.y * BLOCK_SIZE + i;
        if (x < rowSize && y < columnSize) {
            Data_PushFromS(
                device_weightS,
                device_height,
                device_capacity,
                columnSize,
                x,
                y
            );
            bool active = Data_PushDown(
                device_weightUp,
                device_weightDown,
                device_height,
                device_capacity,
                columnSize,
                // parameters
                x,
                y
            );
            local_active = local_active || active;
            Data_PushToT(
                device_weightT,
                device_height,
                device_capacity,
                columnSize,
                x,
                y
            );
        }
    }
    __syncthreads();
    { // i = 0: this column pushes into the neighbouring tile
        int x = blockIdx.x * BLOCK_SIZE + threadIdx.x;
        int y = blockIdx.y * BLOCK_SIZE;
        if (x < rowSize && y < columnSize) {
            Data_PushFromS(
                device_weightS,
                device_height,
                device_capacity,
                columnSize,
                x,
                y
            );
            bool active = Data_PushDown(
                device_weightUp,
                device_weightDown,
                device_height,
                device_capacity,
                columnSize,
                // parameters
                x,
                y
            );
            local_active = local_active || active;
            Data_PushToT(
                device_weightT,
                device_height,
                device_capacity,
                columnSize,
                x,
                y
            );
        }
    }
    if (local_active) {
        *device_active = true;
    }
}
// BFS seeding pass from the sink: cells with positive sink weight get BFS
// level 1, every other cell is reset to -1 (unvisited).
// Fix: the flat thread id within a 2D block is
// threadIdx.y * blockDim.x + threadIdx.x; the original multiplied by
// blockDim.y, which skips/duplicates cells whenever the block is not square.
__global__
void Data_BfsFromT(
    int *device_weightT,
    int *device_height,
    int rowSize,
    int columnSize
)
{
    int tid = (blockIdx.y * gridDim.x + blockIdx.x) * (blockDim.x * blockDim.y) + threadIdx.y * blockDim.x + threadIdx.x;
    if (tid < rowSize * columnSize) {
        int x = tid / columnSize, y = tid % columnSize;
        if (device_weightT[x * columnSize + y] > 0) {
            device_height[x * columnSize + y] = 1;
        } else {
            device_height[x * columnSize + y] = -1;
        }
    }
}
// BFS expansion pass: an unvisited cell (height == -1) that has a neighbour
// at BFS level k reachable through a positive residual edge is labelled
// level k + 1; *device_active is set when any cell was labelled (so the host
// can loop until the frontier is empty).
// Fix: the flat thread id within a 2D block is
// threadIdx.y * blockDim.x + threadIdx.x; the original multiplied by
// blockDim.y, which mis-indexes cells for non-square blocks.
__global__
void Data_BfsLevelK(
    bool *device_active,
    int *device_weightUp,
    int *device_weightDown,
    int *device_weightLeft,
    int *device_weightRight,
    int *device_height,
    int rowSize,
    int columnSize,
    // parameter
    int k
)
{
    int tid = (blockIdx.y * gridDim.x + blockIdx.x) * (blockDim.x * blockDim.y) + threadIdx.y * blockDim.x + threadIdx.x;
    if (tid < rowSize * columnSize) {
        int x = tid / columnSize, y = tid % columnSize;
        int centerIndex = x * columnSize + y;
        int leftIndex = (x - 1) * columnSize + y;
        int rightIndex = (x + 1) * columnSize + y;
        int upIndex = x * columnSize + (y + 1);
        int downIndex = x * columnSize + (y - 1);
        // boundary tests short-circuit before the neighbour loads, so the
        // out-of-range indices computed above are never dereferenced
        if ( device_height[centerIndex] == -1 && (
            ( x != 0 && device_height[leftIndex] == k && device_weightLeft[centerIndex] > 0 ) || // left
            ( x != rowSize - 1 && device_height[rightIndex] == k && device_weightRight[centerIndex] > 0 ) || // right
            ( y != columnSize - 1 && device_height[upIndex] == k && device_weightUp[centerIndex] > 0 ) || // up
            ( y != 0 && device_height[downIndex] == k && device_weightDown[centerIndex] > 0 ) // down
        ) ) {
            device_height[centerIndex] = k + 1;
            *device_active = true;
        }
    }
}
|
10,447 | #include <stdint.h>
#include <stdio.h>
#define HISTOGRAM256_BIN_COUNT 256
#define HISTOGRAM256_THREADBLOCK_SIZE 1024
static const uint PARTIAL_HISTOGRAM256_COUNT = 240;
// CPU reference histogram: one increment per input byte; `histo` must have
// 256 zero-initialised bins.
void sequential_Histogram(uint8_t * data,int length, uint32_t * histo){
    for (int idx = 0; idx < length; ++idx) {
        ++histo[data[idx]];
    }
}
// Block-partitioned histogram: each thread owns one contiguous section of
// the input (ceil(size / totalThreads) bytes) and bumps the matching global
// bin with an atomic per element.
__global__ void histo_kernel(unsigned char* input,long size, unsigned int* histo){
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    const int totalThreads = blockDim.x * gridDim.x;
    const int section = (size + totalThreads - 1) / totalThreads;
    const int base = tid * section;
    for (int k = 0; k < section; ++k) {
        const long pos = base + k;
        if (pos < size) {
            atomicAdd(&histo[input[pos]], 1);
        }
    }
}
// Interleaved (grid-stride) histogram: consecutive threads read consecutive
// bytes, so the loads coalesce; every increment is a global atomic.
__global__ void histo_kernel_interleave(unsigned char* input,long size, unsigned int* histo){
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = threadIdx.x + blockIdx.x * blockDim.x; i < size; i += stride) {
        atomicAdd(&histo[input[i]], 1);
    }
}
// Privatized histogram: each block builds its own num_bins-bin histogram in
// shared memory, then merges it into the global bins with one atomic per bin
// per block. Launch with num_bins * sizeof(unsigned int) bytes of dynamic
// shared memory.
__global__ void histogram_privatized_kernel(unsigned char* input, unsigned int* bins, unsigned int num_elements,unsigned int num_bins){
    extern __shared__ unsigned int histo_s[];
    const unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;

    // clear the block-private histogram
    for (unsigned int b = threadIdx.x; b < num_bins; b += blockDim.x) {
        histo_s[b] = 0u;
    }
    __syncthreads();

    // grid-stride accumulation into shared memory
    for (unsigned int i = gid; i < num_elements; i += stride) {
        atomicAdd(&histo_s[input[i]], 1);
    }
    __syncthreads();

    // merge the private counts into the global histogram
    for (unsigned int b = threadIdx.x; b < num_bins; b += blockDim.x) {
        atomicAdd(&bins[b], histo_s[b]);
    }
}
// Privatized histogram with run aggregation: consecutive identical values seen
// by a thread are accumulated locally and flushed with a single shared-memory
// atomic, reducing atomic traffic on skewed inputs.
// Fixes vs. the original:
//  * a finished run must be flushed into prev_index (the bin the run belongs
//    to), not into the new value's bin;
//  * the flush condition was `accumulator >= 0`, which is always true; it is
//    now `> 0` so an empty run adds nothing;
//  * the final run was never flushed after the loop, undercounting the last
//    value each thread saw.
// Launch with num_bins * sizeof(unsigned int) bytes of dynamic shared memory.
__global__ void histogram_privatized_aggregation_kernel(unsigned char* input, unsigned int* bins, unsigned int num_elements, unsigned int num_bins){
    unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    extern __shared__ unsigned int histo_s[];
    // clear the block-private histogram
    for(unsigned int binIdx = threadIdx.x;binIdx < num_bins;binIdx += blockDim.x){
        histo_s[binIdx] = 0u;
    }
    __syncthreads();
    unsigned int prev_index = 0;
    unsigned int accumulator = 0;
    for(unsigned int i = tid;i<num_elements;i+= blockDim.x * gridDim.x){
        unsigned int cur_index = input[i];
        if(cur_index != prev_index){
            // flush the finished run into its own bin
            if(accumulator > 0) atomicAdd(&(histo_s[prev_index]),accumulator);
            accumulator = 1;
            prev_index = cur_index;
        }else{
            accumulator++;
        }
    }
    // flush the thread's final run
    if(accumulator > 0) atomicAdd(&(histo_s[prev_index]),accumulator);
    __syncthreads();
    // merge the private counts into the global histogram
    for(unsigned int binIdx = threadIdx.x ;binIdx < num_bins;binIdx += blockDim.x){
        atomicAdd(&(bins[binIdx]),histo_s[binIdx]);
    }
}
// Host driver: generates 128 MiB of random bytes, histograms them on the GPU
// (privatized-shared-memory kernel) and on the CPU, then compares the 256 bins.
// Fix: the device histogram was never zeroed — the kernels only accumulate, so
// the GPU result contained whatever garbage was in the allocation (the old
// cudaMemset was commented out and also used byteCount, the wrong size).
// The commented-out alternative kernel launches were removed; non-standard
// `uint` was replaced by `unsigned int`.
int main(){
    int PassFailFlag = 1;
    uint8_t *h_Data;
    uint32_t *h_HistogramCPU,*h_HistogramGPU;
    uint8_t *d_Data;
    uint32_t *d_Historgram;
    unsigned int byteCount = 128 * 1048576;
    int nIter = 1;
    // host buffers
    h_Data = (uint8_t*)malloc(byteCount);
    h_HistogramCPU = (uint32_t *)malloc(HISTOGRAM256_BIN_COUNT * sizeof(uint32_t));
    h_HistogramGPU = (uint32_t *)malloc(HISTOGRAM256_BIN_COUNT * sizeof(uint32_t));
    printf("...generating input data\n");
    srand(2019);
    for(uint32_t i = 0;i<byteCount;i++){
        h_Data[i] = rand() % 256;
    }
    memset(h_HistogramCPU,0,sizeof(uint32_t)*HISTOGRAM256_BIN_COUNT);
    printf("...allocating GPU memory and copying input data\n");
    cudaMalloc((void**)&d_Data,byteCount);
    cudaMalloc((void**)&d_Historgram,HISTOGRAM256_BIN_COUNT * sizeof(uint32_t));
    cudaMemcpy(d_Data,h_Data,byteCount,cudaMemcpyHostToDevice);
    // BUG FIX: the histogram kernel accumulates, so its output must start at zero
    cudaMemset(d_Historgram, 0, HISTOGRAM256_BIN_COUNT * sizeof(uint32_t));
    cudaDeviceSynchronize();
    // time the kernel with CUDA events
    cudaEvent_t start;
    cudaEventCreate(&start);
    cudaEvent_t stop;
    cudaEventCreate(&stop);
    cudaEventRecord(start,NULL);
    histogram_privatized_kernel<<<PARTIAL_HISTOGRAM256_COUNT,HISTOGRAM256_THREADBLOCK_SIZE,256 * sizeof(int)>>>(d_Data,d_Historgram,byteCount,256);
    cudaEventRecord(stop, NULL);
    cudaDeviceSynchronize();
    float msecTotal = 0.0f;
    cudaEventElapsedTime(&msecTotal,start,stop);
    float gpuTime = (msecTotal / nIter) * 0.001;
    printf("histogram basic time = %.5f sec, %.4f MB/sec \n",gpuTime,(byteCount*1e-6)/gpuTime);
    cudaMemcpy(h_HistogramGPU,d_Historgram,HISTOGRAM256_BIN_COUNT * sizeof(uint32_t),cudaMemcpyDeviceToHost);
    // CPU reference and per-bin comparison
    sequential_Histogram(h_Data,byteCount,h_HistogramCPU);
    for(unsigned int i = 0;i<HISTOGRAM256_BIN_COUNT;i++){
        if(h_HistogramCPU[i] != h_HistogramGPU[i]){
            PassFailFlag = 0;
            printf("index i = %d, CPU = %d, GPU = %d\n",i,h_HistogramCPU[i],h_HistogramGPU[i]);
        }
    }
    printf(PassFailFlag ? " ...histograms match\n\n" : " ***histograms do not match!!!***\n\n");
    cudaFree(d_Historgram);
    cudaFree(d_Data);
    free(h_HistogramCPU);
    free(h_HistogramGPU);
    free(h_Data);
}
|
10,448 | #include "includes.h"
extern "C" {
#ifndef DTYPE
#define DTYPE float
#endif
}
// Compares two strided 2D tensor views element by element; every mismatch at
// a valid (gid_n, gid_c) position bumps eq_flag[0], so a zero afterwards
// means the views are equal.
// Fix: `eq_flag[0]++` from many threads is a read-modify-write data race that
// loses counts; atomicAdd makes the mismatch count (and in particular
// zero vs. non-zero) well defined.
__global__ void tensor_2d_equals (const int n, const int c, const DTYPE* x, const int offset_x, const int n_x, const int c_x, const DTYPE* y, const int offset_y, const int n_y, const int c_y, int* eq_flag) {
    const int gid_n = blockIdx.x * blockDim.x + threadIdx.x;
    const int gid_c = blockIdx.y * blockDim.y + threadIdx.y;
    const bool valid = (gid_n < n) && (gid_c < c);
    if (valid) {
        const int ix = offset_x + gid_n * n_x + gid_c * c_x;
        const int iy = offset_y + gid_n * n_y + gid_c * c_y;
        if (x[ix] != y[iy]){
            atomicAdd(eq_flag, 1);
        }
    }
} |
10,449 | #include <iostream>
#include <iomanip>
#include <cuda_runtime.h>
#include <stdio.h>
#include <sys/time.h>
// Wall-clock timestamp in seconds (microsecond resolution, via gettimeofday).
double cpu_seconds()
{
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec + (double)now.tv_usec * 1.e-6;
}
// Prints the first N elements of `array` in the form "[ a b c ]".
void display_array(unsigned int *array, unsigned int N)
{
    std::cout << "[ ";
    for (unsigned int k = 0; k < N; k++)
    {
        std::cout << array[k] << " ";
    }
    std::cout << "]" << std::endl;
}
// Fills `ip` with small pseudo-random values in [0, 8]
// ((rand() & 0xFF) / 30, integer division). Seeding is left to the caller.
void initial_data(unsigned int *ip, unsigned int size)
{
    for (unsigned int k = 0; k < size; k++)
    {
        ip[k] = (unsigned int)( (rand() & 0xFF) / 30 );
    }
}
// Elementwise sum on the GPU: dev_c[i] = dev_a[i] + dev_b[i].
// NOTE(review): there is no bounds check, so the launch must cover exactly
// the array length (main's grid * block == N) — confirm before reusing with
// other configurations.
__global__ void sum_arrays_on_gpu(unsigned int *dev_a, unsigned int *dev_b, unsigned int *dev_c)
{
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    dev_c[idx] = dev_a[idx] + dev_b[idx];
}
// Elementwise product on the GPU: dev_c[i] = dev_a[i] * dev_b[i].
// NOTE(review): no bounds check — the launch must cover exactly the array
// length (main's grid * block == N).
__global__ void multi_arrays_on_gpu(unsigned int *dev_a, unsigned int *dev_b, unsigned int *dev_c)
{
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    dev_c[idx] = dev_a[idx] * dev_b[idx];
}
// Sums the first N elements on the CPU (despite the name, a plain loop);
// the unsigned total is returned as int, matching the original signature.
int recursive_reduce(unsigned int *array, unsigned int N)
{
    unsigned int total = 0;
    for (unsigned int k = 0; k < N; k++)
    {
        total += array[k];
    }
    return total;
}
// Neighboured pairwise reduction on the GPU (with warp divergence): each
// block reduces its blockDim.x-wide slice of global_idata in place and
// writes its partial sum to global_odata[blockIdx.x]; the host finishes the
// reduction over the per-block partials.
// Fix: the original did `if (idx >= N) return;` before the loop, so a
// partially filled last block reached the __syncthreads() below with only
// some of its threads — undefined behaviour. Now every thread stays in the
// loop and only in-range threads touch data, which also keeps all accesses
// in bounds. Results are unchanged when N is a multiple of blockDim.x.
__global__ void reduce_neighbored(unsigned int *global_idata, unsigned int *global_odata, unsigned int N)
{
    // thread id within the block and within the grid
    unsigned int tid = threadIdx.x;
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // this block's slice of the input
    unsigned int *idata = global_idata + blockIdx.x * blockDim.x;
    // in-block tree reduction
    for (int stride = 1; stride < blockDim.x; stride *= 2)
    {
        if ((tid % (2 * stride)) == 0 && idx + stride < N)
        {
            idata[tid] += idata[tid + stride];
        }
        __syncthreads();
    }
    // thread 0 publishes the block's partial sum
    if (tid == 0 && idx < N) global_odata[blockIdx.x] = idata[0];
}
// GPU dot product (neighboured reduction, with warp divergence): multiplies
// dev_a and dev_b elementwise into dev_c, then each block reduces its slice
// of dev_c; per-block partial sums land in global_odata and the host
// finishes the reduction.
// NOTE(review): the `if (idx >= N) return;` sits before the __syncthreads()
// inside the loop — for a partially filled last block the returned threads
// skip the barrier, which is undefined behaviour. Safe only when N is a
// multiple of blockDim.x (as in main, N = 1<<24 with block 512).
__global__ void dot_on_gpu_1(unsigned int *dev_a, unsigned int *dev_b, unsigned int *dev_c, unsigned int *global_odata, unsigned int N)
{
    // global thread id
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // elementwise multiply
    dev_c[idx] = dev_a[idx] * dev_b[idx];
    __syncthreads();
    // thread id within the block
    unsigned int tid = threadIdx.x;
    // pointer to this block's slice of dev_c
    unsigned int *idata = dev_c + blockIdx.x * blockDim.x;
    // bounds check
    if (idx >= N) return;
    // in-block reduction
    for (int stride = 1; stride < blockDim.x; stride *= 2)
    {
        if ((tid % (2 * stride)) == 0)
        {
            idata[tid] += idata[tid + stride];
        }
        __syncthreads();
    }
    // thread 0 stores the block's partial sum to global memory
    if (tid == 0) global_odata[blockIdx.x] = idata[0];
}
// GPU dot product (neighboured reduction, reduced warp divergence): same as
// dot_on_gpu_1 but maps consecutive thread ids onto the pairs to combine,
// so whole warps retire early instead of diverging within a warp.
// NOTE(review): the `if (idx >= N) return;` sits before the __syncthreads()
// inside the loop — undefined behaviour for a partially filled last block;
// safe only when N is a multiple of blockDim.x.
__global__ void dot_on_gpu_2(unsigned int *dev_a, unsigned int *dev_b, unsigned int *dev_c, unsigned int *global_odata, unsigned int N)
{
    // global thread id
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // elementwise multiply
    dev_c[idx] = dev_a[idx] * dev_b[idx];
    __syncthreads();
    // thread id within the block
    unsigned int tid = threadIdx.x;
    // pointer to this block's slice of dev_c
    unsigned int *idata = dev_c + blockIdx.x * blockDim.x;
    // bounds check
    if (idx >= N) return;
    // in-block reduction
    for (int stride = 1; stride < blockDim.x; stride *= 2)
    {
        // map consecutive tids onto the elements that need pairing
        unsigned int index = 2 * stride * tid;
        if (index < blockDim.x)
        {
            idata[index] += idata[index + stride];
        }
        __syncthreads();
    }
    // thread 0 stores the block's partial sum to global memory
    if (tid == 0) global_odata[blockIdx.x] = idata[0];
}
// GPU dot product (interleaved-pair reduction): multiplies dev_a and dev_b
// elementwise into dev_c, then halves the active range each step
// (idata[tid] += idata[tid + stride]), which keeps global accesses
// contiguous; per-block partial sums land in global_odata.
// NOTE(review): the `if (idx >= N) return;` sits before the __syncthreads()
// inside the loop — undefined behaviour for a partially filled last block;
// safe only when N is a multiple of blockDim.x.
__global__ void dot_on_gpu_3(unsigned int *dev_a, unsigned int *dev_b, unsigned int *dev_c, unsigned int *global_odata, unsigned int N)
{
    // global thread id
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // elementwise multiply
    dev_c[idx] = dev_a[idx] * dev_b[idx];
    __syncthreads();
    // thread id within the block
    unsigned int tid = threadIdx.x;
    // pointer to this block's slice of dev_c
    unsigned int *idata = dev_c + blockIdx.x * blockDim.x;
    // bounds check
    if (idx >= N) return;
    // in-block reduction, interleaved pairs
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1)
    {
        if (tid < stride)
        {
            idata[tid] += idata[tid + stride];
        }
        __syncthreads();
    }
    // thread 0 stores the block's partial sum to global memory
    if (tid == 0) global_odata[blockIdx.x] = idata[0];
}
// GPU dot product, unrolling 2 data blocks per thread block: folds
// dev_c[idx + blockDim.x] into dev_c[idx], then runs an interleaved
// reduction over the block's 2*blockDim.x-wide slice. Expects dev_c to
// already hold the elementwise products (main runs multi_arrays_on_gpu
// first) and a launch with grid.x/2 blocks; dev_a/dev_b are unused but kept
// for a uniform signature.
// Fix: the original computed idx as blockIdx.x + blockDim.x * 2 +
// threadIdx.x; the block offset must be blockIdx.x * blockDim.x * 2, the
// same offset the idata pointer below already uses.
__global__ void dot_on_gpu_4(unsigned int *dev_a, unsigned int *dev_b, unsigned int *dev_c, unsigned int *global_odata, unsigned int N)
{
    // first element of this block's 2-wide span
    unsigned int idx = blockIdx.x * blockDim.x * 2 + threadIdx.x;
    // thread id within the block
    unsigned int tid = threadIdx.x;
    // pointer to this block's slice of dev_c
    unsigned int *idata = dev_c + blockIdx.x * blockDim.x * 2;
    // fold the second data block into the first
    if (idx + blockDim.x < N)
    {
        dev_c[idx] += dev_c[idx + blockDim.x];
    }
    __syncthreads();
    // interleaved in-block reduction
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1)
    {
        if (tid < stride)
        {
            idata[tid] += idata[tid + stride];
        }
        __syncthreads();
    }
    // thread 0 publishes the block's partial sum
    if (tid == 0) global_odata[blockIdx.x] = idata[0];
}
// GPU dot product, unrolling 8 data blocks per thread block: folds 8
// consecutive blockDim.x-wide chunks of dev_c into the first, then runs an
// interleaved reduction over it. Expects dev_c to already hold the
// elementwise products and a launch with grid.x/8 blocks; dev_a/dev_b are
// unused but kept for a uniform signature.
// Fix: the original computed idx as blockIdx.x + blockDim.x * 8 +
// threadIdx.x; the block offset must be blockIdx.x * blockDim.x * 8, the
// same offset the idata pointer below already uses.
__global__ void dot_on_gpu_5(unsigned int *dev_a, unsigned int *dev_b, unsigned int *dev_c, unsigned int *global_odata, unsigned int N)
{
    // first element of this block's 8-wide span
    unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x;
    // thread id within the block
    unsigned int tid = threadIdx.x;
    // pointer to this block's slice of dev_c
    unsigned int *idata = dev_c + blockIdx.x * blockDim.x * 8;
    // fold 8 chunks into the first one
    if (idx + 7 * blockDim.x < N)
    {
        int a1 = dev_c[idx];
        int a2 = dev_c[idx + blockDim.x];
        int a3 = dev_c[idx + 2 * blockDim.x];
        int a4 = dev_c[idx + 3 * blockDim.x];
        int b1 = dev_c[idx + 4 * blockDim.x];
        int b2 = dev_c[idx + 5 * blockDim.x];
        int b3 = dev_c[idx + 6 * blockDim.x];
        int b4 = dev_c[idx + 7 * blockDim.x];
        dev_c[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4;
    }
    __syncthreads();
    // interleaved in-block reduction
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1)
    {
        if (tid < stride)
        {
            idata[tid] += idata[tid + stride];
        }
        __syncthreads();
    }
    // thread 0 publishes the block's partial sum
    if (tid == 0) global_odata[blockIdx.x] = idata[0];
}
// Host driver: computes a dot product of two 2^24-element vectors on the CPU
// (reference) and with the unroll-by-2 and unroll-by-8 GPU kernels, printing
// a result/time/speed-up table.
// Fixes vs. the original:
//  * memset takes a byte count — gpu_ref was cleared with N bytes and
//    h_odata with grid.x bytes; both are now full-size clears;
//  * dot_on_gpu_5 was launched with dev_a as the third argument, so it
//    reduced the raw inputs instead of the freshly computed products in
//    dev_c.
// The large commented-out experiments with dot_on_gpu_1..3 were removed.
int main()
{
    std::cout << "Strating...\n";
    // select the device
    int dev = 0;
    cudaSetDevice(dev);
    // problem size
    unsigned int N = 1<<24;
    // launch configuration
    dim3 block(512, 1);
    dim3 grid((N+block.x-1)/block.x, 1);
    std::cout << "grid " << grid.x << " block " << block.x << std::endl;
    // host allocations
    size_t data_size = N*sizeof(int);
    unsigned int *host_a, *host_b, *gpu_ref, *h_odata;
    host_a = (unsigned int*)malloc(data_size);
    host_b = (unsigned int*)malloc(data_size);
    gpu_ref = (unsigned int*)malloc(data_size);
    h_odata = (unsigned int *)malloc(grid.x * sizeof(unsigned int));
    // fill the inputs
    initial_data(host_a, N);
    initial_data(host_b, N);
    // BUG FIX: clear the full buffers (sizes are in bytes)
    memset(gpu_ref, 0, data_size);
    memset(h_odata, 0, grid.x * sizeof(unsigned int));
    // device allocations
    unsigned int *dev_a, *dev_b, *dev_c, *global_odata;
    cudaMalloc((unsigned int**)&dev_a, data_size);
    cudaMalloc((unsigned int**)&dev_b, data_size);
    cudaMalloc((unsigned int**)&dev_c, data_size);
    cudaMalloc((unsigned int**)&global_odata, grid.x * sizeof(unsigned int));
    // copy the inputs host -> device
    cudaMemcpy(dev_a, host_a, data_size, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, host_b, data_size, cudaMemcpyHostToDevice);
    // result table header
    std::cout << std::setw(30) << "Type"
              << std::setw(20) << "Result"
              << std::setw(20) << "Time(ms)"
              << std::setw(20) << "1 step acceleration"
              << std::setw(20) << "Total acceleration"
              << std::endl;
    // full reduction on the CPU (reference)
    cudaDeviceSynchronize();
    double i_start = cpu_seconds();
    unsigned int cpu_result = 0;
    for (int i = 0; i < N; i++)
    {
        cpu_result += ( host_a[i] * host_b[i] );
    }
    double i_elaps = cpu_seconds() - i_start;
    std::cout << std::setw(30) << "CPU reduce recursived"
              << std::setw(20) << cpu_result
              << std::setw(20) << i_elaps * 1000
              << std::endl;
    // dot 4: products into dev_c, then unroll-by-2 reduction
    multi_arrays_on_gpu<<<grid, block>>>(dev_a, dev_b, dev_c);
    cudaDeviceSynchronize();
    i_start = cpu_seconds();
    dot_on_gpu_4<<<grid.x/2, block>>>(dev_a, dev_b, dev_c, global_odata, N);
    cudaDeviceSynchronize();
    double i_elaps_4 = cpu_seconds() - i_start;
    cudaMemcpy(h_odata, global_odata, grid.x/2 * sizeof(unsigned int), cudaMemcpyDeviceToHost);
    unsigned int gpu_result_4 = recursive_reduce(h_odata, grid.x/2); // final reduction on the CPU
    std::cout << std::setw(30) << "GPU unrolling 2 data blocks"
              << std::setw(20) << gpu_result_4
              << std::setw(20) << i_elaps_4 * 1000
              << std::setw(20) << i_elaps / i_elaps_4
              << std::setw(20) << i_elaps / i_elaps_4
              << std::endl;
    // dot 5: products into dev_c, then unroll-by-8 reduction
    multi_arrays_on_gpu<<<grid, block>>>(dev_a, dev_b, dev_c);
    cudaDeviceSynchronize();
    i_start = cpu_seconds();
    // BUG FIX: reduce the products in dev_c, not the raw input dev_a
    dot_on_gpu_5<<<grid.x/8, block>>>(dev_a, dev_b, dev_c, global_odata, N);
    cudaDeviceSynchronize();
    double i_elaps_5 = cpu_seconds() - i_start;
    cudaMemcpy(h_odata, global_odata, grid.x/8 * sizeof(unsigned int), cudaMemcpyDeviceToHost);
    unsigned int gpu_result_5 = recursive_reduce(h_odata, grid.x/8); // final reduction on the CPU
    std::cout << std::setw(30) << "GPU unrolling 8 data blocks"
              << std::setw(20) << gpu_result_5
              << std::setw(20) << i_elaps_5 * 1000
              << std::setw(20) << i_elaps / i_elaps_5
              << std::setw(20) << i_elaps / i_elaps_5
              << std::endl;
    // release device memory
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    cudaFree(global_odata);
    // release host memory
    free(host_a);
    free(host_b);
    free(gpu_ref);
    free(h_odata);
    std::cout << "End...\n";
    return 0;
} |
10,450 | #include "includes.h"
// Single-block dot product (core from ScalarMultiplication_example1): first
// computes c[i] = a[i] * b[i] with a block-stride loop, then repeatedly
// folds the upper half of c onto the lower half until c[0] holds the sum.
// NOTE(review): despite the name "add", this computes a dot product.
// NOTE(review): the halving loop (step = N/2, then step /= 2) silently
// drops the middle element whenever an intermediate step is odd, so the
// result is complete only for power-of-two N — confirm callers guarantee
// that.
// NOTE(review): the reduction always indexes from 0 regardless of blockIdx,
// so this appears intended for a single-block launch — verify.
__global__ void add(long* a, long* b, long* c, long N) { //core from ScalarMultiplication_example1
    long baseIdx = threadIdx.x;
    long idx = baseIdx;
    // elementwise multiply, block-stride
    while (idx < N)
    {
        c[idx] = a[idx] * b[idx];
        idx += blockDim.x;
    }
    __syncthreads();
    // tree reduction: fold c[idx + step] into c[idx]
    long step = N / 2;
    while (step != 0) {
        idx = baseIdx;
        while (idx < step) {
            c[idx] += c[idx + step];
            idx += blockDim.x;
        }
        step /= 2;
        __syncthreads();
    }
} |
10,451 | #include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#include<sys/time.h>
#include<string.h>
#include<assert.h>
/* Terminate with a diagnostic when a CUDA runtime call did not succeed.
 * line is the caller's __LINE__, used to locate the failing call. */
void safe_call(cudaError_t ret, int line)
{
    if (ret == cudaSuccess)
        return;
    printf("Error at line %d : %s\n", line, cudaGetErrorString(ret));
    exit(-1);
}
/* Populate arr[0..len-1] with uniform pseudo-random doubles in [0,1). */
void fill_mat(double *arr, int len)
{
    for (int i = 0; i < len; ++i)
        arr[i] = drand48();
}
/* Time one device-involved copy with CUDA events and print its effective
 * bandwidth.  use_async selects cudaMemcpyAsync on the default stream
 * (the pinned-memory path) vs. the blocking cudaMemcpy.  The *2.0 in the
 * bandwidth formula (read + write) matches the original reporting. */
static void timed_copy(void *dst, const void *src, size_t bytes,
                       cudaMemcpyKind kind, int use_async,
                       cudaEvent_t start, cudaEvent_t stop,
                       const char *label)
{
    float diff;
    double time, bandwidth;
    safe_call(cudaEventRecord(start, 0), __LINE__);
    if (use_async)
        safe_call(cudaMemcpyAsync(dst, src, bytes, kind, 0), __LINE__);
    else
        safe_call(cudaMemcpy(dst, src, bytes, kind), __LINE__);
    safe_call(cudaEventRecord(stop, 0), __LINE__);
    safe_call(cudaEventSynchronize(stop), __LINE__);
    safe_call(cudaEventElapsedTime(&diff, start, stop), __LINE__);
    time = diff * 1.0e-3;                  /* ms -> s */
    bandwidth = (bytes * 2.0) / (1024.0 * 1024.0 * 1024.0 * time);
    printf("%s Bandwidth = %f GB/s\n", label, bandwidth);
}

/* Host<->device bandwidth benchmark.
 * Usage: exec [-pageable|-pinned] <size-in-doubles>
 * Fixes vs. the original: (1) unbounded strcpy of argv[1] into a
 * char[10] overflowed for long arguments -- the length is now checked;
 * (2) the size is validated; (3) the pageable and pinned paths shared
 * ~60 duplicated lines, now factored through timed_copy(). */
int main(int argc, char **argv)
{
    int SIZE, MODE, i;               /* MODE: 0 = pageable, 1 = pinned */
    char memmode[10], tempmode[10];
    if (argc < 2 || argc > 3)
    {
        printf("Syntax : exec -<memory mode> <size>\n");
        exit(-1);
    }
    else if (argc == 2)
    {
        MODE = 0;
        SIZE = atoi(argv[1]);
    }
    else
    {
        /* Bounded copy: reject anything that would overflow tempmode. */
        if (strlen(argv[1]) >= sizeof(tempmode))
        {
            printf("Memory modes pinned and pageable only\n");
            exit(-1);
        }
        strcpy(tempmode, argv[1]);
        i = 0;
        while (tempmode[i] == '-') { i++; }
        if (i == 0)
        {
            printf("Syntax : exec -<memory mode> <size>\n");
            exit(-1);
        }
        strcpy(memmode, &tempmode[i]);
        if (strcmp(memmode, "pinned") == 0)
            MODE = 1;
        else if (strcmp(memmode, "pageable") == 0)
            MODE = 0;
        else
        {
            printf("Memory modes pinned and pageable only\n");
            exit(-1);
        }
        SIZE = atoi(argv[2]);
    }
    if (SIZE <= 0)
    {
        printf("Error : invalid size\n");
        exit(-1);
    }

    double *h_A, *h_B;
    double *d_A, *d_B;
    cudaEvent_t start, stop;
    double bandwidth;
    double time_start, time_end;
    struct timeval tv;
    struct timezone tz;
    size_t bytes = (size_t)SIZE * sizeof(double);

    safe_call(cudaEventCreate(&start), __LINE__);
    safe_call(cudaEventCreate(&stop), __LINE__);

    /* Host buffers: plain malloc (pageable) or page-locked (pinned). */
    if (MODE == 0)
    {
        h_A = (double *) malloc(bytes);
        h_B = (double *) malloc(bytes);
        if (h_A == NULL || h_B == NULL)
        {
            printf("Error : host memory allocation\n");
            exit(-1);
        }
    }
    else
    {
        safe_call(cudaMallocHost((void **)&h_A, bytes), __LINE__);
        safe_call(cudaMallocHost((void **)&h_B, bytes), __LINE__);
    }
    safe_call(cudaMalloc((void **)&d_A, bytes), __LINE__);
    safe_call(cudaMalloc((void **)&d_B, bytes), __LINE__);
    fill_mat(h_A, SIZE);
    printf(MODE == 0 ? "Pageable Memory\n" : "Pinned Memory\n");

    /* Host-to-host baseline, timed with gettimeofday. */
    gettimeofday(&tv, &tz);
    time_start = (double)tv.tv_sec + (double)tv.tv_usec / 1000000.0;
    memcpy((void *)h_B, (void *)h_A, bytes);
    gettimeofday(&tv, &tz);
    time_end = (double)tv.tv_sec + (double)tv.tv_usec / 1000000.0;
    bandwidth = (bytes * 2.0) / (1024.0 * 1024.0 * 1024.0 * (time_end - time_start));
    printf("CPU Memcpy H2H Bandwidth = %f GB/s\n", bandwidth);

    /* H2D -> D2D -> D2H round trip; async copies on the pinned path. */
    timed_copy(d_A, h_A, bytes, cudaMemcpyHostToDevice, MODE, start, stop, "CUDA Memcpy H2D");
    timed_copy(d_B, d_A, bytes, cudaMemcpyDeviceToDevice, MODE, start, stop, "CUDA Memcpy D2D");
    timed_copy(h_B, d_B, bytes, cudaMemcpyDeviceToHost, MODE, start, stop, "CUDA Memcpy D2H");

    /* The round trip must reproduce the source data bit-for-bit. */
    for (i = 0; i < SIZE; i++)
        assert(h_A[i] == h_B[i]);

    safe_call(cudaFree(d_A), __LINE__);
    safe_call(cudaFree(d_B), __LINE__);
    if (MODE == 0)
    {
        free(h_A);
        free(h_B);
    }
    else
    {
        safe_call(cudaFreeHost(h_A), __LINE__);
        safe_call(cudaFreeHost(h_B), __LINE__);
    }
    safe_call(cudaEventDestroy(start), __LINE__);
    safe_call(cudaEventDestroy(stop), __LINE__);
    return 0;
}
|
10,452 | #include "stdio.h"
#include "RoomFusionDLL.cuh"
// Initialize CUDA: select the first device whose compute capability is at
// least 1.x.  Returns false when no usable device is present.
bool InitCUDA()
{
    int count = 0;
    cudaGetDeviceCount(&count);
    if (count == 0) {
        fprintf(stderr, "There is no device.\n");
        return false;
    }
    int dev;
    for (dev = 0; dev < count; ++dev) {
        cudaDeviceProp prop;
        if (cudaGetDeviceProperties(&prop, dev) == cudaSuccess && prop.major >= 1) {
            break;  // first capable device wins
        }
    }
    if (dev == count) {
        fprintf(stderr, "There is no device supporting CUDA 1.x.\n");
        return false;
    }
    cudaSetDevice(dev);
    fprintf(stderr, "CUDA Ready!\n");
    return true;
}
// Wrapper: launch the depth-masking kernel.  One block per image column,
// one thread per row -- NOTE(review): this requires imageHeight to fit in
// a single block's thread limit (typically <= 1024); confirm upstream.
void runGPUApplyDepth(unsigned char* image, float* depth, int imageWidth, int imageHeight, float threshold){
    dim3 threadsPerBlock(imageHeight, 1);
    dim3 blocksPerGrid(imageWidth, 1);
    gpuApplyDepth << <blocksPerGrid, threadsPerBlock >> >(image, depth, imageWidth, imageHeight, threshold);
}
// Wrapper: launch the four-sided region-correction kernel.  Each boundary
// line (left/right/top/down) is described by slope, intercept and two
// points it passes through.  One block per column, one thread per row.
void runGPUApplyCorrection(float* depth, int imageWidth, int imageHeight,
    float left_slope, float left_inter, float left_p1x, float left_p1y, float left_p2x, float left_p2y,
    float right_slope, float right_inter, float right_p1x, float right_p1y, float right_p2x, float right_p2y,
    float top_slope, float top_inter, float top_p1x, float top_p1y, float top_p2x, float top_p2y,
    float down_slope, float down_inter, float down_p1x, float down_p1y, float down_p2x, float down_p2y
    ){
    dim3 threadsPerBlock(imageHeight, 1);
    dim3 blocksPerGrid(imageWidth, 1);
    gpuApplyCorrection << <blocksPerGrid, threadsPerBlock >> >(depth, imageWidth, imageHeight,
        left_slope, left_inter, left_p1x, left_p1y, left_p2x, left_p2y,
        right_slope, right_inter, right_p1x, right_p1y, right_p2x, right_p2y,
        top_slope, top_inter, top_p1x, top_p1y, top_p2x, top_p2y,
        down_slope, down_inter, down_p1x, down_p1y, down_p2x, down_p2y
        );
}
// Wrapper: launch the horizontal depth-shift kernel.
// One block per column, one thread per row.
void runGPUDepthShift(float* dst, float* src, int imageWidth, int imageHeight){
    dim3 threadsPerBlock(imageHeight, 1);
    dim3 blocksPerGrid(imageWidth, 1);
    gpuDepthShift << <blocksPerGrid, threadsPerBlock >> >(dst, src, imageWidth, imageHeight);
}
// Shift each depth sample horizontally by a disparity derived from its
// depth value (samples with non-positive depth stay in place).
// Grid: blockIdx.x = column, threadIdx.x = row.
__global__ void gpuDepthShift(float* dst, float* src, int imageWidth, int imageHeight){
    int col = blockIdx.x;
    int row = threadIdx.x;
    if (col >= imageWidth || row >= imageHeight){  // stay inside the image
        return;
    }
    int idx = row * imageWidth + col;
    float realdepth = src[idx];
    if (realdepth > 0.0f){
        // disparity in pixels from depth; constants are fixed camera/display
        // parameters -- NOTE(review): origin of 12, 70 and 11 not visible here
        float shift_in_cm = (12 * 70) / (realdepth * 100);
        int moveinpixel = (int)(shift_in_cm * (11));
        int shifted_col = col - moveinpixel;
        if (shifted_col >= 0){
            idx = row * imageWidth + shifted_col;  // write at the shifted column
        }
    }
    dst[idx] = realdepth;
}
// Mask out far pixels: any pixel whose depth exceeds threshold is replaced
// with the marker color (0,128,0,0).  image is 4 bytes per pixel, depth is
// one float per pixel.  Grid: blockIdx.x = column, threadIdx.x = row.
__global__ void gpuApplyDepth(unsigned char* image, float* depth, int imageWidth, int imageHeight, float threshold){
    int col = blockIdx.x;
    int row = threadIdx.x;
    if (col >= imageWidth || row >= imageHeight){  // stay inside the image
        return;
    }
    int positionIndex = row * imageWidth + col;    // flat pixel index
    if (depth[positionIndex] > threshold){
        // 4 channels per pixel; 128 in the second channel is the marker value
        // the original notes is consumed on the Unity side
        unsigned char* px = image + positionIndex * 4;
        px[0] = 0;
        px[1] = 128;
        px[2] = 0;
        px[3] = 0;
    }
}
// Zero out depth samples falling outside the quadrilateral bounded by the
// four given lines.  A sample survives only if it is simultaneously right
// of the left edge, left of the right edge, below the top edge and above
// the bottom edge.  Grid: blockIdx.x = column, threadIdx.x = row.
__global__ void gpuApplyCorrection(float* depth, int imageWidth, int imageHeight,
    float left_slope, float left_inter, float left_p1x, float left_p1y, float left_p2x, float left_p2y,
    float right_slope, float right_inter, float right_p1x, float right_p1y, float right_p2x, float right_p2y,
    float top_slope, float top_inter, float top_p1x, float top_p1y, float top_p2x, float top_p2y,
    float down_slope, float down_inter, float down_p1x, float down_p1y, float down_p2x, float down_p2y
    )
{
    int col = blockIdx.x;
    int row = threadIdx.x;
    if (col >= imageWidth || row >= imageHeight){
        return;
    }
    int positionIndex = row * imageWidth + col;
    int y = imageHeight - row - 1;  // line equations use a bottom-up y axis
    bool inside =
        gpuIsRightSide(col, y, left_slope, left_inter, left_p1x, left_p1y, left_p2x, left_p2y) &&
        gpuIsLeftSide(col, y, right_slope, right_inter, right_p1x, right_p1y, right_p2x, right_p2y) &&
        gpuIsDownSide(col, y, top_slope, top_inter, top_p1x, top_p1y, top_p2x, top_p2y) &&
        gpuIsUpSide(col, y, down_slope, down_inter, down_p1x, down_p1y, down_p2x, down_p2y);
    if (!inside){
        depth[positionIndex] = 0.0f;  // outside the region: drop the depth
    }
}
// The four __device__ helpers below test which side of a line a point
// lies on (right / up / left / down).
// Parameters: px, py - the point to test;
//   slope, yIntercept - the line's slope and y-intercept;
//   p1x, p1y, p2x, p2y - coordinates of two points (p1, p2) on the line.
// Is (px,py) on the right-hand side of the directed line through p1 -> p2?
// Horizontal lines are defined to have no "right" side.
__device__ bool gpuIsRightSide(float px, float py, float slope, float yIntercept, float p1x, float p1y, float p2x, float p2y){
    if (nearlyEqual(p2y, p1y)){          // horizontal line
        return false;
    }
    if (nearlyEqual(p2x, p1x)){          // vertical line: compare x directly
        return px > p1x;
    }
    float lineY = (slope * px) + yIntercept;  // line's y at this x
    // which side of the line the point is on depends on the line's direction
    return (py > lineY) ? (p2x <= p1x) : (p2x > p1x);
}
// Is (px,py) above the directed line?  Delegates to the left/right tests
// according to the sign of the slope.  Vertical lines have no "above".
__device__ bool gpuIsUpSide(float px, float py, float slope, float yIntercept, float p1x, float p1y, float p2x, float p2y){
    if (nearlyEqual(p2x - p1x, 0)){  // vertical line
        return false;
    }
    return (slope > 0)
        ? gpuIsLeftSide(px, py, slope, yIntercept, p1x, p1y, p2x, p2y)
        : gpuIsRightSide(px, py, slope, yIntercept, p1x, p1y, p2x, p2y);
}
// Left side is the exact complement of the right side.
__device__ bool gpuIsLeftSide(float px, float py, float slope, float yIntercept, float p1x, float p1y, float p2x, float p2y){
    bool onRight = gpuIsRightSide(px, py, slope, yIntercept, p1x, p1y, p2x, p2y);
    return !onRight;
}
// Down side is the exact complement of the up side.
__device__ bool gpuIsDownSide(float px, float py, float slope, float yIntercept, float p1x, float p1y, float p2x, float p2y){
    bool onUp = gpuIsUpSide(px, py, slope, yIntercept, p1x, p1y, p2x, p2y);
    return !onUp;
}
10,453 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <cuda.h>
#define THREAD 128
//texture<int2, 1, cudaReadModeElementType> yoshi;
__global__ void gemv(int n, double *adim, double *b, double *d_ans);
void cgemv(int n, double *adim, double *b, double *d_ans);
/* Wall-clock time in seconds with microsecond resolution. */
double gettime()
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return (double)tv.tv_sec + (double)tv.tv_usec * 1.0e-6;
}
/* Driver: y = A*x for a 1024x1024 all-ones matrix and all-ones vector,
 * computed on the GPU (every printed entry should equal n). */
int main(int argc, char **argv)
{
    const int n = 1024;
    /* host buffers */
    double *bdim = (double *)malloc(sizeof(double) * n * n);
    double *c    = (double *)malloc(sizeof(double) * n);
    double *ans  = (double *)malloc(sizeof(double) * n);
    /* device buffers */
    double *d_bdim, *d_c, *d_ans;
    cudaMalloc((void **)&d_bdim, sizeof(double) * n * n);
    cudaMalloc((void **)&d_c, sizeof(double) * n);
    cudaMalloc((void **)&d_ans, sizeof(double) * n);
    /* A = ones, x = ones */
    for (int i = 0; i < n; i++)
    {
        c[i] = 1.0;
        for (int j = 0; j < n; j++)
            bdim[i * n + j] = 1.0;
    }
    cudaMemcpy(d_bdim, bdim, sizeof(double) * n * n, cudaMemcpyHostToDevice);
    cudaMemcpy(d_c, c, sizeof(double) * n, cudaMemcpyHostToDevice);
    /* one block per matrix row, THREAD threads per block */
    gemv<<<n, THREAD>>>(n, d_bdim, d_c, d_ans);
    /* blocking D2H copy also synchronizes with the kernel */
    cudaMemcpy(ans, d_ans, sizeof(double) * n, cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; i++)
        printf("%f\n", ans[i]);
    free(bdim);
    free(c);
    free(ans);
    cudaFree(d_bdim);
    cudaFree(d_c);
    cudaFree(d_ans);
    return 0;
}
/* d_ans = A*b, one block per row; partial dot products are accumulated per
 * thread and combined with a shared-memory tree reduction.
 * Fix: the original ended the reduction with a warp-synchronous tail for
 * threadIdx.x < 16 that (a) called __syncthreads() inside a divergent
 * branch -- undefined behavior, since not all threads of the block reach
 * the barrier -- and (b) relied on implicit warp lockstep without
 * volatile/__syncwarp, which does not hold under independent thread
 * scheduling (Volta+).  The tree loop now runs all the way down to 1 with
 * the barrier outside any divergent code. */
__global__ void gemv(int n, double *adim, double *b, double *d_ans)
{
    int i;
    int div = n / THREAD;
    __shared__ double tmp[THREAD];

    /* per-thread partial dot product of row blockIdx.x with b */
    tmp[threadIdx.x] = 0.0;
    for (i = 0; i < div; i++)
    {
        tmp[threadIdx.x] += adim[blockIdx.x * n + i * THREAD + threadIdx.x]
                          * b[i * THREAD + threadIdx.x];
    }
    /* remainder when n is not a multiple of THREAD */
    if (threadIdx.x < n % THREAD)
        tmp[threadIdx.x] += adim[blockIdx.x * n + THREAD * div + threadIdx.x]
                          * b[THREAD * div + threadIdx.x];
    __syncthreads();

    /* tree reduction; every thread reaches every barrier */
    for (i = THREAD / 2; i > 0; i /= 2)
    {
        if (threadIdx.x < i)
            tmp[threadIdx.x] += tmp[threadIdx.x + i];
        __syncthreads();
    }
    if (threadIdx.x == 0)
        d_ans[blockIdx.x] = tmp[0];
}
/* CPU reference: d_ans = A * b, where A (adim) is n x n, row-major.
 * Fix: the original assigned instead of accumulating (`=` rather than
 * `+=`, so only the last column survived) and multiplied by b[i] instead
 * of b[j]; the result was adim[i*n+(n-1)]*b[i], not a matrix-vector
 * product.  Each output is now zero-initialized and accumulated over j. */
void cgemv(int n, double *adim, double *b, double *d_ans)
{
    int i, j;
    for (i = 0; i < n; i++)
    {
        d_ans[i] = 0.0;
        for (j = 0; j < n; j++)
            d_ans[i] += adim[i * n + j] * b[j];
    }
}
|
10,454 | /**
* jrc3_cuda_delta.cu
* block loading delta calculation. should be much faster
* system('nvcc -ptx -m 64 -arch sm_35 jrc3_cuda_rho.cu')
* iA is multiple of CHUNK (16)
* J. James Jun, Vidrio Technologies, LLC., 2017 Jun 11
*/
#include <cuda_runtime.h>
// #include "cublas_v2.h"
#include <math.h>
#define ABS(my_val) ((my_val) < 0) ? (-1*(my_val)) : (my_val)
#define MIN(A,B) ((A)<(B)) ? (A) : (B)
#define MAX(A,B) ((A)>(B)) ? (A) : (B)
#define NTHREADS 128
#define NC 45 // number of Channels
#define CHUNK 16 //previously defined as CHUNK
#define SINGLE_INF (3.402E+38)
/** Main entry point.
* Works out where the current thread should read/write to global memory
* and calls doIterations to do the actual work.
* Step through one B at a time
* 7/13/17: fDc_spk option added, which uses spike-specific distance cut-off (dc)
*/
// % Matlab syntax
// mrDist12_ = eucl2_dist_(mrFet12, mrFet12(:,1:n1)); %not sqrt
// mlRemove12_ = bsxfun(@ge, viiRho12_ord, viiRho12_ord(1:n1)') ...
// | abs(bsxfun(@minus, viiSpk12_ord_, viiSpk12_ord_(1:n1)')) > dn_max;
// mrDist12_(mlRemove12_) = nan;
// [vrDelta1, viNneigh1] = min(mrDist12_);
/**
 * Delta step of a rho-delta style computation (cf. the Matlab snippet
 * above): for each spike i1 in [0, n1), find the nearest candidate in
 * feature space among the n12 spikes whose density rank (viiRho12_ord)
 * is higher and whose time index (viiSpk12_ord) is within dn_max,
 * writing the normalized distance to vrDelta1 and the 1-based neighbor
 * index to viNneigh1.
 *
 * Launch geometry: each block handles CHUNK (16) consecutive i1 spikes;
 * threads stride over the n12 candidates and per-thread minima are
 * combined through shared memory at the end.
 * vnConst packs [n1, n12, nC, dn_max, fDc_spk]; dc2 is the (squared)
 * distance cutoff used for normalization (per-spike when fDc_spk==1).
 */
__global__ void jrc3_cuda_delta(float * vrDelta1, unsigned int * viNneigh1, const float * mrFet12, const int * viiSpk12_ord, const int * viiRho12_ord, const int * vnConst, const float dc2){
    // int iA = blockIdx.x * CHUNK;
    int i1 = (blockIdx.x + blockIdx.y * gridDim.x) * CHUNK; // base index of i1
    int tx = threadIdx.x;
    int i1_tx = i1 + tx;
    int n1 = vnConst[0];       // number of primary spikes
    int n12 = vnConst[1];      // number of candidate spikes
    int nC = vnConst[2];       // number of feature channels (<= NC)
    int dn_max = vnConst[3];   // max allowed time-index difference
    int fDc_spk = vnConst[4];  // 1: use per-spike distance cutoff
    __shared__ int viiSpk1_ord_[CHUNK];
    __shared__ int viiRho1_ord_[CHUNK];
    __shared__ float mrFet1_[NC][CHUNK];
    __shared__ float mrDelta1_[NTHREADS][CHUNK];
    __shared__ unsigned int miNneigh1_[NTHREADS][CHUNK];
    __shared__ float vrDc1_[CHUNK]; // use if fDc_spk=1
    // cache this block's CHUNK feature vectors in shared memory
    if (tx < nC){ //use tx as iC
        for (int i_c = 0; i_c < CHUNK; ++i_c){
            int i1_c = i_c + i1;
            if (i1_c < n1){
                mrFet1_[tx][i_c] = mrFet12[tx + i1_c * nC];
            }else{
                mrFet1_[tx][i_c] = 0.0f; // zero-pad past-the-end spikes
            }
        }
    }
    if (tx < CHUNK && i1_tx < n1){
        viiSpk1_ord_[tx] = viiSpk12_ord[i1_tx];
        viiRho1_ord_[tx] = viiRho12_ord[i1_tx];
    }
    // per-thread running minima kept in registers, one slot per CHUNK spike
    float vr_minDist1[CHUNK];
    unsigned int vi_minIdx1[CHUNK];
    for (int i_c = 0; i_c < CHUNK; ++i_c){
        vr_minDist1[i_c] = SINGLE_INF;
        vi_minIdx1[i_c] = i1 + i_c; // self
    }
    // calculate spike-specific distance cut-off vrDc1_ only if fDc_spk==1
    if (tx < CHUNK && fDc_spk==1){
        vrDc1_[tx] = 0.0f; //init
        //for (int iC = 0; iC < 1; ++iC){ //center only scale
        for (int iC = 0; iC < nC; ++iC){
            float temp_ = mrFet1_[iC][tx];
            vrDc1_[tx] += (temp_ * temp_); // squared norm of the feature vector
        }
        vrDc1_[tx] *= dc2;
    }
    __syncthreads();
    // each thread scans candidates i12_tx = tx, tx+blockDim.x, ...
    for (int i12_tx = tx; i12_tx < n12; i12_tx += blockDim.x){
        //for (int i12_tx = 1; i12_tx < n12; ++i12_tx){
        // eligibility mask: candidate must have a higher density rank and
        // lie within dn_max time indices of each of the CHUNK spikes
        char vlDist_c[CHUNK];
        int iiSpk12_ord_tx = viiSpk12_ord[i12_tx];
        int iiRho12_ord_tx = viiRho12_ord[i12_tx];
        for (int i_c = 0; i_c < CHUNK; ++i_c){
            char di_rho_ = (iiRho12_ord_tx < viiRho1_ord_[i_c]);
            int di_spk_ = ABS(viiSpk1_ord_[i_c] - iiSpk12_ord_tx);
            vlDist_c[i_c] = (di_spk_ <= dn_max) && di_rho_;
        }
        // squared Euclidean distance in feature space (no sqrt here)
        float vrDist_c[CHUNK];
        for (int i_c = 0; i_c < CHUNK; ++i_c) vrDist_c[i_c] = 0.0f;
        for (int iC = 0; iC < nC; ++iC){
            float fet12_tx = mrFet12[iC + i12_tx * nC];
            for (int i_c = 0; i_c < CHUNK; ++i_c){
                float temp = fet12_tx - mrFet1_[iC][i_c];
                vrDist_c[i_c] += temp * temp;
            }
        }
        // keep the closest eligible candidate seen by this thread
        for (int i_c = 0; i_c < CHUNK; ++i_c){
            if (vrDist_c[i_c] < vr_minDist1[i_c]){
                if (vlDist_c[i_c] == 1){
                    vr_minDist1[i_c] = vrDist_c[i_c];
                    vi_minIdx1[i_c] = i12_tx;
                }
            }
        }
    } // while
    // publish per-thread minima so the first CHUNK threads can combine them
    for (int i_c = 0; i_c < CHUNK; ++i_c){
        mrDelta1_[tx][i_c] = vr_minDist1[i_c];
        miNneigh1_[tx][i_c] = vi_minIdx1[i_c];
    }
    __syncthreads();
    // final reduction: thread tx (< CHUNK) owns output spike i1 + tx
    //if (tx < CHUNK && i1_tx < n1){
    if (tx < CHUNK){
        float minDist1 = SINGLE_INF;
        unsigned int minIdx1 = i1_tx;
        for (int tx1=0; tx1<blockDim.x; ++tx1){
            if (mrDelta1_[tx1][tx] < minDist1){
                minDist1 = mrDelta1_[tx1][tx];
                minIdx1 = miNneigh1_[tx1][tx];
            }
        }
        //vrDelta1[i1_tx] = sqrtf(minDist1);
        if (i1_tx < n1){
            // vrDelta_ = sqrt(abs(single(vrDelta_) / vrDc2_site(iSite))); %normalize and convert dist
            if (fDc_spk==0){
                vrDelta1[i1_tx] = sqrtf(ABS(minDist1) / dc2); // global cutoff normalization
            }else{
                vrDelta1[i1_tx] = sqrtf(ABS(minDist1) / vrDc1_[tx]); // per-spike normalization
                //vrDelta1[i1_tx] = sqrtf(ABS(minDist1));
            }
            viNneigh1[i1_tx] = minIdx1 + 1; //Matlab index output
        }
    }
} // func
10,455 | // https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#introduction
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <cstdio>
#include <chrono>
typedef std::chrono::high_resolution_clock Clock;
#define ITER_ROW 1024
#define ITER_COL 1024
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a CUDA error with file/line context; optionally terminate the
// process (default) with the error code as exit status.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
   if (code == cudaSuccess) return;
   fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
   if (abort) exit(code);
}
// CPU reference: c = a + b element-wise for an m x n row-major matrix.
void matrix_add_cpu(int *a, int *b, int *c, int m, int n) {
  for (int row = 0; row < m; ++row) {
    int base = n * row;  // start of this row in the flat arrays
    for (int col = 0; col < n; ++col) {
      c[base + col] = a[base + col] + b[base + col];
    }
  }
}
// One thread per element over a 2D grid of 2D blocks: flatten the
// (blockIdx, threadIdx) pair into a linear element index.
__global__ void matrix_add_gpu(int *gpu_a, int *gpu_b, int *gpu_c) {
  int blockId = gridDim.x * blockIdx.y + blockIdx.x;
  int loc = blockId * (blockDim.x * blockDim.y)
          + threadIdx.y * blockDim.x + threadIdx.x;
  gpu_c[loc] = gpu_a[loc] + gpu_b[loc];
}
// Dump a rows x cols row-major matrix to stdout, tab-separated,
// one matrix row per output line.
void print_matrix(int *m, int rows, int cols) {
  for (int row = 0; row < rows; ++row) {
    for (int col = 0; col < cols; ++col) {
      printf("\t%d", m[cols * row + col]);
    }
    printf("\n");
  }
}
// Benchmark element-wise matrix addition on CPU vs GPU and spot-check
// three result entries.
// Fixes vs. the original: (1) the spot-check labels said "(3, 17)" while
// the index used row 13 -- the labels now match the index actually read;
// (2) the uninitialized output buffer c is no longer copied host->device
// (the kernel writes every element of gpu_c, so that copy only moved
// garbage); (3) the stale comment claiming cudaMallocManaged corrected.
int main(int argc, char **argv) {
  int rows = ITER_ROW;
  int cols = ITER_COL;
  int *a, *b, *c;
  int *gpu_a, *gpu_b, *gpu_c;
  size_t mem_size = rows * cols * sizeof(int);
  a = (int *)malloc(mem_size);
  b = (int *)malloc(mem_size);
  c = (int *)malloc(mem_size);
  // Plain device allocations; all transfers below are explicit.
  gpuErrchk(cudaMalloc(&gpu_a, mem_size));
  gpuErrchk(cudaMalloc(&gpu_b, mem_size));
  gpuErrchk(cudaMalloc(&gpu_c, mem_size));
  // Inputs: a[r][c] = b[r][c] = r + c.
  for (int row = 0; row < rows; ++row) {
    for (int col = 0; col < cols; ++col) {
      int loc = cols * row + col;
      a[loc] = row + col;
      b[loc] = row + col;
    }
  }
  gpuErrchk(cudaMemcpy(gpu_a, a, mem_size, cudaMemcpyHostToDevice));
  gpuErrchk(cudaMemcpy(gpu_b, b, mem_size, cudaMemcpyHostToDevice));
  // Call the CPU function and time it.
  auto cpu_start = Clock::now();
  matrix_add_cpu(a, b, c, rows, cols);
  auto cpu_end = Clock::now();
  std::cout << "vector_add_cpu: "
            << std::chrono::duration_cast<std::chrono::nanoseconds>(cpu_end - cpu_start).count()
            << " nanoseconds.\n";
  printf("CPU (0, 0): %d\n", *c);
  printf("CPU (13, 17): %d\n", *(c + (13 * cols) + 17));
  printf("CPU last: %d\n", *(c + (rows * cols) - 1));
  // Zero the host result so a stale CPU value cannot masquerade as GPU output.
  for (int row = 0; row < rows; ++row) {
    for (int col = 0; col < cols; ++col) {
      *(c + cols * row + col) = 0;
    }
  }
  // Launch the GPU kernel and time it.  16*64 blocks of 32*32 threads
  // cover the 1024x1024 elements with exactly one thread each (the
  // 1024-threads-per-block limit forces the split).
  dim3 blocksPerGrid(16, 64);
  dim3 threadsPerBlock(32, 32);
  auto gpu_start = Clock::now();
  matrix_add_gpu <<<blocksPerGrid, threadsPerBlock>>> (gpu_a, gpu_b, gpu_c);
  gpuErrchk( cudaPeekAtLastError() );    // launch-configuration errors
  gpuErrchk(cudaDeviceSynchronize());    // execution errors + timing fence
  auto gpu_end = Clock::now();
  std::cout << "vector_add_gpu: "
            << std::chrono::duration_cast<std::chrono::nanoseconds>(gpu_end - gpu_start).count()
            << " nanoseconds.\n";
  gpuErrchk(cudaMemcpy(c, gpu_c, mem_size, cudaMemcpyDeviceToHost));
  printf("GPU (0, 0): %d\n", *c);
  printf("GPU (13, 17): %d\n", *(c + (13 * cols) + 17));
  printf("GPU last: %d\n", *(c + (rows * cols) - 1));
  // Free the GPU-side allocations.
  cudaFree(gpu_a);
  cudaFree(gpu_b);
  cudaFree(gpu_c);
  // Free the host-side allocations.
  free(a);
  free(b);
  free(c);
  return 0;
}
|
extern "C"
{
    // Masked accumulate: c[i] += a[i] wherever b[i] > 0.
    // Fixes: the original else-branch executed `c[i] += 0.0` -- a
    // redundant global read-modify-write per masked element (it could
    // only matter by canonicalizing -0.0 to +0.0, which is dropped
    // here as unintended); the comparison literal is now 0.0f so b[i]
    // is not promoted to double.
    __global__ void A_emult_Bg0_32(const int n, const float *a, const float *b, float *c)
    {
        int i = threadIdx.x + blockIdx.x * blockDim.x;
        if (i < n && b[i] > 0.0f)
        {
            c[i] += a[i];
        }
    }
}
// 1-D box stencil: B[t] = sum of A[t-radius .. t+radius], with the window
// clipped to [0, n).  One thread per output element; out-of-range threads
// return early.
__global__ void stencil(int *A, int *B, int radius, unsigned n) {
  unsigned tid = blockDim.x * blockIdx.x + threadIdx.x;
  if (tid >= n) return;
  int sum = 0;
  for (int off = -radius; off <= radius; ++off) {
    int idx = tid + off;
    if (0 <= idx && idx < n) {  // clip the window at both borders
      sum += A[idx];
    }
  }
  B[tid] = sum;
}
// Host wrapper: run the 1-D stencil on the device over n elements.
// Fix: the original launched a single block of n threads
// (stencil<<<1,n>>>), which fails silently for any n above the device's
// max threads per block (typically 1024); we now launch a ceil-divided
// grid of 256-thread blocks -- the kernel already bounds-checks tid < n,
// so overshoot threads are harmless.  Also returns early for n == 0.
void hostStencil(int *A, int *B, int radius, unsigned n) {
  if (n == 0) return;
  // device copies of A and B
  int *d_A; int *d_B;
  size_t sz = sizeof(int) * n;
  cudaMalloc(&d_A, sz); cudaMalloc(&d_B, sz);
  // copy input to device
  cudaMemcpy(d_A, A, sz, cudaMemcpyHostToDevice);
  // launch kernel: grid covers all n elements
  unsigned threads = 256;
  unsigned blocks = (n + threads - 1) / threads;
  stencil<<<blocks, threads>>>(d_A, d_B, radius, n);
  // copy output from device (blocking copy synchronizes with the kernel)
  cudaMemcpy(B, d_B, sz, cudaMemcpyDeviceToHost);
  // free allocated arrays
  cudaFree(d_A); cudaFree(d_B);
}
|
10,458 | #include <math.h>
//x[i] and y[i]
__device__ double op(double d1,double d2,double *params);
__device__ double op(double d1,double *params);
// Apply the externally defined op() over strided vectors dx (stride incx)
// and dy (stride incy), writing into result.  When incy == 0 the unary
// op(dx, params) overload is applied serially by a single thread;
// otherwise all threads grid-stride over the n elements.
// NOTE(review): xOffset/yOffset are accepted but never used here --
// confirm against other overloads before removing them from the signature.
// Fix: removed the dead local `ix` (a negative-increment start index that
// was computed but never read).
__device__ void transform(int n,int xOffset,int yOffset, double *dx, double *dy,int incx,int incy,double *params,double *result) {
    int totalThreads = gridDim.x * blockDim.x;
    int tid = threadIdx.x;
    int i = blockIdx.x * blockDim.x + tid;
    if (incy == 0) {
        // unary case: one thread walks the whole vector
        if ((blockIdx.x == 0) && (tid == 0)) {
            for (; i < n; i++) {
                result[i * incx] = op(dx[i * incx],params);
            }
        }
    } else if ((incx == incy) && (incx > 0)) {
        /* equal, positive, increments */
        if (incx == 1) {
            /* both increments equal to 1: dense grid-stride loop */
            for (; i < n; i += totalThreads) {
                result[i] = op(dx[i],dy[i],params);
            }
        } else {
            /* equal, positive, non-unit increments. */
            for (; i < n; i += totalThreads) {
                result[i * incy] = op(dx[i * incx],dy[i * incy],params);
            }
        }
    } else {
        /* unequal or nonpositive increments: result is written densely */
        for (; i < n; i += totalThreads) {
            result[i] = op(dx[i * incx],dy[i * incy],params);
        }
    }
}
|
10,459 |
// CUDA kernel to add elements of two arrays
// Zero-initialize a device array of n booleans, one thread per element.
__global__
void init(unsigned int n, bool *x)
{
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < n) {
    x[idx] = false;
  }
}
|
10,460 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "math.h"
#define MAX_RANGE_DIFF 255
// Unnormalized Gaussian exp(-x^2 / (2*sigma^2)).  Important: the first
// argument is x ALREADY squared (x_square), alongside sigma.
__device__ float gauss(float x_square, float sigma)
{
    return expf(-x_square / (2.0f * sigma * sigma));
}
// Precompute the spatial Gaussian table so the per-pixel filter loop never
// has to evaluate expf() itself: the (2r+1) x (2r+1) table stores the
// Gauss value for every possible spatial offset pair, and is later staged
// into shared memory by the filter kernel.
// r: spatial kernel radius (largest offset considered in each direction);
// sigma: the spatial Gaussian's sigma.
// Launch: one block per table entry, gridDim = (2r+1, 2r+1).
__global__ void createSpatialKernel(float *spatialKernel, int r, float sigma)
{
    int n = 2 * r + 1;        // side length of the kernel
    int di = blockIdx.x - r;  // column offset in [-r, r]
    int dj = blockIdx.y - r;  // row offset in [-r, r]
    float distSq = (float)(di * di + dj * dj);
    spatialKernel[blockIdx.x + n * blockIdx.y] = gauss(distSq, sigma);
}
// Precompute the Gauss value for every possible intensity difference.
// Two 8-bit intensities differ by one of 2*255+1 = 511 values in
// [-255, 255]; entry i of rangeKernel corresponds to difference
// i - MAX_RANGE_DIFF.  Launch with a single block of 511 threads.
__global__ void createRangeKernel(float *rangeKernel, float sigma)
{
    int tid = threadIdx.x;
    // Fill the non-negative differences first (the Gaussian is symmetric).
    if (tid >= MAX_RANGE_DIFF) {
        int deltaI = tid - MAX_RANGE_DIFF;
        rangeKernel[tid] = gauss((float)(deltaI * deltaI), sigma);
    }
    __syncthreads();  // all writes above must land before mirroring
    // Mirror onto the negative-difference entries.
    int last = MAX_RANGE_DIFF * 2; // index of difference +255 (= 510)
    if (tid < MAX_RANGE_DIFF) {
        rangeKernel[tid] = rangeKernel[last - tid];
    }
}
// Bilateral filter kernel.
// in / out: input and output 8-bit intensity images (width x height).
// spatialKernel, rangeKernel: precomputed Gauss lookup tables for spatial
//   offsets and intensity differences (built by the kernels above); both
//   are read many times per pixel, so they are staged into shared memory.
// r: spatial kernel radius; width, height: image size in pixels.
// Launch: 2D grid of 2D blocks, one thread per pixel; dynamic shared
// memory must hold (2r+1)^2 + 511 floats.
__global__ void bilateralFilter(unsigned char *in, unsigned char *out, float *spatialKernel, float *rangeKernel, int r,
    int width, int height)
{
    int n = 2 * r + 1;                  // side length of the spatial kernel
    int spatialKernelSize = n * n;
    extern __shared__ float sharedData[]; // spatial table followed by range table, contiguous
    float *pSpatialKernel = &sharedData[r * n + r]; // points at the CENTER of the spatial table
    float *pRangeKernel = &sharedData[spatialKernelSize + MAX_RANGE_DIFF]; // center of the range table
    // Populate shared memory:
    // 1. every thread copies a strided share of the spatial table
    int index = threadIdx.x + blockDim.x * threadIdx.y;
    int step = blockDim.x * blockDim.y; // total threads in the block
    while (index < spatialKernelSize) {
        sharedData[index] = spatialKernel[index];
        index += step;
    }
    // 2. every thread copies a strided share of the range table
    index = threadIdx.x + blockDim.x * threadIdx.y;
    int rangeKernelSize = 2 * MAX_RANGE_DIFF + 1; // = 511
    while (index < rangeKernelSize) {
        sharedData[index + spatialKernelSize] = rangeKernel[index];
        index += step;
    }
    __syncthreads(); // tables must be complete before any thread filters
    // Shared memory ready -- now the filter itself:
    int x = threadIdx.x + blockIdx.x * blockDim.x; // pixel coordinates
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    if (x < width && y < height) { // only process valid pixels
        int offset = x + y * width; // flat index of this pixel in in/out
        float summa = 0.0f, weightSumma = 0.0f;
        int intensity = in[offset]; // center pixel intensity
        for (int j = -r; j <= r; ++j) { // j: row offset
            int yj = y + j; // neighbor's y coordinate
            for (int i = -r; i <= r; ++i) { // i: column offset
                int xi = x + i; // neighbor's x coordinate
                if (xi >= 0 && xi < width && yj >= 0 && yj < height) {
                    int offsetij = xi + yj * width; // flat index of neighbor (xi, yj)
                    int intensityij = in[offsetij]; // neighbor intensity
                    int deltaI = intensityij - intensity; // intensity difference, in [-255, 255]
                    // combined weight = spatial Gauss * range Gauss (table lookups)
                    float temp = pSpatialKernel[i + j * n] * pRangeKernel[deltaI];
                    weightSumma += temp;
                    summa += temp * intensityij;
                }
            }
        }
        // normalized weighted mean; zero weight can only occur degenerately
        out[offset] = (weightSumma == 0.0f) ? 0 : ((unsigned char)(summa / weightSumma)); //TODO: round instead of truncate
    }
}
|
10,461 | #include "unroll.cuh"
// Single-block element-wise add: c[i] = a[i] + b[i], one thread per element.
__global__ void addKernel(int *c, const int *a, const int *b)
{
    int idx = threadIdx.x;
    c[idx] = a[idx] + b[idx];
}
// im2col-style unroll: expand X (C x H x W) into X_out
// ((C*K*K) x (H_out*W_out)) so a convolution becomes a matrix multiply.
// One thread per (channel, output pixel) pair; each thread writes the
// K*K entries of its column that come from its channel.
__global__ void unrollKernel(int C, int H, int W, int K, float* X, float* X_out)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int H_out = H - K + 1;            // valid-convolution output height
    int W_out = W - K + 1;            // valid-convolution output width
    int W_unroll = H_out * W_out;     // columns of the unrolled matrix
    if (tid >= C * W_unroll) return;  // excess threads do nothing
    int c = tid / W_unroll;           // input channel
    int s = tid % W_unroll;           // output pixel within the channel
    int h_out = s / W_out;
    int w_out = s % W_out;
    int h_unroll = h_out * W_out + w_out; // column index (== s)
    int w_base = c * K * K;               // first unrolled row for channel c
    for (int k1 = 0; k1 < K; k1++) {
        for (int k2 = 0; k2 < K; k2++) {
            int w_unroll = w_base + k1 * K + k2;
            X_out[w_unroll * W_unroll + h_unroll] =
                X[c * H * W + (h_out + k1) * W + w_out + k2];
        }
    }
}
// GPU driver for the im2col unroll: copies X to the device, unrolls it,
// and copies the ((C*K*K) x (H_out*W_out)) result back into X_out.
// Fix: the original launched ONE block of H*W*C threads, which exceeds
// the 1024-threads-per-block limit for any realistically sized input and
// makes the launch fail silently (X_out is then never written).  We now
// launch a ceil-divided grid of 256-thread blocks sized to the
// C*H_out*W_out work items the kernel actually consumes.
void unrollWithCuda(float* X, float* X_out, int C, int H, int W, int K) {
    float* dev_X;
    float* dev_X_out;
    int H_out = H - K + 1;
    int W_out = W - K + 1;
    size_t inBytes = (size_t)H * W * C * sizeof(float);
    size_t outBytes = (size_t)H_out * W_out * K * K * C * sizeof(float);
    cudaSetDevice(0);
    cudaMalloc((void**)&dev_X, inBytes);
    cudaMalloc((void**)&dev_X_out, outBytes);
    cudaMemcpy(dev_X, X, inBytes, cudaMemcpyHostToDevice);
    int work = C * H_out * W_out;             // one thread per (channel, pixel)
    int threads = 256;
    int blocks = (work + threads - 1) / threads;  // ceil-divide
    unrollKernel << <blocks, threads >> > (C, H, W, K, dev_X, dev_X_out);
    // blocking D2H copy also synchronizes with the kernel
    cudaMemcpy(X_out, dev_X_out, outBytes, cudaMemcpyDeviceToHost);
    cudaFree(dev_X);
    cudaFree(dev_X_out);
}
// CPU reference for the im2col unroll: X (C x H x W) ->
// X_out ((C*K*K) x (H_out*W_out)), row-major.
void unrollWithCpu(float* X, float* X_out, int C, int H, int W, int K) {
    int H_out = H - K + 1;          // valid-convolution output height
    int W_out = W - K + 1;          // valid-convolution output width
    int W_unroll = H_out * W_out;   // columns of the unrolled matrix
    for (int c = 0; c < C; c++) {
        int row_base = c * K * K;   // first unrolled row for channel c
        for (int k1 = 0; k1 < K; k1++) {
            for (int k2 = 0; k2 < K; k2++) {
                int row = row_base + k1 * K + k2;
                for (int h = 0; h < H_out; h++) {
                    for (int w = 0; w < W_out; w++) {
                        int col = h * W_out + w;
                        X_out[row * W_unroll + col] = X[c * H * W + (h + k1) * W + w + k2];
                    }
                }
            }
        }
    }
}
10,462 | #include "includes.h"
// 1-D convolution of a with the constant `mask` (length MASK_LEN, both
// from includes.h), zero-padded at the borders:
// c[i] = sum_j a[i - MASK_LEN/2 + j] * mask[j].
// Fix: the original stored c[id] unconditionally -- an out-of-bounds
// write for every tail thread whenever gridDim*blockDim > n; the kernel
// now returns early for id >= n.
__global__ void conv_1d(int* a, int* c, int n) {
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id >= n) return;           // guard the output store
    int r = MASK_LEN / 2;          // mask radius (index of the center tap)
    int start = id - r;            // leftmost input element this output reads
    int temp = 0;
    for (int j = 0; j < MASK_LEN; j++)
    {
        // skip taps that fall outside the input (implicit zero padding)
        if ((start + j >= 0) && (start + j < n))
        {
            temp += a[start + j] * mask[j];
        }
    }
    c[id] = temp;
}
10,463 | #include "includes.h"
// Column-sum of a countlength x countlength segment-counter matrix:
// counter[x] = sum over jj of segcounter[x + jj*countlength].
// Fixes vs. the original: (1) s_counter was read (accumulated into)
// before ever being initialized -- undefined shared-memory contents;
// (2) it was indexed by the GLOBAL thread index, overflowing the
// per-block shared allocation for every block after the first; (3) the
// final store to counter[] had no bounds check, so tail threads wrote
// out of bounds.  A per-thread register accumulator needs no shared
// memory at all and fixes all three; any dynamic shared memory passed at
// launch is simply unused.
__global__ void segCountSum_shared(int *counter, int *segcounter, const int countlength)
{
    unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
    if (xIndex >= (unsigned int)countlength) return;
    int sum = 0;
    for (int jj = 0; jj < countlength; jj++) {
        sum += segcounter[xIndex + jj * countlength];
    }
    counter[xIndex] = sum;
}
10,464 | /* Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <iostream>
__device__ int x;
// Deliberately performs a misaligned 4-byte store (one byte past the start
// of the device variable x). This is an intentional fault used to
// demonstrate the misaligned-access diagnostic of compute-sanitizer /
// cuda-memcheck — do not "fix" it.
__global__ void unaligned_kernel(void)
{
    *(int*) ((char*)&x + 1) = 42;
}
// Deliberately writes through a bogus hard-coded address. Intentional fault
// for demonstrating the out-of-bounds / illegal-address diagnostic of the
// CUDA sanitizer tools — do not "fix" it.
__device__ void out_of_bounds_function(void)
{
    *(int*) 0x87654320 = 42;
}
// Kernel wrapper so the faulting device function can be launched directly.
__global__ void out_of_bounds_kernel(void)
{
    out_of_bounds_function();
}
// Launches the misaligned-store kernel and prints the error string returned
// by the synchronizing call (kernel faults surface at the next sync, not at
// launch time).
static void run_unaligned(void)
{
    std::cout << "Running unaligned_kernel: ";
    unaligned_kernel<<<1,1>>>();
    std::cout << cudaGetErrorString(cudaDeviceSynchronize()) << std::endl;
}
// Same pattern for the out-of-bounds kernel.
static void run_out_of_bounds(void)
{
    std::cout << "Running out_of_bounds_kernel: ";
    out_of_bounds_kernel<<<1,1>>>();
    std::cout << cudaGetErrorString(cudaDeviceSynchronize()) << std::endl;
}
// Sanitizer demo driver: allocates device memory that is intentionally
// never freed (to demonstrate leakcheck, per the original comment below),
// then runs both faulting kernels.
int main() {
    int *devMem = nullptr;
    std::cout << "Mallocing memory" << std::endl;
    cudaMalloc((void**)&devMem, 1024);
    run_unaligned();
    run_out_of_bounds();
    // Omitted to demo leakcheck
    // cudaFree(devMem);
    return 0;
}
|
10,465 | #include <stdio.h>
#include <stdlib.h>
#define N 512
// Element-wise sum of two N-length int vectors: c[i] = a[i] + b[i].
void host_add(int *a, int *b, int *c)
{
    int pos = 0;
    while (pos < N) {
        c[pos] = a[pos] + b[pos];
        ++pos;
    }
}
// Fills the N-length buffer with the ramp 0, 1, ..., N-1.
void fill_array(int *data) {
    for (int k = N; k-- > 0; ) {
        data[k] = k;
    }
}
// Prints each "a + b = c" triple, one per line.
void print_output(int *a, int *b, int *c)
{
    for (int k = 0; k < N; ++k)
        printf("%d + %d = %d\n", a[k], b[k], c[k]);
}
// Host-only demo: allocate three vectors, fill two, add them, print, free.
int main(void)
{
    const int bytes = N * sizeof(int);
    // Allocate host memory.
    int *a = (int *)malloc(bytes);
    fill_array(a);
    int *b = (int *)malloc(bytes);
    fill_array(b);
    int *c = (int *)malloc(bytes);
    host_add(a, b, c);
    print_output(a, b, c);
    free(a);
    free(b);
    free(c);
    return 0;
}
10,466 | #include "includes.h"
// Initializes the banded-alignment temp-node arrays. The band width is
// 1 + 2*(allowableGap + MARGIN); entries are laid out band-major with
// hitNum entries per band. Bands below the middle get sentinel values,
// the middle band gets the zero-score start state, and bands above it get
// gap-penalized start scores (constants from includes.h).
__global__ void initTempNodeArray( const int hitNum, const int allowableGap, int* tempNodeArray_score, int* tempNodeArray_vertical, int* tempNodeArray_horizontal, int* tempNodeArray_matchNum) {
    const int blockId = gridDim.x * blockIdx.y + blockIdx.x;
    const int idx = blockId * blockDim.x + threadIdx.x;
    const int halfWidth = allowableGap + MARGIN;
    const int bandCount = 1 + 2 * halfWidth;
    if (idx >= hitNum * bandCount)
        return;
    const int band = idx / hitNum;
    int score, vertical, horizontal, matchNum;
    if (band < halfWidth) {
        score = vertical = horizontal = matchNum = -30000;
    } else if (band == halfWidth) {
        score = 0;
        vertical = GAP_OPEN_POINT;
        horizontal = GAP_OPEN_POINT;
        matchNum = 0;
    } else {
        const int gapLen = band - halfWidth;
        score = gapLen * GAP_POINT + GAP_OPEN_POINT;
        vertical = score + GAP_OPEN_POINT;
        horizontal = score;
        matchNum = 0;
    }
    tempNodeArray_score     [idx] = score;
    tempNodeArray_vertical  [idx] = vertical;
    tempNodeArray_horizontal[idx] = horizontal;
    tempNodeArray_matchNum  [idx] = matchNum;
}
10,467 | #include <stdio.h>
//#include <cuda_runtime.h>
// Increments each of the first nbelem elements of a[] by one.
// Generalized to use the global thread index so the kernel is also correct
// for multi-block launches (with only threadIdx.x, as in the original,
// every block would re-increment the same first blockDim.x elements).
// Backward compatible with the existing <<<1,128>>> launch.
__global__ void plusOne(int *a, int nbelem)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < nbelem)
        a[i]++;
}
// Fills a 100-element vector, increments it on the device, and verifies
// every element on the host.
int main() {
    const int count = 100;
    const size_t bytes = count * sizeof(int);
    int *host = (int*)malloc(bytes);
    int *dev = NULL;
    cudaMalloc(&dev, bytes);
    for (int i = 0; i < count; ++i)
        host[i] = i;
    cudaMemcpy(dev, host, bytes, cudaMemcpyHostToDevice);
    plusOne<<<1, 128>>>(dev, count);
    cudaMemcpy(host, dev, bytes, cudaMemcpyDeviceToHost);
    bool allGood = true;
    for (int i = 0; i < count; ++i)
        allGood = allGood && (host[i] == i + 1);
    printf("%s\n", allGood ? "ok" : "not ok");
    free(host);
    cudaFree(dev);
}
|
10,468 | // Luke Rinehart
// GPU parallel processing Assignment 2
// GPU N average sort
#include <cuda.h>
#include <cuda_runtime.h>
#include <time.h>
#include <stdio.h>
const int size = 100;
void gpu_avg(int [size][size], int *, int);
// Sums the first k entries of each row of A into B[row]; one thread per row
// along the y dimension.
__global__ void n_avg(int A[size][size], int *B, int k){
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row < size) {
        int sum = 0;
        for (int i = 0; i < k; ++i)
            sum += A[row][i];
        // BUG FIX: the original wrote B[count] where count == k after the
        // loop, so every thread raced on the same slot — which is also out
        // of bounds when k == size. Each row's result belongs in B[row].
        // (The per-thread debug printf of the original was dropped as well;
        // it only spammed stdout.)
        B[row] = sum;
    }
}
// Copies A to the device, computes per-row sums of the first k columns with
// n_avg, and copies the results back into B. Interface unchanged.
void gpu_avg(int A[size][size], int *B, int k){
    // BUG FIX: the original declared Ad as a host stack array and then
    // called cudaMalloc on its address, allocated only k (not k*k*sizeof(int))
    // bytes, never copied results back to B, and its 1x1 grid of 32x32
    // threads covered just 32 of the `size` rows.
    int (*Ad)[size] = NULL;           /* device copy of A */
    int *Bd = NULL;                   /* device result vector */
    const size_t matBytes = (size_t)size * size * sizeof(int);
    const size_t vecBytes = (size_t)size * sizeof(int);
    cudaMalloc((void**)&Ad, matBytes);
    cudaMemcpy(Ad, A, matBytes, cudaMemcpyHostToDevice);
    cudaMalloc((void**)&Bd, vecBytes);
    dim3 blocks(32, 32);
    // Enough blocks along y so every row 0..size-1 gets a thread.
    dim3 grids(1, (size + blocks.y - 1) / blocks.y);
    n_avg<<<grids, blocks>>>(Ad, Bd, k);      /* run per-row sums */
    cudaMemcpy(B, Bd, vecBytes, cudaMemcpyDeviceToHost);
    cudaFree(Ad);
    cudaFree(Bd);                             /* free device memory */
}
// Driver: fills a size x size matrix with random values in [0,100), runs
// gpu_avg over it, and reports the elapsed GPU time via CUDA events.
int main()
{
    //int size = 100;
    int A[size][size]; // 10,000 values
    int result[size];  // per-row output buffer handed to gpu_avg
    srand(time(0));
    for(int i = 0; i < size; ++i){
        result[i] = 0;
        for(int j = 0; j < size; ++j){
            A[i][j] = rand() % 100;
        }
    }
    // Time the whole gpu_avg call (allocations + kernel) with CUDA events.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    gpu_avg(A,result,size);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop); /* Report Time */
    printf("%f ms\n", milliseconds);
    return 0;
}
|
10,469 | #include<stdio.h>
#define ARRAY_SIZE 16
// Debug kernel: prints each thread's local index, block index, block size,
// and the value it loads at its global index.
__global__ void unique_idx_calc_threadIdx(int * input) {
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    // BUG FIX: the original passed the *global* index `tid` for the
    // "threadIdx.x: %d" field, so the printed label did not match the value.
    printf("threadIdx.x: %d, blockIdx.x: %d, blockDim.x: %d, value: %d\n",
           threadIdx.x, blockIdx.x, blockDim.x, input[tid]);
}
// Debug kernel: prints each thread's local index, block index, computed
// global index, and the value it loads at that global index.
__global__ void unique_gid_calc(int * input) {
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    printf("threadIdx.x: %d, blockIdx.x: %d, gid.x: %d, value: %d\n",
           threadIdx.x, blockIdx.x, gid, input[gid]);
}
// 16 ints processed by a 4-block x 4-thread launch; each thread prints its
// computed global index and the value it loads.
int main() {
    const int bytes = ARRAY_SIZE * sizeof(int);
    int h_data[] = {23, 9, 7, 14, 27, 4, 3, 11, 10, 13, 61, 42, 50, 67, 83, 22};
    for (int i = 0; i < ARRAY_SIZE; ++i)
        printf("%d ", h_data[i]);
    printf("\n \n");
    int *d_data = NULL;
    cudaMalloc((void**)&d_data, bytes);
    cudaMemcpy(d_data, h_data, bytes, cudaMemcpyHostToDevice);
    dim3 grid(4);
    dim3 block(4);
    // unique_idx_calc_threadIdx<<<grid,block>>>(d_data);
    unique_gid_calc<<<grid, block>>>(d_data);
    cudaDeviceSynchronize();
    cudaDeviceReset();
    return 0;
}
10,470 | #include "includes.h"
// Marks vertices invalid (w = -1) wherever the corresponding mask entry is
// zero. One thread per pixel of the width x height grid.
__global__ void gpu_maskPointCloud(float4* verts, const int width, const int height, const int* mask) {
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col < width && row < height) {
        const int idx = row * width + col;
        if (mask[idx] == 0)
            verts[idx].w = -1;
    }
}
10,471 | #include <cuda_runtime.h>
#define THREADS 128
#define BLOCKS 2
// Adds each element's global index to it. No bounds guard: the caller must
// launch exactly one thread per element.
__global__ void vectorAdd(int *v) {
    int gid = blockDim.x * blockIdx.x + threadIdx.x;
    v[gid] = v[gid] + gid;
}
// Allocates and zeroes a device vector, runs vectorAdd over it, and frees it.
int main(int argc, char **argv) {
    int *d_vec = NULL;
    const size_t bytes = sizeof(int) * BLOCKS * THREADS;
    cudaMalloc((void**)&d_vec, bytes);
    // BUG FIX: the original zeroed only BLOCKS*THREADS *bytes* — a quarter
    // of the int buffer — leaving the rest uninitialized.
    cudaMemset(d_vec, 0, bytes);
    vectorAdd<<<BLOCKS, THREADS>>>(d_vec);
    cudaDeviceSynchronize();
    cudaFree(d_vec);
    return 0;
}
10,472 | #include <cuda.h>
#include <assert.h>
#include <stdio.h>
// Computes coefficient (filter-weight) gradients for a convolutional layer.
// One block handles one filter tap (blockIdx.x/y) for one output row of one
// batch group (blockIdx.z); threads tile (channel, filter) pairs per the
// template parameters. Requires dynamic shared memory for
// channels*batch_per_block input floats plus count*batch_per_block gradient
// floats (see the launch in main). Grid/block shape preconditions are
// enforced by the asserts below.
template <int channel_per_thread, int filter_per_thread, int batch_per_block>
__global__ static void _cwc_kern_convolutional_backward_propagate_coefficient_default(const int strides, const int border, const int batch,
    float* input, const int rows, const int cols, const int channels,
    float* out_grad, const int out_rows, const int out_cols,
    float* coeff, const int filter_rows, const int filter_cols, const int count)
{
    assert(gridDim.x == filter_cols);
    assert(gridDim.y == filter_rows);
    extern __shared__ float shared[];
    // Shared memory is split: inputs first, then output gradients.
    float* shared_input = &shared[0];
    float* shared_out_grad = &shared[channels * batch_per_block];
    const int thidx = threadIdx.x + threadIdx.y * blockDim.x;
    const int thcnt = blockDim.x * blockDim.y;
    assert(blockDim.x * filter_per_thread == count);
    assert(blockDim.y * channel_per_thread == channels);
    assert(thcnt >= channels * batch_per_block);
    assert(thcnt >= count);
    const int origin_x = blockIdx.x;
    const int origin_y = blockIdx.y;
    const int batch_group_idx = blockIdx.z / out_rows;
    // Clamp the horizontal output range so the strided input accesses below
    // stay inside [0, cols).
    const int start_x = max(origin_x - border, 0) - (origin_x - border);
    const int end_x = min(out_cols, (cols + border - origin_x + strides - 1) / strides);
    input += (rows * cols * channels * batch_group_idx + (origin_y * cols + origin_x) * channels) * batch_per_block;
    out_grad += out_rows * out_cols * count * batch_group_idx * batch_per_block;
    int i, j, c, x;
    const int y = blockIdx.z % out_rows;
    // Per-thread accumulators for its (channel, filter) tile.
    float prod[channel_per_thread][filter_per_thread];
    #pragma unroll
    for (i = 0; i < channel_per_thread; i++)
        #pragma unroll
        for (j = 0; j < filter_per_thread; j++)
            prod[i][j] = 0;
    const int iy = origin_y + y * strides - border;
    // Rows outside the (border-padded) input contribute nothing.
    if (iy >= 0 && iy < rows)
    {
        input += (y * strides - border) * cols * channels * batch_per_block;
        out_grad += y * out_cols * count * batch_per_block;
        for (x = start_x; x < end_x; x++)
        {
            // Stage this output column's gradients and the matching input
            // column in shared memory, then accumulate the outer products.
            if (thidx < count)
                #pragma unroll
                for (c = 0; c < batch_per_block; c++)
                    shared_out_grad[c * count + thidx] = out_grad[x * count * batch_per_block + c * count + thidx];
            if (thidx < channels * batch_per_block)
                shared_input[thidx] = input[(x * strides - border) * channels * batch_per_block + thidx];
            __syncthreads();
            #pragma unroll
            for (i = 0; i < channel_per_thread; i++)
                #pragma unroll
                for (j = 0; j < filter_per_thread; j++)
                {
                    float sum = 0;
                    #pragma unroll
                    for (c = 0; c < batch_per_block; c++)
                        sum += shared_input[c * channels + i + threadIdx.y * channel_per_thread] * shared_out_grad[c * count + j + threadIdx.x * filter_per_thread];
                    prod[i][j] += sum;
                }
            __syncthreads();
        }
    }
    // Write this block's partial gradients; one slab of coeff per blockIdx.z.
    const int cocnt = filter_cols * filter_rows * count;
    coeff += cocnt * channels * blockIdx.z + (origin_y * filter_cols + origin_x) * count;
    #pragma unroll
    for (i = 0; i < channel_per_thread; i++)
        #pragma unroll
        for (j = 0; j < filter_per_thread; j++)
            coeff[(i + threadIdx.y * channel_per_thread) * cocnt + j + threadIdx.x * filter_per_thread] = prod[i][j];
}
/*
__global__ static void _cwc_kern_convolutional_backward_propagate_coefficient_whole(const int strides, const int border, const int batch,
float* input, const int rows, const int cols, const int channels,
float* out_grad, const int out_rows, const int out_cols,
float* coeff, const int filter_rows, const int filter_cols, const int count)
{
assert(gridDim.x == channels);
assert(gridDim.y == count);
extern __shared__ float shared[];
float* shared_input = &shared[0];
float* shared_out_grad = &shared[(cols + border * 2) * filter_rows * 4];
const int thidx = threadIdx.x + threadIdx.y * blockDim.x;
const int batch_group_idx = blockIdx.z;
const int origin_x = threadIdx.x;
const int origin_y = threadIdx.y;
const int channel_idx = blockIdx.x;
const int count_idx = blockIdx.y;
input += channel_idx * rows * cols * batch_group_idx * 4;
out_grad += count_idx * out_rows * out_cols * batch_group_idx * 4;
int i, j, c;
float prod = 0;
for (i = 0; i < filter_rows - 1 - border; i++)
for (j = 0; j < cols; j += 8)
if (thidx < 32)
shared_input[(i * (cols + border * 2) + j + border) * 4 + thidx] = input[(i * cols + j) * 4 + thidx];
for (i = 0; i < out_rows; i++)
{
if (thidx < 32)
for (j = 0; j < out_cols; j += 8)
shared_out_grad[j * 4 + thidx] = out_grad[j * 4 + thidx];
if (thidx < 32)
#pragma unroll
for (c = 0; c < strides; c++)
#pragma unroll
for (j = 0; j < cols; j += 8)
shared_input[(((i * strides + c + filter_rows - 1 - border) % filter_rows) * (cols + border * 2) + j + border) * 4 + thidx] = input[(c * cols + j) * 4 + thidx];
__syncthreads();
float* input_thread = shared_input + ((origin_y + i * strides) % filter_rows) * (cols + border * 2) * 4 + origin_x;
for (j = 0; j < out_cols; j++)
{
#pragma unroll
for (c = 0; c < 4; c++)
prod += shared_out_grad[j * 4 + c] * input_thread[j * strides * 4 + c];
}
input += cols * batch * strides;
out_grad += out_cols * batch;
}
coeff[(channel_idx * count + count_idx) * filter_rows * filter_cols + origin_y * filter_cols + origin_x] = prod;
}
*/
// Alternative coefficient-gradient kernel: one block per (filter column,
// output row); threadIdx.x indexes channels, threadIdx.y a slice of the
// filters. NOTE(review): the 7 and 3 loop bounds are hard-coded to the
// 7-row-filter / 3-slice configuration launched in main — confirm before
// reusing with other shapes.
template<int count_per_block, int batch_per_block>
__global__ static void _cwc_kern_convolutional_backward_propagate_coefficient_another(const int strides, const int border, const int batch,
    float* input, const int rows, const int cols, const int channels,
    float* out_grad, const int out_rows, const int out_cols,
    float* coeff, const int filter_rows, const int filter_cols, const int count)
{
    assert(gridDim.x == filter_cols);
    assert(gridDim.y == out_rows);
    extern __shared__ float shared[];
    float* shared_input = &shared[0];
    float* shared_out_grad = &shared[channels * batch_per_block];
    const int thidx = threadIdx.x + threadIdx.y * blockDim.x;
    const int thcnt = blockDim.x * blockDim.y;
    assert(thcnt >= channels * batch_per_block);
    assert(thcnt >= count);
    const int channel_idx = threadIdx.x;
    const int count_idx = threadIdx.y;
    const int origin_x = blockIdx.x;
    const int y = blockIdx.y;
    const int batch_group_count = batch / batch_per_block;
    // Clamp the horizontal output range to in-bounds input columns.
    const int start_x = max(origin_x - border, 0) - (origin_x - border);
    const int end_x = min(out_cols, (cols + border - origin_x + strides - 1) / strides);
    input += origin_x * channels * batch_per_block;
    out_grad += out_rows * out_cols * count * batch_per_block;
    int i, j, c, x;
    // prod[j][c]: j in [0,3) indexes the filter slice (count_per_block
    // strided), c in [0,7) the filter row.
    float prod[3][7];
    #pragma unroll
    for (i = 0; i < 3; i++)
        #pragma unroll
        for (j = 0; j < 7; j++)
            prod[i][j] = 0;
    const int iy = y * strides - border;
    for (x = start_x; x < end_x; x++)
    {
        if (thidx < channels * batch_per_block)
            #pragma unroll
            for (i = 0; i < 7; i++)
                // Zero-pad filter rows that fall outside the input image.
                shared_input[i * channels * batch_per_block + thidx] = (i + iy >= 0 && i + iy < rows) ? input[((i + y * strides - border) * cols + x * strides - border) * channels * batch_per_block + thidx] : 0;
        #pragma unroll
        for (i = 0; i < batch_per_block; i++)
        {
            shared_out_grad[thidx] = out_grad[(y * out_cols + x) * count * batch_per_block + i * count + thidx];
            __syncthreads();
            #pragma unroll
            for (c = 0; c < 7; c++)
                #pragma unroll
                for (j = 0; j < 3; j++)
                    // BUG FIX: the original accumulated into prod[c][j] with
                    // c in [0,7) — out of bounds for a [3][7] array. The
                    // final write below reads prod[slice][filter_row], so the
                    // accumulation must be prod[j][c].
                    prod[j][c] += shared_out_grad[count_idx + j * count_per_block] * shared_input[c * channels * batch_per_block + i * channels + channel_idx];
            __syncthreads();
        }
    }
    #pragma unroll
    for (j = 0; j < 7; j++)
        #pragma unroll
        for (i = 0; i < 3; i++)
            coeff[(j * filter_cols + origin_x) * count * channels * batch_per_block + channel_idx * count + count_idx + i * count_per_block] = prod[i][j];
}
// Benchmark driver: fills a 225x225x3x128 input tensor and a 111x111x96x128
// gradient tensor, then launches both coefficient-gradient kernels with the
// shapes their internal asserts expect (stride 2, border 1, 7x7 filters,
// 96 filters, batch 128).
int main(int argc, char** argv)
{
    float* in = 0;
    float* out = 0;
    // NOTE(review): allocates 227*225 rows but only 225*225 are initialized
    // and copied below — presumably deliberate slack; confirm.
    cudaMalloc(&in, sizeof(float) * (227 * 225 * 3 * 128));
    cudaMalloc(&out, sizeof(float) * (111 * 111 * 96 * 128));
    float* in_host = 0;
    float* out_host = 0;
    int i, j, c, k;
    cudaMallocHost(&in_host, sizeof(float) * 225 * 225 * 3 * 128);
    for (i = 0; i < 225; i++)
        for (j = 0; j < 225; j++)
            for (c = 0; c < 3; c++)
                for (k = 0; k < 128; k++)
                    in_host[i * 225 * 3 * 128 + j * 3 * 128 + c * 128 + k] = c * k;
    cudaMemcpy(in, in_host, sizeof(float) * 225 * 225 * 3 * 128, cudaMemcpyHostToDevice);
    cudaMallocHost(&out_host, sizeof(float) * 111 * 111 * 96 * 128);
    for (i = 0; i < 111; i++)
        for (j = 0; j < 111; j++)
            for (c = 0; c < 96; c++)
                for (k = 0; k < 128; k++)
                    out_host[i * 111 * 96 * 128 + j * 96 * 128 + c * 128 + k] = c * k;
    cudaMemcpy(out, out_host, sizeof(float) * 111 * 111 * 96 * 128, cudaMemcpyHostToDevice);
    float* w = 0;
    // First kernel: 96x1 threads, 7x7x(111*16) blocks, shared memory for
    // (3 + 96) floats per batch slice of 8.
    dim3 thread_per_block(96, 1);
    dim3 num_blocks(7, 7, 111 * 16);
    cudaMalloc(&w, sizeof(float) * 3 * 96 * 7 * 7 * 111 * 16);
    int shared_memory_size = sizeof(float) * 8 * (3 + 96);
    cudaFuncSetCacheConfig(_cwc_kern_convolutional_backward_propagate_coefficient_default<3, 1, 8>, cudaFuncCachePreferShared);
    _cwc_kern_convolutional_backward_propagate_coefficient_default
    <3, 1, 8>
    <<<num_blocks, thread_per_block, shared_memory_size>>>
    (2, 1, 128,
     in, 225, 225, 3,
     out, 111, 111,
     w, 7, 7, 96);
    // Second kernel: 3x32 threads, 7x111x4 blocks.
    thread_per_block = dim3(3, 32);
    num_blocks = dim3(7, 111, 4);
    shared_memory_size = sizeof(float) * (32 * 7 * 3 + 96);
    cudaFuncSetCacheConfig(_cwc_kern_convolutional_backward_propagate_coefficient_another<32, 32>, cudaFuncCachePreferShared);
    _cwc_kern_convolutional_backward_propagate_coefficient_another
    <32, 32>
    <<<num_blocks, thread_per_block, shared_memory_size>>>
    (2, 1, 128,
     in, 225, 225, 3,
     out, 111, 111,
     w, 7, 7, 96);
    /*
    thread_per_block = dim3(7, 7);
    num_blocks = dim3(3, 96, 32);
    shared_memory_size = sizeof(float) * 4 * (7 * (225 + 2) + 111);
    cudaFuncSetCacheConfig(_cwc_kern_convolutional_backward_propagate_coefficient_whole, cudaFuncCachePreferShared);
    _cwc_kern_convolutional_backward_propagate_coefficient_whole
    <<<num_blocks, thread_per_block, shared_memory_size>>>
    (2, 1, 128,
     in, 225, 225, 3,
     out, 111, 111,
     w, 7, 7, 96);
    */
    cudaFree(w);
    cudaFree(out);
    cudaFree(in);
    cudaFreeHost(out_host);
    cudaFreeHost(in_host);
    return 0;
}
|
10,473 | #include "includes.h"
extern "C" {
}
#define TB 256
#define EPS 0.1
#undef MIN
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#undef MAX
#define MAX(a, b) ((a) > (b) ? (a) : (b))
// Upsamples a dense correspondence field from (curr_h x curr_w) to
// (next_h x next_w): each fine-grid pixel takes the match of its nearest
// coarse pixel and rescales the displacement by the resolution ratio.
// Both corr arrays store interleaved (x, y) pairs, 2 ints per pixel.
// Launch: 1-D grid covering next_h * next_w threads.
__global__ void upsample_corr_kernel( int *curr_corr, int *next_corr, int curr_h, int curr_w, int next_h, int next_w )
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < next_h * next_w) {
        int next_x = id % next_w, next_y = id / next_w;
        float w_ratio = (float)next_w / (float)curr_w;
        float h_ratio = (float)next_h / (float)curr_h;
        // Map the fine pixel back to the coarse grid; the +0.5 shift before
        // the truncating int conversion gives nearest-pixel behavior.
        int curr_x = (next_x + 0.5) / w_ratio;
        int curr_y = (next_y + 0.5) / h_ratio;
        curr_x = MAX(MIN(curr_x, curr_w-1), 0);
        curr_y = MAX(MIN(curr_y, curr_h-1), 0);
        int curr_id = curr_y * curr_w + curr_x;
        int curr_x2 = curr_corr[2 * curr_id + 0];
        int curr_y2 = curr_corr[2 * curr_id + 1];
        // Scale the coarse displacement up to the fine resolution
        // (+0.5 rounds the float product to the nearest int).
        int next_x2 = next_x + (curr_x2 - curr_x) * w_ratio + 0.5;
        int next_y2 = next_y + (curr_y2 - curr_y) * h_ratio + 0.5;
        // Clamp the upsampled match back into the fine image bounds.
        next_x2 = MAX(MIN(next_x2, next_w-1), 0);
        next_y2 = MAX(MIN(next_y2, next_h-1), 0);
        next_corr[2 * id + 0] = next_x2;
        next_corr[2 * id + 1] = next_y2;
    }
    return ;
}
10,474 | /* Program : Performing a matrix multiplication with a 2-D block and 2-D thread layout
* Author : Anant Shah
* Date : 5-9-2018
* Roll Number : EE16B105
*/
#include<stdio.h>
#define SIZE 8192
#define NUM_THREADS 16
#define ERROR_HANDLER(error_msg,line) error_handler(error_msg,line)
/* Aborts the program with a diagnostic (error string, file, line) whenever
 * a CUDA call did not return cudaSuccess. */
void error_handler(cudaError_t error_msg, int line) {
    if (error_msg == cudaSuccess)
        return;
    printf("%s in %s at %d", cudaGetErrorString(error_msg), __FILE__, line);
    exit(EXIT_FAILURE);
}
/* Populates a row-major numRows x numCols matrix with mat[i][j] = i*2.1f + j*3.2f
 * (computed in float, stored as double, matching the original). */
void fill_matrix(double *mat, unsigned numRows, unsigned numCols){
    for (unsigned r = 0; r < numRows; ++r) {
        double *rowPtr = mat + (size_t)r * numCols;
        for (unsigned c = 0; c < numCols; ++c)
            rowPtr[c] = r * 2.1f + c * 3.2f;
    }
}
/* Appends the numRows x numCols matrix to "assignment2_1_out", one row per
 * line, entries formatted "%4.4f ". */
void print_matrix_to_file(double *mat, unsigned numRows, unsigned numCols){
    const char *fname = "assignment2_1_out";
    FILE *f = fopen(fname, "a");
    for (unsigned r = 0; r < numRows; ++r) {
        for (unsigned c = 0; c < numCols; ++c)
            fprintf(f, "%4.4f ", mat[r * numCols + c]);
        fprintf(f, "\n");
    }
    fclose(f);
}
/* Kernel: P = M * N for width x width matrices, one output cell per thread,
 * with ".x" as the fastest-changing (column) index.
 * Parameters: M, N - input matrices in row-major 1-D layout
 *             P    - output matrix, row-major 1-D layout
 *             width - number of rows/columns (square matrices)
 */
__global__ void matrixMulX(double *M,double *N,double *P,int width){
    int col = blockIdx.x*blockDim.x+threadIdx.x; /* Column of the cell to be calculated */
    int row = blockIdx.y*blockDim.y+threadIdx.y; /* Row of the cell to be calculated */
    /* Robustness: the ceil-div grid in main can overshoot when width is not
     * a multiple of the block size; guard against out-of-bounds cells. */
    if (row >= width || col >= width)
        return;
    double pSum = 0.0; /* Partial sum (dot product of row of M, column of N) */
    for(unsigned i=0;i<width;i++){
        pSum += M[row*width+i]*N[i*width+col];
    }
    P[row*width+col] = pSum;
}
/* Kernel: P = M * N for width x width matrices, one output cell per thread,
 * with the thread indices swapped relative to matrixMulX (".y" fastest on
 * columns) so the two launches exercise different memory-access patterns.
 * Parameters: M, N - input matrices in row-major 1-D layout
 *             P    - output matrix, row-major 1-D layout
 *             width - number of rows/columns (square matrices)
 */
__global__ void matrixMulY(double *M,double *N,double *P,int width){
    int col = blockIdx.x*blockDim.x+threadIdx.y; /* Column of the cell to be calculated */
    int row = blockIdx.y*blockDim.y+threadIdx.x; /* Row of the cell to be calculated */
    /* Robustness: guard against partial blocks when width is not a multiple
     * of the block size (the ceil-div grid in main can overshoot). */
    if (row >= width || col >= width)
        return;
    double pSum = 0.0;
    for(unsigned i=0;i<width;i++){
        pSum += M[row*width+i]*N[i*width+col];
    }
    P[row*width+col] = pSum;
}
/* Driver: multiplies two SIZE x SIZE matrices with both kernel variants and
 * reports each variant's elapsed time (CUDA events). */
int main(int argc,char **argv){
    /************************************* Variable Initialization ***************************************/
    double *h_M; /* Matrix multiplicand on the host*/
    double *h_N; /* Matrix multiplicand on the host*/
    double *h_P; /* Result of the matrix multiplication on the host*/
    double *h_P_t; /* Result of the transpose matrix multiplication on the host*/
    double *d_M; /* Matrix multilpicand on the device */
    double *d_N; /* Martix multiplicand on the device */
    double *d_P; /* Matrix multiplicand on the device */
    double *d_P_t; /* Result of the transpose matrix multiplication on the device */
    size_t size; /* Number of bytes required to store the matrices in the memory */
    cudaEvent_t start, stop; /* Cuda Events to be used to measure the run-time of the multiplication kernel where the fastest moving index is ".x" */
    cudaEvent_t start_t,stop_t; /* Cuda Events to be used to measure the run-time of the multiplication kernel where the fastest moving index is "".y*/
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventCreate(&start_t);
    cudaEventCreate(&stop_t);
    /*****************************************************************************************************/
    /*************************** Allocating memory to the matrices on the device *************************/
    size = sizeof(double)*SIZE*SIZE; /* Each array is a 2-D matrix with size N*N */
    h_M = (double *)malloc(size);
    h_N = (double *)malloc(size);
    h_P = (double *)malloc(size);
    h_P_t = (double *)malloc(size);
    /*************************** Initialize the matrices with values ************************************/
    fill_matrix(h_M,SIZE,SIZE);
    fill_matrix(h_N,SIZE,SIZE);
    /************************** Allocate memory on the GPU to store the matrices ************************/
    ERROR_HANDLER(cudaMalloc((void **)&d_M,size),__LINE__);
    ERROR_HANDLER(cudaMalloc((void **)&d_N,size),__LINE__);
    ERROR_HANDLER(cudaMalloc((void **)&d_P,size),__LINE__);
    ERROR_HANDLER(cudaMalloc((void **)&d_P_t,size),__LINE__);
    /************************** Copy the matrices from the host to device ******************************/
    ERROR_HANDLER(cudaMemcpy(d_M,h_M,size,cudaMemcpyHostToDevice),__LINE__);
    ERROR_HANDLER(cudaMemcpy(d_N,h_N,size,cudaMemcpyHostToDevice),__LINE__);
    /************************* Define the block dimensions and grid dimensions **************************/
    dim3 threads(NUM_THREADS,NUM_THREADS); /* Define a (16,16) block of threads */
    dim3 blocks((SIZE+NUM_THREADS-1)/NUM_THREADS,(SIZE+NUM_THREADS-1)/NUM_THREADS); /* The ceiling function has been used to define the blocks, generating a 2-D grid structure of blocks */
    cudaEventRecord(start);
    matrixMulX<<<blocks,threads>>>(d_M,d_N,d_P,SIZE); /* Execute the kernel */
    cudaEventRecord(stop);
    /* The blocking cudaMemcpy below also ensures the stop event has completed
     * before cudaEventElapsedTime is called. */
    ERROR_HANDLER(cudaMemcpy(h_P,d_P,size,cudaMemcpyDeviceToHost),__LINE__);
    cudaEventRecord(start_t);
    matrixMulY<<<blocks,threads>>>(d_M,d_N,d_P_t,SIZE); /*Execute the kernel */
    cudaEventRecord(stop_t);
    ERROR_HANDLER(cudaMemcpy(h_P_t,d_P_t,size,cudaMemcpyDeviceToHost),__LINE__);
    cudaEventSynchronize(stop_t);
    float milliseconds = 0;
    float milliseconds_t = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    cudaEventElapsedTime(&milliseconds_t, start_t, stop_t);
    print_matrix_to_file(h_P,SIZE,SIZE);
    print_matrix_to_file(h_P_t,SIZE,SIZE);
    /* NOTE(review): milliseconds times matrixMulX and milliseconds_t times
     * matrixMulY — confirm the ".x"/".y" labels below match that pairing. */
    printf("Run-Time(seconds) for index change '.x' : %.10f \n",milliseconds_t/1000);
    printf("Run-Time(seconds) for index chnage '.y' : %.10f \n",milliseconds/1000);
    /************************ Free the Memory that was allocated ****************************************/
    cudaFree(d_M);
    cudaFree(d_N);
    cudaFree(d_P);
    cudaFree(d_P_t);
    free(h_M);
    free(h_N);
    free(h_P);
    free(h_P_t);
}
|
10,475 | #include "includes.h"
// filename: gax.cu
// a simple CUDA kernel to add two vectors
extern "C" // ensure function name to be exactly "gax"
{
}
// Squares each element: c[i] = a[i] * a[i]. No bounds guard — the caller
// must launch exactly one thread per element.
__global__ void vsquare(const double *a, double *c)
{
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    c[gid] = a[gid] * a[gid];
}
10,476 | #include <cstdio>
#define BLOCK_SIZE 32
// Tiled shared-memory multiply of two n x n float matrices:
// d_c = d_a * d_b. Each BLOCK_SIZE x BLOCK_SIZE block computes one output
// tile; out-of-range tile loads are zero-padded.
__global__ void matrix_multiplication_square(float *d_a, float *d_b, float *d_c, int n) {
    __shared__ float a[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float b[BLOCK_SIZE][BLOCK_SIZE];
    int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
    int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    // BUG FIX: the accumulator was declared `int`, truncating every partial
    // product of this float matrix multiply; it must be float.
    float tmp = 0.0f;
    int idx;
    for (int sub = 0; sub < gridDim.x; sub++) {
        // Load one tile of A and one of B, zero-filling past the end.
        idx = row * n + sub * BLOCK_SIZE + threadIdx.x;
        a[threadIdx.y][threadIdx.x] = (idx < n * n) ? d_a[idx] : 0.0f;
        idx = (sub * BLOCK_SIZE + threadIdx.y) * n + col;
        b[threadIdx.y][threadIdx.x] = (idx < n * n) ? d_b[idx] : 0.0f;
        __syncthreads();
        for (int k = 0; k < BLOCK_SIZE; ++k) {
            tmp += a[threadIdx.y][k] * b[k][threadIdx.x];
        }
        __syncthreads();
    }
    if (row < n && col < n) {
        d_c[row * n + col] = tmp;
    }
}
// Usage: prog <n>. Multiplies an all-ones n x n matrix by the identity on
// the GPU and checks the product equals the all-ones input.
int main(int argc, char *argv[]) {
    // Robustness: the original dereferenced argv[1] unchecked.
    if (argc < 2) {
        printf("usage: %s <matrix size>\n", argv[0]);
        return 1;
    }
    int n = atoi(argv[1]);
    size_t nbytes = (size_t)n * n * sizeof(float);
    float *h_a, *h_b, *h_c;
    cudaMallocHost((void **)&h_a, nbytes);
    cudaMallocHost((void **)&h_b, nbytes);
    cudaMallocHost((void **)&h_c, nbytes);
    // Init: h_a all ones, h_b identity (the original relied on cudaMemset
    // on a host pointer for the zeros; set them explicitly instead).
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
            h_a[i * n + j] = 1;
            h_b[i * n + j] = (i == j) ? 1.0f : 0.0f;
        }
    }
    float gpu_elapsed_time_ms;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    // BUG FIX: the device buffers were allocated with cudaMallocHost
    // (pinned *host* memory); allocate real device memory with cudaMalloc.
    float *d_a, *d_b, *d_c;
    cudaMalloc((void **)&d_a, nbytes);
    cudaMalloc((void **)&d_b, nbytes);
    cudaMalloc((void **)&d_c, nbytes);
    cudaMemcpy(d_a, h_a, nbytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, nbytes, cudaMemcpyHostToDevice);
    unsigned int grid_rows = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;
    unsigned int grid_cols = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;
    dim3 dimGrid(grid_rows, grid_cols);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    matrix_multiplication_square<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, n);
    cudaMemcpy(h_c, d_c, nbytes, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
    // printf("Time elapsed on matrix multiplication of %dx%d . %dx%d on GPU: %f ms.\n\n", n, n, n, n, gpu_elapsed_time_ms);
    // BUG FIX: verify against the host copies — the original compared
    // d_a/d_c on the host, which only "worked" because they were pinned.
    bool good_answer = true;
    for (int i = 0; i < n * n; i++) {
        good_answer &= (h_a[i] == h_c[i]);
    }
    if (good_answer) {
        printf("Good answer\n");
    } else {
        printf("Bad answer\n");
    }
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    // BUG FIX: the original called cudaFreeHost(h_a) three times and leaked
    // h_b and h_c.
    cudaFreeHost(h_a);
    cudaFreeHost(h_b);
    cudaFreeHost(h_c);
    return 0;
}
|
10,477 | #include <iostream>
#include <math.h>
#include <cuda.h>
#include <assert.h>
#define N 65564
// Element-wise vector addition c = a + b, guarded against the launch grid
// overshooting the fixed length N.
__global__ void sum(float *a, float *b, float *c) {
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= N)
        return;
    c[i] = a[i] + b[i];
}
// Terminates the program with a short message on any CUDA API failure.
void handle_error(cudaError_t error) {
    if (error == cudaSuccess)
        return;
    std::cout << "Cuda Error. Exiting..";
    exit (0);
}
// Adds two N-element vectors on the GPU and asserts every element on the host.
int main() {
    // Robustness: three ~256 KB float arrays were on the stack (~768 KB
    // total), close to typical stack limits; static storage is safer and
    // behaves identically here.
    static float a[N], b[N], c[N];
    float *device_a, *device_b, *device_c;
    for (int i = 0; i < N; i++) {
        a[i] = (i+1) * 1.0 / 2;
        b[i] = (i+3) * 1.0 / 3;
    }
    handle_error(cudaMalloc((void **) &device_a, N * sizeof(float)));
    handle_error(cudaMalloc((void **) &device_b, N * sizeof(float)));
    handle_error(cudaMalloc((void **) &device_c, N * sizeof(float)));
    cudaMemcpy(device_a, a, N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(device_b, b, N * sizeof(float), cudaMemcpyHostToDevice);
    sum<<<ceil(N/1024.0), 1024>>>(device_a, device_b, device_c);
    // BUG FIX: the original copied N * sizeof(N) bytes — sizeof of the
    // integer literal N, which only coincidentally equals sizeof(float).
    cudaMemcpy(c, device_c, N * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; i++) {
        assert(c[i] == a[i] + b[i]);
    }
    std::cout << "Successful.\n";
    cudaFree(device_a);
    cudaFree(device_b);
    cudaFree(device_c);
    return 0;
}
|
10,478 | #include "includes.h"
// Element-wise addition of two n x m row-major matrices: c = a + b.
// One thread per element; out-of-range threads exit early.
__global__ void matadd(const float *a, const float *b, float *c, int n, int m){
    const int row = blockDim.x * blockIdx.x + threadIdx.x;
    const int col = blockDim.y * blockIdx.y + threadIdx.y;
    if (row >= n || col >= m)
        return;
    const int k = row * m + col;
    c[k] = a[k] + b[k];
}
10,479 | #include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/sequence.h>
#include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/replace.h>
#include <thrust/functional.h>
#include <cmath>
#include <cassert>
#include <unistd.h>
#include <fcntl.h>
#include <cstdio>
#include <string>
#include <fstream>
#include <algorithm>
#include <random>
#include <iostream>
#include <iomanip>
using namespace std;
// Reverses the byte order of a 32-bit integer in place (endianness
// conversion for the big-endian IDX file headers).
void swap(int &i) {
    unsigned u = (unsigned)i;
    u = (u >> 24)
      | ((u >> 8) & 0xff00u)
      | ((u << 8) & 0xff0000u)
      | (u << 24);
    i = (int)u;
}
// Reads one 4-byte big-endian integer from fd and returns it in host byte
// order; asserts that exactly 4 bytes were read.
int read_int(int fd) {
    int value;
    int nread = read(fd, &value, 4);
    assert(nread == 4);
    swap(value);
    return value;
}
// Writes a 28x28 image with values in [-1, 1] to `fn` as an ASCII PGM,
// mapping -1 -> 255 (white) and 1 -> 0 (black).
void
output_pgm(const std::string &fn, const float (&img)[28][28]) {
    std::ofstream ofs(fn, std::fstream::out|std::fstream::trunc);
    ofs << "P2\n";
    ofs << "28 28\n";
    ofs << "255\n";
    for (int r = 0; r < 28; ++r) {
        const char *sep = "";
        for (int c = 0; c < 28; ++c) {
            ofs << sep << 255 - int(std::round(127.5*(img[r][c] + 1)));
            sep = " ";
        }
        ofs << "\n";
    }
}
// Loads N MNIST images from the IDX3-ubyte file `fn` into imgs, rescaling
// each pixel byte from [0, 255] to [-1, 1]. Aborts (assert) on any header
// mismatch or short read.
template <int N>
void
read_mnist_images(const std::string &fn, float (&imgs)[N][28][28]) {
    int rv;
    int fd;
    fd = open(fn.c_str(), O_RDONLY);
    assert(fd >= 0);
    int magic = read_int(fd);
    assert(magic == 0x803);   // IDX magic number for image files
    int n_images = read_int(fd);
    assert(n_images == N);
    int n_rows = read_int(fd);
    assert(n_rows == 28);
    int n_cols = read_int(fd);
    assert(n_cols == 28);
    for (int i = 0; i < N; i++) {
        unsigned char tmp[28][28];
        rv = read(fd, tmp, 28*28); assert(rv == 28*28);
        for (int r = 0; r < 28; r++) {
            for (int c = 0; c < 28; c++) {
                // Make go from -1 to 1.
                imgs[i][r][c] = double(tmp[r][c])/127.5 - 1;
            }
        }
    }
    rv = close(fd); assert(rv == 0);
}
// Loads N MNIST labels from the IDX1-ubyte file `fn` into labels and checks
// every label is a digit 0-9. Aborts (assert) on any format mismatch.
template <int N>
void
read_mnist_labels(const std::string &fn, unsigned char (&labels)[N]) {
    int rv;
    int fd;
    fd = open(fn.c_str(), O_RDONLY);
    assert(fd >= 0);
    int magic = read_int(fd);
    assert(magic == 0x801);   // IDX magic number for label files
    int n_labels = read_int(fd);
    assert(n_labels == N);
    rv = read(fd, labels, N); assert(rv == N);
    for (int i = 0; i < N; i++) {
        assert(labels[i] >= 0 && labels[i] <= 9);
    }
    rv = close(fd); assert(rv == 0);
}
// Demo driver: loads the MNIST training set from hard-coded cluster paths,
// sanity-checks the first and last labels, then exercises basic thrust
// primitives (sequence, transform, fill, replace) on small device vectors.
int main(void)
{
    // allocate three device_vectors with 10 elements
    string imagename = "/data/home/avanroi1/cs580f/cs580/proj3/training/train-images-idx3-ubyte";
    string labelname = "/data/home/avanroi1/cs580f/cs580/proj3/training/train-labels-idx1-ubyte";
    // static: ~180 MB, far too large for the stack.
    static float training_images[60'000][28][28];
    read_mnist_images(imagename, training_images);
    //output_pgm("img0.pgm", training_images[0]);
    //output_pgm("img59999.pgm", training_images[59999]);
    static unsigned char training_labels[60'000];
    read_mnist_labels(labelname, training_labels);
    assert(training_labels[0] == 5);
    assert(training_labels[59'999] == 8);
    thrust::device_vector<int> X(10);
    thrust::device_vector<int> Y(10);
    thrust::device_vector<int> Z(10);
    //just tryna scrap
    // initialize X to 0,1,2,3, ....
    thrust::sequence(X.begin(), X.end());
    // compute Y = -X
    thrust::transform(X.begin(), X.end(), Y.begin(), thrust::negate<int>());
    thrust::copy(Y.begin(), Y.end(), std::ostream_iterator<int>(std::cout, "\n"));
    // fill Z with twos
    thrust::fill(Z.begin(), Z.end(), 2);
    // compute Y = X mod 2
    thrust::transform(X.begin(), X.end(), Z.begin(), Y.begin(), thrust::modulus<int>());
    // replace all the ones in Y with tens
    thrust::replace(Y.begin(), Y.end(), 1, 10);
    // print Y
    thrust::copy(Y.begin(), Y.end(), std::ostream_iterator<int>(std::cout, "\n"));
    return 0;
}
10,480 | #include <cuda_runtime.h>
#include <stdio.h>
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code:%d, reason: %s\n", error, cudaGetErrorString(error)); \
exit(-10 * error); \
} \
}
// Fill ip[0..N) with the sequence 0, 1, ..., N-1.
void initialInt(int *ip, int N) {
    for (int idx = 0; idx < N; idx++)
        ip[idx] = idx;
}
// Print an ny-by-nx integer matrix stored row-major in C, one row per line.
void printMatrix(int *C, const int nx, const int ny) {
    printf("\nMatrix: (%d.%d)\n", nx, ny);
    int *row = C;
    for (int r = 0; r < ny; r++) {
        for (int c = 0; c < nx; c++)
            printf("%3d", row[c]);
        printf("\n");
        row += nx;
    }
}
// Debug kernel: each thread prints its thread/block ids, its (ix, iy)
// matrix coordinate, the flattened index, and the value A[idx].
// Assumes the 2-D launch grid covers the nx-by-ny matrix exactly
// (no bounds check is performed).
__global__ void printThreadIndex(int *A, const int nx, const int ny) {
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    unsigned int flat = row * nx + col;
    printf("thread_id (%d, %d) block_id (%d, %d) coordinate (%d, %d) "
           "global index %2d ival %2d \n", threadIdx.x, threadIdx.y, blockIdx.x,
           blockIdx.y, col, row, flat, A[flat]);
}
// Demo: print the per-thread index mapping for an 8x6 int matrix on the GPU.
int main(int argc, char *argv[]) {
    printf("%s Starting...\n", argv[0]);
    // get device information
    int dev = 0;
    cudaDeviceProp deviceProp;
    CHECK(cudaGetDeviceProperties(&deviceProp, dev));
    printf("Using Device %d: %s\n", dev, deviceProp.name);
    CHECK(cudaSetDevice(dev));
    // set matrix dimension
    int nx = 8;
    int ny = 6;
    int nxy = nx * ny;
    // BUG FIX: the buffers hold int, not float (same size on common ABIs,
    // but semantically wrong and fragile).
    int nBytes = nxy * sizeof(int);
    // malloc host memory (checked)
    int *h_A = (int*)malloc(nBytes);
    if (h_A == NULL) {
        fprintf(stderr, "host malloc failed\n");
        return 1;
    }
    // initialize host matrix with integers and show it
    initialInt(h_A, nxy);
    printMatrix(h_A, nx, ny);
    // malloc device memory (error-checked; the original ignored the status)
    int *d_MatA;
    CHECK(cudaMalloc((void**)&d_MatA, nBytes));
    // transfer data from host to device
    CHECK(cudaMemcpy(d_MatA, h_A, nBytes, cudaMemcpyHostToDevice));
    // execution configuration: 4x2 threads per block, grid covers nx x ny
    dim3 block(4, 2);
    dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
    // invoke the kernel and surface launch/execution errors
    printThreadIndex <<<grid, block>>> (d_MatA, nx, ny);
    CHECK(cudaGetLastError());
    CHECK(cudaDeviceSynchronize());
    // free host and device memory
    free(h_A);
    CHECK(cudaFree(d_MatA));
    // reset device
    CHECK(cudaDeviceReset());
    return 0;
}
|
10,481 | #include <stdio.h>
// Minimal host-only program; no device code involved.
int main(void) {
    fputs("Hello from CPU!", stdout);   // same output as the original printf
    return 0;
}
|
10,482 | /*
Program Name: CudaVectorAdd
This program adds two vector arrays on GPU.
*/
#include <stdio.h>
#define N 512
// Device Vector Add Function.
// Device Vector Add Function: c[i] = a[i] + b[i].
// Indexes by threadIdx.x only, so the launch must use exactly ONE block
// with one thread per element (as done by add<<<1, N>>> in main below);
// extra blocks would redundantly recompute the same elements.
__global__ void add(int *a, int *b, int *c)
{
// Thread index within the single launched block.
int tid = threadIdx.x;
c[tid] = a[tid] + b[tid];
}
// Allocate two N-element vectors, add them on the GPU, print the sums.
int main(){
    // Host-side and device-side buffers.
    int *a, *b, *c;
    int *dev_a, *dev_b, *dev_c;
    // Host side memory allocation.
    a = (int *)malloc(N * sizeof(int));
    b = (int *)malloc(N * sizeof(int));
    c = (int *)malloc(N * sizeof(int));
    // Device side memory allocation.
    cudaMalloc((void **)&dev_a, N * sizeof(int));
    cudaMalloc((void **)&dev_b, N * sizeof(int));
    cudaMalloc((void **)&dev_c, N * sizeof(int));
    // Initialize both inputs to 0, 1, 2, ...
    for (int i = 0; i < N; i++) {
        a[i] = i;
        b[i] = i;
    }
    // Copy the inputs to the GPU.
    cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice);
    // Launch one block of N threads (one thread per element).
    add<<<1, N>>>(dev_a, dev_b, dev_c);
    // Copy the result back to the host (blocking copy also synchronizes).
    cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost);
    // Print the results.
    for (int i = 0; i < N; i++) {
        printf("%d + %d = %d\n", a[i], b[i], c[i]);
    }
    // Release host and device memory.
    free(a);
    free(b);
    free(c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return 0;
}
|
10,483 | #include <stdio.h>
// Enumerate all CUDA devices and print their key properties.
int main() {
    int nDevices;
    cudaError_t err = cudaGetDeviceCount(&nDevices);
    if (err != cudaSuccess) {
        fprintf(stderr, "%s\n", cudaGetErrorString(err));
        exit(1);
    }
    for (int i = 0; i < nDevices; i++) {
        cudaDeviceProp prop;
        // query the device properties of the i-th device
        err = cudaGetDeviceProperties(&prop, i);
        if (err != cudaSuccess) {
            fprintf(stderr, "%s\n", cudaGetErrorString(err));
            exit(1);
        }
        printf("Device Number: %d\n", i);
        printf("\tDevice Name: %s\n", prop.name);
        printf("\tMajor compute capability: %d.%d\n", prop.major, prop.minor);
        printf("\tDevice Global Memory: %f GB\n", prop.totalGlobalMem / (1024.0*1024.0*1024.0));
        // BUG FIX: sharedMemPerBlock is size_t; printing it with %d is
        // undefined behavior (wrong size on LP64).  Use %zu.
        printf("\tShared Memory per Block: %zu bytes\n", prop.sharedMemPerBlock);
        printf("\tMap Host Memory available (pinned Memory): %s\n", prop.canMapHostMemory ? "true": "false");
        printf("\tMemory Clock Rate (KHz): %d\n", prop.memoryClockRate);
        printf("\tMemory Bus Width: %d bits\n", prop.memoryBusWidth);
        printf("\tPeak Memory Bandwidth: %f GB/s\n", 2.0 * prop.memoryClockRate * (prop.memoryBusWidth / 8)/1.0e6);
        printf("\tNumber of asynchronous engines: %d\n", prop.asyncEngineCount);
        printf("\tL2 Cache bytes: %d\n", prop.l2CacheSize);
        printf("\tConcurrent Kernels: %d\n", prop.concurrentKernels);
    }
    return 0;   // was missing; main fell off the end
}
|
10,484 | #include <bits/stdc++.h>
#include <cuda.h>
#define H 1000
#define W 1000
using namespace std;
// Fill the H x W matrix v with the constant 2.
void foo(float* v) {
    for (int idx = 0; idx < H * W; idx++)
        v[idx] = 2;
}
// CPU reference: C = A * B for H x W (square: H == W assumed by the
// indexing) row-major matrices.
void mult(float *A, float *B,float *C) {
    for (int row = 0; row < H; row++) {
        for (int col = 0; col < W; col++) {
            float acc = 0;
            for (int k = 0; k < W; k++)
                acc += A[row * W + k] * B[k * W + col];
            C[row * W + col] = acc;
        }
    }
}
// Print the H x W matrix v, one row per line.
void mostrar(float *v) {
    for (int r = 0; r < H; r++) {
        for (int c = 0; c < W; c++)
            cout << v[r * W + c] << " ";
        cout << endl;
    }
}
// GPU matrix multiply: d_C = d_A * d_B for H x W row-major matrices.
// One thread per output element; the 2-D launch must cover H x W
// (bounds-checked below).
__global__
void multMat(float *d_A, float *d_B, float *d_C ) {
    int i = blockIdx.y * blockDim.y + threadIdx.y;  // row
    int j = blockIdx.x * blockDim.x + threadIdx.x;  // column
    if (i < H && j < W) {
        // BUG FIX: the accumulator was declared int, silently truncating
        // every float partial product; it must match the element type.
        float Pvalue = 0.0f;
        for (int k = 0; k < W; k++) {
            Pvalue += d_A[i * W + k] * d_B[k * W + j];
        }
        d_C[i * W + j] = Pvalue;
    }
}
// Compare CPU vs GPU matrix multiply timings on H x W matrices of 2s.
int main() {
    // Host matrices: A, B inputs; C = CPU result, D = GPU result.
    float* A = new float[H * W];
    float* B = new float[H * W];
    float* C = new float[H * W];
    float* D = new float[H * W];
    foo(A);
    foo(B);
    {
        clock_t start = clock();
        mult(A, B, C);
        clock_t end = clock();
        double cpu_time_used = ((double)(end - start)) / CLOCKS_PER_SEC;
        printf("Tiempo invertido CPU = %lf s\n", cpu_time_used);
    }
    float *d_A, *d_B, *d_D;
    // BUG FIX: a thread count is an integer quantity, not a float.
    int blockSize = 32;
    dim3 dimBlock(blockSize, blockSize);
    dim3 dimGrid(ceil(W / float(blockSize)), ceil(H / float(blockSize)), 1);
    cudaMalloc((void**)&d_A, sizeof(float) * H * W);
    cudaMalloc((void**)&d_B, sizeof(float) * H * W);
    cudaMalloc((void**)&d_D, sizeof(float) * H * W);
    {
        clock_t start = clock();
        cudaMemcpy(d_A, A, sizeof(float) * H * W, cudaMemcpyHostToDevice);
        cudaMemcpy(d_B, B, sizeof(float) * H * W, cudaMemcpyHostToDevice);
        multMat<<<dimGrid, dimBlock>>>(d_A, d_B, d_D);
        // The blocking D2H copy also synchronizes with the kernel.
        cudaMemcpy(D, d_D, sizeof(float) * H * W, cudaMemcpyDeviceToHost);
        clock_t end = clock();
        double cpu_time_used = ((double)(end - start)) / CLOCKS_PER_SEC;
        std::cout << "Tiempo invertido GPU = " << cpu_time_used << "s\n";
    }
    // BUG FIX: buffers allocated with new[] must be released with delete[]
    // (plain delete here is undefined behavior).
    delete[] A;
    delete[] B;
    delete[] C;
    delete[] D;
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_D);
}
|
10,485 | #include <algorithm>
#include <ctime>
#include <cuda_runtime_api.h>
#include <fstream>
#include <iostream>
#include <list>
#include <map>
#include <numeric>
#include <sstream>
#include <string>
#include <stdio.h>
#include <stdlib.h>
using namespace std;
static void CheckCudaErrorAux(const char *, unsigned, const char *,
cudaError_t);
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
bool FIND_BIGRAM = false; //true = find brigrams; false = find trigrams
bool PRINT = false; //if set to true it will print the found bigrams and trigrams
int GRID_DIM = 10; // grid size
int BLOCK_DIM = 128; //block size
std::string nameFile = "inputTextLong.txt"; //the name of the text file to analyse
// this utility method allows the user to better understand the CUDA errors:
// it prints the failing statement text, the CUDA error string and code, and
// the file:line of the call site, then terminates the whole process.
// Invoked via the CUDA_CHECK_RETURN macro; a cudaSuccess status is a no-op.
static void CheckCudaErrorAux(const char *file, unsigned line,
const char *statement, cudaError_t err) {
if (err == cudaSuccess)
return;
std::cerr << statement << " returned " << cudaGetErrorString(err) << "("
<< err << ") at " << file << ":" << line << std::endl;
exit(1);
}
// Reduce the given text line to lower-case alphabetic characters only;
// every non-letter (digits, punctuation, whitespace) is dropped.
__host__ string clean(string in) {
    string out;
    for (size_t pos = 0; pos < in.length(); pos++) {
        char ch = in[pos];
        if (isalpha(ch))
            out += tolower(ch);
    }
    return out;
}
// Count bigrams (FIND_BIGRAM == true) or trigrams in `line`, accumulating
// the per-key occurrence counts into `graphems`.
__host__ void findGraphemsWithCPU(string line, std::map<std::string,int> &graphems) {
    int tail = FIND_BIGRAM ? 1 : 2;
    // BUG FIX: line.length() is unsigned, so length()-tail underflowed to a
    // huge value for lines shorter than the graphem, causing out-of-range
    // reads.  Compute the bound in signed arithmetic instead.
    int limit = (int)line.length() - tail;
    for (int i = 0; i < limit; i++) {
        string key = std::string() + line[i] + line[i+1];
        if (!FIND_BIGRAM)
            key = key + line[i+2];
        std::map<std::string,int>::iterator it = graphems.find(key);
        if (it != graphems.end()) {
            it->second++;   // key already seen: bump the count
        } else {
            graphems.insert(std::make_pair(key, 1));
        }
    }
}
// Convenience wrapper: build and return a fresh graphem->count map for line.
__host__ std::map<std::string,int> methodWithCPU(std::string line){
    std::map<std::string,int> counts;
    findGraphemsWithCPU(line, counts);
    return counts;
}
// Map a lower-case letter to its 0-based alphabet index ('a' -> 0 ... 'z' -> 25).
// Input must already be cleaned to 'a'..'z' (see clean()); anything else
// yields an out-of-range index.
__device__ int getCharIndex(char c){
return (c - 'a');
}
// this method finds the graphems (bigram or trigram) using the GPU.
// Each thread owns one slice of `line` (sliceLength characters) and counts
// every graphem that STARTS inside its slice, including the `tail` characters
// that overlap into the next slice.  Counts are accumulated into the flat
// counter array graphemsArray (26^2 entries for bigrams, 26^3 for trigrams)
// with atomicAdd.  Assumes line holds only 'a'..'z' (see clean()).
__global__ void findGraphemsWithGPU(const char *line, int* graphemsArray, int sliceLength, int lineLength, bool findBigram) {
int startPoint =
blockDim.x * blockIdx.x +
threadIdx.x;
startPoint *= sliceLength;
int endPoint = startPoint + sliceLength - 1;
int tail = findBigram? 1 : 2;
endPoint += tail;
// Rolling window of 2-3 character indices, shifted one char per iteration.
int index1;
int index2;
int index3;
// Prime the window with the first 1-2 characters of the slice.
if((startPoint+tail) < lineLength ){
index2 = getCharIndex(line[startPoint]);
if(!findBigram) {
index3 = getCharIndex(line[startPoint+1]);
}
}
// Slide one character at a time, reusing the previously fetched indices
// so each character of the slice is loaded from global memory only once.
while((startPoint+tail) <= endPoint && (startPoint+tail) < lineLength){
index1 = index2;
if(findBigram) {
index2 = getCharIndex(line[startPoint+tail]);
atomicAdd(&graphemsArray[index1 * 26 + index2 ], 1);
}else{
index2 = index3;
index3 = getCharIndex(line[startPoint+tail]);
atomicAdd(&graphemsArray[index1 * 26 * 26 + index2 * 26 + index3], 1);
}
startPoint++;
}
return;
}
// this method prints the graphems found with the GPU.
// Each nonzero flat counter index is decoded back into its 2- or 3-letter
// key: the index is a base-26 number whose digits (from least significant)
// are the alphabet positions of the last, middle, and first letters.
__host__ void print(int *graphemsArrayHost){
int lengthGraphems = FIND_BIGRAM? 26*26 : 26*26*26;
std::string alphabet = "abcdefghijklmnopqrstuvwxyz";
for(int i = 0 ; i < lengthGraphems; i++){
if(graphemsArrayHost[i] != 0){
// result1.rem = last letter, result2.rem = preceding letter, etc.
div_t result1 = std::div(i,26);
div_t result2 = std::div(result1.quot,26);
if(FIND_BIGRAM){
cout << (std::string() + alphabet[result2.rem]+ alphabet[result1.rem]) << " = " << graphemsArrayHost[i] << "\n";
}else{
div_t result3 = std::div(result2.quot,26);
cout << (std::string() + alphabet[result3.rem]+ alphabet[result2.rem] + alphabet[result1.rem]) << " = " << graphemsArrayHost[i] << "\n";
}
}
}
}
// this method finds the graphems (bigram or trigram) using the GPU.
// Returns a calloc'd counter array of length 26*26 (bigrams) or 26*26*26
// (trigrams); the caller owns it and must free() it.
__host__ int* methodWithGPU(std::string line){
    // GRAPHEMS ARRAY
    int lengthGraphems = FIND_BIGRAM? 26*26 : 26*26*26;
    int *graphemsArrayDevice;
    int *graphemsArrayHost = (int*)calloc(lengthGraphems, sizeof(int));
    // allocate device memory and zero it by copying the calloc'd host array
    CUDA_CHECK_RETURN(
        cudaMalloc((void ** )&graphemsArrayDevice,
            sizeof(int) * lengthGraphems));
    CUDA_CHECK_RETURN(
        cudaMemcpy(graphemsArrayDevice, graphemsArrayHost, lengthGraphems * sizeof(int),
            cudaMemcpyHostToDevice));
    // TEXT LINE: copy the cleaned text to the device
    int lengthLine = line.length();
    char *lineDevice;
    CUDA_CHECK_RETURN(
        cudaMalloc((void ** )&lineDevice,
            sizeof(char) * lengthLine));
    CUDA_CHECK_RETURN(
        cudaMemcpy(lineDevice, line.c_str(), lengthLine * sizeof(char),
            cudaMemcpyHostToDevice));
    // execute kernel: split the line evenly across every launched thread
    int totalthreadNumber = GRID_DIM * BLOCK_DIM;
    int sliceLength = ceil(float(lengthLine)/float(totalthreadNumber));
    findGraphemsWithGPU<<< GRID_DIM, BLOCK_DIM >>>(lineDevice, graphemsArrayDevice, sliceLength, lengthLine, FIND_BIGRAM);
    // IMPROVEMENT: surface launch-configuration and execution errors instead
    // of silently returning a stale all-zero result.
    CUDA_CHECK_RETURN(cudaGetLastError());
    CUDA_CHECK_RETURN(cudaDeviceSynchronize());
    // copy results from device memory to host
    CUDA_CHECK_RETURN(
        cudaMemcpy(graphemsArrayHost, graphemsArrayDevice, lengthGraphems * sizeof(int),
            cudaMemcpyDeviceToHost));
    // Free the GPU memory (now error-checked too)
    CUDA_CHECK_RETURN(cudaFree(lineDevice));
    CUDA_CHECK_RETURN(cudaFree(graphemsArrayDevice));
    return graphemsArrayHost;
}
// The main method.
// Parameters:
// 1 - [b,t] in order to chose between "Bigrams" or "Trigrams" (default: b)
// 2 - size of grid for the initial call (default: 10)
// 3 - size of block for the initial call (default: 128)
// 4 - [t,f,true,false] to print the result of the graphems (default: false)
// 5 - the name of the input file (default: inputTextLong.txt)
//
// calling example: ./main t 5 32 true inputTextVeryLong.txtx
__host__ int main(int argc, char** argv) {
// Parse the positional arguments; each later argument is only read when
// all earlier ones were supplied.
if(argc > 1){
std::string setting(argv[1]);
if(setting == "b" ) {
FIND_BIGRAM = true;
}else if(setting == "t" ) {
FIND_BIGRAM = false;
}else{
cout<<"Parameter "<< argv[1] <<" not accepted. Only \"b\" (bigram), \"t\" (trigram), accepted. "<< "\n";
return 0;
}
if(argc > 2){
GRID_DIM = atoi(argv[2]);
if(argc > 3){
BLOCK_DIM = atoi(argv[3]);
if(argc > 4){
std::string setting(argv[4]);
if (setting == "t" || setting == "true")
PRINT = true;
if(argc > 5){
std::string setting(argv[5]);
nameFile = setting;
}
}
}
}
}
// Read the whole input file into one cleaned (lower-case letters only) string.
std::string line;
std::string longLine;
std::string path = "input/"+nameFile;
ifstream myfile(path.c_str());
if (myfile.is_open()) {
while (getline(myfile, line)) {
// Cleaning the line
line = clean(line);
longLine += line;
}
myfile.close();
}
else
cout << "Unable to open file";
// Time the CPU reference implementation.
clock_t beginCPU = clock();
std::map<std::string,int> graphems;
graphems = methodWithCPU(longLine);
clock_t endCPU = clock();
// showing contents:
cout<< "GRID_DIM: " << GRID_DIM << ", BLOCK_DIM: " << BLOCK_DIM << "\n";
double elapsed_secsCPU = double(endCPU - beginCPU) / CLOCKS_PER_SEC;
cout<<"CPU METHOD: " << "\n";
cout<<"Elapsed milliseconds: " << elapsed_secsCPU*1000 << "\n";
cout<<"Microseconds: " << endCPU - beginCPU << "\n";
// ITERATION TO START COMUNICATION WITH GPU (first call pays CUDA context
// creation cost, timed separately from the sweep below).
int *graphemsArrayHost;
clock_t beginGPU = clock();
graphemsArrayHost = methodWithGPU(longLine);
clock_t endGPU = clock();
double elapsed_secsGPU = double(endGPU - beginGPU) / CLOCKS_PER_SEC;
std::cout << "FIRST ITERATION. GRID_DIM: " << GRID_DIM << ", BLOCK_DIM: " << BLOCK_DIM << "\n";
std::cout << "Elapsed Milliseconds: " << elapsed_secsGPU*1000 << "\n";
//verify data: dump both result sets for manual comparison
if(PRINT){
std::cout << "The graphems obtained with CPU are:\n";
std::map<std::string,int>::iterator it;
for (it=graphems.begin(); it!=graphems.end(); ++it)
std::cout << it->first << " => " << it->second << '\n';
std::cout << "\n\n -----------------------------------------\n\n";
std::cout << "The graphems obtained with GPU are:\n";
print(graphemsArrayHost);
}
// Free host memory (methodWithGPU transfers ownership via calloc)
free(graphemsArrayHost);
// Sweep grid size x block size over powers of two, printing a CSV-like
// table of elapsed milliseconds (header row of block sizes first).
std::cout << "Elapsed milliseconds changing grid dimension and block dimension: \n";
for (int dimBlocco=1; dimBlocco <= 512 ; dimBlocco = dimBlocco*2 ){
std::cout << "," << dimBlocco;
}
std::cout << "\n\n";
for (int dimGriglia=1; dimGriglia <= 512 ; dimGriglia = dimGriglia*2 ){
GRID_DIM = dimGriglia;
std::cout << dimGriglia;
for (int dimBlocco=1; dimBlocco <= 512 ; dimBlocco = dimBlocco*2 ){
BLOCK_DIM = dimBlocco;
int *graphemsArrayHost;
clock_t beginGPU = clock();
graphemsArrayHost = methodWithGPU(longLine);
clock_t endGPU = clock();
// Free host memory
free(graphemsArrayHost);
double elapsed_secsGPU = double(endGPU - beginGPU) / CLOCKS_PER_SEC;
std::cout << ", "<< elapsed_secsGPU*1000 ;
}
std::cout << "\n";
}
return 0;
}
|
10,486 | #include "includes.h"
#define MAT_TYPE double
#define MAT_SIZE 1024
#define N MAT_SIZE
#define N2 MAT_SIZE*MAT_SIZE
#define BLOCK 256
#define THREAD 512
void stopwatch(int);
// GPU matrix multiply: C = A * B for w x w row-major matrices.
// 2-D launch; thread (tx, ty) computes C[ty][tx].  The launch grid must
// cover w x w exactly (no bounds check is performed).
__global__ void cuda_mul(MAT_TYPE* A,MAT_TYPE* B,MAT_TYPE* C,int w)
{
    int tx = blockDim.x * blockIdx.x + threadIdx.x;  // column
    int ty = blockDim.y * blockIdx.y + threadIdx.y;  // row
    int tid = w * ty + tx;                           // flat output index
    MAT_TYPE v = 0;
    for (int i = 0; i < w; i++)
    {
        MAT_TYPE a = A[ty * w + i];
        MAT_TYPE b = B[i * w + tx];
        // BUG FIX: a matrix product accumulates a*b; the original summed a+b.
        v += a * b;
    }
    C[tid] = v;
}
10,487 | #include <stdio.h>
#include <vector>
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <cuda.h>
using namespace std;
#define THREADS 64
// Mark odd elements: odd[i] = 1 if data[i] is odd, else 0, and tally the
// total number of odd values into *count via atomicAdd.
// (Like the original, data[i] % 2 == 1 only detects POSITIVE odd values.)
__global__ void setup(int* data, int* odd, int* count, int n) {
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid < n) {
        int isOdd = (data[gid] % 2 == 1) ? 1 : 0;
        odd[gid] = isOdd;
        if (isOdd)
            atomicAdd(count, 1);
    }
}
// Per-block inclusive (Hillis-Steele) scan of odd[] into ppref[], and each
// block's total into offset[blockIdx.x].  Requires blockDim.x == THREADS and
// THREADS*sizeof(int) dynamic shared memory.
__global__ void parallelPrefix(int* odd, int* ppref, int* offset, int n) {
    extern __shared__ int prefs[];
    int thid = blockIdx.x*blockDim.x + threadIdx.x;
    // BUG FIX: the original returned early for thid >= n, so trailing threads
    // of a partial last block skipped the __syncthreads() barriers below —
    // undefined behavior.  Keep every thread alive and pad the shared buffer
    // with zeros instead (a zero contributes nothing to the scan).
    prefs[threadIdx.x] = (thid < n) ? odd[thid] : 0;
    __syncthreads();
    int val = 0;
    for (int i = 1; i < THREADS; i *= 2) {
        if (threadIdx.x >= i) {
            val = prefs[threadIdx.x - i];
        }
        __syncthreads();
        if (threadIdx.x >= i) {
            prefs[threadIdx.x] += val;
        }
        __syncthreads();
    }
    if (thid < n) {
        ppref[thid] = prefs[threadIdx.x];
    }
    // The last thread of every block now reaches this point, so even a
    // partial final block publishes its (zero-padded) total.
    if (threadIdx.x == THREADS - 1) {
        offset[blockIdx.x] = prefs[threadIdx.x];
    }
}
// Single-block inclusive scan of offset[0..n) in place, where n is the
// number of blocks used by the first pass.  Must be launched as
// <<<1, n, n*sizeof(int)>>>.
__global__ void sum_reduce(int* offset, int n) {
    extern __shared__ int prefs[];
    int thid = threadIdx.x;
    // FIX: removed a leftover debug printf("\n") that every thread executed.
    prefs[thid] = offset[thid];
    __syncthreads();
    int val = 0;
    for (int i = 1; i < n; i *= 2) {
        if (thid >= i) {
            val = prefs[thid - i];
        }
        __syncthreads();
        if (thid >= i) {
            prefs[thid] += val;
        }
        __syncthreads();
    }
    offset[thid] = prefs[thid];
}
// Add the previous block's scanned total to each element of this block's
// partial prefix sums, turning the per-block scans into one global scan.
// NOTE(review): no element-count bound is passed here, so tail threads of a
// partial final block index ppref past the data length — ppref must be
// allocated for at least gridDim.x*blockDim.x ints or this reads/writes out
// of range; verify against the caller's allocation (it allocates only
// array.size() ints).
__global__ void concat(int* offset, int*ppref) {
int thid = blockIdx.x*blockDim.x + threadIdx.x;
if(blockIdx.x > 0) {
ppref[thid] += offset[blockIdx.x - 1];
}
}
// Scatter pass: copy each odd value of data into results at its rank.
// ppref holds the 1-based inclusive prefix count of odd elements.
__global__ void finish(int* odd, int* ppref, int* results, int* data, int n) {
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= n)
        return;
    if (odd[gid] == 1)
        results[ppref[gid] - 1] = data[gid];
}
// Read comma-separated ints from inp.txt, extract the odd values (order
// preserved) with a GPU prefix-sum compaction, write them to q3.txt.
int main(int argc,char **argv) {
    vector<int> array;
    int i = 0;
    ifstream file( "inp.txt" );
    int number;
    while(file>>number) {
        array.push_back(number);
        i++;
        if (file.peek() == ',')
            file.ignore();
    }
    int* data = new int[array.size()];
    int* ppref = new int[array.size()];
    int* odd = new int[array.size()];
    int count;
    int* d_data;
    int* d_odd;
    int* d_ppref;
    int* d_count;
    int* d_offset;
    for(int a = 0; a < array.size(); a++) {
        data[a] = array[a];
    }
    printf("\n");
    int size = sizeof(int)*array.size();
    cudaMalloc((void **) &d_data, size);
    cudaMalloc((void **) &d_odd, size);
    cudaMalloc((void **) &d_ppref, size);
    cudaMalloc((void **) &d_count, sizeof(int));
    // BUG FIX: setup() accumulates into *d_count with atomicAdd, but
    // cudaMalloc leaves the memory uninitialized — it must start at zero.
    cudaMemset(d_count, 0, sizeof(int));
    cudaMemcpy(d_data, data, size, cudaMemcpyHostToDevice);
    // launch configuration: ceil(n / THREADS) blocks
    int blocks = array.size()/THREADS;
    if(array.size()%THREADS > 0) {
        blocks += 1;
    }
    cudaMalloc((void **) &d_offset, sizeof(int)*blocks);
    int* offset = new int[blocks];
    // Pass 1: flag odd elements and count them.
    setup<<<blocks, THREADS>>>(d_data, d_odd, d_count, array.size());
    cudaMemcpy(&count, d_count, sizeof(int), cudaMemcpyDeviceToHost);
    // Pass 2: per-block prefix sums of the odd flags.
    parallelPrefix<<<blocks, THREADS, sizeof(int)*THREADS>>>(d_odd, d_ppref, d_offset, array.size());
    cudaMemcpy(offset, d_offset, sizeof(int)*blocks, cudaMemcpyDeviceToHost);
    // Pass 3: scan the per-block totals, then fold them into each block.
    sum_reduce<<<1, blocks, sizeof(int)*blocks>>>(d_offset, blocks);
    concat<<<blocks, THREADS>>>(d_offset, d_ppref);
    cudaMemcpy(ppref, d_ppref, size, cudaMemcpyDeviceToHost);
    // Pass 4: scatter each odd value to its rank.
    int* results = new int[count];
    int* d_results;
    cudaMalloc((void **) &d_results, sizeof(int)*count);
    finish<<<blocks, THREADS>>>(d_odd, d_ppref, d_results, d_data, array.size());
    cudaMemcpy(results, d_results, sizeof(int)*count, cudaMemcpyDeviceToHost);
    // Write the odd values, comma-separated, to q3.txt.
    FILE *fp = fopen("q3.txt", "w");
    if(fp != NULL) {
        for(int a = 0; a < count; a++) {
            fprintf(fp, "%d", results[a]);
            if(a + 1 < count) {
                fprintf(fp, ", ");
            }
        }
        fclose(fp);
    }
    cudaFree(d_data);
    cudaFree(d_odd);
    cudaFree(d_ppref);
    cudaFree(d_count);
    cudaFree(d_results);
    // BUG FIX: d_offset and all five host-side new[] buffers were leaked.
    cudaFree(d_offset);
    delete[] data;
    delete[] ppref;
    delete[] odd;
    delete[] offset;
    delete[] results;
    // force the printf()s to flush
    cudaDeviceSynchronize();
    return 0;
}
|
10,488 | /*** Calculating a derivative with CD ***/
#include <iostream>
#include <fstream>
#include <cmath>
#include <sys/time.h>
// Copy the N*N-element array u into u_prev.
void copy_array(float *u, float *u_prev, int N)
{
    for (int idx = 0; idx < N * N; idx++)
        u_prev[idx] = u[idx];
}
// One explicit finite-difference step of the 2-D heat equation on an N x N
// grid: u = u_prev + alpha*dt/h^2 * (4-point Laplacian of u_prev).
// Only interior points are touched; boundary rows/columns keep their values,
// which imposes the boundary conditions implicitly.
void update (float *u, float *u_prev, int N, float h, float dt, float alpha)
{
    float coeff = alpha * dt / (h * h);   // loop-invariant diffusion factor
    for (int I = 0; I < N * N; I++) {
        bool interior = (I > N) && (I < N * N - 1 - N)
                        && (I % N != 0) && (I % N != N - 1);
        if (interior) {
            u[I] = u_prev[I] + coeff * (u_prev[I + 1] + u_prev[I - 1]
                                        + u_prev[I + N] + u_prev[I - N]
                                        - 4 * u_prev[I]);
        }
    }
}
// Wall-clock time in seconds; synchronizes the device first so any pending
// GPU work is included when this is used to bracket timed sections.
double get_time()
{
    struct timeval tim;
    // FIX: cudaThreadSynchronize() has been deprecated for years;
    // cudaDeviceSynchronize() is the drop-in modern equivalent.
    cudaDeviceSynchronize();
    gettimeofday(&tim, NULL);
    return (double) tim.tv_sec+(tim.tv_usec/1000000.0);
}
// CPU heat-equation benchmark: run `steps` explicit diffusion steps on an
// N x N grid (N from argv[1]) and print the elapsed time.
int main(int argc, char * const argv[])
{
    int N;   // grid points per side; for the texture/GPU variant this should
             // be a multiple of the block size, hence the historical note.
    if (argc != 2)
    {
        fprintf(stderr, "You have to provide size(n) as arguments.\n");
        return -1;
    }
    char *p;
    N = strtoul(argv[1], &p, 10);
    float xmin = 0.0f;
    float xmax = 3.5f;
    float ymin = 0.0f;
    //float ymax = 2.0f;
    float h = (xmax-xmin)/(N-1);   // uniform grid spacing
    float dt = 0.00001f;
    float alpha = 0.645f;
    float time = 0.4f;
    int steps = ceil(time/dt);
    int I;
    float *x = new float[N*N];
    float *y = new float[N*N];
    float *u = new float[N*N];
    float *u_prev = new float[N*N];
    // Generate mesh and initial condition (u = 200 on the i==0 / j==0 edges).
    for (int j=0; j<N; j++)
    { for (int i=0; i<N; i++)
      { I = N*j + i;
        x[I] = xmin + h*i;
        y[I] = ymin + h*j;
        u[I] = 0.0f;
        if ( (i==0) || (j==0))
        {u[I] = 200.0f;}
      }
    }
    // Time-stepping loop
    double start = get_time();
    for (int t=0; t<steps; t++)
    { copy_array(u, u_prev, N);
      update(u, u_prev, N, h, dt, alpha);
    }
    double stop = get_time();
    double elapsed = stop - start;
    printf("%d, n, %f\n", N, elapsed);
    // std::ofstream temperature("temperature_cpu.txt");
    // for (int j=0; j<N; j++)
    // { for (int i=0; i<N; i++)
    //   { I = N*j + i;
    //     temperature<<x[I]<<"\t"<<y[I]<<"\t"<<u[I]<<std::endl;
    //   }
    //   temperature<<"\n";
    // }
    // temperature.close();
    // BUG FIX: the four new[] buffers were leaked (and the unused BLOCKSIZE
    // local has been removed).
    delete[] x;
    delete[] y;
    delete[] u;
    delete[] u_prev;
    return 0;
}
|
10,489 | #include <stdio.h>
// Multiply a*b into *res, then deliberately overwrite it with NaN —
// this is a debugging/demo helper for NaN propagation (per the original
// "// NaN" comment).
__device__ void mul(double a, double b, double *res)
{
    *res = a * b;
    // BUG FIX: the original wrote x - x/x - x, which by operator precedence
    // is -(x/x) == -1 for x != 0, not NaN.  The intended 0/0 needs explicit
    // parentheses.
    *res = ((*res) - (*res)) / ((*res) - (*res));
}
// Demo kernel: every thread serially computes the full dot product of
// x and y (size elements); thread 0 prints the result.
__global__ void dot_prod(double *x, double *y, int size)
{
    // BUG FIX: the accumulator was uninitialized, so the printed sum
    // started from garbage.
    double d = 0.0;
    for (int i=0; i < size; ++i)
    {
        double tmp;
        mul(x[i], y[i], &tmp);
        d += tmp;
    }
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid == 0) {
        printf("dot: %f\n", d);
    }
}
|
10,490 |
#include <stdio.h>
#include <cuda.h>
typedef double FLOAT;
// Increment each element of x by one — one thread per element, indexed by
// threadIdx.x only, so the launch must be a single block (as in mem04's
// sum<<<1, N>>>).  The printf is debug output showing which threads ran.
__global__ void sum(FLOAT* x)
{
int tid = threadIdx.x;
printf(">>>>>sumt id = %d\n", tid);
x[tid] += 1;
}
// Allocate N doubles on host and device, add 1 to each element on the GPU
// via sum<<<1, N>>>, copy back and print before/after values.
// Returns 0 on success, -1/-2 on device/host allocation failure.
int mem04()
{
    int N = 32;
    int nbytes = N * sizeof(FLOAT);
    FLOAT *dx = NULL, *hx = NULL;
    int i;
    /* allocate GPU mem — BUG FIX: check the API status; cudaMalloc is not
       guaranteed to leave the pointer NULL on failure */
    if (cudaMalloc((void **)&dx, nbytes) != cudaSuccess || dx == NULL) {
        printf("couldn't allocate GPU memory\n");
        return -1;
    }
    /* allocate CPU host mem */
    hx = (FLOAT*)malloc(nbytes);
    if (hx == NULL) {
        printf("couldn't allocate CPU memory\n");
        cudaFree(dx);   /* BUG FIX: this early-return path leaked dx */
        return -2;
    }
    /* init */
    printf("hx original: \n");
    for (i = 0; i < N; i++) {
        hx[i] = i;
        printf("%g\n", hx[i]);
    }
    /* copy data to GPU */
    cudaMemcpy(dx, hx, nbytes, cudaMemcpyHostToDevice);
    /* call GPU: one block, one thread per element */
    sum<<<1, N>>>(dx);
    /* let GPU finish */
    cudaDeviceSynchronize();
    /* copy data back from GPU */
    cudaMemcpy(hx, dx, nbytes, cudaMemcpyDeviceToHost);
    printf("\nhx from GPU: \n");
    for (i = 0; i < N; i++) {
        printf("%g\n", hx[i]);
    }
    cudaFree(dx);
    free(hx);
    return 0;
}
|
10,491 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#define BLOCKS 3
#define DATASIZE BLOCKS * 512
#define CHUNKNUM 1
#define N 100
cudaError_t square(int *result, int *data, int chunknum);
// One thread per element: result[i] = data[i]^2.  Indexed by threadIdx.x
// only, so every launched block writes the same chunk_size elements.
// (The original's dead "result[i] = 0" pre-store is dropped.)
__global__ void squareKernel(int *result, int *data) {
    int i = threadIdx.x;
    int v = data[i];
    result[i] = v * v;
}
// Reset the device before exit so profiling/tracing tools (Parallel Nsight,
// Visual Profiler) capture complete traces.  Failure is reported on stderr
// but is not fatal.
void deviceReset() {
    cudaError_t status = cudaDeviceReset();
    if (status != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
    }
}
// Fill data[0..DATASIZE) with the sequence 0, 1, 2, ...
void setInput(int *data) {
    for (int idx = 0; idx < DATASIZE; idx++)
        data[idx] = idx;
}
// Print a label line followed by all DATASIZE entries of input as "iK=V, ".
void printArray(char *content, int *input) {
    printf("%s\n", content);
    for (int idx = 0; idx < DATASIZE; idx++)
        printf("i%d=%d, ", idx, input[idx]);
    printf("\n");
}
// Allocate a single throwaway device int — presumably to force CUDA context
// creation before any timed section (main calls this first); verify intent.
void dummyCudaMalloc(int **dummy_ptr) {
    if (cudaMalloc((void**) dummy_ptr, sizeof(int)) != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
    }
}
// Square DATASIZE ints on the GPU (timed inside square()) and exit.
int main() {
    int data[DATASIZE];
    int result[DATASIZE] = { 0 };
    int *dummy_ptr = 0;
    // Warm up the CUDA context before any timing happens inside square().
    dummyCudaMalloc(&dummy_ptr);
    // BUG FIX: memset takes a BYTE count; passing DATASIZE alone cleared only
    // the first DATASIZE bytes, not DATASIZE ints.  (result is already
    // zero-initialized above, but keep the explicit reset correct.)
    memset(result, 0, DATASIZE * sizeof(int));
    setInput(data);
    // printArray("Input", data);
    printf("square\n");
    cudaError_t cudaStatus = square(result, data, CHUNKNUM);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addWithCuda failed!");
        return 1;
    }
    // printArray("Result", result);
    cudaFree(dummy_ptr);
    deviceReset();
    return 0;
}
// Helper function for using CUDA to search a list of characters in parallel.
// Runs N timed iterations; each iteration splits the input into num_kernel
// chunks of DATASIZE/num_kernel ints, deliberately re-allocating, copying,
// launching, and freeing per chunk so the per-kernel overhead is measured.
// NOTE(review): the launch squareKernel<<<BLOCKS, chunk_size>>> uses
// chunk_size as the THREADS-PER-BLOCK count; for small num_kernel (e.g. the
// default CHUNKNUM=1 gives chunk_size=1536) this exceeds the per-block
// thread limit and the launch fails — and there is no cudaGetLastError()
// check to catch it.  Also squareKernel indexes by threadIdx.x only, so all
// BLOCKS blocks redundantly write the same elements.  Verify intent.
cudaError_t square(int *result, int *data, int num_kernel) {
int *dev_data = 0;
int *dev_result = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr,
"cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
cudaFree(dev_result);
return cudaStatus;
}
clock_t t = clock();
// Launch a search keyword kernel on the GPU with one thread for each element.
for (int n = 0; n < N; n++) {
for (int i = 0; i < num_kernel; i++) {
int chunk_size = DATASIZE / num_kernel;
// Allocate GPU buffers for result set.
cudaStatus = cudaMalloc((void**) &dev_result, chunk_size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Allocate GPU buffers for data set.
cudaStatus = cudaMalloc((void**) &dev_data, chunk_size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input data from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_data, data + i * chunk_size,
chunk_size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
squareKernel<<<BLOCKS, chunk_size>>>(dev_result, dev_data);
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr,
"cudaDeviceSynchronize returned error code %d after launching addKernel!\n",
cudaStatus);
goto Error;
}
// Copy result from GPU buffer to host memory.
cudaStatus = cudaMemcpy(result + i * chunk_size, dev_result,
chunk_size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Free per-chunk buffers (intentional per-iteration churn; see header).
cudaFree(dev_data);
cudaFree(dev_result);
dev_data = NULL;
dev_result = NULL;
}
}
t = clock() - t;
printf("%d kernel time for %d kernel(s): %f milliseconds\n",
N, CHUNKNUM,
((float) t) * 1000.0 / CLOCKS_PER_SEC);
Error: cudaFree(dev_result);
return cudaStatus;
}
|
10,492 | // From: acc6.its.brooklyn.cuny.edu/~cisc7342/codes/multiplestreams.cu
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda.h>
#define N 16
#define NTHREADS_PER_BLOCK 8
#define NRUNS 2
// grid.x = N/NTHREADS_PER_BLOCK on the first run and
// grid.x = N/(NSTREAMS*NTHREADS_PER_BLOCK) on the second.
#define NSTREAMS 2
// Set every element covered by the launch to `factor` — one thread per
// element, no bounds check, so the grid must match the array length exactly.
__global__ void init_array(int* g_data, int factor) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
g_data[idx] = factor;
}
// Return 1 if the first n elements of a all equal c, otherwise 0.
int correct_data(int* a, int n, int c) {
    for (int i = 0; i < n; ++i) {
        if (a[i] != c)
            return 0;
    }
    return 1;
}
// Benchmark: time a D2H memcopy and a kernel separately, then compare
// non-streamed (serialized) execution against NSTREAMS overlapped streams,
// and verify the final array contents.
int main(int argc, char *argv[]) {
int nbytes = N * sizeof(int); // number of data bytes
dim3 block, grid; // kernel launch configuration
float elapsed_time, time_memcpy, time_kernel; // timing variables
int i, j;
// check the compute capability of the device
int num_devices = 0;
int c = 5; // value to which the array will be initialized
int* a = 0; // pointer to the array data in host memory
int* d_a = 0; // pointers to data and init value in the device memory
cudaGetDeviceCount(&num_devices);
if (num_devices == 0) {
printf("Your system does not have a CUDA capable device.\n");
return 1;
}
// cudaDeviceProp device_properties;
// cudaGetDeviceProperties(&device_properties, 0 );
// if( (1 == device_properties.major) && (device_properties.minor < 1))
// printf("%s does not have compute capability 1.1 or later\n\n", device_properties.name);
// allocate host memory (pinned is required for achieve asynchronicity)
cudaMallocHost((void**)&a, nbytes);
// allocate device memory
cudaMalloc((void**)&d_a, nbytes);
// allocate and initialize an array of stream handles
cudaStream_t* streams = (cudaStream_t*) malloc(NSTREAMS * sizeof(cudaStream_t));
for (i = 0; i != NSTREAMS; ++i) cudaStreamCreate(&streams[i]);
// create CUDA event handles
cudaEvent_t start_event, stop_event;
cudaEventCreate(&start_event );
cudaEventCreate(&stop_event );
// time memcopy from device
cudaEventRecord(start_event, 0); // record in stream-0, to ensure that all previous CUDA calls have completed
cudaMemcpyAsync(a, d_a, nbytes, cudaMemcpyDeviceToHost, streams[0]);
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event); // block until the event is actually recorded
cudaEventElapsedTime(&time_memcpy, start_event, stop_event);
printf("memcopy: %f\n", time_memcpy);
// time the kernel alone, launched over the full array in one stream
cudaEventRecord(start_event, 0);
grid.x = N/NTHREADS_PER_BLOCK;
grid.y = 1;
grid.z = 1;
block.x = NTHREADS_PER_BLOCK;
block.y = 1;
block.z = 1;
init_array<<<grid, block, 0, streams[0]>>>(d_a, c);
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaEventElapsedTime(&time_kernel, start_event, stop_event );
printf("kernel: %f\n", time_kernel);
//////////////////////////////////////////////////////////////////////
// time non-streamed execution for reference
cudaEventRecord(start_event, 0);
for (i = 0; i != NRUNS; ++i) {
init_array<<<grid, block>>>(d_a, c);
cudaMemcpy(a, d_a, nbytes, cudaMemcpyDeviceToHost);
}
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaEventElapsedTime(&elapsed_time, start_event, stop_event );
printf("non-streamed: %f (%f expected)\n",
elapsed_time / NRUNS, time_kernel + time_memcpy);
//////////////////////////////////////////////////////////////////////
// time execution with NSTREAMS streams: each stream owns 1/NSTREAMS of the
// array, so its memcopy can overlap other streams' kernels
grid.x = N / (NSTREAMS*NTHREADS_PER_BLOCK);
memset(a, 255, nbytes); // set host memory bits to all 1s, for testing correctness
cudaMemset(d_a, 0, nbytes); // set device memory to all 0s, for testing correctness
cudaEventRecord(start_event, 0);
for (i = 0; i != NRUNS; ++i) {
// asynchronously launch NSTREAMS kernels, each operating on its own portion of data
for (j = 0; j != NSTREAMS; ++j) {
init_array<<<grid, block, 0, streams[j]>>>(d_a + j * N / NSTREAMS, c);
}
// asynchronoously launch NSTREAMS memcopies. Note that memcopy in stream x will only
// commence executing when all previous CUDA calls in stream x have completed
for (j = 0; j != NSTREAMS; ++j) {
cudaMemcpyAsync(a + j * N / NSTREAMS,
d_a + j * N / NSTREAMS, nbytes / NSTREAMS,
cudaMemcpyDeviceToHost, streams[j]);
}
}
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaEventElapsedTime(&elapsed_time, start_event, stop_event );
printf("%d streams: %f (%f expected with compute capability 1.1 or later)\n",
NSTREAMS, elapsed_time / NRUNS, time_kernel + time_memcpy / NSTREAMS);
// check whether the output is correct (every element must equal c)
printf("-------------------------------\n");
if (correct_data(a, N, c)) {
printf("Test PASSED\n");
} else {
printf("Test FAILED\n");
}
// release resources
for (i = 0; i != NSTREAMS; ++i) cudaStreamDestroy(streams[i]);
cudaEventDestroy(start_event);
cudaEventDestroy(stop_event);
cudaFreeHost(a);
cudaFree(d_a);
return 0;
}
|
10,493 | // test cuda programming
// nvcc myAddVec.cu -o myAddVec
#include <iostream>
#include <vector>
#include <assert.h>
using namespace std;
// Element-wise vector addition: dc[i] = da[i] + db[i] for i in [0, n).
// One thread per element; threads past the end of the data exit early.
__global__ void addVec(int* da, int* db, int* dc, int n) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= n)
        return;
    dc[idx] = da[idx] + db[idx];
}
// Demo driver: adds two length-1000 int vectors on the GPU and verifies
// a few elements on the host.
int main() {
    cout << "Demo: CUDA add vector" << endl;
    const int n = 1000;
    size_t bytes = n*sizeof(int);
    vector<int> a = vector<int>(n, 1);
    vector<int> b = vector<int>(n, 2);
    vector<int> c = vector<int>(n, 0);
    int* da;
    int* db;
    int* dc;
    cudaMalloc(&da, bytes);
    cudaMalloc(&db, bytes);
    cudaMalloc(&dc, bytes);
    cudaError_t err = cudaSuccess;
    // BUG FIX: the original assigned these statuses but never inspected them,
    // so a failed upload went unnoticed until the final comparison.
    err = cudaMemcpy(da, a.data(), bytes, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
        cout << "H2D copy of a failed: " << cudaGetErrorString(err) << endl;
    err = cudaMemcpy(db, b.data(), bytes, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
        cout << "H2D copy of b failed: " << cudaGetErrorString(err) << endl;
    int BlockSize = 256;
    int GridSize = (n + BlockSize - 1)/BlockSize;  // ceil-div so the tail is covered
    cout << "GridSize=" << GridSize << endl;
    cout << "BlockSize=" << BlockSize << endl;
    addVec<<<GridSize, BlockSize>>>(da, db, dc, n);
    // BUG FIX: launch-configuration errors only surface via cudaGetLastError().
    err = cudaGetLastError();
    if (err != cudaSuccess)
        cout << "kernel launch failed: " << cudaGetErrorString(err) << endl;
    cudaDeviceSynchronize();
    err = cudaMemcpy(c.data(), dc, bytes, cudaMemcpyDeviceToHost);
    if(err == cudaSuccess)
        cout << "cudaMemcpyDeviceToHost ok." << endl;
    else
        cout << err << " cudaMemcpyDeviceToHost failed." << endl;
    cudaFree(da);
    cudaFree(db);
    cudaFree(dc);
    cout << "c[0]:" << c[0] << endl;
    cout << "c[100]:" << c[100] << endl;
    assert(c[0] == 3);
    assert(c[500] == 3);
    cout << "CUDA add vector successfully!" << endl;
    return 0;
}
|
10,494 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/* Auto-generated single-thread stress kernel (marked "Do not modify"):
 * folds 32 float inputs and one int loop bound through a chain of
 * transcendental operations into `comp` and prints the final value,
 * which serves as the test oracle.
 * NOTE(review): several subterms produce inf/NaN by construction —
 * e.g. logf(floorf(cosf(...))) where the argument is -1, 0 or 1, and
 * division by -0.0f — this appears intentional for the generated test. */
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28,float var_29,float var_30,float var_31,float var_32) {
comp = -1.2201E4f - (-1.0292E8f - ldexpf((var_2 - (+1.8362E36f + var_3)), 2));
float tmp_1 = var_4 * +1.1051E36f / +1.6872E25f - +1.5355E35f;
comp += tmp_1 / logf(floorf(cosf(fmodf((var_5 - var_6 * -1.8305E-36f), (-1.4240E-44f - var_7)))));
comp += var_8 / acosf(var_9 + atanf(+0.0f));
/* var_1 controls the iteration count; each pass overwrites comp twice,
 * so only the last iteration's value survives plus the accumulated +=. */
for (int i=0; i < var_1; ++i) {
comp += ldexpf(var_10 + (+1.1009E7f / var_11 / var_12 - (var_13 * var_14)), 2);
comp = +1.1117E35f - (-1.6178E-37f - var_15 / var_16 - -0.0f - -1.0436E18f);
comp = +1.1063E-42f - ldexpf((+1.3389E26f / asinf(-1.3196E-44f + var_17 / (-1.5437E17f - var_18))), 2);
}
if (comp >= var_19 / var_20 + +1.4726E35f) {
float tmp_2 = fabsf(+1.0581E-41f);
comp += tmp_2 - (var_21 - var_22);
comp = (+1.4737E-42f + -1.9420E20f);
comp += (+1.4486E36f + +0.0f / fabsf(-1.7514E-36f / +1.0238E-44f - -0.0f / (var_23 * var_24)));
}
if (comp < (-1.3332E-36f - -1.2121E-42f - (var_25 + var_26))) {
comp += ldexpf(-1.2136E-43f, 2);
comp = (+1.0368E-36f / -0.0f);
comp = (-1.6321E34f * (-0.0f - powf((-1.6334E-35f * (+1.8962E35f * (var_27 * (+1.1051E-41f + var_28)))), +1.5735E-43f + (var_29 * var_30 / var_31 - var_32))));
}
/* the printed value is compared by the external test harness */
printf("%.17g\n", comp);
}
/* Allocates a 10-element float buffer on the host and fills every slot
 * with v.  Caller owns the returned pointer (free with free()). */
float* initPointer(float v) {
    float *buffer = (float*) malloc(sizeof(float)*10);
    int idx = 0;
    while (idx < 10) {
        buffer[idx] = v;
        ++idx;
    }
    return buffer;
}
/* Driver for the generated kernel: reads 33 scalars from the command line
 * (one float seed, one int loop bound, 31 more floats) and launches a
 * single-thread kernel that prints the folded result. */
int main(int argc, char** argv) {
    /* BUG FIX: argv[1]..argv[33] were dereferenced without checking argc,
     * crashing on any short command line. */
    if (argc < 34) {
        fprintf(stderr, "usage: %s <v1> ... <v33>\n", argv[0]);
        return 1;
    }
    /* Program variables */
    float tmp_1 = atof(argv[1]);
    int tmp_2 = atoi(argv[2]);
    float tmp_3 = atof(argv[3]);
    float tmp_4 = atof(argv[4]);
    float tmp_5 = atof(argv[5]);
    float tmp_6 = atof(argv[6]);
    float tmp_7 = atof(argv[7]);
    float tmp_8 = atof(argv[8]);
    float tmp_9 = atof(argv[9]);
    float tmp_10 = atof(argv[10]);
    float tmp_11 = atof(argv[11]);
    float tmp_12 = atof(argv[12]);
    float tmp_13 = atof(argv[13]);
    float tmp_14 = atof(argv[14]);
    float tmp_15 = atof(argv[15]);
    float tmp_16 = atof(argv[16]);
    float tmp_17 = atof(argv[17]);
    float tmp_18 = atof(argv[18]);
    float tmp_19 = atof(argv[19]);
    float tmp_20 = atof(argv[20]);
    float tmp_21 = atof(argv[21]);
    float tmp_22 = atof(argv[22]);
    float tmp_23 = atof(argv[23]);
    float tmp_24 = atof(argv[24]);
    float tmp_25 = atof(argv[25]);
    float tmp_26 = atof(argv[26]);
    float tmp_27 = atof(argv[27]);
    float tmp_28 = atof(argv[28]);
    float tmp_29 = atof(argv[29]);
    float tmp_30 = atof(argv[30]);
    float tmp_31 = atof(argv[31]);
    float tmp_32 = atof(argv[32]);
    float tmp_33 = atof(argv[33]);
    compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29,tmp_30,tmp_31,tmp_32,tmp_33);
    /* block until the kernel's printf output has been produced */
    cudaDeviceSynchronize();
    return 0;
}
|
10,495 | #include <cuda.h>
#include <cuda_runtime.h>
/* SSSP relax phase: each masked vertex clears its mask and relaxes all of
 * its outgoing edges into the update array U via atomicMin.
 * Adjacency layout: V[v] indexes into E; E[V[v]] holds the out-degree and
 * the neighbour ids follow immediately after it, with parallel weights W. */
__global__ void SSSP_kernel1(int *V, int *E, int *W, bool *M, int *C, int *U, int n) {
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid >= n || !M[tid])
        return;
    M[tid] = false;
    int base = V[tid];
    int degree = E[base];
    for (int e = base + 1; e <= base + degree; e++) {
        int neighbour = E[e];
        /* concurrent relaxations of the same neighbour must serialize */
        atomicMin(&U[neighbour], C[tid] + W[e]);
    }
}
/* SSSP update phase: commit any improved tentative distance from U into C,
 * re-mask the vertex for the next relax round, raise the global "changed"
 * flag, then resynchronize U with C. */
__global__ void SSSP_kernel2(bool *M, int *C, int *U, bool *flag, int n) {
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid >= n)
        return;
    if (U[tid] < C[tid]) {
        C[tid] = U[tid];
        M[tid] = true;
        *flag = true;
    }
    U[tid] = C[tid];
}
10,496 | /*
* Parallel bitonic sort using CUDA.
* Compile with
* nvcc -arch=sm_11 bitonic_sort.cu
* Based on http://www.tools-of-computing.com/tc/CS/Sorts/bitonic_sort.htm
* License: BSD 3
*/
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#define THREADS 512 // 2^9
#define NIM1 13515141
#define NIM2 13515147
#define SWAP(x,y) t = x; x = y; y = t;
char* input_path = "data/input";
char* output_path = "data/output";
FILE* input_file;
FILE* output_file;
const int up = 1;
const int down = 0;
int * array;
int array_size;
int NUM_VALS;
int BLOCKS;
/* Prints the wall time between two clock() samples in microseconds. */
void print_elapsed(clock_t start, clock_t stop)
{
    double seconds = (double)(stop - start) / CLOCKS_PER_SEC;
    printf("Elapsed time: %.0f microsecond\n", seconds * 1000000.0);
}
/* Returns the next value from the C PRNG (seeded by the caller). */
int random_int()
{
    int value = rand();
    return value;
}
/* Dumps `length` ints to stdout, space-separated, newline-terminated. */
void array_print(int *arr, int length)
{
    for (int i = 0; i < length; ++i)
        printf("%d ", arr[i]);
    printf("\n");
}
/* Fills `arr` with pseudo-random ints.  The seed is fixed, so every call
 * reproduces the same sequence (useful for comparing the two sorters). */
void array_fill(int *arr, int length)
{
    srand(13515147);
    for (int i = 0; i < length; ++i)
        arr[i] = (int)rand();
}
/* One compare/exchange stage of the bitonic network.  Thread i pairs with
 * i^j; only the lower-indexed partner acts, so each pair is touched once.
 * Bit k of i decides the direction: clear -> ascending, set -> descending. */
__global__ void bitonic_sort_step(int *dev_values, int j, int k)
{
    unsigned int i = threadIdx.x + blockDim.x * blockIdx.x;
    unsigned int ixj = i ^ j;
    if (ixj > i) {
        int lo = dev_values[i];
        int hi = dev_values[ixj];
        bool ascending = ((i & k) == 0);
        /* swap when the pair violates the required order */
        if ((ascending && lo > hi) || (!ascending && lo < hi)) {
            dev_values[i] = hi;
            dev_values[ixj] = lo;
        }
    }
}
/**
 * Inplace bitonic sort using CUDA.
 *
 * Copies `values` (NUM_VALS ints, global) to the device, runs the classic
 * two-level loop of bitonic_sort_step launches, and copies the result back.
 * NOTE(review): assumes NUM_VALS is a power of two and that
 * BLOCKS * THREADS covers NUM_VALS; no cudaError checking is done — confirm
 * against the launch configuration computed in main.
 */
void bitonic_sort(int *values)
{
int *dev_values;
size_t size = NUM_VALS * sizeof(int);
cudaMalloc((void**) &dev_values, size);
cudaMemcpy(dev_values, values, size, cudaMemcpyHostToDevice);
dim3 blocks(BLOCKS,1); /* Number of blocks */
dim3 threads(THREADS,1); /* Number of threads */
int j, k;
/* Major step: merged-sequence length k doubles from 2 up to NUM_VALS */
for (k = 2; k <= NUM_VALS; k <<= 1) {
/* Minor step: compare distance j halves each pass */
for (j=k>>1; j>0; j=j>>1) {
bitonic_sort_step<<<blocks, threads>>>(dev_values, j, k);
}
}
/* blocking copy also synchronizes with the queued kernel launches */
cudaMemcpy(values, dev_values, size, cudaMemcpyDeviceToHost);
cudaFree(dev_values);
}
/* Compare/exchange on the global `array`: when dir==1 put the smaller value
 * at index i (ascending); when dir==0 put the larger value there. */
void compare(int i, int j, int dir){
    int t;
    if ((array[i] > array[j]) == dir){
        SWAP(array[i], array[j]);
    }
}
/**
 * Returns the greatest power of two strictly less than n
 * (0 when n <= 1, since no such power exists).
 */
int greatestPowerOfTwoLessThan(int n){
    int result = 0;
    for (int p = 1; p > 0 && p < n; p <<= 1)
        result = p;
    return result;
}
/**
 * Merges the bitonic sequence array[low .. low+c-1] into sorted order:
 * ascending when dir==1, descending when dir==0.
 */
void bitonicMerge(int low, int c, int dir){
    if (c <= 1)
        return;
    int half = greatestPowerOfTwoLessThan(c);
    for (int idx = low; idx < low + c - half; idx++)
        compare(idx, idx + half, dir);
    bitonicMerge(low, half, dir);
    bitonicMerge(low + half, c - half, dir);
}
/**
 * Serial bitonic sort of array[low .. low+c-1]: sorts the two halves in
 * opposite directions to form a bitonic sequence, then merges it in the
 * requested direction (dir==1 ascending, dir==0 descending).
 */
void recursiveBitonic(int low, int c, int dir){
    if (c <= 1)
        return;
    int half = c / 2;
    recursiveBitonic(low, half, !dir);
    recursiveBitonic(low + half, c - half, dir);
    bitonicMerge(low, c, dir);
}
/**
 * Sorts the whole global `array` (length `array_size`) in ascending order
 * on the CPU via recursive bitonic sort; used as the reference for the
 * CUDA implementation.
 */
void sort_serial(){
recursiveBitonic(0, array_size, up); /* up == 1 -> ascending */
}
/**
 * Returns 1 when the global `array` is in non-decreasing order, else 0.
 */
int is_sorted() {
    for (int i = 0; i + 1 < array_size; i++) {
        if (array[i] > array[i + 1])
            return 0;
    }
    return 1;
}
/* Usage: <prog> <array_size>.  Writes the unsorted data to data/input, sorts
 * it serially and then with CUDA, validates both, and writes the parallel
 * result to data/output.  array_size should be a power of two and a multiple
 * of THREADS for the bitonic network to be complete. */
int main(int argc, char * argv[])
{
    /* BUG FIX: argv[1] was dereferenced without checking argc */
    if (argc < 2) {
        fprintf(stderr, "usage: %s <array_size>\n", argv[0]);
        return 1;
    }
    input_file = fopen(input_path, "w");
    output_file = fopen(output_path, "w");
    /* BUG FIX: fopen results were never checked */
    if (input_file == NULL || output_file == NULL) {
        fprintf(stderr, "cannot open data files for writing\n");
        return 1;
    }
    clock_t start, stop;
    array_size = atoi(argv[1]);
    NUM_VALS = array_size;
    /* use the THREADS constant instead of a magic 512; NOTE(review): this is
     * 0 blocks when array_size < THREADS, as in the original */
    BLOCKS = NUM_VALS / THREADS;
    array = (int*) malloc(NUM_VALS * sizeof(int));
    array_fill(array, NUM_VALS);
    int i;
    for (i = 0; i < array_size; i++){
        fprintf(input_file, "%d\n", array[i]);
    }
    fclose(input_file);
    start = clock();
    sort_serial();
    stop = clock();
    printf("[SERIAL]\n");
    if (is_sorted()) {
        printf("Sorting successful\n");
    } else {
        printf("Sorting failed\n");
    }
    print_elapsed(start, stop);
    free(array);
    /* regenerate the identical data set for the parallel run */
    array = (int*) malloc(NUM_VALS * sizeof(int));
    array_fill(array, NUM_VALS);
    start = clock();
    bitonic_sort(array); /* Inplace */
    stop = clock();
    printf("[PARALEL]\n");
    if (is_sorted()) {
        printf("Sorting successful\n");
    } else {
        printf("Sorting failed\n");
    }
    for (i = 0; i < array_size; i++){
        fprintf(output_file, "%d\n", array[i]);
    }
    fclose(output_file);
    print_elapsed(start, stop);
    free(array); /* BUG FIX: the second buffer was leaked */
    return 0;
}
|
10,497 | /*
*
* Created on: May 17, 2017
* Author: Mario Lüder
*
*/
#include "FeatureValues.cuh"
#include "stdint.h"
#include "assert.h"
|
10,498 | #include <stdio.h>
/* c[i] = a[i] + b[i] for i in [0, n); one thread per element. */
__global__ void vec_add_kernel(float *a, float *b, float *c, int n)
{
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= n)
        return;
    c[idx] = a[idx] + b[idx];
}
/* Host wrapper: uploads h_a/h_b, launches the add kernel with 256-thread
 * blocks, and downloads the sum into h_c.  The final blocking memcpy also
 * synchronizes with the kernel. */
__host__ void vec_add(float *h_a, float *h_b, float *h_c, int n)
{
    int size = n*sizeof(float);
    float *d_a, *d_b, *d_c;
    /* device buffers */
    cudaMalloc((void **) &d_a, size);
    cudaMalloc((void **) &d_b, size);
    cudaMalloc((void **) &d_c, size);
    cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, size, cudaMemcpyHostToDevice);
    /* ceil(n / 256) blocks so the tail is covered */
    vec_add_kernel<<<ceil(n/256.0), 256>>>(d_a, d_b, d_c, n);
    cudaMemcpy(h_c, d_c, size, cudaMemcpyDeviceToHost);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
}
/* Tiny smoke test: adds {1,2,3} + {1,2,3} on the GPU and prints the sums. */
__host__ int main(void)
{
    float h_a[] = {1,2,3};
    float h_b[] = {1,2,3};
    float h_c[3];
    const int n = 3;
    vec_add(h_a, h_b, h_c, n);
    for (int i = 0; i < n; ++i)
        printf("%f\n", h_c[i]);
}
|
10,499 | #include <cuda.h>
#include <stdio.h>
#include <math.h>
#include "f_eval.cuh"
/* Central-difference gradient d_out[i] = (f(x+h) - f(x-h)) / (2h) over N
 * rows of M variables, staging each block's rows in dynamic shared memory.
 * Launch contract (set by main): blockDim.x = 1024 / M * M (a multiple of M)
 * and blockDim.x * sizeof(double) bytes of dynamic shared memory. */
__global__ void kernelCompute_shared(double h, int N, int M, double* d_data, double* d_out) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    extern __shared__ double shared_data[];
    /* BUG FIX: the original loaded d_data[index] unconditionally, reading
     * past the end of the array for tail threads of the last block.  Guard
     * the load but keep the barrier outside the branch so every thread in
     * the block reaches it. */
    if (index < N * M) {
        shared_data[threadIdx.x] = d_data[index];
    }
    __syncthreads();
    if (index < N * M) {
        int which_m = threadIdx.x / M;             /* row within this block's tile */
        int position = threadIdx.x - which_m * M;  /* column within the row */
        double* temp_array = (double*)malloc(sizeof(double) * M);
        if (temp_array == NULL) {
            return; /* device heap exhausted; leave d_out[index] untouched */
        }
        memcpy(temp_array, shared_data + which_m * M, M * sizeof(double));
        double temp_minus = temp_array[position] - h;
        double temp_plus = temp_array[position] + h;
        temp_array[position] = temp_minus;
        double output_minus = f_eval(temp_array, M);
        temp_array[position] = temp_plus;
        double output_plus = f_eval(temp_array, M);
        free(temp_array);
        /* central difference */
        d_out[index] = (output_plus - output_minus) / (2 * h);
    }
}
/* Fallback central-difference kernel for M >= 1024: each thread copies its
 * own row of M variables from global memory into a per-thread heap buffer,
 * perturbs one coordinate by ±h, and writes (f(x+h) - f(x-h)) / (2h).
 * index/M selects the row; index - index/M*M is the column being perturbed.
 * NOTE(review): the per-thread malloc is unchecked — a device-heap failure
 * would dereference NULL; confirm heap size is configured for N*M threads. */
__global__ void kernelCompute(double h, int N, int M, double* d_data, double* d_out) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < N * M) {
double* temp_array = (double*)malloc(sizeof(double) * M);
/* copy this row, then evaluate f at x-h and x+h in the same buffer */
memcpy(temp_array, d_data + (index / M * M), M * sizeof(double));
double temp_minus = temp_array[index - index / M * M] - h;
double temp_plus = temp_array[index - index / M * M] + h;
temp_array[index - index / M * M] = temp_minus;
double output_minus = f_eval(temp_array, M);
temp_array[index - index / M * M] = temp_plus;
double output_plus = f_eval(temp_array, M);
double output = (output_plus - output_minus) / (2 * h);
d_out[index] = output;
free(temp_array);
/* Old verison
double* temp_minus = (double*)malloc(sizeof(double) * M);
memcpy(temp_minus, d_data + (index / M * M), M * sizeof(double));
double t_minus = temp_minus[index - index / M * M];
temp_minus[index - index / M * M] = t_minus - h;
double* temp_plus = (double*)malloc(sizeof(double) * M);
memcpy(temp_plus, d_data + (index / M * M), M * sizeof(double));
double t_plus = temp_plus[index - index / M * M];
temp_plus[index - index / M * M] = t_plus + h;
double output = (f_eval(temp_plus, M) - f_eval(temp_minus, M));
// printf("%d, %f, %f\n", index - index / M * M, temp_plus[index - index / M * M], temp_minus[index - index / M * M]);
// printf("%d, %f\n", index, f_eval(temp_plus, M) - f_eval(temp_minus, M));
d_out[index] = output / (2 * h);
*/
}
}
/* Usage: <input file> <output file> <h>
 * Input: N M followed by N*M comma-separated doubles (row-major).
 * Computes the numerical gradient of f_eval for each row on the GPU,
 * times the kernel with CUDA events, and writes the result matrix. */
int main(int argc, char *argv[]) {
    if(argc != 4){
        printf("Input is not correct!\n");
        exit(1);
    }
    FILE* input = fopen(argv[1], "r");
    if (input == NULL) {
        perror("Error: Read File Error");
        exit(1);
    }
    int N = 0;
    int M = 0;
    fscanf(input, "%d", &N);
    fscanf(input, "%d", &M);
    double* data = (double*)malloc(N * M * sizeof(double));
    for(int i = 0; i < N * M; i++){
        double temp = 0.0;
        fscanf(input, "%lf,", &temp);
        data[i] = temp;
    }
    fclose(input);
    FILE* output = fopen(argv[2], "w");
    /* BUG FIX: the output fopen result was never checked */
    if (output == NULL) {
        perror("Error: Write File Error");
        free(data);
        exit(1);
    }
    double h = (double)atof(argv[3]);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    double* d_data;
    cudaMalloc((void**)&d_data, sizeof(double) * N * M);
    double* d_out;
    cudaMalloc((void**)&d_out, sizeof(double) * N * M);
    cudaMemcpy(d_data, data, sizeof(double) * N * M, cudaMemcpyHostToDevice);
    cudaEventRecord(start, 0);
    if (M < 1024) {
        /* pack as many whole rows of length M into a block as fit in 1024 */
        int block_size = 1024 / M * M;
        kernelCompute_shared<<<(N * M + block_size - 1) / block_size, block_size, block_size * sizeof(double)>>>(h, N, M, d_data, d_out);
    }
    else {
        kernelCompute<<<(N * M + 1023) / 1024, 1024>>>(h, N, M, d_data, d_out);
    }
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    double* out = (double*)malloc(N * M * sizeof(double));
    cudaMemcpy(out, d_out, sizeof(double) * N * M, cudaMemcpyDeviceToHost);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("Time spent: %f\n", elapsedTime);
    for(int i = 0; i < N * M; i++){
        if ((i + 1) % M == 0){
            fprintf(output, "%f\n", out[i]);
        }
        else {
            fprintf(output, "%f ", out[i]);
        }
    }
    /* BUG FIX: the output stream was never closed, so buffered results
     * could be lost; the CUDA events were also leaked. */
    fclose(output);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    free(data);
    free(out);
    cudaFree(d_data);
    cudaFree(d_out);
    return 0;
}
|
10,500 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#define R 4
#define C 40
/*
 * Device analogue of strlen: counts characters up to (not including) the
 * first '\0' or '\n'.
 */
__device__ int gpu_strlen(char * s)
{
    int len = 0;
    for (; s[len] != '\0' && s[len] != '\n'; ++len)
        ;
    return len;
}
/*
 * Returns 1 when ch is an ASCII letter (a-z or A-Z), 0 for anything else.
 * Range tests replace the original 26-iteration scan; results are identical.
 */
__device__ int gpu_isAlpha(char ch)
{
    if ((ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z'))
        return 1;
    return 0;
}
/* Marks word boundaries: one thread per (row, col) character cell.
 * out[row][col] is set to 1 when the character at that position inside the
 * line is a non-letter, and 0 otherwise (letters, or positions past the end
 * of the line).  numLine rows, up to maxLineLen characters per row. */
__global__ void wordCount( char **a, int **out, int numLine, int maxLineLen )
{
    int col = blockDim.x * blockIdx.x + threadIdx.x;
    int row = blockDim.y * blockIdx.y + threadIdx.y;
    if (row >= numLine || col >= maxLineLen)
        return;
    out[row][col] = 0;
    if (col < gpu_strlen(a[row]) && !gpu_isAlpha(a[row][col]))
        out[row][col] = 1;
}
/* Prints each of the `lines` strings in a on its own stdout line. */
void printArr( char **a, int lines )
{
    for (int i = 0; i < lines; i++)
        printf("%s\n", a[i]);
}
/* Builds R copies of a sample line, uploads them as a device array-of-
 * pointers, runs wordCount to mark non-letter characters, and prints the
 * resulting 0/1 matrix.  h_out/h_count_out are host arrays whose elements
 * are DEVICE pointers (the usual 2D-on-device idiom). */
int main()
{
    int i;
    char **d_in, **h_in, **h_out;
    int h_count_in[R][C], **h_count_out, **d_count_in;
    /* allocate host pointer tables and the device pointer tables */
    h_in = (char **)malloc(R * sizeof(char *));
    h_out = (char **)malloc(R * sizeof(char *));
    h_count_out = (int **)malloc(R * sizeof(int *));
    cudaMalloc((void ***)&d_in, sizeof(char *) * R);
    cudaMalloc((void ***)&d_count_in, sizeof(int *) * R);
    /* allocate and upload the string data, one device row per line */
    for(i = 0; i < R; ++i)
    {
        cudaMalloc((void **) &h_out[i],C * sizeof(char));
        h_in[i]=(char *)calloc(C, sizeof(char));
        strcpy(h_in[i], "good morning and I'm a good student!");
        cudaMemcpy(h_out[i], h_in[i], strlen(h_in[i]) + 1, cudaMemcpyHostToDevice);
    }
    cudaMemcpy(d_in, h_out, sizeof(char *) * R,cudaMemcpyHostToDevice);
    /* allocate and zero the per-character occurrence rows */
    for(i = 0; i < R; ++i)
    {
        cudaMalloc((void **) &h_count_out[i], C * sizeof(int));
        cudaMemset(h_count_out[i], 0, C * sizeof(int));
    }
    cudaMemcpy(d_count_in, h_count_out, sizeof(int *) * R,cudaMemcpyHostToDevice);
    printArr(h_in, R);
    printf("\n\n");
    /* 2x2 blocks; float casts avoid integer-division truncation */
    dim3 grid, block;
    block.x = 2;
    block.y = 2;
    grid.x = ceil((float)C / block.x);
    grid.y = ceil((float)R / block.y);
    printf("grid.x = %d, grid.y=%d\n", grid.x, grid.y );
    wordCount<<<grid, block>>>( d_in, d_count_in, R, C);
    /* BUG FIX: surface launch-configuration errors instead of ignoring them */
    cudaError_t launch_err = cudaGetLastError();
    if (launch_err != cudaSuccess)
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(launch_err));
    /* blocking copies also synchronize with the kernel */
    for(i = 0; i < R; ++i) {
        cudaMemcpy(h_count_in[i], h_count_out[i], sizeof(int) * C,cudaMemcpyDeviceToHost);
    }
    printf("Occurrence array obtained from device:\n");
    for(i = 0; i < R; i ++) {
        for(int j = 0; j < C; j ++)
            printf("%4d", h_count_in[i][j]);
        printf("\n");
    }
    /* BUG FIX: every host and device allocation was leaked */
    for(i = 0; i < R; ++i) {
        free(h_in[i]);
        cudaFree(h_out[i]);
        cudaFree(h_count_out[i]);
    }
    free(h_in);
    free(h_out);
    free(h_count_out);
    cudaFree(d_in);
    cudaFree(d_count_in);
    return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.