serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
3,501 |
#include <cuda_runtime.h>
#include <vector>
#include <iterator>
#include <iostream>
#include <algorithm>
#include <stdio.h>
#include <ncurses.h>
#ifndef gpuErrchk
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
#endif
// Reports a CUDA runtime error (to both stderr and stdout), restores the
// terminal from ncurses mode, and optionally terminates the process.
//
// code  - status returned by a CUDA runtime API call
// file  - source file of the failing call (pass __FILE__)
// line  - source line of the failing call (pass __LINE__)
// abort - when true (the default), exit(code) after reporting
//
// Fix: the original also printed the "GPUassert" error line to stderr on
// SUCCESS (in an else branch), flooding the log; successful calls are now
// silent.
static inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        fprintf(stdout,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        endwin();  // leave curses mode so the message is visible on a sane terminal
        if (abort) exit(code);
    }
}
// Prints a short summary of one CUDA device to the active ncurses window.
// device_id must be a valid ordinal in [0, cudaGetDeviceCount()).
static void print_device_info(int device_id){
    cudaDeviceProp prop;
    gpuErrchk(cudaGetDeviceProperties(&prop, device_id));
    printw(" Device ID: %d\n", device_id);
    printw(" Device name: %s\n", prop.name);
    // Fix: prop.major/minor/clockRate are int and totalGlobalMem is size_t;
    // the original passed them all to %u, which is undefined behavior on
    // LP64 platforms. Use %d / %zu as appropriate.
    printw(" Device architecture: %d.%d\n", prop.major, prop.minor);
    printw(" Device global memory: %zu\n", (size_t)prop.totalGlobalMem);
    printw(" Device clockrate: %d\n", prop.clockRate);
}
// Enumerates CUDA devices, prints those with compute capability >= 3.5 to the
// ncurses screen, and returns their device ordinals. Pauses for a keypress
// after each device, as the original UI did.
static std::vector<int> print_devices(){
    int devices;
    gpuErrchk(cudaGetDeviceCount(&devices));
    printw("CUDA Capable Devices with architecture greater than equal to 3.5\n\n");
    std::vector<int> usable_devices;
    cudaDeviceProp prop;
    for(int device = 0; device < devices; device++){
        gpuErrchk(cudaGetDeviceProperties(&prop, device));
        // capability >= 3.5  <=>  major > 3, or major == 3 with minor >= 5.
        // The original duplicated this body in two branches; in the 3.x branch
        // it printed prop.name (a char array) with %i — undefined behavior.
        // The merged branch below uses the correct conversions.
        if(prop.major > 3 || (prop.major == 3 && prop.minor >= 5)){
            usable_devices.push_back(device);
            printw(" Device ID: %d\n", device);
            printw(" Device name: %s\n", prop.name);
            printw(" Device architecture: %d.%d\n", prop.major, prop.minor);
        }
        printw("Hit enter to continue\n");
        getch();
    }
    printw("\n");
    return usable_devices;
}
// Interactive ncurses flow that lets the user pick which usable CUDA devices
// (compute capability >= 3.5, as filtered by print_devices()) to run on.
// Returns the chosen device ordinals; returns an empty vector when the screen
// cannot be initialized, when no usable device exists, or when the user
// aborts with 'e'/'E'.
// NOTE(review): input is read one keystroke at a time via getch() and stored
// as a single char, so the "YES"/"NO" comparisons below can never match —
// presumably leftover from a line-based input version; confirm intent.
extern "C" std::vector<int> select_devices(){
std::vector<int> selected_devices;
// Enter curses mode; all output below goes through printw/refresh.
if(initscr() == NULL){
printw("Failed to initialize select screen\n");
return selected_devices;
}
raw();
keypad(stdscr, TRUE);
noecho();
bool devices_selected = false;
bool get_input = true;
// Lists devices on screen and returns the usable ordinals.
std::vector<int> usable_devices = print_devices();
if(usable_devices.size() == 0)
return selected_devices;
std::string device_selected;
printw("\n");
// Outer loop: repeat the whole selection until the user confirms it.
do{
printw("Select devices:\n");
for(std::vector<int>::iterator iter = usable_devices.begin() ; iter != usable_devices.end(); iter++){
print_device_info(*iter);
get_input = true;
// Ask y/n for this device until a recognized key is pressed.
do{
try{
printw("y/n?\n");
char selection = getch();
device_selected = selection;
// Normalize so 'y' and 'Y' are equivalent.
std::transform(device_selected.begin(), device_selected.end(),device_selected.begin(), ::toupper);
if(device_selected == "Y" || device_selected == "YES"){
selected_devices.push_back(*iter);
get_input = false;
}else if (device_selected == "N" || device_selected == "NO"){
get_input = false;
}else if (device_selected == "E"){
// 'e' aborts the whole selection and restores the terminal.
selected_devices.clear();
endwin();
return selected_devices;
}else{
// Unrecognized key: report and re-prompt.
throw 0;
}
}catch(...){
printw("Error in selection\n");
continue;
}
refresh();
}while(get_input);
}
get_input = true;
printw("Devices selected:\n");
// Confirmation loop: show the chosen devices and ask for a final y/n.
do{
for(std::vector<int>::iterator iter = selected_devices.begin(); iter != selected_devices.end(); iter++){
print_device_info(*iter);
}
try{
printw("Is this right (y/n)? \n");
char selection = getch();
device_selected = selection;
printw("\n");
std::transform(device_selected.begin(), device_selected.end(),device_selected.begin(), ::toupper);
if(device_selected == "Y" || device_selected == "YES"){
devices_selected = true;
get_input = false;
}else if (device_selected == "N" || device_selected == "NO"){
get_input = false;
}else if (device_selected == "E"){
selected_devices.clear();
endwin();
return selected_devices;
}else{
throw 0;
}
}catch(...){
printw("Error in selection\n");
continue;
}
refresh();
}while(get_input);
// Answering 'n' discards the picks and restarts the outer loop.
if(devices_selected == false) selected_devices.clear();
}while(devices_selected == false);
printw("Hit enter to continue\n");
getch();
endwin();
return selected_devices;
}
// Returns every usable (compute capability >= 3.5) device after displaying
// the list in an ncurses screen and waiting for a keypress.
// Fix: the original called raw()/keypad()/noecho() BEFORE initscr(); ncurses
// requires initscr() to run first, so those calls operated on an
// uninitialized screen. They now run after initialization succeeds.
extern "C" std::vector<int> select_all_devices(){
    std::vector<int> all_devices;
    if(initscr() == NULL){
        printw("Failed to initialize select screen\n");
        return all_devices;
    }
    raw();
    keypad(stdscr, TRUE);
    noecho();
    all_devices = print_devices();
    printw("Hit enter to continue\n");
    getch();
    endwin();
    return all_devices;
}
|
3,502 | // Vector addition: r = a + b.
#include <stdio.h>
#include <iostream>
#include <iomanip>
using std::cout;
using std::cerr;
using std::endl;
// Handle CUDA errors
// Aborts the program with a readable diagnostic when a CUDA runtime call
// fails; no-op when err == cudaSuccess. Intended for use via CHECK(...).
void handle_error(cudaError_t err, const char *file, int line) {
  if (err == cudaSuccess)
    return;
  cout << cudaGetErrorString(err) << " in " << file << " at line " << line
       << endl;
  exit(EXIT_FAILURE);
}
#define CHECK(err) (handle_error(err, __FILE__, __LINE__))
// This will output the proper error string when calling cudaGetLastError
// Checks cudaGetLastError() and aborts with a diagnostic when a previous
// kernel launch (or async operation) failed. Use via handle_last_cuda_error(msg).
// Fix: the errorMessage argument was accepted but never printed; it is now
// included in the diagnostic so call sites can identify the failing launch.
void __handle_last_cuda_error(const char *errorMessage, const char *file,
                              const int line) {
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    cout << errorMessage << ": " << cudaGetErrorString(err) << " in " << file
         << " at line " << line << endl;
    exit(EXIT_FAILURE);
  }
}
#define handle_last_cuda_error(msg) __handle_last_cuda_error(msg, __FILE__, __LINE__)
// Host reference implementation of elementwise vector addition:
// r[i] = a[i] + b[i] for every i in [0, n).
void vector_add_cpu(int n, int* a, int* b, int* r) {
  for (int idx = 0; idx != n; ++idx)
    r[idx] = a[idx] + b[idx];
}
// CUDA kernel for elementwise vector addition: r[i] = a[i] + b[i].
// One thread per element; threads whose global index is >= n do nothing,
// so any grid that covers at least n threads is a valid launch.
__global__
void vector_add_kernel(int n, int const *a, int const *b, int *r) {
  int const gid = threadIdx.x + blockDim.x * blockIdx.x;
  if (gid >= n)
    return;
  r[gid] = a[gid] + b[gid];
}
// Launches vector_add_kernel over n elements with 256-thread blocks.
// a, b and r must be device pointers; the launch is asynchronous and
// launch-configuration errors surface through CHECK(cudaGetLastError()).
void vector_add_gpu(int n, int const *a, int const *b, int *r) {
  int const threadsPerBlock = 256;
  // Ceil-division so a partial tail block covers the remaining elements.
  int const blocksPerGrid = (n + threadsPerBlock - 1) / threadsPerBlock;
  cout << "CUDA kernel launch with " << blocksPerGrid << " blocks of " << threadsPerBlock
       << " threads" << endl;
  vector_add_kernel<<<blocksPerGrid, threadsPerBlock>>>(n, a, b, r);
  CHECK(cudaGetLastError());
}
/**
* Host main routine
*/
int main(void) {
// Print the vector length to be used, and compute its size
int numElements = 50000;
size_t size = numElements * sizeof(int);
cout << "[Vector addition of " << numElements << " elements]" << endl;
// h_r receives the GPU result; r holds the CPU reference for verification.
int *h_a = (int *) malloc(size);
int *h_b = (int *) malloc(size);
int *h_r = (int *) malloc(size);
int *r = (int *) malloc(size);
// Verify that allocations succeeded
if (h_a == NULL || h_b == NULL || h_r == NULL || r == NULL) {
cerr << "Failed to allocate host vectors!" << endl;
exit(EXIT_FAILURE);
}
// Initialize the host input vectors (rand() is unseeded, so the run is
// deterministic across executions).
for (int i = 0; i < numElements; ++i) {
h_a[i] = rand();
h_b[i] = rand();
}
cout << "Copy input data from the host memory to the CUDA device" << endl;
int *d_a = nullptr;
CHECK(cudaMalloc(&d_a, size));
int *d_b = nullptr;
CHECK(cudaMalloc(&d_b, size));
int *d_r = nullptr;
CHECK(cudaMalloc(&d_r, size));
CHECK(cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_b, h_b, size, cudaMemcpyHostToDevice));
// Launch the Vector Add CUDA Kernel
vector_add_gpu(numElements, d_a, d_b, d_r);
cout << "Copy output data from the CUDA device to the host memory" << endl;
// This blocking copy also synchronizes with the asynchronous kernel launch.
CHECK(cudaMemcpy(h_r, d_r, size, cudaMemcpyDeviceToHost));
// Verify that the result vector is correct (exact integer comparison).
vector_add_cpu(numElements, h_a, h_b, r);
for (int i = 0; i < numElements; ++i) {
if (h_r[i] != r[i]) {
cerr << "Result verification failed at element " << i << "!"
<< endl;
exit(EXIT_FAILURE);
}
}
cout << "Test PASSED" << endl;
// Free device global memory
CHECK(cudaFree(d_a));
CHECK(cudaFree(d_b));
CHECK(cudaFree(d_r));
// Free host memory
free(h_a);
free(h_b);
free(h_r);
free(r);
cout << "Done" << endl;
return 0;
}
|
3,503 | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>
#include <vector>
#include "vector_types.h"
#include <unistd.h>
#include <time.h>
#include "device_functions.h"
#include "cuda.h"
#include <cuda_runtime.h>
#define fix1 3.14 / (60 * 180)
using namespace std;
// Device helper: 5-term Taylor-series evaluation shared by the three pair
// loops in angles(). For inputs k and dk it produces
//   *sin_k  - series approximation of sin(k)
//   *cos_k  - series approximation of cos(k)
//   *cos_dk - series approximation of cos(dk)
// The term/factorial update schedule reproduces the original inline code
// (which was duplicated verbatim three times) exactly.
static __device__ void taylor_sin_cos(float k, float dk, float *sin_k, float *cos_k, float *cos_dk)
{
	float pom = k * k;
	float pom1 = dk * dk;
	float minus = -1;
	float sb1 = k;
	float ccb1 = 1;
	float ccd = 1;
	float fb = 3;
	float fsb = 4;
	float factorial = 2;
	float factorials = 6;
	for (int i = 0; i < 5; i++) {
		ccb1 = ccb1 + minus * pom / factorial;
		sb1 = sb1 + minus * k * pom / factorials;
		ccd = ccd + minus * pom1 / factorial;
		factorial = factorial * fb * (fb + 1);
		fb += 2;
		factorials = factorials * fsb * (fsb + 1);
		fsb += 2;
		minus = minus * (-1);
		pom = pom * k * k;
	}
	*sin_k = sb1;
	*cos_k = ccb1;
	*cos_dk = ccd;
}
// Builds three per-block 720-bin angular histograms over galaxy pairs:
//   hist   (DR) - this thread's real galaxy against every synthetic galaxy
//   hist_r (DD) - real-real pairs with i > idx, counted twice (both orderings)
//   hist_s (RR) - synthetic-synthetic pairs with i > idx, counted twice
// Expected launch (per host code): 782 blocks x 128 threads, one thread per
// galaxy (idx < 100000). Block b writes bin i to hist*[i + b*720].
// NOTE(review): parameter a1 is accepted but never read — all right-ascension
// differences use a0, even for synthetic pairs; confirm this is intended.
// Fixes vs. original: b0[idx]/b1[idx] are no longer read out of bounds by the
// 96 tail threads of the last block (loads moved behind the idx guard), and
// the bin index is range-checked before indexing the 720-entry histograms.
__global__ void angles(volatile float *a0, volatile float *b0, volatile float *a1, volatile float*b1, volatile int *hist, volatile int* hist_r, volatile int* hist_s)
{
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	float fix2 = 57;  // coarse radians->degrees factor (~180/pi); /0.25 below gives quarter-degree bins
	__shared__ int mn[720], r[720], s[720];
	// One thread clears the shared histograms before anyone accumulates.
	if (threadIdx.x == 0) {
		for (int i = 0; i < 720; i++) {
			mn[i] = 0;
			r[i] = 0;
			s[i] = 0;
		}
	}
	__syncthreads();
	if (idx < 100000) {
		float bb0 = b0[idx];
		float bb1 = b1[idx];
		float sb0 = sin(bb0);
		float cb0 = cos(bb0);
		float ssb1 = sin(bb1);
		float cb1 = cos(bb1);
		float sk, ck, cd, ac;
		int angle;
		// DR: real galaxy idx against every synthetic galaxy.
		for (int i = 0; i < 100000; i++) {
			taylor_sin_cos(b1[i], a0[i] - a0[idx], &sk, &ck, &cd);
			ac = acosf(sb0 * sk + cb0 * ck * cd);
			ac = (ac * fix2 / 0.25);
			angle = (int)ac;
			if (angle >= 0 && angle < 720)
				atomicAdd(&mn[angle], 1);
		}
		for (int i = idx + 1; i < 100000; i++) {
			// DD: real-real pair, counted for both orderings.
			taylor_sin_cos(b0[i], a0[i] - a0[idx], &sk, &ck, &cd);
			ac = acosf(sb0 * sk + cb0 * ck * cd);
			ac = (ac * fix2 / 0.25);
			angle = (int)ac;
			if (angle >= 0 && angle < 720)
				atomicAdd(&r[angle], 2);
			// RR: synthetic-synthetic pair, counted for both orderings.
			taylor_sin_cos(b1[i], a0[i] - a0[idx], &sk, &ck, &cd);
			ac = acosf((ssb1 * sk) + cb1 * ck * cd);
			ac = (ac * fix2 / 0.25);
			angle = (int)ac;
			if (angle >= 0 && angle < 720)
				atomicAdd(&s[angle], 2);
		}
	}
	__syncthreads();
	// Thread 0 publishes this block's histograms to global memory.
	if (threadIdx.x == 0) {
		for (int i = 0; i < 720; i++) {
			hist[i + (blockIdx.x * 720)] = mn[i];
			hist_r[i + (blockIdx.x * 720)] = r[i];
			hist_s[i + (blockIdx.x * 720)] = s[i];
		}
	}
}
// Host driver: loads 100k real and 100k synthetic galaxy coordinates
// (arcminutes, converted to radians via fix1), computes DD/DR/RR angular
// histograms on the GPU, reduces the per-block histograms, and prints the
// elapsed time followed by the omega estimator for each 0.25-degree bin.
// Fixes vs. original: input files are checked before use; the result
// accumulators are zero-initialized (they were malloc'd and then read with
// +=); host buffers are released with free() instead of cudaFree(); unused
// allocations (d_result*, arraybytes11, spare launch dims) were removed.
int main(int argc, char *argv[]) {
	FILE *real_g;
	FILE *synthetic_g;
	int galaxies_r, galaxies_s;
	clock_t start, end;
	start = clock();
	real_g = fopen("data_100k_arcmin.txt", "r");
	synthetic_g = fopen("flat_100k_arcmin.txt", "r");
	// Fix: the original never checked the files and would crash in fscanf.
	if (real_g == NULL || synthetic_g == NULL) {
		fprintf(stderr, "failed to open input files\n");
		return 1;
	}
	fscanf(real_g, "%d", &galaxies_r);
	fscanf(synthetic_g, "%d", &galaxies_s);
	int N = 100000;
	int xx = 782;                                  // blocks: ceil(100000 / 128)
	size_t arraybytes = N * sizeof(float);
	size_t arraybytes1 = xx * 720 * sizeof(int);   // per-block histogram storage
	size_t l1 = 720 * sizeof(float);
	// Host buffers: A/B = real RA/dec, A1/B1 = synthetic RA/dec.
	float* h_A = (float*)malloc(arraybytes);
	float* h_B = (float*)malloc(arraybytes);
	float* h_A1 = (float*)malloc(arraybytes);
	float* h_B1 = (float*)malloc(arraybytes);
	int* h_C = (int*)malloc(arraybytes1);
	int* h_D = (int*)malloc(arraybytes1);
	int* h_E = (int*)malloc(arraybytes1);
	// Fix: accumulated with += below, so they must start zeroed; the
	// original read uninitialized malloc memory.
	int* result = (int*)calloc(720, sizeof(int));
	int* result_r = (int*)calloc(720, sizeof(int));
	int* result_s = (int*)calloc(720, sizeof(int));
	float* final = (float*)malloc(l1);
	for(int i = 0; i < galaxies_r; i++) {
		fscanf(real_g, "%e %e", &h_A[i], &h_B[i]);
		fscanf(synthetic_g, "%e %e", &h_A1[i], &h_B1[i]);
		// Convert arcminutes to radians (fix1 = 3.14 / (60*180)).
		h_A[i] = h_A[i] * fix1;
		h_A1[i] = h_A1[i] * fix1;
		h_B[i] = h_B[i] * fix1;
		h_B1[i] = h_B1[i] * fix1;
	}
	fclose(real_g);
	fclose(synthetic_g);
	float* d_A; cudaMalloc(&d_A, arraybytes);
	float* d_B; cudaMalloc(&d_B, arraybytes);
	float* d_A1; cudaMalloc(&d_A1, arraybytes);
	float* d_B1; cudaMalloc(&d_B1, arraybytes);
	int* d_C; cudaMalloc(&d_C, arraybytes1);
	int* d_D; cudaMalloc(&d_D, arraybytes1);
	int* d_E; cudaMalloc(&d_E, arraybytes1);
	// Copy arrays from host memory to device memory
	cudaMemcpy(d_A, h_A, arraybytes, cudaMemcpyHostToDevice);
	cudaMemcpy(d_B, h_B, arraybytes, cudaMemcpyHostToDevice);
	cudaMemcpy(d_A1, h_A1, arraybytes, cudaMemcpyHostToDevice);
	cudaMemcpy(d_B1, h_B1, arraybytes, cudaMemcpyHostToDevice);
	// Invoke kernel
	dim3 threadsPerBlock(128);
	dim3 blocksPerGrid(xx);
	double cpu_time_used;
	cudaMemset(d_C, 0, arraybytes1);
	cudaMemset(d_D, 0, arraybytes1);
	cudaMemset(d_E, 0, arraybytes1);
	angles<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_A1, d_B1, d_C, d_D, d_E);
	// These blocking D2H copies also synchronize with the kernel.
	cudaMemcpy(h_C, d_C, arraybytes1, cudaMemcpyDeviceToHost);
	cudaMemcpy(h_D, d_D, arraybytes1, cudaMemcpyDeviceToHost);
	cudaMemcpy(h_E, d_E, arraybytes1, cudaMemcpyDeviceToHost);
	// Reduce the per-block histograms into one 720-bin histogram each.
	for (int i = 0; i < 720 * xx; i++) {
		result[i % 720] += h_C[i];
		result_r[i % 720] += h_D[i];
		result_s[i % 720] += h_E[i];
	}
	// Add the N self-pairs into the DD and RR zero bins.
	result_r[0] = result_r[0] + 100000;
	result_s[0] = result_s[0] + 100000;
	final[0] = (float) ((float)(result_r[0] - 2 * result[0] + result_s[0] + 200000) / (float)(100000 + result_s[0]));
	for(int i = 1; i < 720; i++) {
		final[i] = (float) ((float)(result_r[i] - 2 * result[i] + result_s[i]) / (float) result_s[i]);
	}
	end = clock();
	cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
	printf("%f\n", cpu_time_used);
	for(int i = 0; i < 720; i++) {
		printf( "%f ", final[i]);
	}
	// Fix: device memory is released with cudaFree, host memory with free();
	// the original passed malloc'd host pointers to cudaFree.
	cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
	cudaFree(d_D); cudaFree(d_E);
	cudaFree(d_A1); cudaFree(d_B1);
	free(h_A); free(h_B); free(h_A1); free(h_B1);
	free(h_C); free(h_D); free(h_E);
	free(result); free(result_r); free(result_s); free(final);
	return 0;
}
|
3,504 | #include<stdio.h>
// Kernel that prints the (block, thread) identity of every thread running it.
// Device printf output is flushed at the next host synchronization point.
__global__ void printId(){
    printf("I come from block %d - thread %d.\n", blockIdx.x, threadIdx.x);
}
// Launches printId on 2 blocks of 4 threads (8 lines of output).
// Fix: the original returned without synchronizing; kernel launches are
// asynchronous, so the process could exit before the kernel ran and its
// printf buffer was flushed. cudaDeviceSynchronize() makes the host wait.
int main(){
    printId<<<2, 4>>>();
    cudaDeviceSynchronize();
    return 0;
} |
3,505 | #include "includes.h"
// Radix-sort scatter step for one bit. Elements whose zero-predicate is set
// are written at their zeros-scan rank; the remaining elements go after all
// of the zeros, at (ones-scan rank + total zero count). Values and their
// original positions move together. Layout: one element per thread, blocks
// of BLOCK_WIDTH threads; the scan arrays are exclusive prefix sums.
__global__ void scatter(unsigned int *d_inVals, unsigned int *d_outVals, unsigned int *d_inPos, unsigned int *d_outPos, unsigned int *d_zerosScan, unsigned int *d_onesScan, unsigned int *d_zerosPredicate, unsigned int *d_onesPredicate, size_t n)
{
	int globalIdx = BLOCK_WIDTH * blockIdx.x + threadIdx.x;
	// Total zeros = exclusive-scan total plus the last predicate value.
	int zerosTotal = d_zerosScan[n - 1] + d_zerosPredicate[n - 1];
	if(globalIdx < n) {
		int destination = d_zerosPredicate[globalIdx]
			? (int)d_zerosScan[globalIdx]
			: (int)d_onesScan[globalIdx] + zerosTotal;
		// sanity check, as in the original
		if(destination < n) {
			d_outVals[destination] = d_inVals[globalIdx];
			d_outPos[destination] = d_inPos[globalIdx];
		}
	}
} |
3,506 | #include <cuda.h>
// Kernel that executes convolution. Nothing fancy is done. We don't even try to avoid
// block effects here.
// Direct 1D convolution of one signal with several kernels.
// Grid layout (inferred from the indexing below — confirm against the host
// launcher, which is not in view): blockIdx.x selects the kernel, blockIdx.y
// selects a numPtsPerBlock-wide window of the signal. Dynamic shared memory
// must hold (numPtsPerBlock + kernelWidth - 1) + kernelWidth floats.
// Output: dataOut[kernel * signalLength + point].
__global__ void KernConvolve(float *data,
float *kernels,
float *dataOut,
int signalLength,
int kernelWidth,
int numPtsPerBlock)
{
// first fetch the data for this block.
extern __shared__ float sData[] ;
// Shared memory is split into the padded signal window and the kernel taps.
float *arrData = (float *) &sData[0] ;
float *arrKernel = (float *) &sData[numPtsPerBlock+kernelWidth-1] ;
// copy first the data vector.
// The window starts kernelWidth/2 points before the block's region so the
// convolution below can read a full halo; out-of-range points read as 0.
int dataIndexInSignal = blockIdx.y * numPtsPerBlock + threadIdx.x - kernelWidth/2;
int numPtsToCopy = numPtsPerBlock + kernelWidth-1 ;
for (int index = threadIdx.x ; index < numPtsToCopy ;
index+= blockDim.x, dataIndexInSignal += blockDim.x)
{
if (dataIndexInSignal < 0 || dataIndexInSignal >= signalLength)
arrData[index] = 0 ;
else
arrData[index] = data[dataIndexInSignal] ;
}
__syncthreads() ;
// copy the kernel next.
int dataIndexInKernel = blockIdx.x * kernelWidth + threadIdx.x ;
for (int index = threadIdx.x ; index < kernelWidth ;
index+= blockDim.x, dataIndexInKernel += blockDim.x)
{
arrKernel[index] = kernels[dataIndexInKernel] ;
}
__syncthreads() ;
// perform the convolution and write out the result.
//output position.
dataIndexInSignal = blockIdx.y * numPtsPerBlock + threadIdx.x ;
for (int index = threadIdx.x ; index < numPtsPerBlock && dataIndexInSignal < signalLength ;
index+= blockDim.x, dataIndexInSignal += blockDim.x)
{
// Dot product of the kernel taps with the window around this point.
float val = 0.0 ;
for (int wtIndex = 0 ; wtIndex < kernelWidth ; wtIndex++)
{
val += arrKernel[wtIndex] * arrData[index+wtIndex] ;
}
// index of output data point in signal
int outIndex = blockIdx.x * signalLength + dataIndexInSignal ;
dataOut[outIndex] = val ;
}
}
// Kernel that executes convolution. Nothing fancy is done. We don't even try to avoid
// block effects here.
// Computes per-block partial cross-correlations between signal 1 and one of
// several copies of signal 2, producing one partial sum per kernel shift.
// Grid layout (inferred from the indexing — confirm against the host
// launcher): blockIdx.x selects the signal-2 copy, blockIdx.y selects a
// numPtsPerBlock-wide piece. Dynamic shared memory must hold
// numPtsPerBlock + (numPtsPerBlock + kernelWidth - 1) floats.
// KernPartialConvolveSum later reduces the per-block partial sums.
__global__ void KernPartialConvolve(float *dPtrSignal1, float *dPtrSignal2, float *dPtrBlockProducts,
int signalLength, int kernelWidth, int numPtsPerBlock)
{
extern __shared__ float sData[] ;
float *arrData1 = (float *) &sData[0] ;
float *arrData2 = (float *) &sData[numPtsPerBlock] ;
// copy first data vector.
int dataIndexInSignal = blockIdx.y * numPtsPerBlock + threadIdx.x ;
for (int index = threadIdx.x ; index < numPtsPerBlock && dataIndexInSignal < signalLength ;
index+= blockDim.x, dataIndexInSignal += blockDim.x)
{
arrData1[index] = dPtrSignal1[dataIndexInSignal] ;
}
__syncthreads() ;
// copy second data vector.
// Signal 2 is read with a (kernelWidth-1)/2 left halo; points before the
// start of the signal read as 0.
int numPtsPerBlock2 = numPtsPerBlock + kernelWidth - 1 ;
int signalIndex = blockIdx.x * signalLength ;
dataIndexInSignal = blockIdx.y * numPtsPerBlock + threadIdx.x - (kernelWidth-1)/2;
for (int index = threadIdx.x ; index < numPtsPerBlock2 && dataIndexInSignal < signalLength ;
index+= blockDim.x, dataIndexInSignal += blockDim.x)
{
if (dataIndexInSignal < 0)
arrData2[index] = 0 ;
else
arrData2[index] = dPtrSignal2[signalIndex+dataIndexInSignal] ;
}
__syncthreads() ;
// Clamp the reachable window so the tail block does not run past the end
// of the signal (plus its halo).
dataIndexInSignal = blockIdx.y * numPtsPerBlock ;
int maxIndex = numPtsPerBlock+kernelWidth-1 ;
if (signalLength + (kernelWidth-1)/2-dataIndexInSignal < maxIndex)
maxIndex = signalLength + (kernelWidth-1)/2 - dataIndexInSignal ;
// Each thread handles one or more shifts; results are laid out so that a
// later kernel can sum all pieces belonging to the same shift.
int dataIndexInBlock = blockIdx.x*gridDim.y*kernelWidth + blockIdx.y ;
for (int shift = threadIdx.x ; shift < kernelWidth ; shift+=blockDim.x)
{
float val = 0.0 ;
for (int index = 0 ; index < numPtsPerBlock ; index++)
{
if (index+shift >= maxIndex)
break ;
val += arrData1[index]*arrData2[index+shift] ;
}
dPtrBlockProducts[dataIndexInBlock + (kernelWidth-1-shift)*gridDim.y] = val ;
}
}
// Kernel that sums the results from PartialConvolve.
// Each kernel will have dimension of kernelWidth, so kernelWidth sums have to be computed per block.
// Each block will handle one dimension of a kernel sum. So the number of blocks is (kernel dimension) x (# of kernels)
// Reduces the per-block partial sums produced by KernPartialConvolve:
// blockIdx.x selects the kernel, blockIdx.y the shift dimension; each block
// sums that dimension's numPiecesPerKernel pieces into one value.
// Dynamic shared memory must hold blockDim.x floats.
// NOTE(review): the tree reduction below halves blockDim.x repeatedly and so
// assumes blockDim.x is a power of two — confirm the launch configuration.
__global__ void KernPartialConvolveSum(float *dPtrBlockProducts, float *dPtrResults,
int kernelWidth, int numPiecesPerKernel,
int numKernels)
{
// Results from partial convolve resulted in numPiecesPerKernel partial sums for
// every dimension of a kernel. Here a block has to sum these together.
// Ceil-divide the pieces across the threads of the block.
int numPiecesPerThread = numPiecesPerKernel/blockDim.x ;
if (blockDim.x*numPiecesPerThread < numPiecesPerKernel)
numPiecesPerThread++ ;
int startKernelIndex = blockIdx.x * numPiecesPerKernel * kernelWidth ;
int startDataIndexForBlock = startKernelIndex + blockIdx.y * numPiecesPerKernel ;
int startDataIndexForThread = numPiecesPerThread*threadIdx.x ;
extern __shared__ float sData[] ;
// Clamp the final thread's share so it stays within this dimension.
int numToCopy1 = numPiecesPerThread ;
if (startDataIndexForThread + numToCopy1 > numPiecesPerKernel)
numToCopy1 = numPiecesPerKernel - startDataIndexForThread ;
// Serial accumulation of this thread's pieces.
float val = 0 ;
for (int index = 0 ; index < numToCopy1 ; index++)
{
val += dPtrBlockProducts[startDataIndexForBlock+startDataIndexForThread+index] ;
}
sData[threadIdx.x] = val ;
__syncthreads() ;
// Standard shared-memory tree reduction across the block.
for (unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (threadIdx.x < s)
sData[threadIdx.x] += sData[threadIdx.x + s];
__syncthreads();
}
// Thread 0 writes this (kernel, shift) result.
if (threadIdx.x == 0)
{
dPtrResults[blockIdx.y+kernelWidth*blockIdx.x] = sData[0] ;
}
}
// Sums numSignals stacked signals (each signalLength floats, laid out
// back-to-back in `signals`) into sumSignals, point by point.
// Each thread owns numPtsPerThread consecutive points inside its block's
// numPtsPerBlock-point window; points past signalLength are skipped.
__global__ void KernAddSignals(float *signals, float *sumSignals, int signalLength,
int numSignals, int numPtsPerBlock, int numPtsPerThread)
{
    int first = blockIdx.x * numPtsPerBlock + threadIdx.x * numPtsPerThread;
    int last = first + numPtsPerThread;
    for (int pt = first; pt < last && pt < signalLength; pt++)
    {
        float total = 0;
        for (int sig = 0; sig < numSignals; sig++)
            total += signals[pt + sig * signalLength];
        sumSignals[pt] = total;
    }
}
// Kernel that executes convolution. Nothing fancy is done. We don't even try to avoid
// block effects here.
// Convolves each signal copy with its own time-reversed kernel (taps are
// read back-to-front) using a centered window of (kernelWidth-1)/2 on each
// side; out-of-range taps are skipped rather than zero-padded.
// Grid layout (inferred from the indexing — confirm against the host
// launcher): blockIdx.x selects the kernel/signal pair, blockIdx.y a
// numPtsPerBlock window; each thread covers numPtsPerThread points.
// Output overwrites dataOut at the same (signal, point) layout as `signals`.
__global__ void KernReverseConvolve(float *signals, float *kernels, float *dataOut, int signalLength, int kernelWidth,
int numKernels, int numPtsPerBlock, int numPtsPerThread)
{
// can probably speed this up well by fetching kernels to local memory or put it in constant memory.
int kernelIndex = blockIdx.x ;
int signalStartIndex = kernelIndex * signalLength ;
int startIndex = blockIdx.y * numPtsPerBlock + threadIdx.x * numPtsPerThread ;
for (int ptNum = 0 ; ptNum < numPtsPerThread; ptNum++)
{
// index of data point in signal
int index = startIndex + ptNum ;
if (index >= signalLength)
break ;
float val = 0 ;
int startIndexInKernel = kernelIndex*kernelWidth ;
for (int wtIndex = 0 ; wtIndex < kernelWidth ; wtIndex++)
{
// Past the right edge of the signal: no further taps can contribute.
if (wtIndex + index >= signalLength + (kernelWidth-1)/2)
break ;
// Before the left edge of the signal: skip this tap.
if (index+wtIndex < (kernelWidth-1)/2)
continue ;
// Taps are applied in reverse order (kernelWidth-1-wtIndex).
val += kernels[kernelWidth-1-wtIndex+startIndexInKernel] *
signals[signalStartIndex + index + wtIndex - (kernelWidth-1)/2] ;
}
dataOut[signalStartIndex+index] = val ;
}
}
|
3,507 | #include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <time.h>
#include <string.h>
// Image geometry and plot-range globals shared by main() and the CUDA helper.
int xpixels = 17500; // output image width in pixels
int ypixels = 10000; // output image height in pixels
int currentPixel = 0; //counter for calculations
int totalPixels; //the total number of pixels in the image (xpixels * ypixels)
double xmin; // ranges for the calculating the mandelbrot set on
double xmax; // real-imaginary plane, where x is real and y
double ymin; // is imaginary
double ymax; //
char outputFile[] = "imagecuda.ppm"; // output file
/*
* complex_number_t
 * Represents a complex number by storing separate doubles for real and
* imaginary parts for the complex number.
*/
typedef struct complex_number_t{
double real; // real component
double imag; // imaginary component
}complex_number_t;
/*
 * pixel_t
 * Represents a pixel in the PPM file by storing the RGB components.
 */
typedef struct pixel_t{
int r; // red component
int g; // green component
int b; // blue component
} pixel_t;
/*convert_to_2d
*Finds index in a 1D array given 2D indices
*INPUTS
* x - column position
* y - row position
* scale - number of elements per row
*OUTPUT
* long index in the 1D array
*/
/* Maps 2D coordinates (x = column, y = row) to the index of the same element
 * in a row-major 1D array whose rows each hold `scale` elements. */
unsigned int convert_to_2d(int x, int y, int scale) {
    unsigned int rowStart = (unsigned int)(y * scale);
    return rowStart + (unsigned int)x;
}
/*
*calculate_mandlebrot_set
*Determines if a given position on the complex plane diverages or not and
*outputs the results to an array.
*INPUTS
* *pixelArray - poinjter to array representing output image
* xpixels - number of horizontal in output image
* ypixels - number or vertical pixels in output image
* xmin - smaller cartesian coordinate of the output image in x direction
* xmax - larger cartesian coordinate of the output image in x direction
* ymin - smaller cartesian coordinate of the output image in y direction
* ymax - larger cartesian coordinate of the output image in y direction
*
*/
__global__ void calculate_mandlebrot_set( pixel_t *pixelArray, int xpixels, int ypixels, double xmin, double xmax, double ymin, double ymax){
int MAX_ITERATIONS = 1000;
// Cartesian step between adjacent pixels on each axis.
double xincrement = (xmax - xmin) / xpixels;
double yincrement = (ymax - ymin) / ypixels;
// Iterate z <- z^2 + c starting from z = 0.
complex_number_t z;
z.real = 0;
z.imag = 0;
complex_number_t c;
pixel_t *currPixel;
// One thread per pixel on a 2D grid; guard against the partial edge tiles.
if (threadIdx.x + blockDim.x * blockIdx.x < xpixels && threadIdx.y + blockDim.y * blockIdx.y < ypixels){
int myIndex = (threadIdx.x + blockDim.x * blockIdx.x) + xpixels* (threadIdx.y + blockDim.y * blockIdx.y);
currPixel = pixelArray + myIndex;
int x = threadIdx.x + blockDim.x * blockIdx.x; // x position of the pixel
int y = threadIdx.y + blockDim.y * blockIdx.y; // y position of the pixel
// c is the point on the complex plane this pixel represents.
c.real = xmin + x * xincrement;
c.imag = ymin + y * yincrement;
int toBreak = 0;
int i = 0;
while( i < MAX_ITERATIONS ){
complex_number_t temp;
temp.real = z.real;
temp.imag = z.imag;
z.real = (z.real * z.real) - (z.imag * z.imag) + c.real; //real
z.imag = (2*temp.real*temp.imag) + c.imag; // imaginary
// |z|^2 > 4 means the orbit escapes; color by the escape iteration.
if(z.real*z.real + z.imag*z.imag > 4){
//save colors to Pixel Array
(*currPixel).r = (int) 255 * (((1.0*i) / (1.0 * MAX_ITERATIONS)));
(*currPixel).g = (int) 255 * sqrt(((1.0*i) / (1.0 * MAX_ITERATIONS)));
(*currPixel).b = (int) 50 * sqrt(1.0 - ((1.0*i) / (1.0 * MAX_ITERATIONS)));
toBreak = 1;
break; //end calculation for current pixel
}
i++;
}
// never escapes: the point is (assumed) in the set, painted black
if(!toBreak){
(*currPixel).r = 0;
(*currPixel).g = 0;
(*currPixel).b = 0;
}
}
}
/*
*calcMandelbrotCuda
*A helper function that launches calculate_mandlebrot_set kernel.
*INPUTS
* *data - a pointer to and array of pixel_t for data output
*/
/*
 * calcMandelbrotCuda
 * Helper that configures and launches the calculate_mandlebrot_set kernel
 * over the whole image, then waits for it to finish.
 * INPUTS
 *   *data - DEVICE pointer to an array of xpixels*ypixels pixel_t
 * RETURNS cudaSuccess, or the first CUDA error encountered.
 * Fix: the original ignored the result of cudaDeviceSynchronize(), silently
 * dropping asynchronous kernel-execution errors; it is now checked and
 * returned like the other calls. (Commented-out dead locals were removed.)
 */
cudaError_t calcMandelbrotCuda(pixel_t *data ){
	cudaError_t cudaStatus;
	dim3 threadsPerBlock(32,32,1);
	// Ceil-division so partial edge tiles are covered; the kernel bounds-checks.
	dim3 numBlocks((xpixels - 1) / threadsPerBlock.x + 1, (ypixels - 1) / threadsPerBlock.y + 1, 1);
	cudaStatus = cudaSetDevice(0);
	if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?"); goto Error; }
	printf("threads.x = %d, threads.y = %d\n", threadsPerBlock.x, threadsPerBlock.y);
	printf("blocks.x = %d, blocks.y = %d\n", numBlocks.x, numBlocks.y);
	calculate_mandlebrot_set<<<numBlocks, threadsPerBlock>>>(data, xpixels, ypixels, xmin, xmax, ymin, ymax);
	cudaStatus = cudaGetLastError();
	if (cudaStatus != cudaSuccess) { fprintf(stderr, "calculate_mandlebrot_set launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; }
	cudaStatus = cudaDeviceSynchronize();
	if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; }
Error:
	return cudaStatus;
}
// Parses the real/imaginary plot range from argv, renders the Mandelbrot set
// on the GPU, times each phase, writes a PPM image, and finally execs
// ImageMagick `convert` to produce a PNG.
// Fixes vs. original: fopen/malloc/cudaMalloc results are checked before use;
// host and device buffers are released before the exec; and the conversion
// now targets the file this program actually wrote (imagecuda.ppm) rather
// than the unrelated "image.ppm".
int main(int argc, char * argv[]){
	if (!(argc == 5 || argc == 2)){
		printf("ERROR: incorrect number of arguments. Run with argument --help for help.\n");
		return 0;
	}
	if (argc == 2){
		if (!strcmp(argv[1], "--help")){
			printf("USAGE: CompiledMandelbrot [xmin] [xmax] [ymin] [ymax]\n");
		}
		else{
			printf("ERROR: Unrecognized argument. Run with argument --help for help.\n");
		}
		return 0;
	}
	//set range of mandlebrot set from arguments
	xmin = atof(argv[1]);
	xmax = atof(argv[2]);
	ymin = atof(argv[3]);
	ymax = atof(argv[4]);
	if (ymin >= ymax){
		printf("ERROR: Invalid range for imaginary axis.\n");
		return 0;
	}
	if (xmin >= xmax){
		printf("ERROR: Invalid range for real axis.\n");
		return 0;
	}
	//initialize some values
	totalPixels = xpixels * ypixels;
	//initialize output file with PPM header
	FILE *fp;
	fp = fopen(outputFile, "w+");
	if (fp == NULL){ // Fix: the original wrote through fp without checking it
		printf("ERROR: could not open %s for writing.\n", outputFile);
		return 1;
	}
	fprintf(fp, "P3 \n%d %d \n255\n\n", xpixels, ypixels);
	fclose(fp);
	//variables for timing
	struct timespec start, finish;
	double elapsed;
	printf("Timing Calculations...\n");
	clock_gettime(CLOCK_MONOTONIC, &start);
	// Host buffer for the copied-back image and its device counterpart.
	pixel_t *pixelArray = (pixel_t*)(malloc(xpixels*ypixels*sizeof(pixel_t)));
	if (pixelArray == NULL){
		printf("ERROR: could not allocate host pixel buffer.\n");
		return 1;
	}
	pixel_t *d_pixelArray;
	if (cudaMalloc(&d_pixelArray, totalPixels * sizeof(pixel_t)) != cudaSuccess){
		printf("ERROR: could not allocate device pixel buffer.\n");
		free(pixelArray);
		return 1;
	}
	cudaError_t cudaStatus = calcMandelbrotCuda(d_pixelArray);
	if (cudaStatus != cudaSuccess){
		printf("ERROR: CUDA computation failed.\n");
		cudaFree(d_pixelArray);
		free(pixelArray);
		return 1;
	}
	//calculate elapsed time
	clock_gettime(CLOCK_MONOTONIC, &finish);
	elapsed = (finish.tv_sec - start.tv_sec);
	elapsed += (finish.tv_nsec - start.tv_nsec) / 1000000000.0;
	printf("Calculations took %f seconds\n", elapsed);
	printf("\nTiming cudaMemCpy...\n");
	clock_gettime(CLOCK_MONOTONIC, &start);
	cudaMemcpy(pixelArray, d_pixelArray, totalPixels*sizeof(pixel_t), cudaMemcpyDeviceToHost);
	clock_gettime(CLOCK_MONOTONIC, &finish);
	elapsed = (finish.tv_sec - start.tv_sec);
	elapsed += (finish.tv_nsec - start.tv_nsec) / 1000000000.0;
	printf("cudaMemCpy took %f seconds\n", elapsed);
	//start file output timing
	printf("\nStarting file output...\n");
	clock_gettime(CLOCK_MONOTONIC, &start);
	fp = fopen(outputFile, "a");
	if (fp == NULL){
		printf("ERROR: could not reopen %s for append.\n", outputFile);
		cudaFree(d_pixelArray);
		free(pixelArray);
		return 1;
	}
	for (int y = 0; y < ypixels; y++){
		for (int x = 0; x < xpixels; x++){
			// One pixel at a time, in PPM "R G B" text format.
			pixel_t* currPixel = pixelArray + convert_to_2d(x, y, xpixels);
			fprintf(fp, " %d %d %d ",(*currPixel).r,(*currPixel).g,(*currPixel).b);
		}
		fprintf(fp, "\n"); //next line in PPM file
	}
	fclose(fp);
	//stop timing
	clock_gettime(CLOCK_MONOTONIC, &finish);
	elapsed = (finish.tv_sec - start.tv_sec);
	elapsed += (finish.tv_nsec - start.tv_nsec) / 1000000000.0;
	printf("File output took %f seconds.\n", elapsed );
	// Release buffers before replacing the process image with convert.
	cudaFree(d_pixelArray);
	free(pixelArray);
	//convert the PPM file to a PNG file to save space
	// Fix: convert the file we actually wrote, not "image.ppm".
	execl("/usr/bin/convert", "/usr/bin/convert", outputFile, "imagecuda.png", (char *)NULL);
	// execl only returns on failure.
	return 0;
} |
3,508 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/* Auto-generated stress-test kernel (see the file banner): conditionally
 * iterates a chain of float intrinsics over var_3 and prints the final
 * accumulator with round-trippable precision (%.17g). The expressions are
 * intentionally opaque; do not "simplify" them - generated comparisons rely
 * on the exact operation order and literal values, including the divisions
 * by +0.0f and denormal constants. */
__global__
void compute(float comp, int var_1,float var_2,float* var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25) {
if (comp == (var_2 / +1.8953E-43f)) {
for (int i=0; i < var_1; ++i) {
var_3[i] = asinf(-0.0f);
comp += var_3[i] + atan2f(powf(+1.6429E-44f / fabsf((-1.7514E5f * -1.4085E-35f)), fmodf((var_4 - var_5 - coshf(var_6 - -1.3860E-35f + var_7)), -1.9893E-43f * (+1.7807E-37f / sqrtf(+1.3247E-37f)))), (var_8 / +0.0f * (+1.7087E-37f / var_9)));
if (comp <= fmodf((var_10 + (+0.0f / var_11)), (-1.4604E-43f / var_12 * var_13))) {
comp = var_14 / +1.9332E-43f;
}
if (comp <= (+1.6425E-41f * (var_15 - var_16))) {
float tmp_1 = (-1.5774E7f - (var_17 - powf(-1.2596E13f / -1.8114E-12f, -1.9215E-37f * var_18)));
float tmp_2 = (-1.8062E-21f + (-1.6115E-41f * coshf((var_19 * var_20 + (var_21 + cosf(fmodf(+1.2369E-35f, expf(var_22 / var_23 - var_24))))))));
comp = tmp_2 * tmp_1 / (var_25 / +1.0353E24f * +1.6672E-42f);
}
}
}
printf("%.17g\n", comp);
}
// Allocates a 10-element float array with every slot set to v.
// The caller owns the returned buffer and must free() it.
// Fix: the original did not check malloc, so an allocation failure would
// crash in the initialization loop; it now aborts with a clear message.
float* initPointer(float v) {
  float *ret = (float*) malloc(sizeof(float)*10);
  if (ret == NULL) {
    fprintf(stderr, "initPointer: out of memory\n");
    exit(EXIT_FAILURE);
  }
  for(int i=0; i < 10; ++i)
    ret[i] = v;
  return ret;
}
// Driver for the auto-generated `compute` kernel: reads 26 numeric arguments
// (argv[1]..argv[26]), launches the kernel on one thread, and waits for the
// device-side printf to flush.
// Fixes vs. original: argc is validated before any argv access, and the
// float buffer is staged in device memory instead of passing the host
// malloc() pointer straight into the kernel (an invalid device access).
int main(int argc, char** argv) {
  /* Program variables */
  if (argc < 27) {
    fprintf(stderr, "usage: %s <26 numeric arguments>\n", argv[0]);
    return EXIT_FAILURE;
  }
  float tmp_1 = atof(argv[1]);
  int tmp_2 = atoi(argv[2]);
  float tmp_3 = atof(argv[3]);
  float* tmp_4 = initPointer( atof(argv[4]) );
  float tmp_5 = atof(argv[5]);
  float tmp_6 = atof(argv[6]);
  float tmp_7 = atof(argv[7]);
  float tmp_8 = atof(argv[8]);
  float tmp_9 = atof(argv[9]);
  float tmp_10 = atof(argv[10]);
  float tmp_11 = atof(argv[11]);
  float tmp_12 = atof(argv[12]);
  float tmp_13 = atof(argv[13]);
  float tmp_14 = atof(argv[14]);
  float tmp_15 = atof(argv[15]);
  float tmp_16 = atof(argv[16]);
  float tmp_17 = atof(argv[17]);
  float tmp_18 = atof(argv[18]);
  float tmp_19 = atof(argv[19]);
  float tmp_20 = atof(argv[20]);
  float tmp_21 = atof(argv[21]);
  float tmp_22 = atof(argv[22]);
  float tmp_23 = atof(argv[23]);
  float tmp_24 = atof(argv[24]);
  float tmp_25 = atof(argv[25]);
  float tmp_26 = atof(argv[26]);
  // Stage the 10-element array on the device for the kernel's var_3.
  float* d_var_3 = NULL;
  cudaMalloc(&d_var_3, 10 * sizeof(float));
  cudaMemcpy(d_var_3, tmp_4, 10 * sizeof(float), cudaMemcpyHostToDevice);
  compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,d_var_3,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26);
  cudaDeviceSynchronize();
  cudaFree(d_var_3);
  free(tmp_4);
  return 0;
}
|
3,509 | #define N 512
#define NUM_BLOCKS 16
#define NUM_THREADS 48
//Do not change above three lines.
//Submission should be named as <RollNumber>_Prog.cu
//Upload just this cu file and nothing else. If you upload it as a zip, it will not be evaluated.
/*Remember the following guidelines to avoid losing marks
This exercise is quite simple. The only tricky part is that total number of threads (NUM_BLOCKS*NUM_THREADS) may be different (higher or lower) from N.
Index of an array should not exceed the array size.
No output array-element should be computed more than once
No marks will be given if the program does not compile or run (TAs will not debug your program at all)
Do not change the name of any variable that we have introduced.
*/
#include <stdio.h>
//TODO: WRITE GPU KERNEL. It should not be called repeatedly from the host, but just once. Each time it is called, it may process more than array-element or not process any array-element at all.
// Element-wise C = A + B over an N*N flat array.
// Each thread handles a contiguous chunk of ceil(N*N / totalThreads)
// elements so the whole output is covered exactly once even when the
// thread count does not divide N*N.
__global__ void MatrixAddition(int *A, int *B , int *C )
{
int op_per_thread = (N*N)/(NUM_BLOCKS*NUM_THREADS);
if((N*N)%(NUM_BLOCKS*NUM_THREADS)!=0)
op_per_thread +=1;
int thread_index = threadIdx.x + blockIdx.x*blockDim.x;
// BUG FIX: removed leftover debug `printf("%d",blockDim.x)` — it emitted
// one line per thread (NUM_BLOCKS*NUM_THREADS times) and serialized the kernel.
for(int i=0;i<op_per_thread;i++){
int index = thread_index*op_per_thread + i;
if(index < N*N)   // guard the tail chunk of the last thread
*(C+index) = *(B+index) + *(A+index);
}
}
// Host driver: computes C = A + B sequentially on the CPU, repeats the
// addition on the GPU with MatrixAddition, then cross-checks the results.
// NOTE(review): A, B and C are N*N int stack arrays (3 MiB total for N=512)
// — assumes the platform's default stack is large enough; verify.
int main (int argc, char **argv) {
int A[N][N], B[N][N], C[N][N];
int *d_A, *d_B, *d_C; // These are the copies of A, B and C on the GPU
int *h_C; // This is a host copy of the output of B from the GPU
int i, j;
h_C = (int *)malloc(N*N*sizeof(int));
// Deterministic test pattern.
for(i=0;i<N;i++) {
for(j=0;j<N;j++) {
A[i][j] = i+j;
B[i][j]= 2*j-1;
}
}
// sequential implementation of main computation
for(i=0;i<N;i++) {
for(j=0;j<N;j++) {
C[i][j] = A[i][j]+B[i][j];
}
}
// TODO: ALLOCATE MEMORY FOR GPU COPIES OF d_A, d_B and d_C
cudaMalloc((void **)&d_A, N*N*sizeof(int));
cudaMalloc((void **)&d_B, N*N*sizeof(int));
cudaMalloc((void **)&d_C, N*N*sizeof(int));
// TODO: COPY A TO d_A
// TODO: COPY B TO d_B
cudaMemcpy(d_A,&A[0], N*N*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_B,&B[0], N*N*sizeof(int), cudaMemcpyHostToDevice);
// TODO: CREATE BLOCKS with THREADS AND INVOKE GPU KERNEL
//Use NUM_BLOCKS blocks, each with NUM_THREADS threads
MatrixAddition<<<NUM_BLOCKS,NUM_THREADS>>>(d_A,d_B,d_C);
// TODO: COPY d_C BACK FROM GPU to CPU in variable h_C
// (the blocking cudaMemcpy below also synchronizes with the kernel)
cudaMemcpy(h_C,d_C, N*N*sizeof(int), cudaMemcpyDeviceToHost);
// TODO: Verify result is correct by comparing
for(i=0;i<N;i++) {
for(j=0;j<N;j++) {
//TODO: compare each element of h_C and C by subtracting them
//print only those elements for which the above subtraction is non-zero
int diff = C[i][j] - *(h_C + N*i +j);
if(diff!=0)
printf("error at %d and %d\n",C[i][j],*(h_C + N*i +j));
}
}
//IF even one element of h_C and C differ, report an error.
//Otherwise, there is no error.
//If your program is correct, no error should occur.
free(h_C);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
|
// Trilinear interpolation of a 3-D gridded field at nPoints query points.
// One thread per query point; a 2-D grid of blocks is flattened into idx.
// Points outside the grid's bounding box produce 0.
// NOTE(review): the offset arithmetic below (sliceDim = xSize*ySize,
// yOff = ySize*ibx) implies vInput is laid out as [z][x][y] with y the
// fastest-varying axis — confirm against the caller's packing.
__global__ void interp3_cuda(
float * vOutput,
int nPoints,
int xSize,
int ySize,
int zSize,
float * gridX,
float * gridY,
float * gridZ,
float * vInput,
float * xInterp,
float * yInterp,
float * zInterp)
{
int idx = blockDim.x * (gridDim.x * blockIdx.y + blockIdx.x) + threadIdx.x;
if (idx >= nPoints)
{
return;
}
float x = xInterp[idx];
float y = yInterp[idx];
float z = zInterp[idx];
// Out-of-domain queries yield zero.
if (x < gridX[0] || x > gridX[xSize-1] ||
y < gridY[0] || y > gridY[ySize-1] ||
z < gridZ[0] || z > gridZ[zSize-1])
{
vOutput[idx] = 0.0f;
return;
}
float x0, y0, z0, x1, y1, z1;
int ibx, itx, iby, ity, ibz, itz, im;
// Binary search for the bracketing interval [gridX[ibx], gridX[itx]]
// (grid arrays are assumed sorted ascending — TODO confirm).
ibx = 0;
itx = xSize - 1;
while (ibx < (itx-1))
{
im = ((ibx + itx) >> 1);
if (x <= gridX[im])
{
itx = im;
}
else
{
ibx = im;
}
}
x0 = gridX[ibx];
x1 = gridX[itx];
// Same bracketing search along y.
iby = 0;
ity = ySize - 1;
while (iby < (ity-1))
{
im = ((iby + ity) >> 1);
if (y <= gridY[im])
{
ity = im;
}
else
{
iby = im;
}
}
y0 = gridY[iby];
y1 = gridY[ity];
// Same bracketing search along z.
ibz = 0;
itz = zSize - 1;
while (ibz < (itz-1))
{
im = ((ibz + itz) >> 1);
if (z <= gridZ[im])
{
itz = im;
}
else
{
ibz = im;
}
}
z0 = gridZ[ibz];
z1 = gridZ[itz];
// Flat offsets of the 8 surrounding cell corners.
int sliceDim = xSize * ySize;
int zOff0 = sliceDim * ibz;
int zOff1 = zOff0 + sliceDim;
int yOff0 = ySize * ibx;
int yOff1 = yOff0 + ySize;
// Normalized distances inside the cell along each axis.
float ax0 = (x - x0) / (x1 - x0);
float ay0 = (y - y0) / (y1 - y0);
float az0 = (z - z0) / (z1 - z0);
float ax1 = 1.0f - ax0;
float ay1 = 1.0f - ay0;
// Corner samples: digit order in the names is (z, x, y).
float v000 = vInput[zOff0 + yOff0 + iby];
float v001 = vInput[zOff0 + yOff0 + ity];
float v010 = vInput[zOff0 + yOff1 + iby];
float v011 = vInput[zOff0 + yOff1 + ity];
float v100 = vInput[zOff1 + yOff0 + iby];
float v101 = vInput[zOff1 + yOff0 + ity];
float v110 = vInput[zOff1 + yOff1 + iby];
float v111 = vInput[zOff1 + yOff1 + ity];
// Blend along y, then x, then z.
float v00 = v000 * ay1 + v001 * ay0;
float v01 = v010 * ay1 + v011 * ay0;
float v10 = v100 * ay1 + v101 * ay0;
float v11 = v110 * ay1 + v111 * ay0;
float v0 = v00 * ax1 + v01 * ax0;
float v1 = v10 * ax1 + v11 * ax0;
vOutput[idx] = v0 * (1.0f - az0) + v1 * az0;
}
3,511 | #include "includes.h"
// BT.601 luma grayscale conversion, one thread per pixel.
// imageInput: interleaved RGB bytes (3 per pixel, channel offsets RED/GREEN/
// BLUE from includes.h); imageOutput: one gray byte per pixel.
__global__ void PictureKernell(unsigned char *imageInput, int width, int height, unsigned char *imageOutput){
int row = blockIdx.y*blockDim.y+threadIdx.y;
int col = blockIdx.x*blockDim.x+threadIdx.x;
if((row < height) && (col < width)){
int idx = row*width+col;
// FIX: float literals (0.299f ...) — the original double literals
// promoted every pixel to double arithmetic for no precision benefit.
imageOutput[idx] = (unsigned char)(imageInput[idx*3+RED]*0.299f
+ imageInput[idx*3+GREEN]*0.587f
+ imageInput[idx*3+BLUE]*0.114f);
}
}
3,512 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
// Demo kernel: adds the two scalar arguments on the device and reports the
// result via device-side printf (output is flushed at the next host sync).
__global__ void sum (int a, int b)
{
printf ("sum of a + b = %d\n", a + b);
}
// Reads two ints from stdin, launches the `sum` kernel, and waits for a key.
int main ()
{
int a, b;
std::cout << "Enter num a:";
std::cin >> a;
std::cout << "Enter num b:";
std::cin >> b;
sum <<<1, 1>>>(a, b);
// BUG FIX: without a synchronize the device printf buffer may never be
// flushed before the process exits, so the sum would not appear.
cudaDeviceSynchronize ();
getchar ();
return 0;
}
3,513 | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include <iostream>
#include <stdio.h>
#define CHECK(ans) {gpuAssert((ans),__FILE__,__LINE__);}
/* Report a failed CUDA API call with its source location and, unless
   `abort` is false, terminate the process with the error code. */
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if(code == cudaSuccess)
return;
fprintf(stderr,"GPUassert: %s %s %d\n",cudaGetErrorString(code),file, line);
if(abort)
exit(code);
}
using namespace std;
#define SIZE (100*1024*1024)
// 256-bin byte histogram with per-block privatization: each block
// accumulates into a shared-memory copy, then merges it into the global
// histogram with one atomic per bin.
// PRECONDITION: must be launched with exactly 256 threads per block —
// threadIdx.x doubles as the bin index for init and merge.
__global__ void histo_kernel( unsigned char *buffer,
long size,
unsigned int *histo ) {
__shared__ unsigned int temp[256];
temp[threadIdx.x] = 0;
__syncthreads();
// Grid-stride walk over the input bytes.
int i = threadIdx.x + blockIdx.x * blockDim.x;
int offset = blockDim.x * gridDim.x;
while (i < size) {
atomicAdd( &temp[buffer[i]], 1 );
i += offset;
}
__syncthreads();
// Merge this block's private histogram into the global one.
atomicAdd( &(histo[threadIdx.x]), temp[threadIdx.x] );
}
// Build a 256-bin byte histogram of `file` on the GPU, streaming the data
// in 32 equal chunks alternated across two CUDA streams.
//   freq    - out: 256 bin counts
//   memSize - number of bytes to read from the file (assumed multiple of 32)
//   source  - caller-provided host buffer of at least memSize bytes
//             (should be pinned via cudaHostAlloc for the async copies to
//             actually overlap — TODO confirm at the call site)
int runHisto(char* file, unsigned int* freq, unsigned int memSize, unsigned int *source) {
FILE *f = fopen(file,"rb");
if (!f) {perror(file); exit(1);}
fseek(f,0,SEEK_SET);
size_t result = fread(source,1,memSize,f);
if(result != memSize) fputs("Cannot read input file", stderr);
fclose(f);
unsigned char *buffer = (unsigned char*)source;
cudaDeviceProp prop;
CHECK( cudaGetDeviceProperties( &prop, 0 ) );
int blocks = prop.multiProcessorCount;
if(!prop.deviceOverlap)
{
cout << "No overlaps, so no speedup from streams" << endl;
return 0;
}
// allocate memory on the GPU for the file's data
int partSize = memSize/32;
unsigned char *dev_buffer0;
unsigned char *dev_buffer1;
unsigned int *dev_histo;
CHECK( cudaMalloc( (void**)&dev_buffer0, partSize ) );
CHECK( cudaMalloc( (void**)&dev_buffer1, partSize ) );
CHECK( cudaMalloc( (void**)&dev_histo, 256 * sizeof( int ) ) );
CHECK( cudaMemset( dev_histo, 0, 256 * sizeof( int ) ) );
cudaStream_t stream0, stream1;
CHECK(cudaStreamCreate(&stream0));
CHECK(cudaStreamCreate(&stream1));
cudaEvent_t start, stop;
CHECK( cudaEventCreate( &start ) );
CHECK( cudaEventCreate( &stop ) );
CHECK( cudaEventRecord( start, 0 ) );
// BUG FIX: the original stepped `i` in 4-byte-word units but applied it to
// a byte pointer (buffer+i), so only the first quarter of the file was
// histogrammed, with overlapping chunks. Step in bytes instead so the 32
// chunks tile the buffer exactly once.
for(unsigned int off = 0; off + 2u*partSize <= memSize; off += 2u*partSize)
{
CHECK(cudaMemcpyAsync(dev_buffer0, buffer+off, partSize, cudaMemcpyHostToDevice,stream0));
CHECK(cudaMemcpyAsync(dev_buffer1, buffer+off+partSize, partSize, cudaMemcpyHostToDevice,stream1));
// kernel launch - 2x the number of mps gave best timing
histo_kernel<<<blocks*2,256,0,stream0>>>( dev_buffer0, partSize, dev_histo );
histo_kernel<<<blocks*2,256,0,stream1>>>( dev_buffer1, partSize, dev_histo );
}
CHECK(cudaStreamSynchronize(stream0));
CHECK(cudaStreamSynchronize(stream1));
CHECK( cudaMemcpy( freq, dev_histo, 256 * sizeof( int ), cudaMemcpyDeviceToHost ) );
CHECK( cudaEventRecord( stop, 0 ) );
CHECK( cudaEventSynchronize( stop ) );
float elapsedTime;
CHECK( cudaEventElapsedTime( &elapsedTime, start, stop ) );
printf( "Time to generate: %3.1f ms\n", elapsedTime );
// Release everything the original version leaked (events and streams).
CHECK( cudaEventDestroy( start ) );
CHECK( cudaEventDestroy( stop ) );
CHECK( cudaStreamDestroy( stream0 ) );
CHECK( cudaStreamDestroy( stream1 ) );
CHECK( cudaFree( dev_histo ) );
CHECK( cudaFree( dev_buffer0 ) );
CHECK( cudaFree( dev_buffer1 ) );
return 0;
}
|
3,514 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <cstring>
#include <time.h>
// Debug kernel: each thread prints its local index, global index, and the
// element of `input` it owns. Assumes the launch covers exactly the length
// of `input` (no bounds guard) — caller must size grid*block accordingly.
__global__ void mem_trd_test(int *input)
{
int gid = blockIdx.x * blockDim.x + threadIdx.x;
printf("threadIdx.x : %d, gid : %d - data : %d \n",threadIdx.x , gid, input[gid]);
}
// Fills a 128-int host array with random bytes, copies it to the device,
// and has a 2x64-thread launch print one element per thread.
int main()
{
const int count = 128;
const int nbytes = count * sizeof(int);
int *host_buf = (int*)malloc(nbytes);
time_t seed;
srand((unsigned)time(&seed));
int k = 0;
while (k < count)
{
host_buf[k] = (int)(rand() & 0xff);   // keep values in [0, 255]
++k;
}
int *dev_buf;
cudaMalloc((void**)&dev_buf, nbytes);
cudaMemcpy(dev_buf, host_buf, nbytes, cudaMemcpyHostToDevice);
dim3 block(64);
dim3 grid(2);   // 2 * 64 threads == count elements
mem_trd_test<<< grid , block >>>(dev_buf);
cudaDeviceSynchronize();   // wait for all device printf output
free(host_buf);
cudaFree(dev_buf);
cudaDeviceReset();
return 0;
}
3,515 | #include "includes.h"
__global__ void sgemvn_kernel1_fermi(int n, int m, int n1, float alpha, float* A, int lda, float *x, float *y)
{
int ind = blockIdx.x*num_threads + threadIdx.x;
A += ind;
float res = 0.f;
for(int i=0; i<n1; i += sgemv_bs ){
#pragma unroll
for(int j=0; j < sgemv_bs ; j++){
res += A[0] * x[j];
A += lda;
}
x += sgemv_bs;
}
#if 0
if (m>n1){
for(int j=0; j<(m-n1); j++){
res += A[0] * x[j];
A += lda;
}
}
#endif
if (ind<n)
y[ind] = alpha * res;
} |
3,516 | #include <stdio.h>
#include <cuda.h>
#define HANDLE_ERROR(apiFuncCall) \
do { \
cudaError_t _status = apiFuncCall; \
if (_status != cudaSuccess) { \
fprintf(stderr, "%s:%d: error: function %s failed with error %d.\n", \
__FILE__, __LINE__, #apiFuncCall, _status); \
exit(-1); \
} \
} while (0)
#define HANDLE_NULL(x)
#define TOTAL 1024
// Times TOTAL host<->device copies of `size` ints using PAGEABLE host
// memory (plain malloc). `up` selects direction: true = host-to-device.
// Returns elapsed GPU time in milliseconds.
// (The host buffer is intentionally left uninitialized — only transfer
// bandwidth is being measured.)
float cuda_malloc_test( int size, bool up ) {
cudaEvent_t start, stop;
int *a, *dev_a;
float elapsedTime = 0.0f;
HANDLE_ERROR( cudaEventCreate( &start ) );
HANDLE_ERROR( cudaEventCreate( &stop ) );
a = (int*)malloc( size * sizeof( *a ) );
if (!a) {
exit(-1);
}
HANDLE_ERROR( cudaMalloc( (void**)&dev_a,
size * sizeof( *dev_a ) ) );
HANDLE_ERROR( cudaEventRecord( start, 0 ) );
for (int i=0; i<TOTAL; i++) {
if (up)
HANDLE_ERROR( cudaMemcpy( dev_a, a, size * sizeof( *dev_a ),cudaMemcpyHostToDevice ) );
else
HANDLE_ERROR( cudaMemcpy( a, dev_a,size * sizeof( *dev_a ),cudaMemcpyDeviceToHost ) );
}
HANDLE_ERROR( cudaEventRecord( stop, 0 ) );
HANDLE_ERROR( cudaEventSynchronize( stop ) );
HANDLE_ERROR( cudaEventElapsedTime( &elapsedTime,start, stop ) );
free( a );
HANDLE_ERROR( cudaFree( dev_a ) );
HANDLE_ERROR( cudaEventDestroy( start ) );
HANDLE_ERROR( cudaEventDestroy( stop ) );
return elapsedTime;
}
// Same measurement as cuda_malloc_test but with PINNED host memory
// (cudaHostAlloc), which typically roughly doubles transfer bandwidth.
// Returns elapsed GPU time in milliseconds.
float cuda_host_alloc_test( int size, bool up ) {
cudaEvent_t start, stop;int *a, *dev_a;
float elapsedTime;
HANDLE_ERROR( cudaEventCreate( &start ) );
HANDLE_ERROR( cudaEventCreate( &stop ) );
HANDLE_ERROR( cudaHostAlloc( (void**)&a, size * sizeof( *a ),cudaHostAllocDefault ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_a,size * sizeof( *dev_a ) ) );
HANDLE_ERROR( cudaEventRecord( start, 0 ) );
for (int i=0; i<TOTAL; i++) {
if (up)
HANDLE_ERROR( cudaMemcpy( dev_a, a, size * sizeof( *a ),cudaMemcpyHostToDevice ) );
else
HANDLE_ERROR( cudaMemcpy( a, dev_a,size * sizeof( *a ),cudaMemcpyDeviceToHost ) );
}
HANDLE_ERROR( cudaEventRecord( stop, 0 ) );
HANDLE_ERROR( cudaEventSynchronize( stop ) );
HANDLE_ERROR( cudaEventElapsedTime( &elapsedTime,start, stop ) );
HANDLE_ERROR( cudaFreeHost( a ) );
HANDLE_ERROR( cudaFree( dev_a ) );
HANDLE_ERROR( cudaEventDestroy( start ) );
HANDLE_ERROR( cudaEventDestroy( stop ) );
return elapsedTime;
}
//#define SIZE (10*1024*1024)
//#define SIZE (1*1024)
//#define SIZE (4*1024)
//#define SIZE (16*1024)
//#define SIZE (64*1024)
//#define SIZE (1024*1024)
#define SIZE (4*1024*1024)
//#define SIZE (16*1024*1024)
// Benchmarks host<->device bandwidth with pinned vs pageable host memory,
// TOTAL copies of SIZE ints in each direction.
int main( void ) {
float elapsedTime;
float MB = (float)TOTAL*SIZE*sizeof(int)/1024/1024;   // total data moved, MB
printf( "Pinned:\n" );
elapsedTime = cuda_host_alloc_test( SIZE, true );
printf( "Up Time using cudaHostAlloc:%3.1f ms\n",elapsedTime );
// BUG FIX: the two "Up" measurements were mislabelled "during copy down".
printf( "\tTransfer %d Bytes; MB/s during copy up:%3.1f\n", SIZE*4, MB /(elapsedTime/1000) );
elapsedTime = cuda_host_alloc_test( SIZE, false );
printf( "Down Time using cudaHostAlloc:%3.1f ms\n",elapsedTime );
printf( "\tTransfer %d Bytes; MB/s during copy down:%3.1f\n", SIZE*4, MB /(elapsedTime/1000) );
printf("\n-------------------------------\n\n");
printf( "Native:\n" );
elapsedTime = cuda_malloc_test( SIZE, true );
printf( "Up Time using cudaMalloc:%3.1f ms\n",elapsedTime );
printf( "\tTransfer %d Bytes; MB/s during copy up:%3.1f\n", SIZE*4, MB /(elapsedTime/1000) );
elapsedTime = cuda_malloc_test( SIZE, false );
printf( "Down Time using cudaMalloc:%3.1f ms\n",elapsedTime );
printf( "\tTransfer %d Bytes; MB/s during copy down:%3.1f\n", SIZE*4, MB /(elapsedTime/1000) );
}
3,517 | #include "includes.h"
extern "C" {
}
// Writes `elem` into every slot of y below `len`; one thread per element.
__global__ void fill_u32(unsigned int *y, unsigned int elem, unsigned int len) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= len) {
return;   // thread beyond the array tail: nothing to do
}
y[idx] = elem;
}
3,518 | #include "includes.h"
// Grid-stride sweep: mark every entry whose count is non-zero as valid
// by writing VALID_POS_FLAG into the parallel pos_buff slot.
__global__ void set_valid_pos_flag(int32_t* pos_buff, const int32_t* count_buff, const int32_t entry_count) {
const int32_t stride = blockDim.x * gridDim.x;
int32_t idx = threadIdx.x + blockDim.x * blockIdx.x;
while (idx < entry_count) {
if (count_buff[idx]) {
pos_buff[idx] = VALID_POS_FLAG;
}
idx += stride;
}
}
3,519 | /* MD5
Original algorithm by RSA Data Security, Inc
Adapted for NVIDIA CUDA by Matthew McClaskey
Copyright (C) 1991-2, RSA Data Security, Inc. Created 1991. All
rights reserved.
License to copy and use this software is granted provided that it
is identified as the "RSA Data Security, Inc. MD5 Message-Digest
Algorithm" in all material mentioning or referencing this software
or this function.
License is also granted to make and use derivative works provided
that such works are identified as "derived from the RSA Data
Security, Inc. MD5 Message-Digest Algorithm" in all material
mentioning or referencing the derived work.
RSA Data Security, Inc. makes no representations concerning either
the merchantability of this software or the suitability of this
software for any particular purpose. It is provided "as is"
without express or implied warranty of any kind.
These notices must be retained in any copies of any part of this
documentation and/or software.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <math.h>
const unsigned int S11 = 7;
const unsigned int S12 = 12;
const unsigned int S13 = 17;
const unsigned int S14 = 22;
const unsigned int S21 = 5;
const unsigned int S22 = 9;
const unsigned int S23 = 14;
const unsigned int S24 = 20;
const unsigned int S31 = 4;
const unsigned int S32 = 11;
const unsigned int S33 = 16;
const unsigned int S34 = 23;
const unsigned int S41 = 6;
const unsigned int S42 = 10;
const unsigned int S43 = 15;
const unsigned int S44 = 21;
#define TRUE 1
#define FALSE 0
__device__ const unsigned int charLen = 8;
__device__ const unsigned int pwdbitlen = 136; // number of bits in plain text
__device__ const unsigned char hexLookup[] = "0123456789abcdef";
/* F, G, H and I are basic MD5 functions */
/* The four MD5 auxiliary bit-mixing functions (RFC 1321, section 3.4):
   each selects between bits of y and z under control of x (or z for G/I). */
__device__ inline unsigned int F(unsigned int x, unsigned int y, unsigned int z) { return (((x) & (y)) | ((~x) & (z))); }
__device__ inline unsigned int G(unsigned int x, unsigned int y, unsigned int z) { return (((x) & (z)) | ((y) & (~z))); }
__device__ inline unsigned int H(unsigned int x, unsigned int y, unsigned int z) { return ((x) ^ (y) ^ (z)); }
__device__ inline unsigned int I(unsigned int x, unsigned int y, unsigned int z) { return ((y) ^ ((x) | (~z))); }
/* ROTATE_LEFT rotates x left n bits */
#define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32-(n))))
/* Rotation is separate from addition to prevent recomputation */
/* One MD5 round step per function (rounds 1-4): mix state word `a` with the
   message word `x`, the round constant `ac`, rotate left by `s`, add `b`.
   `a` is updated in place via the reference parameter. */
__device__ inline void FF(unsigned int &a, unsigned int b, unsigned int c, unsigned int d, unsigned int x, unsigned int s, unsigned int ac)
{
a = ROTATE_LEFT(a + F(b, c, d) + x + ac, s) + b;
}
__device__ inline void GG(unsigned int &a, unsigned int b, unsigned int c, unsigned int d, unsigned int x, unsigned int s, unsigned int ac)
{
a = ROTATE_LEFT(a + G(b, c, d) + x + ac, s) + b;
}
__device__ inline void HH(unsigned int &a, unsigned int b, unsigned int c, unsigned int d, unsigned int x, unsigned int s, unsigned int ac)
{
a = ROTATE_LEFT(a + H(b ,c ,d) + x + ac, s) + b;
}
__device__ inline void II(unsigned int &a, unsigned int b, unsigned int c, unsigned int d, unsigned int x, unsigned int s, unsigned int ac)
{
a = ROTATE_LEFT(a + I(b, c, d) + x + ac, s) + b;
}
// Serialize four 32-bit little-endian words into 16 raw bytes
// (lowest byte of each word first).
__device__ void setSerial(char output[], unsigned int input[]) {
for (unsigned int w = 0, base = 0; base < 16; base += 4, w++) {
for (unsigned int b = 0; b < 4; b++) {
output[base + b] = (unsigned char) ((input[w] >> 8*b) & 0xff);
}
}
}
// Render four 32-bit words as 32 lowercase hex characters.
// Note the shift amounts rely on `>>` binding looser than `+`:
// `input[i] >> 4*k+0` shifts by (4*k) and `>> 4*k+4` by (4*k+4), so each
// byte's high nibble is emitted before its low nibble.
__device__ void setHash(char output[], unsigned int input[]) {
for (unsigned int i = 0, j = 0; j < 32; j+=8, i++) {
for (unsigned int k = 0; k < 8; k+=2) {
output[j + k + 1] = hexLookup[((input[i] >> 4*k+0) & 0xf)];
output[j + k + 0] = hexLookup[((input[i] >> 4*k+4) & 0xf)];
}
}
}
// Brute-force search for a 16-char serial whose MD5 matches the target SSID.
// The MD5 is fully unrolled with the fixed message schedule for this serial
// layout: only words 0-4 carry data (word 4 holds the 0x80 delimiter and the
// trailing '\n'), word 14 is the bit length (pwdbitlen), all others are 0.
// Each (block, thread) pair fixes 5 hex chars of the serial; the three
// nested loops enumerate the remaining 3 chars over '0'-'9','a'-'f'.
// On a match the serial and hex digest are written out and *found is set.
// NOTE: concurrent matches would race on the output buffers — acceptable
// here since any single match is a valid answer.
__global__ void findMatch(unsigned int* ssid, unsigned int* found, char* serialResult, char* hashResult) {
unsigned int a, b, c, d;
unsigned int serial[5];
for (int i = 0; i < sizeof(serial)/sizeof(serial[0]); i++) {
serial[i] = 0;
}
/*
Set up serial number in format: "00000000xyzrsijk" + "\n"
(md5 uses little endian => "00000000rzyxkjis")
Where chars...:
x, y & z are taken from the blockId.
r & s are taken from the threadId.
i, j & k are produced in the three nested loops underneath.
The serial is stored in a int array:
serial[0] == '0000'
serial[1] == '0000'
serial[2] == 'xyzr'
serial[3] == 'sijk'
serial[4] == ' d\n' // d = 1 bit delimiter used by the md5 algorithm
*/
for (int i = 0; i < 4; i++) {
serial[0] += hexLookup[0] << charLen*i;
}
serial[1] = serial[0];
serial[2] += hexLookup[(blockIdx.x & 0xf00) >> 8] << charLen*3; // serial[2] = 'x   '
serial[2] += hexLookup[(blockIdx.x & 0x0f0) >> 4] << charLen*2; // serial[2] = 'xy  '
serial[2] += hexLookup[(blockIdx.x & 0x00f)] << charLen*1; // serial[2] = 'xyz '
serial[2] += hexLookup[(threadIdx.x & 0xf0) >> 4] << charLen*0; // serial[2] = 'xyzr'
serial[3] += hexLookup[(threadIdx.x & 0x0f)] << charLen*3; // serial[3] = 't   '
serial[4] += 10 << charLen*0; // serial[4] = '  \n'
serial[4] += 128 << charLen*1; // serial[4] = ' d\n'
// ASCII 0(48) -> 9(57) & a(97) -> f(102)
for (unsigned int i = 48; i <= 102; i++) {
serial[3] &= ~(0xff << charLen*2); // erase last loops value
serial[3] += (i << charLen*2); // serial[3] = 'ti  '
for (unsigned int j = 48; j <= 102; j++) {
serial[3] &= ~(0xff << charLen*1); // erase last loops value
serial[3] += (j << charLen*1); // serial[3] = 'tij '
for (unsigned int k = 48; k <= 102; k++) {
serial[3] &= ~(0xff << charLen*0); // erase last loops value
serial[3] += (k << charLen*0); // serial[3] = 'tijk'
//load magic numbers (MD5 initial state, RFC 1321)
a = 0x67452301;
b = 0xefcdab89;
c = 0x98badcfe;
d = 0x10325476;
// Round 1
FF ( a, b, c, d, serial[0], S11, 0xd76aa478); // 1
FF ( d, a, b, c, serial[1], S12, 0xe8c7b756); // 2
FF ( c, d, a, b, serial[2], S13, 0x242070db); // 3
FF ( b, c, d, a, serial[3], S14, 0xc1bdceee); // 4
FF ( a, b, c, d, serial[4], S11, 0xf57c0faf); // 5
FF ( d, a, b, c, 0, S12, 0x4787c62a); // 6
FF ( c, d, a, b, 0, S13, 0xa8304613); // 7
FF ( b, c, d, a, 0, S14, 0xfd469501); // 8
FF ( a, b, c, d, 0, S11, 0x698098d8); // 9
FF ( d, a, b, c, 0, S12, 0x8b44f7af); // 10
FF ( c, d, a, b, 0, S13, 0xffff5bb1); // 11
FF ( b, c, d, a, 0, S14, 0x895cd7be); // 12
FF ( a, b, c, d, 0, S11, 0x6b901122); // 13
FF ( d, a, b, c, 0, S12, 0xfd987193); // 14
FF ( c, d, a, b, pwdbitlen, S13, 0xa679438e); // 15
FF ( b, c, d, a, 0, S14, 0x49b40821); //
// Round 2
GG (a, b, c, d, serial[1], S21, 0xf61e2562); // 17
GG (d, a, b, c, 0, S22, 0xc040b340); // 18
GG (c, d, a, b, 0, S23, 0x265e5a51); // 19
GG (b, c, d, a, serial[0], S24, 0xe9b6c7aa); // 20
GG (a, b, c, d, 0, S21, 0xd62f105d); // 21
GG (d, a, b, c, 0, S22, 0x2441453); // 22
GG (c, d, a, b, 0, S23, 0xd8a1e681); // 23
GG (b, c, d, a, serial[4], S24, 0xe7d3fbc8); // 24
GG (a, b, c, d, 0, S21, 0x21e1cde6); // 25
GG (d, a, b, c, pwdbitlen, S22, 0xc33707d6); // 26
GG (c, d, a, b, serial[3], S23, 0xf4d50d87); // 27
GG (b, c, d, a, 0, S24, 0x455a14ed); // 28
GG (a, b, c, d, 0, S21, 0xa9e3e905); // 29
GG (d, a, b, c, serial[2], S22, 0xfcefa3f8); // 30
GG (c, d, a, b, 0, S23, 0x676f02d9); // 31
GG (b, c, d, a, 0, S24, 0x8d2a4c8a); // 32
// Round 3
HH (a, b, c, d, 0, S31, 0xfffa3942); // 33
HH (d, a, b, c, 0, S32, 0x8771f681); // 34
HH (c, d, a, b, 0, S33, 0x6d9d6122); // 35
HH (b, c, d, a, pwdbitlen, S34, 0xfde5380c); // 36
HH (a, b, c, d, serial[1], S31, 0xa4beea44); // 37
HH (d, a, b, c, serial[4], S32, 0x4bdecfa9); // 38
HH (c, d, a, b, 0, S33, 0xf6bb4b60); // 39
HH (b, c, d, a, 0, S34, 0xbebfbc70); // 40
HH (a, b, c, d, 0, S31, 0x289b7ec6); // 41
HH (d, a, b, c, serial[0], S32, 0xeaa127fa); // 42
HH (c, d, a, b, serial[3], S33, 0xd4ef3085); // 43
HH (b, c, d, a, 0, S34, 0x4881d05); // 44
HH (a, b, c, d, 0, S31, 0xd9d4d039); // 45
HH (d, a, b, c, 0, S32, 0xe6db99e5); // 46
HH (c, d, a, b, 0, S33, 0x1fa27cf8); // 47
HH (b, c, d, a, serial[2], S34, 0xc4ac5665); // 48
// Round 4
II (a, b, c, d, serial[0], S41, 0xf4292244); // 49
II (d, a, b, c, 0, S42, 0x432aff97); // 50
II (c, d, a, b, pwdbitlen, S43, 0xab9423a7); // 51
II (b, c, d, a, 0, S44, 0xfc93a039); // 52
II (a, b, c, d, 0, S41, 0x655b59c3); // 53
II (d, a, b, c, serial[3], S42, 0x8f0ccc92); // 54
II (c, d, a, b, 0, S43, 0xffeff47d); // 55
II (b, c, d, a, serial[1], S44, 0x85845dd1); // 56
II (a, b, c, d, 0, S41, 0x6fa87e4f); // 57
II (d, a, b, c, 0, S42, 0xfe2ce6e0); // 58
II (c, d, a, b, 0, S43, 0xa3014314); // 59
II (b, c, d, a, 0, S44, 0x4e0811a1); // 60
II (a, b, c, d, serial[4], S41, 0xf7537e82); // 61
II (d, a, b, c, 0, S42, 0xbd3af235); // 62
II (c, d, a, b, serial[2], S43, 0x2ad7d2bb); // 63
II (b, c, d, a, 0, S44, 0xeb86d391); // 64
// Fold in the initial state to produce the digest words.
a += 0x67452301;
b += 0xefcdab89;
c += 0x98badcfe;
d += 0x10325476;
// Compare the top half of c and all of d against the target SSID words.
if (((c >> charLen*2) & 0xffff) == ((ssid[0] >> charLen*2) & 0xffff) && d == ssid[1]) {
unsigned int hash[] = {a, b, c, d};
*found = TRUE;
setSerial(serialResult, serial);
setHash(hashResult, hash);
return;
}
// Jump the loop counters from '9' (57) to just before 'a' (97).
if (k == 57)
k = 96; // values will be incremented to 97 at the end of their loops
}
if (j == 57)
j = 96;
}
if (i == 57)
i = 96;
}
}
/* Print invocation help and terminate (exit status 0). */
void usage(char *argv[]) {
const char *fmt = "%-7s %s %s\n";
printf(fmt, "Usage:", argv[0], "<12 hex SSID>");
exit(0);
}
// Converts the 12 hex char ssid input to arrays of integers in
// little endian which is used by the md5 algorithm.
// Convert the 12-hex-char SSID into two 32-bit words in MD5's little-endian
// byte order. The value is left-padded with "0000" to 16 chars (aligning to
// a multiple of 8); the pad is masked away later when comparing.
void ssidToInts(unsigned int result[], char input[]) {
char padded[17];
snprintf(padded, sizeof(padded), "%s%s", "0000", input);
char word[9];
word[8] = '\0';
for (int base = 0; base < 16; base += 8) {
// Reverse the four byte-pairs of this 8-char group (big -> little endian).
for (int p = 0; p < 4; ++p) {
word[2*p] = padded[base + 6 - 2*p];
word[2*p + 1] = padded[base + 7 - 2*p];
}
result[base / 8] = strtoul(word, NULL, 16);
}
}
// Entry point: validates the 12-hex-char SSID argument, sets up unified
// (managed) buffers shared between host and device, launches the search
// across 4096 blocks x 256 threads, and reports the result.
int main(int argc, char *argv[]) {
if (argc != 2 || strlen(argv[1]) != 12) {
usage(argv);
}
// declare
unsigned int *found, *ssid;
char *serialResult, *hashResult;
// +1 for the NUL terminators written below.
const int SERIAL_LENGTH = 16 + 1, HASH_LENGTH = 32 + 1;
// malloc (managed memory: visible to both host and device)
cudaMallocManaged((void**)&found, sizeof(int));
cudaMallocManaged((void**)&ssid, 2 * sizeof(int));
cudaMallocManaged((void**)&serialResult, SERIAL_LENGTH * sizeof(char));
cudaMallocManaged((void**)&hashResult, HASH_LENGTH * sizeof(char));
// init
*found = FALSE;
ssidToInts(ssid, argv[1]);
serialResult[SERIAL_LENGTH - 1] = 0;
hashResult[HASH_LENGTH - 1] = 0;
findMatch<<<4096, 256>>>(ssid, found, serialResult, hashResult);
// Managed buffers must not be read until the kernel has finished.
cudaDeviceSynchronize();
if (*found) {
// The WPA password is the first 12 hex chars of the matching digest.
char password[13];
strncpy(password, hashResult, 12);
password[12] = 0;
printf("%-10s %s\n", "Serial:", serialResult);
printf("%-10s %s\n", "Hash:", hashResult);
printf("%-10s AutoPi-%s\n", "SSID:", argv[1]);
printf("%-10s %s\n", "Password:", password);
} else {
printf("No match found for SSID %s\n", argv[1]);
}
cudaFree(found);
cudaFree(ssid);
cudaFree(serialResult);
cudaFree(hashResult);
return 0;
}
|
3,520 | #include <stdio.h>
#define NX 200
#define NY 100
// 2-D SAXPY: y = scalar*x + y over an NX x NY array; the block x-dimension
// walks NX and the y-dimension walks NY, one thread per element.
__global__ void saxpy2D(float scalar, float * x, float * y)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if ( i >= NX || j >= NY )
return;   // thread falls outside the data
int k = i*NY + j;
y[k] = scalar * x[k] + y[k];
}
// Allocates two NX*NY managed arrays, runs saxpy2D with a valid 2-D launch
// configuration, and verifies every element equals 4 (2*1 + 2).
int main()
{
float *x, *y;
float maxError = 0;
int size = NX * NY * sizeof (float); // The total number of bytes per vector
cudaError_t ierrAsync;
cudaError_t ierrSync;
// Allocate memory
cudaMallocManaged(&x, size);
cudaMallocManaged(&y, size);
// Initialize memory
for( int i = 0; i < NX*NY; ++i )
{
x[i] = 1.0f;
y[i] = 2.0f;
}
// Get device properties
cudaDeviceProp prop;
cudaError_t ierr;
ierr = cudaGetDeviceProperties(&prop, 0);
if (ierr != cudaSuccess) { printf("Device property error: %s\n", cudaGetErrorString(ierr)); }
printf("========== DEVICE PROPERTIES ==========\n");
printf("Device number: %d\n", 0);
printf(" Device name: %s\n", prop.name);
printf(" Compute capability: %d.%d\n", prop.major, prop.minor);
printf(" Max threads per block: %d\n", prop.maxThreadsPerBlock);
printf(" Max threads in X-dimension of block: %d\n", prop.maxThreadsDim[0]);
printf(" Max threads in Y-dimension of block: %d\n", prop.maxThreadsDim[1]);
printf(" Max threads in Z-dimension of block: %d\n\n", prop.maxThreadsDim[2]);
// BUG FIX: the original used (1024,16,1) = 16384 threads per block, which
// always exceeds maxThreadsPerBlock, so the sanity check bailed out and the
// kernel never ran. 32x16 = 512 threads is valid on every supported device.
dim3 threads_per_block (32,16,1);
dim3 number_of_blocks ((NX/threads_per_block.x)+1,
(NY/threads_per_block.y)+1,
1);
// Check total number of threads
if (threads_per_block.x * threads_per_block.y * threads_per_block.z > prop.maxThreadsPerBlock) {
printf("Max number of threads exceeded!\n"); goto cleanup;
}
// BUG FIX: the original used &&, which only fired when BOTH dimensions were
// oversized; exceeding either dimension alone is already fatal.
if (prop.maxThreadsDim[0] < threads_per_block.x || prop.maxThreadsDim[1] < threads_per_block.y) { printf("Block x- or y- sizes exceeded device limits!\n"); goto cleanup; }
saxpy2D <<< number_of_blocks, threads_per_block >>> ( 2.0f, x, y );
ierrSync = cudaGetLastError();
ierrAsync = cudaDeviceSynchronize(); // Wait for the GPU to finish
if (ierrSync != cudaSuccess) { printf("Sync error: %s\n", cudaGetErrorString(ierrSync)); }
if (ierrAsync != cudaSuccess) { printf("Async error: %s\n", cudaGetErrorString(ierrAsync)); }
// Print out our Max Error
for( int i = 0; i < NX*NY; ++i )
if (abs(4-y[i]) > maxError) { maxError = abs(4-y[i]); }
printf("Max Error: %.5f", maxError);
cleanup:
// Free all our allocated memory
cudaFree( x ); cudaFree( y );
}
3,521 | #include <stdio.h>
#include <cuda.h>
#include <math.h>
// One pairwise-reduction step: every thread whose global id is a multiple
// of `indice` adds in the partner element `passo` positions ahead.
__global__ void somaMatrizGPU(int *d_vetA, int indice, int passo){
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if(tid % indice != 0)
return;   // not a reducer at this level
d_vetA[tid] += d_vetA[tid+passo];
}
// Reduces a 16-element array to its sum in log2(16) = 4 kernel passes,
// printing the whole array after every pass (final sum lands in slot 0).
int main(){
int h_Size = 16;
int h_vetA[16]={1,2,3,4,5,6,7,8,9,10, 11, 12, 13, 14, 15, 16};
int *d_vetA;
int j, i = 0;
int passo, indice;
int block = h_Size;
cudaDeviceReset();
cudaMalloc((void**) &d_vetA, h_Size * sizeof(int));
cudaMemcpy(d_vetA, h_vetA, h_Size * sizeof(int), cudaMemcpyHostToDevice);
for(i = 0; i < 4; i++){
// Stride doubles each level: active threads are 2^(i+1) apart and pull
// from 2^i ahead.
indice = pow(2, i+1);
passo = pow(2, i);
somaMatrizGPU<<<8, 2>>>(d_vetA, indice, passo);
// Pull the partial sums back so the level's result can be shown.
cudaMemcpy(h_vetA, d_vetA, h_Size * sizeof(int), cudaMemcpyDeviceToHost);
for(j=0; j < h_Size; j++){
printf("%d, ", h_vetA[j]);
}
printf("\n");
}
cudaDeviceSynchronize();
cudaFree(d_vetA);
return 0;
}
|
3,522 | //pass
//--blockDim=256 --gridDim=2 -DWIDTH=2064 --no-inline
#include <cuda.h>
#include <stdio.h>
#define GRIDDIM 1
#define BLOCKDIM 2//256
#define WIDTH 2//2048
#define N WIDTH
/*
* This kernel demonstrates a blockwise strength-reduction loop.
* Each block is given a disjoint partition (of length WIDTH) of A.
* Then each thread writes multiple elements in the partition.
* It is not necessarily the case that WIDTH%blockDim.x == 0
*/
// Blockwise strength-reduction loop: block b owns A[b*WIDTH .. (b+1)*WIDTH)
// and its threads stride through that partition writing A[...] = i.
// The commented-out annotations below are GPUVerify loop invariants kept
// for reference (see the file's verification header).
__global__ void k(int *A) {
  // __assert(blockDim.x <= WIDTH);
//#ifdef BLOCK_DIVIDES_WIDTH
//  //__assert(__mod_pow2(WIDTH, blockDim.x) == 0);
//#endif
  for (int i=threadIdx.x; i<WIDTH; i+=blockDim.x) {
//#ifndef BLOCK_DIVIDES_WIDTH
//    // working set(1) using global invariants
//    /*A*/__global_invariant(__write_implies(A, (blockIdx.x*WIDTH) <= __write_offset_bytes(A)/sizeof(int))),
//    /*B*/__global_invariant(__write_implies(A, __write_offset_bytes(A)/sizeof(int) < (blockIdx.x+1)*WIDTH)),
//    /*C*/__invariant(threadIdx.x <= i),
//    /*D*/__invariant( i <= WIDTH+blockDim.x),
//    __invariant(i % blockDim.x == threadIdx.x),
//    __global_invariant(__write_implies(A, (((__write_offset_bytes(A)/sizeof(int)) % WIDTH) % blockDim.x) == threadIdx.x)),
//#else
//    // working set(2) iff WIDTH % blockDim.x == 0
//    /*A*/__invariant(__write_implies(A, (blockIdx.x*WIDTH) <= __write_offset_bytes(A)/sizeof(int))),
//    /*B*/__invariant(__write_implies(A, __write_offset_bytes(A)/sizeof(int) < (blockIdx.x+1)*WIDTH)),
//    /*C*/__invariant(threadIdx.x <= i),
//    /*D*/__invariant( i <= WIDTH+blockDim.x),
//    __invariant(__uniform_int((i-threadIdx.x))),
//    __invariant(__uniform_bool(__enabled())),
//#endif
    A[blockIdx.x*WIDTH+i] = i;
  }
//#ifdef FORCE_FAIL
//  __assert(false);
//#endif
}
|
3,523 | /*
Program name: MatrixTranspose.cu
Author name: Dr. Nileshchandra Pikle
Email: nilesh.pikle@gmail.com
Contact Number: 7276834418
Webpage: https://piklenileshchandra.wixsite.com/personal
Purpose: To perform Matrix Transpose using CUDA
Description: Matrix transpose program is considered to demonstrate 2D thread block and 2D grid.
Three versions of matrix transpose are created here
1. Matrix transpose using single thread (sequential)
2. Matrix transpose using N threads (1D thread block) where N is #rows in matrix
3. Matrix transpose using N x N threads (2D thread block) where N is #rows in matrix
*In first version a single thread on GPU responsible to perform entire transpose operation hence it
is a sequetial operation. This version performs worst than sequential CPU as GPU core is lightweight
*In second version each thread is responsible to take a row of matrix and store into new matrix in
column. Consider this as coarse grained.
*In third version each thread is responsible to take a single element of matrix and store into new
matrix at transposed position. Consider this as coarse grained.
Note: 3rd version is NOT optimized. However it outperforms first and second version. Shared memory
level optimizations can be performed to accelerate execution even further. To see how to
optimize second version refer link below
Link: https://devblogs.nvidia.com/efficient-matrix-transpose-cuda-cc/
*/
#include<stdio.h>
#include<stdio.h>
#include<math.h>
#include<time.h>
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if(error != cudaSuccess) \
{ \
printf("Error %s %d", __FILE__, __LINE__); \
printf("\n Code %d Reason %s \n",error, cudaGetErrorString(error)); \
exit(1); \
} \
}
// Version 1: the entire MAT_SIZE x MAT_SIZE transpose performed by a single
// GPU thread (sequential baseline; launch with <<<1,1>>>).
__global__ void transposeKernel_single_thread(int *d_input, int *d_output, int MAT_SIZE)
{
for(int r = 0; r < MAT_SIZE; r++)
{
for(int c = 0; c < MAT_SIZE; c++)
{
d_output[c * MAT_SIZE + r] = d_input[r * MAT_SIZE + c];
}
}
}
// Transpose with one thread per matrix row: thread `gid` reads column `gid`
// of the input and writes it out as row `gid` of the output.
// The guard handles grids with more threads than rows.
__global__ void transposeKernel_thread_per_row(int *d_input, int *d_output, int MAT_SIZE)
{
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
if(gid >= MAT_SIZE)
return;
for(int j = 0; j < MAT_SIZE; j++)
{
d_output[gid * MAT_SIZE + j] = d_input[j * MAT_SIZE + gid];
}
}
// Transpose with one thread per matrix element: thread (row, col) copies
// d_input[col][row] into d_output[row][col]. Guard handles partial tiles at
// the grid edge when MAT_SIZE is not a multiple of the block dimensions.
__global__ void transposeKernel_thread_per_element(int *d_input, int *d_output, int MAT_SIZE)
{
const int col = blockIdx.x * blockDim.x + threadIdx.x;
const int row = blockIdx.y * blockDim.y + threadIdx.y;
if(row >= MAT_SIZE || col >= MAT_SIZE)
return;
d_output[row * MAT_SIZE + col] = d_input[col * MAT_SIZE + row];
}
// Fill a MAT_SIZE x MAT_SIZE row-major matrix:
// flag == 0 -> every element becomes 0; otherwise each element gets its row
// index (row i is filled with the value i).
void initMatrix(int *matrix, int MAT_SIZE, int flag)
{
for(int i = 0; i < MAT_SIZE; i++)
{
const int fill = (flag == 0) ? 0 : i;
for(int j = 0; j < MAT_SIZE; j++)
{
matrix[i * MAT_SIZE + j] = fill;
}
}
}
// Compare two MAT_SIZE x MAT_SIZE matrices element-wise.
// Returns 1 when identical, 0 otherwise.
// Fix: return at the first mismatch instead of scanning the entire matrix
// after a difference has already been found (same result, no wasted work).
int checkResult(int *hg_output, int *h_output, int MAT_SIZE)
{
for(int i = 0; i < MAT_SIZE; i++)
{
for(int j = 0; j < MAT_SIZE; j++)
{
if(hg_output[i*MAT_SIZE+j] != h_output[i*MAT_SIZE+j])
{
return 0; // mismatch found - no need to keep scanning
}
}
}
return 1;
}
// Zero a MAT_SIZE x MAT_SIZE device matrix. Intended for a <<<1,1>>> launch:
// the single thread clears every element in row-major order (slow by design;
// used only to reset d_output between the transpose experiments).
__global__ void init_kernel(int *d_output, int MAT_SIZE)
{
for(int idx = 0; idx < MAT_SIZE * MAT_SIZE; idx++)
{
d_output[idx] = 0;
}
}
// Driver: transposes a 2048x2048 int matrix on the CPU (reference + timing),
// then runs three GPU transpose kernels (single thread, thread-per-row,
// thread-per-element), timing each with CUDA events, comparing against the
// CPU transpose, and printing the speedups.
// NOTE(review): the cudaEvent* calls and kernel launches are not wrapped in
// CHECK / followed by cudaGetLastError(), so a failed launch would silently
// produce ~0 ms timings and a mismatch report.
int main()
{
int i,j;
int MAT_SIZE = 2048; // Matrix size MAT_SIZE x MAT_SIZE
int *h_input = (int *)malloc(MAT_SIZE*MAT_SIZE*sizeof(int));
int *h_output = (int *)malloc(MAT_SIZE*MAT_SIZE*sizeof(int));
int *hg_output = (int *)malloc(MAT_SIZE*MAT_SIZE*sizeof(int));
if(!h_input)
{
printf("\n Error: Allocating memory to h_input!!!");
}
if(!h_output)
{
printf("\n Error: Allocating memory to h_output!!!");
}
if(!hg_output)
{
printf("\n Error: Allocating memory to h_output!!!");
}
/*
Data initialization for input matrix h_input and h_output matrix
*/
initMatrix(h_output,MAT_SIZE,0); // matrix initialization function 0 indicates initialize with 0
initMatrix(hg_output,MAT_SIZE,0); // matrix initialization function 0 indicates initialize with 0
initMatrix(h_input,MAT_SIZE,1); // flag 1 indicates initialize with row index (refer function)
/*
Matrix transpose code: h_output stores the transposed output of matrix of h_input
*/
clock_t t; // clock function to calculate execution time of sequential program
t = clock(); // record start time
for(i = 0; i < MAT_SIZE; i++)
{
for(j = 0; j < MAT_SIZE; j++)
{
h_output[i * MAT_SIZE + j] = h_input[j * MAT_SIZE + i];
//printf(" %d ", h_output[i * MAT_SIZE + j]);
}
//printf("\n");
}
t = clock() - t;
double CPUtime_taken = ((double)t)/CLOCKS_PER_SEC;
printf("\n Time required for sequential execution is %.3f ms\n", CPUtime_taken*1000);
/*
Parallel CUDA programs start here
*/
/* Allocate memory to d_input and d_output array on device */
int *d_input, *d_output;
CHECK(cudaMalloc((void **)&d_input, MAT_SIZE*MAT_SIZE*sizeof(int)));
CHECK(cudaMalloc((void **)&d_output, MAT_SIZE*MAT_SIZE*sizeof(int)));
/* Transfer data from host memory to device memory h_input to d_input and h_output to d_output array on device*/
CHECK(cudaMemcpy(d_input, h_input, MAT_SIZE*MAT_SIZE*sizeof(int), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_output, h_output, MAT_SIZE*MAT_SIZE*sizeof(int), cudaMemcpyHostToDevice));
/**********************************************************************************************/
/* 1. Transpose kernel launch using single thread
A thread block of single thread is configured to perform matrix transpose
This is exactly like sequential version of matrix transpose as only one
thread is responsible to perform entire matrix transpose operation
*/
/* Declaring Time variables to measure GPU time*/
float GPUelapsed = 0.0; // To store final kernel execution time
cudaEvent_t start, stop; // Variables to record start and stop of kernel
cudaEventCreate(&start); // Event create start
cudaEventCreate(&stop); // Event create stop
cudaEventRecord(start, 0); // Record time at start variables
/**/transposeKernel_single_thread<<<1,1>>>(d_input,d_output,MAT_SIZE); // kernel for matrix transpose
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&GPUelapsed, start, stop);
cudaEventDestroy(start); // Event destroy start
cudaEventDestroy(stop); // Event destroy stop
printf("\n 1. Parallel runtime Single thread %0.3f ms",GPUelapsed);
printf("\n Parallel Speedup Single thread = (CPU time)/(GPU time) = %0.3f\n", (CPUtime_taken*1000)/GPUelapsed);
CHECK(cudaMemcpy(hg_output, d_output, MAT_SIZE*MAT_SIZE*sizeof(int), cudaMemcpyDeviceToHost)); // copy results back on host
int check = checkResult(hg_output, h_output, MAT_SIZE); // This function checks two matrices are same or not
if(check == 0)printf("\n Something went wrong => Results are not matching for 'transposeKernel_single_thread' !!!");
initMatrix(hg_output,MAT_SIZE,0); // Initialize hg_output matrix to 0
init_kernel<<<1,1>>>(d_output, MAT_SIZE); // Initialize d_output matrix to 0 (single device thread - slow but simple)
/**********************************************************************************************/
/* 2. Transpose kernel launch using thread per row
In this configuration, # threads launched are equal to # rows of matrix i.e MAT_SIZE
Where each thread is responsible to perform transpose operation.
Each thread reads the matrix entries row major order from d_input matrix and stores in
d_output matrix in column major order.
# threads launched = # rows in matrix
numT = # threads per thread block
numB = # thread blocks in the grid
*/
int numT, numB;
numT = 128;
numB = ceil(MAT_SIZE/(float)numT);
/* Declaring Time variables to measure GPU time*/
float GPUelapsed2 = 0.0; // To store final kernel execution time
cudaEvent_t start2, stop2; // Variables to record start and stop of kernel
cudaEventCreate(&start2); // Event create start
cudaEventCreate(&stop2); // Event create stop
cudaEventRecord(start2, 0); // Record time at start variables
/**/ transposeKernel_thread_per_row<<<numB,numT>>>(d_input,d_output,MAT_SIZE); // kernel for matrix transpose
cudaEventRecord(stop2, 0);
cudaEventSynchronize(stop2);
cudaEventElapsedTime(&GPUelapsed2, start2, stop2);
cudaEventDestroy(start2); // Event destroy start
cudaEventDestroy(stop2); // Event destroy stop
printf("\n 2. Parallel runtime thread per row is %0.3f ms",GPUelapsed2);
printf("\n Parallel Speedup thread per row = (CPU time)/(GPU time) = %0.3f\n", (CPUtime_taken*1000)/GPUelapsed2);
CHECK(cudaMemcpy(hg_output, d_output, MAT_SIZE*MAT_SIZE*sizeof(int), cudaMemcpyDeviceToHost)); // copy results back on host
check = checkResult(hg_output, h_output, MAT_SIZE); // This function checks two matrices are same or not
if(check == 0)printf("\n Something went wrong => Results are not matching for 'transposeKernel_thread_per_row'!!!");
initMatrix(hg_output,MAT_SIZE,0); // Initialize hg_output matrix to 0
init_kernel<<<1,1>>>(d_output, MAT_SIZE); // Initialize d_output matrix to 0
/**********************************************************************************************/
/* 3. Transpose kernel launch using thread per element of the matrix
In this configuration, # threads launched are equal to # of elements in the matrix i.e MAT_SIZE x MAT_SIZE
Where each thread is responsible to perform transpose operation on a single element.
Each thread reads a single corresponding matrix element from d_input matrix and stores in
d_output matrix in transposed indices.
# threads launched = # elements in matrix
numT = # threads per thread block in 2D
numB = # thread blocks in the grid in 2D
*/
dim3 num2T(8, 8,1); // 64 threads per block, laid out 8x8
dim3 num2B(ceil(MAT_SIZE/(float)num2T.x), ceil(MAT_SIZE/(float)num2T.y ),1 );
//printf("\n num2B.x = %d num2B.y = %d ", num2B.x,num2B.x );
/* Declaring Time variables to measure GPU time */
float GPUelapsed3 = 0.0; // To store final kernel execution time
cudaEvent_t start3, stop3; // Variables to record start and stop of kernel
cudaEventCreate(&start3); // Event create start
cudaEventCreate(&stop3); // Event create stop
cudaEventRecord(start3, 0); // Record time at start variables
/**/ transposeKernel_thread_per_element<<<num2B,num2T>>>(d_input,d_output,MAT_SIZE); // kernel for matrix transpose
cudaEventRecord(stop3, 0);
cudaEventSynchronize(stop3);
cudaEventElapsedTime(&GPUelapsed3, start3, stop3);
cudaEventDestroy(start3); // Event destroy start
cudaEventDestroy(stop3); // Event destroy stop
printf("\n 3. Parallel runtime thread per element is %0.3f ms",GPUelapsed3);
printf("\n Parallel Speedup thread per element = (CPU time)/(GPU time) = %0.3f\n", (CPUtime_taken*1000)/GPUelapsed3);
CHECK(cudaMemcpy(hg_output, d_output, MAT_SIZE*MAT_SIZE*sizeof(int), cudaMemcpyDeviceToHost)); // copy results back on host
check = checkResult(hg_output, h_output, MAT_SIZE); // This function checks two matrices are same or not
if(check == 0)
{
printf("\n Something went wrong => Results are not matching for 'transposeKernel_thread_per_element'!!!");
}
free(h_input);
free(h_output);
free(hg_output);
cudaFree(d_input);
cudaFree(d_output);
return(0);
}
|
3,524 | #include <stdio.h>
// Empty demonstration kernel: performs no work, exists only to show a launch.
__global__ void addk()
{
}
// Launch the no-op kernel with a single block of 10 threads, then print a
// message from the host.
int main()
{
addk<<<1,10>>>();
printf("Hello world from CPU");
return 0;
}
|
3,525 | /*
compile : nvcc malloc_test.cu -o malloc_test
exec : ./malloc_test 512 1
starting 512MB , increase 1MB
if you have multiple GPU, use below
export CUDA_VISIBLE_DEVICES=3;./malloc_test 512 1
MB convert table would be :
GB MB
1 1024
2 2048
3 3072
4 4096
5 5120
6 6144
7 7168
8 8192
9 9216
10 10240
11 11264
12 12288
13 13312
14 14336
15 15360
16 16384
17 17408
18 18432
19 19456
20 20480
21 21504
22 22528
23 23552
24 24576
25 25600
26 26624
27 27648
28 28672
29 29696
30 30720
31 31744
32 32768
*/
#include <stdlib.h>
#include <stdio.h>
// Repeatedly cudaMalloc an ever-growing block until allocation fails, printing
// memory statistics each round. Usage: ./malloc_test <start_MB> <inc_MB>.
// Fix: detect allocation failure from cudaMalloc's return code. The original
// tested `devPtr == NULL`, but cudaMalloc leaves the pointer unmodified on
// failure, so after the first successful round a failed allocation was never
// detected and the loop never terminated.
int main(int argc, char* argv[]) {
int *devPtr= NULL;
size_t mem_size, free, total , start, inc, size ;
float free_f, total_f, used_f, used_start_f, used_now_f;
cudaMemGetInfo(&free,&total);
free_f = float(free) / (1024*1024) ;
total_f = float(total) / (1024*1024) ;
used_f = total_f-free_f ;
used_start_f = used_f;
if(argc<3){
printf(" plz use below command \n ./malloc_test 1024 10 \n to malloc 1024MB and increment would be 10MB \n");
printf("\n11GB 11264MB\n15GB 15360MB \n23GB 23552MB\n31GB 31744MB\n");
printf("\ncurrent free memory is %.1f\n", free_f);
return 0;
}
start = atoi(argv[1]) / sizeof(int) ;
inc = atoi(argv[2]) ;
size = start;
printf("------------------------------------------------------------------\n");
printf("\tTotal(MB)=\tFree(MB)+\tinit(MB)+\tAlloc(MB)\n");
printf("0\t%.1f =\t%.1f+ \t%.1f \t <------ initial used memory \n", total_f,free_f, used_f );
printf("------------------------------------------------------------------\n");
int i = 0;
do {
mem_size = sizeof(int) * size * (1024*1024) + (inc*i) * (1024*1024) ;
cudaError_t err = cudaMalloc(&devPtr, mem_size ); // MB
cudaMemGetInfo(&free,&total);
free_f = float(free) / (1024*1024) ;
total_f = float(total) / (1024*1024) ;
used_f = total_f-free_f ;
used_now_f = (float)mem_size/(1024*1024) ;
printf("%d\t%.1f =\t%.1f+ \t%.1f+ \t%.1f \n", i, total_f, free_f, used_start_f, used_now_f);
if(err != cudaSuccess || devPtr == NULL) { // cudaMalloc does not null the pointer on failure
printf("couldn't allocate %.1f MB ", used_now_f);
printf("Err : %s\n", cudaGetErrorString(cudaGetLastError()) );
return 0;
}
cudaFree(devPtr);
size = (size* sizeof(int) + inc )/sizeof(int) ;
mem_size = sizeof(int) * size ;
i=i+1;
} while(1);
}
|
3,526 |
// Binomial coefficient C(m, k) = m! / ((m - k)! * k!).
// Multiplies the k largest factors first, then divides by k, k-1, ..., 1;
// each intermediate quotient stays integral because a product of j
// consecutive integers is divisible by j!.
// No overflow protection: caller must keep C(m, k) * k! within 64 bits.
__device__
unsigned long long int combin(int m, int k) {
unsigned long long int result = 1;
for (int f = m; f > m - k; --f) {
result *= f;
}
for (int d = k; d > 0; --d) {
result /= d;
}
return result;
}
// Unrank combination number `i` (0-based) of k elements chosen from m into
// queensList, in lexicographic order; output values are ascending and 0-based.
// Standard combinatorial unranking: for each output slot s, advance the
// candidate value `cs` while the number of combinations beginning below it is
// still smaller than the remaining rank.
// NOTE(review): assumes 0 <= i < C(m, k) and queensList has room for k
// entries - no bounds checking is performed.
__device__
void queenGen(unsigned int* queensList, unsigned long long int i, int m, int k) {
//lexicographic unranking
int q = 0; // next write position in queensList
i = i + 1; // switch to a 1-based rank for the comparisons below
int j = 0; // value (1-based) chosen for the previous slot
for (int s = 1; s < k + 1; s++) {
int cs = j + 1; // first candidate value for slot s
unsigned long long int com = combin(m - cs, k - s); // combos if cs is chosen
while (i > com) { // skip whole sub-ranges preceding rank i
i -= com;
cs += 1;
com = combin(m - cs, k - s);
}
queensList[q++] = cs - 1; // store the 0-based value
j = cs;
}
}
|
3,527 | #include <stdio.h>
#define X 12
#define THREAD_X 4
// Write each element's flat global thread index into A.
// No bounds guard: the launch must supply exactly one thread per element.
__global__ void index(int *A){
const int gid = blockDim.x * blockIdx.x + threadIdx.x;
A[gid] = gid;
}
// Fill a device array with each thread's global index and print the result.
// Fix: derive the launch configuration from the X / THREAD_X macros instead
// of the hard-coded 12/4 literals, so changing the macros keeps the launch
// consistent with the array size.
int main(){
int A[X], *A_d;
int i;
cudaMalloc((void**)&A_d, sizeof(int)*X);
for(i = 0; i < X; i++){
A[i] = -1; // sentinel so an unwritten element is visible in the output
}
cudaMemcpy(A_d, A, sizeof(int)*X, cudaMemcpyHostToDevice);
index<<<X/THREAD_X, THREAD_X>>>(A_d); // X must be a multiple of THREAD_X
cudaMemcpy(A, A_d, sizeof(int)*X, cudaMemcpyDeviceToHost);
for(i=0; i < X; i++){
printf("%d ",A[i]);
}
printf("\n");
cudaFree(A_d);
}
|
3,528 | //ECGR 6090 Heterogeneous Computing Homework 0
// Problem 1 a - Vector Add on GPU
//Written by Aneri Sheth - 801085402
// Reference taken from Lecture Slides by Dr. Tabkhi
// Other references taken from - http://ecee.colorado.edu/~siewerts/extra/code/example_code_archive/a490dmis_code/CUDA/cuda_work/samples/0_Simple/vectorAdd/vectorAdd.cu and https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#using-cuda-gpu-timers
#include<stdio.h>
#include<stdlib.h>
#include<time.h>
#define n 1000 //job size = 1K, 10K, 100K, 1M and 10M
// Element-wise vector add: c[i] = a[i] + b[i], one element per thread.
// Fix: the original indexed with blockIdx.x, but the kernel is launched as
// add<<<1,n>>> (one block of n threads), so every thread saw blockIdx.x == 0
// and only c[0] was written - racily. Index by the global thread id instead,
// which is correct for both <<<1,n>>> and multi-block launches.
__global__ void add(int *a, int *b, int *c) //add kernel
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
c[i] = a[i] + b[i];
}
// Fill x with `size` pseudo-random values in [0, n) (n is the job-size macro).
void random_ints(int* x, int size)
{
for (int i = 0; i < size; ++i) {
x[i] = rand() % n;
}
}
// Driver: builds two random int vectors of length n, adds them on the GPU,
// and prints the elapsed time (measured with CUDA events around the H2D
// copies, the kernel, and the D2H copy - i.e. transfer time is included).
// NOTE(review): add<<<1,n>>> launches ONE block of n threads, but the kernel
// indexes c[blockIdx.x], which is always 0 here - only c[0] is computed.
// Verify the kernel's indexing against the intended launch shape.
int main(void)
{
int *a, *b, *c; // CPU copies
int *d_a, *d_b, *d_c; // GPU copies
int size = n * sizeof(int);
cudaEvent_t start, stop; //time start and stop
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
//Allocate device memory
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, size);
//Allocate CPU memory
a = (int *)malloc(size); random_ints(a, n);
b = (int *)malloc(size); random_ints(b, n);
c = (int *)malloc(size);
cudaEventRecord( start, 0 );
//Copy CPU memory to GPU memory
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
//Call the add kernel
add<<<1,n>>>(d_a, d_b, d_c); //1 thread block with n threads
//Copy from device to host (blocking copy also synchronizes with the kernel)
cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
cudaEventRecord( stop, 0 );
cudaEventSynchronize(stop);
cudaEventElapsedTime( &time, start, stop );
cudaEventDestroy( start );
cudaEventDestroy( stop );
printf("GPU Execution Time = %f\n",time);
for (int i=0;i<n;i++) {
printf("a[%d]=%d , b[%d]=%d, c[%d]=%d\n",i,a[i],i,b[i],i,c[i]);
} //print the result
free(a); free(b); free(c);
cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
return 0;
}
|
3,529 | #include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <math.h>
#include <functional>
#include <stdlib.h> /* srand, rand */
#include <time.h> /* time */
#include <chrono>
#include <list>
#include <cuda.h>
#include <cstdlib>
#include <ctime>
#include <cmath>
using namespace std;
#define ROW_TILE_WIDTH 64
#define COL_TILE_WIDTH 64
#define EPSILON (1e-6)
// Tiled matrix multiply C = A * B staged through shared memory.
// A is (cRows x width), B is (width x cCols), C is (cRows x cCols), row-major.
// Preconditions (unchecked): blockDim == (COL_TILE_WIDTH, ROW_TILE_WIDTH),
// width divisible by COL_TILE_WIDTH, and the grid exactly covers C - there
// are no bounds guards, so partial tiles would read/write out of range.
// NOTE(review): with ROW_TILE_WIDTH == COL_TILE_WIDTH == 64 a matching block
// needs 4096 threads, above the 1024-per-block hardware limit; the launch in
// main() is commented out, so this kernel appears unused as written - confirm
// before enabling it.
template<typename T>
__global__
void naive_matrix_multiply(T* A, T* B, T* C, int width, int cRows, int cCols)
{
__shared__ T shATile[ROW_TILE_WIDTH][COL_TILE_WIDTH];
__shared__ T shBTile[ROW_TILE_WIDTH][COL_TILE_WIDTH];
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
T pValue = 0;
// iterate for width/COL_TILE_WIDTH number of times
// to compute the C tile
for (int p = 0; p < width / COL_TILE_WIDTH; p++) {
//load values to tiles from A and B
shATile[threadIdx.y][threadIdx.x] = A[row * width + p * ROW_TILE_WIDTH + threadIdx.x];
shBTile[threadIdx.y][threadIdx.x] = B[(p * COL_TILE_WIDTH + threadIdx.y) * cCols + col];
// wait until all threads finish loading values
__syncthreads();
// update pValue for this thread
for (int i = 0; i < COL_TILE_WIDTH; i++) pValue += shATile[threadIdx.y][i] * shBTile[i][threadIdx.x];
// wait until all threads finish computing pValue before overwriting
__syncthreads();
}
C[row * cCols + col] = pValue;
}
// Fill a rows x cols row-major matrix with values produced by the
// zero-argument generator F, in row-major order.
template<typename T>
void initialize_matrix(T* M, int rows, int cols, std::function<float()> F) {
const int total = rows * cols;
for (int idx = 0; idx < total; ++idx) {
M[idx] = F();
}
}
// Fill a rows x cols row-major matrix with values produced by the
// index-based generator F(row, col).
template<typename T>
void initialize_matrix(T* M, int rows, int cols, std::function<float(int, int)> F) {
for (int r = 0; r < rows; ++r)
for (int c = 0; c < cols; ++c)
M[r * cols + c] = F(r, c);
}
// Print a rows x cols row-major matrix to stdout, one row per line,
// elements separated by a single space.
template<typename T>
void print_matrix(T* M, int rows, int cols) {
for (int r = 0; r < rows; ++r) {
for (int c = 0; c < cols; ++c)
std::cout << M[r * cols + c] << " ";
std::cout << "\n";
}
}
// Reference CPU matrix multiply: C (C_rows x C_cols) = A (C_rows x width)
// times B (width x C_cols), all row-major.
template<typename T>
void naive_matrix_multiply_cpu(T* A, T* B, T* C, int width, int C_rows, int C_cols) {
for (int r = 0; r < C_rows; ++r) {
for (int c = 0; c < C_cols; ++c) {
T acc = 0.0f;
for (int k = 0; k < width; ++k) {
acc += A[r * width + k] * B[k * C_cols + c];
}
C[r * C_cols + c] = acc;
}
}
}
// Maximum absolute element-wise difference between A1 and A2
// (rows x cols, row-major). Returns T(0) for an empty matrix.
// Fix: the original seeded the maximum with the SIGNED difference
// A1[0] - A2[0] and read element 0 even when rows * cols == 0; seeding with
// zero keeps the result well-defined and non-negative in every case.
template<typename T>
T maxDiff(T* A1, T* A2, int rows, int cols) {
T maxDiff = T(0);
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
T diff = abs(A1[i * cols + j] - A2[i * cols + j]);
if (diff > maxDiff) {
maxDiff = diff;
}
}
}
return maxDiff;
}
// Benchmark driver: times the CPU naive matrix multiply over a sweep of
// square sizes and logs (algorithm, size, time-ms) rows to a CSV file.
// Sizes <= 768 are timed 5x and averaged; 769..1360 timed once; larger sizes
// are skipped and logged with the max_limit sentinel.
// Fixes relative to the original:
//  * C_cpu (cudaMallocManaged each iteration) was never freed - leak;
//  * the CSV FILE* was never fclose()d, risking lost buffered rows;
//  * fopen() result is now checked before use;
//  * unused lambdas (all_ones, index_based) removed.
// NOTE(review): the PASS/FAIL check compares C_cpu against C, but the GPU
// kernel that would fill C is commented out, so FAIL output is expected.
int main(void)
{
FILE* fp;
fp = fopen("Naive_Multiplication_CPU_Mohit.csv", "w+");
if (fp == NULL) { // cannot log results - bail out early
perror("fopen");
return 1;
}
fprintf(fp, "Algorithm_Name,Input_Dimensions,Execution_Time(ms)");
int max_limit = 800000; // sentinel logged for sizes that are skipped
int my_list[] = {16, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224, 240, 256, 272, 288, 304, 320, 336, 352, 368, 384, 400, 416, 432, 448, 464, 480, 496, 512, 528, 544, 560, 576, 592, 608, 624, 640, 656, 672, 688, 704, 720, 736, 752, 768, 784, 800, 816, 832, 848, 864, 880, 896, 912, 928, 944, 960, 976, 992, 1008, 1040, 1072, 1104, 1136, 1168, 1200, 1232, 1264, 1296, 1328, 1360, 1392, 1424, 1456, 1488, 1520, 1552, 1584, 1616, 1648, 1680, 1712, 1744, 1776, 1808, 1840, 1872, 1904, 1936, 1968, 2000, 2032, 2064, 2096, 2128, 2160, 2192, 2224, 2256, 2288, 2320, 2352, 2384, 2416, 2448, 2480, 2512, 2544, 2576, 2608, 2640, 2672, 2704, 2736, 2768, 2800, 2832, 2864, 2896, 2928, 2960, 2992, 3024, 3056, 3088, 3120, 3152, 3184, 3216, 3248, 3280, 3312, 3344, 3376, 3408, 3440, 3472, 3504, 3536, 3568, 3600, 3632, 3664, 3696, 3728, 3760, 3792, 3824, 3856, 3888, 3920, 3952, 3984, 4016, 4080, 4144, 4208, 4272, 4336, 4400, 4464, 4528, 4592, 4656, 4720, 4784, 4848, 4912,4976, 5040, 5104, 5168, 5232, 5296, 5360, 5424, 5488, 5552, 5616, 5680, 5744, 5808, 5872, 5936, 6000, 6064, 6128, 6192, 6256, 6320, 6384, 6448, 6512, 6576, 6640, 6704, 6768, 6832, 6896, 6960, 7024, 7088, 7152, 7216, 7280, 7344, 7408, 7472, 7536, 7600, 7664, 7728, 7792, 7856, 7920, 7984, 8048, 8112, 8176, 8240, 8304, 8368, 8432, 8496, 8560, 8624, 8688, 8752, 8816, 8880, 8944, 9008, 9072, 9136, 9200, 9264, 9328, 9392, 9456, 9520, 9584, 9648, 9712, 9776, 9840, 9904, 9968, 10032, 10096, 10160, 10224, 10288, 10352, 10416, 10480, 10544, 10608, 10672, 10736, 10800, 10864, 10928, 10992};
int length = sizeof(my_list) / sizeof(my_list[0]);
printf("%d", length);
char algoname[100] = "naive_matrix_cpu";
for (int i = 0; i < length; i++){
if(my_list[i]<=1360){
double l[5];
int matSize = my_list[i];
cout<<"Matrix Size : "<<matSize<<endl;
int A_rows = matSize;
int A_cols = matSize;
int B_rows = matSize;
int B_cols = matSize;
int C_rows = A_rows;
int C_cols = B_cols;
int A_size = A_rows * A_cols;
int B_size = B_rows * B_cols;
int C_size = C_rows * C_cols;
float* A, * B, * C, * C_cpu;
// Allocate Unified Memory - accessible from CPU or GPU
cudaMallocManaged(&A, A_size * sizeof(float));
cudaMallocManaged(&B, B_size * sizeof(float));
cudaMallocManaged(&C, C_size * sizeof(float));
cudaMallocManaged(&C_cpu, C_size * sizeof(float));
// initialize A and B matrices with uniform random values in [0, 1000]
srand(time(NULL));
auto rand_numbers = []() -> float {
return static_cast<float>(rand()) / (static_cast<float>(RAND_MAX / 1000));
};
initialize_matrix<float>(A, A_rows, A_cols, rand_numbers);
initialize_matrix<float>(B, B_rows, B_cols, rand_numbers);
if(my_list[i]<=768){
for(int j=0;j<5;j++){
// time one CPU multiply
auto t1 = std::chrono::system_clock::now();
naive_matrix_multiply_cpu<float>(A, B, C_cpu, A_cols, C_rows, C_cols);
auto t2 = std::chrono::system_clock::now();
if (fabsf(maxDiff<float>(C, C_cpu, C_rows, C_cols)) <= (float)EPSILON)
std::cout << "PASS" << std::endl;
else
std::cout << "FAIL" << std::endl;
auto cpu_time_ms = std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count() / 1000.0f;
l[j]=cpu_time_ms;
}
double avg;
avg=(l[0]+l[1]+l[2]+l[3]+l[4])/5;
cout << "Using Milliseconds Clock: (AVG)"<< endl;
cout << " CPU time taken to execute for strassen matrices of size - "
<< matSize << " : " <<avg<<" ms"<< endl;
cout << endl;
fprintf(fp,"\n%s,%d,%lf",algoname,matSize,avg);
}else{
auto t1 = std::chrono::system_clock::now();
naive_matrix_multiply_cpu<float>(A, B, C_cpu, A_cols, C_rows, C_cols);
auto t2 = std::chrono::system_clock::now();
if (fabsf(maxDiff<float>(C, C_cpu, C_rows, C_cols)) <= (float)EPSILON)
std::cout << "PASS" << std::endl;
else
std::cout << "FAIL" << std::endl;
auto cpu_time_ms = std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count() / 1000.0f;
cout << "Using Milliseconds Clock:(else above 1000) "<< endl;
cout << " CPU time taken to execute for strassen matrices of size - "
<< matSize << " : " <<cpu_time_ms<<" ms"<< endl;
cout << endl;
fprintf(fp,"\n%s,%d,%lf",algoname,matSize,cpu_time_ms);
}
// Free unified memory (including C_cpu, which the original leaked)
cudaFree(A);
cudaFree(B);
cudaFree(C);
cudaFree(C_cpu);
}
else
{
cout<<"MAX LIMIT"<<endl;
fprintf(fp, "\n%s,%d,%d", algoname, my_list[i], max_limit);
}
}
fclose(fp); // flush and close the CSV log
return 0;
}
3,530 | #include "includes.h"
// Forward-propagate a 256-unit layer through the 256x4 weight matrix syn2:
// out[i] += sum_k layer1[256*offset + k] * syn2[k*4 + i].
// Each thread handles one (output i, input k) pair and accumulates with
// atomicAdd, so `out` must be zeroed before launch; `offset` selects which
// 256-element slice of layer1 to use.
// Expected launch: x-dimension covers the 4 outputs, y-dimension the 256
// inputs (per the inline comments). NOTE(review): the dimensions 256 and 4
// are hard-coded - confirm against the network definition in the project.
__global__ void Fprop2(const float* layer1, const float* syn2, float* out, const int offset)
{
int i = blockDim.x*blockIdx.x + threadIdx.x; //4
//int j = blockIdx.x; //Data.count
int k = blockDim.y*blockIdx.y + threadIdx.y; //256
atomicAdd(&out[i], layer1[256*offset + k] * syn2[k*4 + i]);
}
3,531 | #include <cmath>
#include <iostream>
#define N 25600
#define M 1024
using namespace std;
#define CHECK(value) { \
cudaError_t _m_cudaStat = value; \
if (_m_cudaStat != cudaSuccess) { \
cout<< "Error:" << cudaGetErrorString(_m_cudaStat) \
<< " at line " << __LINE__ << " in file " << __FILE__ << "\n"; \
exit(1); \
} }
// Fill `matrix` with `size` pseudo-random values in [0, 100),
// reseeding the C PRNG from the current time.
void generateMatrix(int* matrix, int size) {
srand(time(NULL));
for (int i = 0; i < size; ++i) {
matrix[i] = rand() % 100;
}
}
// Print an n x m row-major matrix to stdout, one row per line,
// followed by a trailing blank line.
void print(int* matrix, int n, int m) {
for (int r = 0; r < n; ++r) {
for (int c = 0; c < m; ++c)
cout << matrix[r*m + c] << " ";
cout << endl;
}
cout << endl;
}
// CPU reference: for each of the n rows of an n x m row-major matrix, count
// the positions where the previous element is strictly greater than the
// current one (descents) and store the per-row count in result[i].
void compute(int* matrix, int n, int m, int* result) {
for (int row = 0; row < n; ++row) {
const int* r = matrix + row * m;
int descents = 0;
for (int j = 1; j < m; ++j) {
if (r[j - 1] > r[j])
++descents;
}
result[row] = descents;
}
}
// Compare CPU and GPU result vectors of length n; print and return false at
// the first mismatch, true when all n entries agree.
// Fix: the loop bound used the global macro N instead of the `n` parameter,
// so calls with n != N either under-checked or read out of bounds.
bool checkResult(int* resultCPU, int* resultGPU, int n) {
for(int i = 0; i < n; ++i){
if(resultCPU[i] != resultGPU[i]){
cout << "Wrong in str: " << i << " inCPU: " << resultCPU[i] << " inGPU: " << resultGPU[i] << endl;
return false;
}
}
return true;
}
// One thread per row: counts descents (previous element strictly greater) in
// each row of an n x m matrix, staging 256x32 tiles through shared memory so
// global loads are coalesced (32 consecutive threads read 32 consecutive
// elements of one row).
// Assumptions (unchecked): blockDim.x == 256, m is a multiple of 32, and n is
// a multiple of 256 - threads that exited at the guard would otherwise miss
// the block-wide __syncthreads() calls below.
// Fix: the original had no barrier at the END of the tile loop, so fast
// threads could start overwriting `cache` with the next tile while slower
// threads in the block were still reading the previous one (shared-memory
// race).
__global__ void fastComputeCUDA(int* matrix, int n, int m, int* result)
{
int idxStr = threadIdx.x + blockIdx.x*blockDim.x;
if(idxStr >= n) return;
int res = 0;
int temp0 = matrix[idxStr*m]; // last element of the previous tile segment
__shared__ int cache[256][33]; // +1 column pad avoids shared-memory bank conflicts
for(int k = 0; k < m/32; ++k){
// 32 threads cooperatively read one row segment (coalesced)
for(int s = 0; s < 256/8; ++s){
int row = threadIdx.x / 32 + s*8;
int col = k * 32 + threadIdx.x % 32;
cache[row][threadIdx.x % 32]
= matrix[(blockIdx.x*256 + row) * m + col];
}
__syncthreads(); // tile fully loaded before anyone reads it
for(int idx = 0; idx < 32; ++idx){
int tempC = cache[threadIdx.x][idx];
if(temp0 > tempC)
++res;
temp0 = tempC;
}
__syncthreads(); // all reads done before the next iteration overwrites cache
}
result[idxStr] = res;
}
// Driver: builds a random N x M matrix, counts per-row descents on the CPU
// (timed with clock()) and on the GPU (timed with CUDA events), verifies the
// two results agree, and prints times plus memory-throughput estimates.
// NOTE(review): the literal 4 is used as sizeof(int) throughout the byte-size
// arithmetic; the heap arrays (resultHOST, matrixHOST, resultCPU), the two
// CUDA events, and the device buffers are never freed before return.
int main(void) {
float elapsedTimeCUDA, elapsedTimeCPU;
clock_t startCPU;
int* matrixDEVICE;
int* resultDEVICE;
int* resultHOST = new int[N];
int* matrixHOST = new int[N*M];
int* resultCPU = new int[N];
generateMatrix(matrixHOST, N*M);
startCPU = clock();
compute(matrixHOST, N, M, resultCPU);
elapsedTimeCPU = (double)(clock() - startCPU) / CLOCKS_PER_SEC;
cout << "CPU time = " << elapsedTimeCPU * 1000 << " ms\n";
cout << "CPU memory throughput = " << N * M * 4 / elapsedTimeCPU / 1024 / 1024 / 1024 << " Gb/s\n";
cudaEvent_t startCUDA, stopCUDA;
cudaEventCreate(&startCUDA);
cudaEventCreate(&stopCUDA);
cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
CHECK(cudaMalloc(&matrixDEVICE, N * M * 4));
CHECK(cudaMemcpy(matrixDEVICE, matrixHOST, N * M * 4, cudaMemcpyHostToDevice));
CHECK(cudaMalloc(&resultDEVICE, N * 4));
cudaEventRecord(startCUDA, 0);
fastComputeCUDA <<<((N + 255)/256), 256 >>> (matrixDEVICE, N, M, resultDEVICE);
cudaEventRecord(stopCUDA, 0);
cudaEventSynchronize(stopCUDA);
CHECK(cudaGetLastError());
cudaEventElapsedTime(&elapsedTimeCUDA, startCUDA, stopCUDA);
CHECK(cudaMemcpy(resultHOST, resultDEVICE, N * 4, cudaMemcpyDeviceToHost));
cout << (checkResult(resultCPU, resultHOST, N) ? "Result is correct" : "Result isn't correct") << endl;
cout << "CUDA time = " << elapsedTimeCUDA << " ms\n";
cout << "CUDA memory throughput = " << N * M * 4 / elapsedTimeCUDA / 1024 / 1024 / 1.024 << " Gb/s\n";
//waitKey(0);
return 0;
}
|
3,532 | #include <thrust/transform.h>
#include <thrust/inner_product.h>
#include <thrust/sequence.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/functional.h>
#include <iostream>
#include <iterator>
#include <algorithm>
// SAXPY via an explicit temporary: Y <- A*X + Y computed as two passes
// (scaled = A * X, then Y = scaled + Y). Deliberately slow variant:
// allocates an extra device vector and makes an extra pass over the data.
void saxpy_slow(float A, thrust::device_vector<float>& X, thrust::device_vector<float>& Y)
{
thrust::device_vector<float> scaled(X.size());
thrust::fill(scaled.begin(), scaled.end(), A);
thrust::transform(X.begin(), X.end(), scaled.begin(), scaled.begin(), thrust::multiplies<float>());
thrust::transform(scaled.begin(), scaled.end(), Y.begin(), Y.begin(), thrust::plus<float>());
}
// Binary functor computing a*x + y for thrust::transform; the scalar `a` is
// fixed at construction time. Usable on both host and device.
struct saxpy_functor : public thrust::binary_function<float,float,float>
{
const float a; // scalar multiplier applied to x
saxpy_functor(float _a) : a(_a) {}
__host__ __device__
float operator()(const float& x, const float& y) const {
return a * x + y;
}
};
// SAXPY in a single fused pass: Y <- A*X + Y via one thrust::transform,
// avoiding the temporary vector used by saxpy_slow.
void saxpy_fast(float A, thrust::device_vector<float>& X, thrust::device_vector<float>& Y)
{
thrust::transform(X.begin(), X.end(), Y.begin(), Y.begin(), saxpy_functor(A));
}
// Raw CUDA SAXPY kernel: Y[i] <- A*X[i] + Y[i] for i in [0, N).
// Grid-stride loop: each thread advances by the total thread count, so any
// launch configuration covers the whole vector.
extern "C"
__global__ void saxpy ( const float *X, float *Y, float A, int N)
{
const int stride = blockDim.x * gridDim.x;
#pragma unroll 4
for (int idx = threadIdx.x + blockDim.x * blockIdx.x; idx < N; idx += stride)
Y[idx] = A * X[idx] + Y[idx];
}
#ifndef BLK_SZ
#define BLK_SZ 512
#endif
#ifndef THR
#define THR (BLK_SZ*6)
#endif
// Launch the raw saxpy kernel over X and Y with a fixed-size grid (THR total
// threads in BLK_SZ-sized blocks); the kernel's grid-stride loop covers any
// X.size(), so the grid need not match the vector length.
// NOTE(review): &(X[0]) is undefined for an empty vector - verify callers
// never pass zero-length vectors.
void saxpy_cuda(float A, thrust::device_vector<float>& X, thrust::device_vector<float>& Y)
{
saxpy <<< (THR-1)/BLK_SZ + 1, BLK_SZ >>> ( thrust::raw_pointer_cast(&(X[0])),
thrust::raw_pointer_cast(&(Y[0])), A, X.size());
}
// Driver: runs the three SAXPY variants (slow thrust, fused thrust, raw CUDA)
// against a host-side thrust::transform reference and prints the sum of
// element-wise differences for each (0 indicates agreement).
// Note: the host reference is re-applied to y_h before each variant, so each
// device run is compared against the matching accumulated host state.
int main(int argc, char **argv)
{
// Default Size of input vectors
int N=1000000;
// Modify size of input vectors using program argument
if (argc>1) { N = atoi(argv[1]); }
thrust::host_vector<float> x_h(N);
thrust::host_vector<float> y_h(N);
thrust::host_vector<float> y_result(N);
thrust::sequence(x_h.begin(), x_h.end(), 10.0f, 1.5f);
thrust::fill (y_h.begin(), y_h.end(), -2.0f);
thrust::device_vector<float> x(x_h.begin(), x_h.end());
thrust::device_vector<float> y(y_h.begin(), y_h.end());
// host reference pass, then device pass; compare via inner_product of diffs
thrust::transform(x_h.begin(), x_h.end(), y_h.begin(), y_h.begin(), saxpy_functor(2.0f));
saxpy_slow(2.0f, x, y);
y_result = y;
float R = thrust::inner_product( y_h.begin(), y_h.end(), y_result.begin(), 0.0f,
thrust::plus<float>(), thrust::minus<float>());
std::cout << "Saxpy Slow. Sum of Differences is " << R << std::endl;
thrust::transform(x_h.begin(), x_h.end(), y_h.begin(), y_h.begin(), saxpy_functor(2.0f));
saxpy_fast(2.0f, x, y);
y_result = y;
R = thrust::inner_product( y_h.begin(), y_h.end(), y_result.begin(), 0.0f,
thrust::plus<float>(), thrust::minus<float>());
std::cout << "Saxpy Fast. Sum of Differences is " << R << std::endl;
thrust::transform(x_h.begin(), x_h.end(), y_h.begin(), y_h.begin(), saxpy_functor(2.0f));
saxpy_cuda(2.0f, x, y);
y_result = y;
R = thrust::inner_product( y_h.begin(), y_h.end(), y_result.begin(), 0.0f,
thrust::plus<float>(), thrust::minus<float>());
std::cout << "Saxpy CUDA. Sum of Differences is " << R << std::endl;
return 0;
}
|
3,533 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
static cudaStream_t *streams;
// CUDA kernel that busy-waits for at least num_cycles device clock cycles.
__global__ void sleep(int64_t num_cycles)
{
const int64_t begin = clock64();
while (clock64() - begin < num_cycles) {
// spin until the requested cycle count has elapsed
}
}
// Returns the number of device clock cycles corresponding to `seconds`,
// based on device 0's reported clock rate.
extern "C" int64_t get_cycles(float seconds)
{
// prop.clockRate is reported in kHz; convert to Hz
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
const int64_t hz = int64_t(prop.clockRate) * 1000;
return (int64_t)(seconds * hz);
}
// Create `num_streams` CUDA streams in the module-level `streams` array.
// Slot 0 holds the default (NULL) stream; user streams occupy 1..num_streams.
// NOTE(review): the malloc result is not checked, and calling this twice
// leaks the previous allocation.
extern "C" void create_streams(int num_streams)
{
// Allocate streams (one extra slot for the default stream)
streams = (cudaStream_t *) malloc((num_streams+1)*sizeof(cudaStream_t));
// Default stream
streams[0] = NULL;
// Primer kernel launch - presumably to absorb one-time CUDA context
// initialization cost before any timed launches (TODO confirm intent)
sleep<<< 1, 1 >>>(1);
// Create streams
for(int i = 1; i <= num_streams; i++)
cudaStreamCreate(&streams[i]);
}
// Launch a one-thread sleep kernel for num_cycles on the stream with index
// stream_id (0 selects the default stream stored in streams[0]).
extern "C" void sleep_kernel(int64_t num_cycles, int stream_id)
{
const int gridSize = 1;   // a single block...
const int blockSize = 1;  // ...with a single spinning thread
sleep<<< gridSize, blockSize, 0, streams[stream_id] >>>(num_cycles);
}
// Block the host until all work queued on stream `stream_id` has completed.
extern "C" void wait_for_stream(int stream_id)
{
cudaStreamSynchronize(streams[stream_id]);
}
// Block the host until every user-created stream (indices 1..num_streams)
// has drained; the default stream in slot 0 is not waited on here.
extern "C" void wait_for_streams(int num_streams)
{
for (int s = 1; s <= num_streams; ++s)
cudaStreamSynchronize(streams[s]);
}
// Destroy every user-created stream (indices 1..num_streams) and release the
// stream array itself; slot 0 (default stream) needs no destruction.
extern "C" void destroy_streams(int num_streams)
{
for (int s = 1; s <= num_streams; ++s)
cudaStreamDestroy(streams[s]);
free(streams);
}
|
3,534 | #include <stdio.h>
#include <stdlib.h>
#define DATASIZE 64
// 3-point moving average over a 1-D array: each interior element of `out`
// becomes the mean of in[id-1], in[id], in[id+1]; the first and last elements
// are copied through unchanged. `width` is the ELEMENT count, and the launch
// must supply one thread per element in a single block (indexing uses
// threadIdx.x only).
// NOTE(review): main() currently passes DATASIZE*sizeof(double) as `width`
// (a byte count), so the id == width-1 boundary branch is never taken and
// thread DATASIZE-1 reads in[DATASIZE] out of bounds - fix the call site.
__global__ void get_average(double *in, double *out, int width)
{
int id=threadIdx.x;
if(id==0||id==width-1)
{
out[id]=in[id];
}
else
{
out[id]=(in[id-1]+in[id]+in[id+1])/3;
}
}
// Fill `input` with `width` pseudo-random values from {0, 1, 2, 3, 4}.
// Note: rand() is never seeded in this program, so the sequence is
// reproducible from run to run.
void init( double * input, int width )
{
for (int i = 0; i < width; ++i) {
input[i] = rand() % 5;
}
}
// Verify that `averaged` is the 3-point moving average of `original`:
// every interior point must equal the mean of its three neighbours and the
// two boundary points must be copied verbatim. Prints a verdict and returns
// 0 on success, -1 on the first failure. Exact floating-point comparison is
// intentional: the device evaluates the same expression.
int check(double * original, double * averaged, int width)
{
for (int i = 1; i < width - 1; i++)
{
const double expected = (original[i-1] + original[i] + original[i+1]) / 3;
if (averaged[i] != expected)
{
printf("Something goes wrong :(\n");
return -1;
}
}
if (averaged[0] != original[0] || averaged[width-1] != original[width-1])
{
printf("Boundary condition are not fine :( \n");
return -1;
}
printf("Correct solution\n");
return 0;
}
// Driver: builds a random array, computes its 3-point moving average on the
// GPU, and verifies the result on the host.
// Fixes relative to the original:
//  * the host-to-device cudaMemcpy copied only DATASIZE BYTES instead of
//    DATASIZE*sizeof(double), leaving most of d_in uninitialized;
//  * the kernel was passed DATASIZE*sizeof(double) as its element count
//    (bytes, not elements), which broke the boundary handling and made the
//    last thread read past the end of d_in.
int main()
{
double * d_in;
double * d_out;
// Memory allocation at host side
double * h_in = (double *)malloc(sizeof(double)*DATASIZE);
double * h_out= (double *)malloc(sizeof(double)*DATASIZE);
// Memory allocation at device side
cudaMalloc(&d_in,sizeof(double)* DATASIZE);
cudaMalloc(&d_out,sizeof(double)* DATASIZE);
// create random 1d array
init(h_in,DATASIZE);
// copy init data from host to device (full byte size, not element count)
cudaMemcpy(d_in,h_in,sizeof(double)*DATASIZE,cudaMemcpyHostToDevice);
// kernel: one thread per element; width is the ELEMENT count
get_average<<<1,DATASIZE>>>(d_in,d_out,DATASIZE);
// copy resulting data from device to host (blocking - also syncs the kernel)
cudaMemcpy(h_out,d_out,sizeof(double)*DATASIZE,cudaMemcpyDeviceToHost);
check(h_in,h_out,DATASIZE);
// Free device, host memory
cudaFree(d_in);
cudaFree(d_out);
free(h_in);
free(h_out);
}
3,535 | //
// Created by smallflyfly on 2021/5/18.
//
#include <stdio.h>
#include <cuda_runtime.h>
// Queries and prints the capabilities of CUDA device 0: driver/runtime
// versions, compute capability, global memory, clock rate and launch limits.
int main(int argc, char **argv) {
    int deviceCount = 0;
    cudaError_t errorId = cudaGetDeviceCount(&deviceCount);
    // cudaGetDeviceCount fails when no usable driver/runtime is present.
    if (errorId != cudaSuccess) {
        printf("cudaDeviceCount returned %d\n -> %s\n", (int)errorId, cudaGetErrorString(errorId));
        printf("Result = FAIL\n");
        exit(EXIT_FAILURE);
    }
    if (deviceCount == 0) {
        printf("There are no available device that support CUDA\n");
    } else {
        printf("Detected %d CUDA Capable device\n", deviceCount);
    }
    // Only device 0 is inspected below.
    int device = 0, driverVersion = 0, runtimeVersion = 0;
    cudaSetDevice(device);
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, device);
    printf("Device %d:\"%s\"\n", device, deviceProp.name);
    cudaDriverGetVersion(&driverVersion);
    cudaRuntimeGetVersion(&runtimeVersion);
    // Versions are encoded as 1000*major + 10*minor.
    printf("Driver version / Runtime Version %d.%d / %d.%d\n", driverVersion/1000, (driverVersion%100)/10,
    runtimeVersion/1000, (runtimeVersion%100)/10);
    printf("CUDA Capability Major/Minor version number: %d.%d\n", deviceProp.major, deviceProp.minor);
    // NOTE(review): pow() is used without including <math.h>; this relies on
    // cuda_runtime.h pulling in the math headers — confirm on the toolchain.
    printf("Total amount of global memory: %.2f GBytes\n", (float)deviceProp.totalGlobalMem / pow(1024.0, 3));
    // clockRate is reported by the runtime in kHz.
    printf("GPU clock rate: %.0f MHz (%0.2f GHz)\n", deviceProp.clockRate * 1e-3f, deviceProp.clockRate * 1e-6f);
    printf("Max number of thread per multiprocessor: %d\n", deviceProp.maxThreadsPerMultiProcessor);
    printf("max number of thread per block: %d\n", deviceProp.maxThreadsPerBlock);
    printf("max size of each dims of a block: %d x %d x %d\n", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]);
    printf("max size of each dim of a grid: %d x %d x %d\n", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]);
    exit(EXIT_SUCCESS);
} |
3,536 | /* CUDA finite difference wave equation solver, written by
* Jeff Amelang, 2012
*
* Modified by Kevin Yuh, 2013-14 */
#include <cstdio>
#include <cuda_runtime.h>
#include "Cuda1DFDWave_cuda.cuh"
/*
 * Wave solver kernel.
 *
 * Advances the 1-D finite-difference wave equation one time step:
 *   new = 2*current - old + courant^2 * (spatial second difference).
 * Uses a grid-stride loop so any launch size covers all interior nodes.
 * Node 0 is forced to left_boundary_value (Dirichlet); the last node is
 * held at zero.
 */
__global__
void
cudaWaveSolverKernel(const float *old_displacements,
                     const float *current_displacements,
                     float *new_displacements,
                     const unsigned int numberOfNodes,
                     const float courant,
                     const float left_boundary_value) {
    // Get current thread's index.
    unsigned int thread_index = blockIdx.x * blockDim.x + threadIdx.x;
    const float courantSquared = courant * courant;
    // Thread 0 owns the left boundary: write the boundary value, then jump
    // a full grid stride ahead so it rejoins the interior sweep.
    if (thread_index == 0) {
        new_displacements[thread_index] = left_boundary_value;
        thread_index += blockDim.x * gridDim.x;
    }
    // Interior nodes (1 .. numberOfNodes-2): central difference in space,
    // leapfrog in time.
    while (thread_index <= numberOfNodes - 2) {
        new_displacements[thread_index] =
            2 * current_displacements[thread_index] - old_displacements[thread_index]
            + courantSquared * (current_displacements[thread_index + 1]
                                - 2 * current_displacements[thread_index]
                                + current_displacements[thread_index - 1]);
        // Advance to this thread's next strided node.
        thread_index += blockDim.x * gridDim.x;
    }
    // The thread whose stride landed exactly on the last node pins the
    // right boundary to zero.
    if (thread_index == numberOfNodes - 1) {
        new_displacements[thread_index] = 0;
    }
}
/*
 * Host-side helper: launches cudaWaveSolverKernel with the requested grid
 * configuration. The launch is asynchronous with respect to the host.
 */
void cudaCallWaveSolverKernel(const unsigned int blocks,
                              const unsigned int threadsPerBlock,
                              const float *old_displacements,
                              const float *current_displacements,
                              float *new_displacements,
                              const unsigned int numberOfNodes,
                              const float courant,
                              const float left_boundary_value) {
    cudaWaveSolverKernel<<<blocks, threadsPerBlock>>>(
        old_displacements,
        current_displacements,
        new_displacements,
        numberOfNodes,
        courant,
        left_boundary_value);
}
|
3,537 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda_runtime_api.h>
#define BASE_TYPE float
// Element-wise vector addition: result[i] = a[i] + b[i] for i in [0, N).
// BUGFIX: the original partitioned work using only threadIdx.x, so every
// block redundantly recomputed the whole array, and it required N to be an
// exact multiple of blockDim.x. A guarded grid-stride loop uses the global
// thread id, distributes work across blocks, and handles any N.
__global__ void add(BASE_TYPE *a, BASE_TYPE *b, BASE_TYPE *result, const int N)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = gridDim.x * blockDim.x;
    for (int i = tid; i < N; i += stride)
    {
        result[i] = a[i] + b[i];
    }
}
// Allocate (with new[]) and return an array of N pseudo-random values
// in [0, 100). Caller owns the memory.
BASE_TYPE* gen_array(const int N)
{
    BASE_TYPE *arr = new BASE_TYPE[N];
    for (int idx = 0; idx < N; ++idx)
        arr[idx] = rand() % 100;
    return arr;
}
// Print N values, each 3 columns wide with no decimals, on one line.
void print_array(const BASE_TYPE *a, const int N)
{
    for (int idx = 0; idx < N; ++idx)
        printf("%3.0f ", a[idx]);
    printf("\n");
}
// Allocate 'size' bytes on the device into *dev and, when 'host' is
// non-NULL, upload its contents. Return codes are not checked here.
void cuda_init_array(BASE_TYPE **dev, const BASE_TYPE *host, const size_t size)
{
    cudaMalloc((void **)dev, size);
    if (host != NULL)
        cudaMemcpy(*dev, host, size, cudaMemcpyHostToDevice);
}
// Driver: adds two random N-element vectors on the GPU and prints
// inputs and result.
int main()
{
    srand(time(NULL));
    const int N = 8;
    const size_t size = N * sizeof(BASE_TYPE);
    const int block_size = 2;
    // N/block_size blocks of block_size threads — one thread per element
    // overall.
    dim3 threadsPerBlock = dim3(block_size);
    dim3 blocksPerGrid = dim3(N / block_size);
    BASE_TYPE *host_a = gen_array(N), *host_b = gen_array(N), *host_c = new BASE_TYPE[N];
    BASE_TYPE *dev_a, *dev_b, *dev_c;
    print_array(host_a, N);
    print_array(host_b, N);
    // Allocate device buffers; dev_c gets no initial upload (NULL host).
    cuda_init_array(&dev_a, host_a, size);
    cuda_init_array(&dev_b, host_b, size);
    cuda_init_array(&dev_c, NULL, size);
    add<<<blocksPerGrid, threadsPerBlock>>>(dev_a, dev_b, dev_c, N);
    // Blocking copy back; also synchronizes with the kernel.
    cudaMemcpy(host_c, dev_c, size, cudaMemcpyDeviceToHost);
    print_array(host_c, N);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    delete[] host_a;
    delete[] host_b;
    delete[] host_c;
    return 0;
} |
3,538 | /*
* Compile with
* nvcc -arch=sm_11 bitonic_sort.cu
*/
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
/* Every thread gets exactly one value in the unsorted array. */
#define THREADS 512 // 2^9
#define BLOCKS 128 // 2^15
#define NUM_VALS THREADS*BLOCKS
// Convert a clock() tick interval into elapsed seconds.
double getElapsedTime(clock_t start, clock_t stop)
{
    double ticks = (double)(stop - start);
    return ticks / CLOCKS_PER_SEC;
}
// Populate array[0..length) with uniform pseudo-random floats in [0, 1],
// reseeding the generator from the wall clock.
void fillArray(float *array, int length)
{
    srand(time(NULL));
    for (int idx = 0; idx < length; ++idx)
        array[idx] = (float)rand() / (float)RAND_MAX;
}
// One compare-exchange pass of bitonic sort. Each thread owns index i and
// partner ixj = i XOR j; only the lower index of each pair performs the
// exchange. Bit k of i selects the direction (ascending when (i&k)==0), so
// after a full major step runs of length k are alternately sorted, forming
// the bitonic sequences consumed by the next step.
__global__ void bitonic_sort_step(float *dev_values, int j, int k)
{
    unsigned int i, ixj; /* Sorting partners: i and ixj */
    i = threadIdx.x + blockDim.x * blockIdx.x;
    ixj = i^j;
    /* The threads with the lowest ids sort the array. */
    if ((ixj)>i) {
        if ((i&k)==0) {
            /* Sort ascending */
            if (dev_values[i]>dev_values[ixj]) {
                /* exchange(i,ixj); */
                float temp = dev_values[i];
                dev_values[i] = dev_values[ixj];
                dev_values[ixj] = temp;
            }
        }
        if ((i&k)!=0) {
            /* Sort descending */
            if (dev_values[i]<dev_values[ixj]) {
                /* exchange(i,ixj); */
                float temp = dev_values[i];
                dev_values[i] = dev_values[ixj];
                dev_values[ixj] = temp;
            }
        }
    }
}
/**
 * In-place bitonic sort on the GPU.
 * Copies the NUM_VALS floats in 'values' to the device, runs the full
 * bitonic schedule of kernel launches, and copies the sorted data back.
 */
void gpuBitonicSort(float *values)
{
    size_t bytes = NUM_VALS * sizeof(float);
    float *d_vals;
    cudaMalloc((void**) &d_vals, bytes);
    cudaMemcpy(d_vals, values, bytes, cudaMemcpyHostToDevice);
    dim3 grid(BLOCKS, 1);    /* Number of blocks */
    dim3 block(THREADS, 1);  /* Threads per block */
    /* Outer loop doubles the sorted-run length each major step; the inner
       loop halves the compare-exchange distance each minor step. */
    for (int k = 2; k <= NUM_VALS; k <<= 1) {
        for (int j = k >> 1; j > 0; j >>= 1) {
            bitonic_sort_step<<<grid, block>>>(d_vals, j, k);
        }
    }
    cudaDeviceSynchronize();
    cudaMemcpy(values, d_vals, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_vals);
}
const int ASCENDING = 1;
const int DESCENDING = 0;
// Conditionally swap a[i] and a[j] so the pair respects 'dir'
// (dir==1 -> ascending, dir==0 -> descending).
void compare(float *a, int i, int j, int dir)
{
    int out_of_order = (a[i] > a[j]);
    if (dir == out_of_order) {
        float tmp = a[i];
        a[i] = a[j];
        a[j] = tmp;
    }
}
/* Recursively merge the bitonic sequence values[lo..lo+count) into fully
 * monotonic order ('direction': 1 ascending, 0 descending).
 * Requires count to be a power of two. */
void cpuBitonicMerge(float* values, int lo, int count, int direction)
{
    if (count > 1) {
        int k = count/2;
        /* Compare-exchange each element with its partner half-way along. */
        for (int i=lo; i<lo+k; i++)
            compare(values, i, i+k, direction);
        cpuBitonicMerge(values, lo, k, direction);
        cpuBitonicMerge(values, lo+k, k, direction);
    }
}
/* Reference CPU bitonic sort of values[lo..lo+count) (count must be a power
 * of two): sort the halves in opposite directions to create a bitonic
 * sequence, then merge it in the requested 'direction'. */
void cpuBitonicSort(float *values, int lo, int count, int direction)
{
    if (count > 1) {
        int k = count/2;
        cpuBitonicSort(values, lo, k, ASCENDING);
        cpuBitonicSort(values, lo + k, k, DESCENDING);
        cpuBitonicMerge(values, lo, count, direction);
    }
}
// qsort comparator for floats: returns -1, 0, or 1 as *elem1 is less than,
// equal to, or greater than *elem2.
int floatcomp(const void* elem1, const void* elem2)
{
    float lhs = *(const float*)elem1;
    float rhs = *(const float*)elem2;
    if (lhs < rhs)
        return -1;
    return lhs > rhs;
}
// Sorts the same random array with the GPU and CPU bitonic sorts, times
// both with clock(), and cross-checks the results element by element.
int main(void)
{
    clock_t start, stop;
    float *array = (float*) malloc( NUM_VALS * sizeof(float));
    float *cpuArray = (float*) malloc( NUM_VALS * sizeof(float));
    float *gpuArray = (float*) malloc( NUM_VALS * sizeof(float));
    fillArray(array, NUM_VALS);
    // Give both implementations identical input.
    for(int i = 0; i < NUM_VALS; i++) {
        cpuArray[i] = array[i];
        gpuArray[i] = array[i];
    }
    start = clock();
    gpuBitonicSort(gpuArray);
    stop = clock();
    printf("\nElapsed time on gpu: %.3fs\n", getElapsedTime(start, stop));
    start = clock();
    cpuBitonicSort(cpuArray, 0, NUM_VALS, ASCENDING);
    stop = clock();
    printf("\nElapsed time on cpu: %.3fs\n", getElapsedTime(start, stop));
    //std::qsort(array, NUM_VALS, sizeof(float), floatcomp);
    // Exact float equality is valid here: both sorts only permute values.
    for(int i = 0; i < NUM_VALS; i++) {
        if(gpuArray[i] != cpuArray[i]){
            printf("Error [%d] %f != %f\n", i, gpuArray[i], cpuArray[i]);
        }
    }
    free(array);
    free(cpuArray);
    free(gpuArray);
}
|
// Auction-algorithm bidding phase. Every still-unassigned person scans all
// items, tracking the best and second-best net value (data - price), and
// posts a bid for the best item. Bids are stored column-major per item in
// 'bids'/'bidders'; sbids[j] counts the bids item j received this round.
// 'data' is num_nodes x num_nodes with data[i + num_nodes*j] = person i's
// value for item j.
__global__ void run_bidding(
    const int num_nodes, float *data, int *person2item, float *bids, int *bidders, int *sbids, float *prices, float auction_eps){
    int i = blockDim.x * blockIdx.x + threadIdx.x; // person index
    if(i < num_nodes){
        // Only people without an item bid this round.
        if(person2item[i] == -1) {
            int fir_maxObj = -1;
            float fir_maxObjValue = 0;
            // -1000 is a sentinel runner-up value; presumably all real net
            // values exceed it — TODO confirm against the data range.
            float sec_maxObjValue = -1000;
            float temp_ObjValue = 0;
            // Seed with item 0, then keep best + runner-up over the rest.
            fir_maxObj = 0;
            fir_maxObjValue = data[i] - prices[0];
            for(int j = 1; j < num_nodes; j++){
                temp_ObjValue = data[i + num_nodes * j] - prices[j];
                if(temp_ObjValue > fir_maxObjValue){
                    // Old best becomes the runner-up.
                    sec_maxObjValue = fir_maxObjValue;
                    fir_maxObj = j;
                    fir_maxObjValue = temp_ObjValue;
                } else if(temp_ObjValue > sec_maxObjValue){
                    sec_maxObjValue = temp_ObjValue;
                }
            }
            // Bid: raw value of the best item minus the runner-up's net
            // value, plus the epsilon-scaling increment.
            float bid = data[i + num_nodes * fir_maxObj] - sec_maxObjValue + auction_eps;
            // Atomically reserve a slot in the chosen item's bid list.
            int idx = atomicAdd(sbids + fir_maxObj, 1);
            bids[idx + num_nodes * fir_maxObj] = bid;
            bidders[idx + num_nodes * fir_maxObj] = i;
        }
    }
}
// Auction-algorithm assignment phase. Each item with pending bids picks its
// highest bidder, evicts the item's previous owner (if any), raises the
// item's price to the winning bid, and records the new match.
// num_assigned is incremented only when an item is matched for the first
// time, so it tracks the total number of assigned items.
__global__ void run_assignment(
    const int num_nodes, int *person2item, int *item2person, float *bids, int *bidders, int *sbids, float *prices, int *num_assigned){
    int j = blockDim.x * blockIdx.x + threadIdx.x; // item index
    if(j < num_nodes) {
        int num_bidders = sbids[j];
        if(num_bidders != 0) {
            // Linear scan for the maximum bid this item received.
            float high_bid = bids[0 + num_nodes * j];
            int high_bidder = bidders[0 + num_nodes * j];
            float tmp_bid = -1.0;
            for(int i = 1; i < num_bidders; i++){
                tmp_bid = bids[i + num_nodes * j];
                if(tmp_bid > high_bid){
                    high_bid = tmp_bid;
                    high_bidder = bidders[i + num_nodes * j];
                }
            }
            // Evict the previous owner, or count a fresh assignment.
            int current_person = item2person[j];
            if(current_person >= 0){
                person2item[current_person] = -1;
            } else {
                atomicAdd(num_assigned, 1);
            }
            prices[j] = high_bid;
            person2item[high_bidder] = j;
            item2person[j] = high_bidder;
        }
    }
}
|
3,540 | /*
* CSE 5441 : Lab 4 part1
* Filename : biswas_rajarshi_part1.cu
* Author : Rajarshi Biswas (biswas.91@osu.edu)
* The Ohio State University.
*/
#include <iostream>
#include <cstdlib>
#include <time.h>
#include <stdio.h>
using namespace std;
#define SIZE 1024
#define THREADS_X 32
#define THREADS_Y 32
/*
 * The kernel function. Runs on the device.
 * Computes one element of d_C = d_A x d_B (row-major, square matrices).
 * d_A - Source matrix.
 * d_B - Source matrix.
 * d_C - Destination matrix.
 * dim - Dimension (matrices are dim x dim).
 */
__global__ void multiply_on_device (float *d_A, float* d_B, float *d_C, int dim) {
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;  // row
    int j = (blockIdx.y * blockDim.y) + threadIdx.y;  // column
    // Robustness fix: guard against grids that overshoot the matrix, so the
    // kernel is safe for any dim, not just multiples of the block dims.
    if (i >= dim || j >= dim)
        return;
    int index = i * dim + j;
    float sum = 0;
    for (int k = 0; k < dim; k++) {
        sum += d_A[i * dim+ k ] * d_B[ k * dim + j ];
    }
    d_C[index] = sum;
}
/*
 * Reference matrix multiplication on the host: h_C = h_A x h_B.
 * All matrices are SIZE x SIZE, row-major.
 * h_A - Source matrix.
 * h_B - Source matrix.
 * h_C - Destination matrix.
 */
void multiply_on_host(float *h_A, float *h_B, float *h_C) {
    for (int row = 0; row < SIZE; row++) {
        for (int col = 0; col < SIZE; col++) {
            float acc = 0;
            for (int k = 0; k < SIZE; k++)
                acc += h_A[row * SIZE + k] * h_B[k * SIZE + col];
            h_C[row * SIZE + col] = acc;
        }
    }
}
/*
 * Uniform random float in [a, b].
 * a - The lower bound.
 * b - The upper bound.
 */
float RandomFloat(float a, float b) {
    float t = ((float) rand()) / (float) RAND_MAX;  // uniform in [0, 1]
    return a + t * (b - a);
}
// Driver: times SIZE x SIZE matrix multiplication on the CPU and GPU
// (transfers included) and reports both durations.
int main() {
    struct timespec start, end;
    int number_of_elements = SIZE * SIZE;
    size_t memSize = SIZE * SIZE * sizeof(float);
    // Initialize the host memory.
    float* h_A = new float[number_of_elements];
    float* h_B = new float[number_of_elements];
    // Stores the result of serial computation.
    float* h_C1 = new float[number_of_elements];
    // Stores the result of CUDA computation.
    float* h_C2 = new float[number_of_elements];
    // Fill A with random values in [1, 2].
    for (int i = 0; i < SIZE; i++) {
        for (int j = 0; j < SIZE; j++) {
            h_A[i * SIZE + j] = RandomFloat(1.0, 2.0);
        }
    }
    // B = transpose(A).
    for (int i = 0; i < SIZE; i++) {
        for (int j = 0; j < SIZE; j++) {
            h_B[j * SIZE + i] = h_A[i * SIZE + j];
        }
    }
    cout << "\n**************************************************\n";
    // Time the serial version.
    clock_gettime(CLOCK_REALTIME,& start);
    multiply_on_host(h_A, h_B, h_C1);
    clock_gettime(CLOCK_REALTIME,& end);
    double time_taken_serial = ((double)end.tv_sec + 1.0e-9*end.tv_nsec) -
        ((double)start.tv_sec + 1.0e-9*start.tv_nsec);
    // Initialize the device memory.
    float* d_A;
    float* d_B;
    float* d_C;
    cudaMalloc( (void**) &d_A, memSize);
    cudaMalloc( (void**) &d_B, memSize);
    cudaMalloc( (void**) &d_C, memSize);
    dim3 threadsPerBlock(THREADS_X, THREADS_Y);
    dim3 blocksPerGrid(SIZE/THREADS_X, SIZE/THREADS_Y);
    // Time the CUDA version, including host<->device transfers.
    clock_gettime(CLOCK_REALTIME,& start);
    cudaMemcpy(d_A, h_A, memSize, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, memSize, cudaMemcpyHostToDevice);
    multiply_on_device <<< blocksPerGrid, threadsPerBlock >>> (d_A, d_B, d_C, SIZE);
    // Blocking copy back also synchronizes with the kernel.
    cudaMemcpy(h_C2, d_C, memSize, cudaMemcpyDeviceToHost);
    clock_gettime(CLOCK_REALTIME,& end);
    double time_taken_cuda = ((double)end.tv_sec + 1.0e-9*end.tv_nsec) -
        ((double)start.tv_sec + 1.0e-9*start.tv_nsec);
    // Print the result
    cout << "Size of the matrix: "<< SIZE << " x " << SIZE <<"\n";
    cout << "Time taken by the serial version: " << time_taken_serial << " seconds\n";
    cout << "Time taken by the CUDA version: " << time_taken_cuda << " seconds\n";
    cout << "\n**************************************************\n";
    // BUGFIX: memory obtained with operator new[] must be released with
    // delete[]; the original used free(), which is undefined behavior.
    delete[] h_A;
    delete[] h_B;
    delete[] h_C1;
    delete[] h_C2;
    // Free device memory.
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    return 0;
}
|
3,541 | /***************************************************************************//**
* \file intermediateVelocity.cu
* \author Christopher Minar (minarc@oregonstate.edu)
* \brief kernels to generate the right hand side for the initial velocity solve
*/
#include "intermediateVelocity.h"
/**
* \namespace kernels
* \brief Contains all the custom-written CUDA kernels.
*/
namespace kernels
{
// Zero 'value' at every node whose ghost tag is 0 (i.e. inside the body);
// all other nodes keep their value. Written branch-free by multiplying with
// the boolean comparison result.
__global__
void zeroInside(int *ghostTags, double *value, int points)
{
    if (threadIdx.x + (blockDim.x * blockIdx.x) >= points)
        return;
    int i = threadIdx.x + (blockDim.x * blockIdx.x);
    // if not inside
    value[i] = (ghostTags[i] != 0) * value[i];
}
// Original author's caveat: dx and dy must be equal and uniform at the
// boundary for the inside-forcing term to work.
// Y-component RHS update: at hybrid-tagged v-nodes the RHS is replaced by a
// linear distance-ratio weight times the boundary velocity; untagged,
// non-ghost nodes keep their RHS. The +(nx-1)*ny index offset presumably
// skips the u-velocity block of the flattened staggered-grid arrays —
// TODO confirm against the array layout.
__global__
void updateRHS1_luo_Y(int *hybridTagsUV, int *ghostTagsUV, double *rhs, double *distance_from_intersection_to_node, double *distance_between_nodes_at_IB, double *uv, int nx, int ny)
{
    if (threadIdx.x + (blockDim.x * blockIdx.x) >= nx*(ny-1))
        return;
    int i = threadIdx.x + (blockDim.x * blockIdx.x) + (nx-1)*ny;
    // if not outtag & if not in tag: keep rhs; if hybrid tag: interpolate
    rhs[i] = (hybridTagsUV[i]==-1) * (ghostTagsUV[i]<=0) * (rhs[i]) + (hybridTagsUV[i]!=-1) * distance_between_nodes_at_IB[i]/(distance_from_intersection_to_node[i]+distance_between_nodes_at_IB[i]) * uv[i];
}
// X-component RHS update: currently only zeroes the RHS at ghost-tagged
// u-nodes; the hybrid-node interpolation is left commented out.
__global__
void updateRHS1_luo_X(int *hybridTagsUV, int *ghostTagsUV, double *rhs, double *distance_from_intersection_to_node, double *distance_between_nodes_at_IB, double *uv, int nx, int ny)
{
    if (threadIdx.x + (blockDim.x * blockIdx.x) >= (nx-1)*ny)
        return;
    int i = threadIdx.x + (blockDim.x * blockIdx.x);
    // if not outtag & if not in tag rhs if out tag outside interpolation //flag inside interpolation?
    //rhs[i] = (hybridTagsUV[i]==-1) * (ghostTagsUV[i]<=0) * (rhs[i]) + (hybridTagsUV[i]!=-1) * distance_between_nodes_at_IB[i]/(distance_from_intersection_to_node[i]+distance_between_nodes_at_IB[i]) * uv[i];
    rhs[i] = (ghostTagsUV[i] == -1) * rhs[i];
}
} // end of namespace kernels
|
3,542 | #include "includes.h"
// Produces "stretched" copies of convolutional filters (weight deformation).
// The filter set is split into 4 equal stages along the filter index:
// stage 0 is copied unchanged; stages 1-3 are bilinearly resampled about the
// filter center with fixed scale factors 0.65 / 0.8 / 1.3 (reciprocals when
// 'reverse' is set). Each thread processes one kernel_size x kernel_size
// filter located at flat offset i in the weight buffers.
__global__ void stretch_weights_kernel(const float *src_weight_gpu, float *weight_deform_gpu, int nweights, int n, int kernel_size, float scale, int reverse)
{
    const int index = blockIdx.x*blockDim.x + threadIdx.x;
    const int kernel_area = kernel_size * kernel_size;
    const int i = index * kernel_area;                    // flat offset of this thread's filter
    const int stage_step = (nweights / kernel_area) / 4;  // filters per stage (4 stages)
    const int stage_id = index / stage_step;
    // nweights = (c / groups) * n * size * size;
    // kernel_area = size*size
    if (i < nweights)
    {
        if (stage_id == 0) {
            // simple copy
            for (int x = 0; x < kernel_size; ++x) {
                for (int y = 0; y < kernel_size; ++y) {
                    weight_deform_gpu[x + y*kernel_size + i] = src_weight_gpu[x + y*kernel_size + i];
                }
            }
        }
        else if (stage_id > 0)
        {
            // Per-stage fixed scale overrides the 'scale' argument.
            if (stage_id == 1) scale = 0.65;
            else if (stage_id == 2) scale = 0.8;
            else if (stage_id == 3) scale = 1.3;
            if (reverse) scale = 1 / scale;
            // Filter center used as the fixed point of the stretch.
            const int x_c = kernel_size / 2;
            const int y_c = kernel_size / 2;
            // Accumulated coefficient mass of samples falling outside the
            // filter (computed but currently unused — the 'coef'
            // compensation below is commented out).
            float dropout_sum = 0;
            for (int y = 0; y < kernel_size; ++y) {
                for (int x = 0; x < kernel_size; ++x) {
                    // Inverse mapping of destination tap to source position:
                    // Xsource = x_c + (x_d - x_c) / scale
                    // Ysource = y_c + (y_d - y_c) / scale
                    float x_s = x_c + (x - x_c) / scale;
                    float y_s = y_c + (y - y_c) / scale;
                    int x_0 = floor(x_s); // round down
                    int x_1 = ceil(x_s); // round up
                    if (x_0 == x_1) x_1 = x_0 + 1;
                    int y_0 = floor(y_s);
                    int y_1 = ceil(y_s);
                    if (y_0 == y_1) y_1 = y_0 + 1;
                    // Bilinear interpolation coefficients.
                    float c_x_0 = x_1 - x_s;
                    float c_x_1 = x_s - x_0;
                    float c_y_0 = y_1 - y_s;
                    float c_y_1 = y_s - y_0;
                    float val = 0;
                    // Each of the 4 neighbours contributes when inside the
                    // filter; otherwise its coefficient counts as dropout.
                    if (x_0 >= 0 && x_0 < kernel_size && y_0 >= 0 && y_0 < kernel_size) val += src_weight_gpu[x_0 + y_0*kernel_size + i] * c_x_0 * c_y_0;
                    else dropout_sum += c_x_0 * c_y_0;
                    if (x_1 >= 0 && x_1 < kernel_size && y_0 >= 0 && y_0 < kernel_size) val += src_weight_gpu[x_1 + y_0*kernel_size + i] * c_x_1 * c_y_0;
                    else dropout_sum += c_x_1 * c_y_0;
                    if (x_0 >= 0 && x_0 < kernel_size && y_1 >= 0 && y_1 < kernel_size) val += src_weight_gpu[x_0 + y_1*kernel_size + i] * c_x_0 * c_y_1;
                    else dropout_sum += c_x_0 * c_y_1;
                    if (x_1 >= 0 && x_1 < kernel_size && y_1 >= 0 && y_1 < kernel_size) val += src_weight_gpu[x_1 + y_1*kernel_size + i] * c_x_1 * c_y_1;
                    else dropout_sum += c_x_1 * c_y_1;
                    weight_deform_gpu[x + y*kernel_size + i] = val;
                }
            }
            // compensate for dropped items
            //const float coef = (kernel_size*kernel_size) / (kernel_size*kernel_size - dropout_sum);
            for (int y = 0; y < kernel_size; ++y) {
                for (int x = 0; x < kernel_size; ++x) {
                    //if (scale < 1) weight_deform_gpu[x + y*kernel_size + i] /= scale;// *= coef;
                    weight_deform_gpu[x + y*kernel_size + i] /= scale;// *= coef;
                }
            }
        }
    }
} |
3,543 | #include <bits/stdc++.h>
using namespace std;
#define THREADS_PER_BLOCK 1024//1024
// Fill a rows x cols matrix (row-major) with the constant 2.
void initData(int* M, int rows, int cols){
    int total = rows * cols;
    for (int k = 0; k < total; k++)
        M[k] = 2;
}
// Print a rows x cols matrix (row-major): one row per line, values
// separated by spaces, followed by a trailing blank line.
void displayData(int *M, int rows, int cols){
    for (int r = 0; r < rows; r++){
        for (int c = 0; c < cols; c++){
            std::cout << M[cols * r + c] << " ";
        }
        std::cout << std::endl;
    }
    std::cout << std::endl;
}
// Element-wise integer addition r = a + b over 'tam' elements,
// one element per thread, with a tail guard.
__global__ void sum(int *a, int *b, int *r, int tam) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= tam)
        return;
    r[idx] = a[idx] + b[idx];
}
// Adds two rows x cols int matrices on the GPU: R = A + B.
void matrixAdd(int *A, int *B, int* R, int rows, int cols){
    int total = rows * cols;
    int bytes = total * sizeof(int);
    int *d_A, *d_B, *d_R;
    // Device buffers for operands and result.
    cudaMalloc((void **) &d_A, bytes);
    cudaMalloc((void **) &d_B, bytes);
    cudaMalloc((void **) &d_R, bytes);
    // Ship operands to the device.
    cudaMemcpy(d_A, A, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, bytes, cudaMemcpyHostToDevice);
    // One thread per element, rounded up to whole blocks.
    int blocks = (total + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    sum<<<blocks, THREADS_PER_BLOCK>>>(d_A, d_B, d_R, total);
    // Blocking copy back doubles as kernel synchronization.
    cudaMemcpy(R, d_R, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_B);
    cudaFree(d_A);
    cudaFree(d_R);
}
// Driver: builds two 10x10 matrices of 2s, adds them on the GPU and prints
// the result matrix.
int run(){
    int rows = 10;
    int cols = 10;
    int nElem = rows * cols;
    int * A = (int *) malloc(nElem * sizeof(int));
    int * B = (int *) malloc(nElem * sizeof(int));
    int * R = (int *) malloc(nElem * sizeof(int));
    initData(B, rows, cols);
    initData(A, rows, cols);
    matrixAdd(A, B, R, rows, cols);
    //displayData(A, rows, cols);
    //displayData(B, rows, cols);
    displayData(R, rows, cols);
    free(A); free(B); free(R);
    // BUGFIX: run() is declared int but the original fell off the end
    // without returning a value — undefined behavior in C++.
    return 0;
}
// Entry point: delegates all work to run().
int main(){
    run();
    return 0;
}
|
3,544 | #include "includes.h"
namespace ann {
// Placeholder namespace (labelled "CUDA2" by the author); no declarations.
}
// Feed-forward pass for one fully-connected layer with sigmoid activation.
//   l[]  : neuron count per layer
//   s[]  : per-layer offsets into z_arr / a_arr (pre-activations / activations)
//   sw[] : per-layer offsets into w_arr (weights)
// One thread computes one neuron of layer 'layer_id'. The last neuron is
// skipped (idx >= neuron_count-1) — presumably a bias unit whose activation
// is maintained elsewhere; TODO confirm.
__global__ void kernel_feedforward( int layer_id, int *l, int *s, int *sw, float *z_arr, float *a_arr, float *w_arr ){
    volatile int idx = threadIdx.x + blockDim.x*blockIdx.x;
    int neuron_count = l[layer_id];
    int neuron_count_prev = l[layer_id-1];
    if(idx >= neuron_count-1) return;
    // Weighted sum over the previous layer's activations; the weight slab
    // is laid out with stride (neuron_count - 1) per source neuron.
    float z = 0;
    for(int k = 0; k < neuron_count_prev; k++){
        z += w_arr[sw[layer_id-1] + k*(neuron_count - 1) + idx]*a_arr[s[layer_id-1] + k];
    }
    z_arr[s[layer_id] + idx] = z;
    // Logistic sigmoid activation.
    float a = 1.0 / (1.0 + expf(-z));
    a_arr[s[layer_id] + idx] = a;
} |
3,545 | #include "includes.h"
// Records, for every thread of the launch, the SM clock at execution time
// in completionTimes[] and a packed id (threadIdx.y in bits 4+, threadIdx.x
// in the low 4 bits — assumes blockDim.x <= 16) in threadIDs[].
// Buffers must hold gridSize * blockSize unsigned ints.
// BUGFIX: the block index must be linearized with gridDim (blocks per grid
// dimension), not blockDim; the original mixed them, producing wrong or
// colliding global indices whenever gridDim != blockDim.
__global__ void WriteClockValues( unsigned int *completionTimes, unsigned int *threadIDs )
{
    size_t globalBlock = blockIdx.x+gridDim.x*(blockIdx.y+gridDim.y*blockIdx.z);
    size_t globalThread = threadIdx.x+blockDim.x*(threadIdx.y+blockDim.y*threadIdx.z);
    size_t totalBlockSize = blockDim.x*blockDim.y*blockDim.z;
    size_t globalIndex = globalBlock*totalBlockSize + globalThread;
    completionTimes[globalIndex] = clock();
    threadIDs[globalIndex] = threadIdx.y<<4|threadIdx.x;
} |
3,546 | #include <iostream>
#include <math.h>
#include <cstdint>
#include <time.h>
#include <cstdio>
#include <stdio.h>
#include <stdlib.h>
// CPU max-reduction: stores max(arr[0..n)) into arr[0].
// Note: the running max starts at -1, matching the GPU variant (inputs are
// expected to be non-negative in this program).
void cpuProcess(int n, double *arr){
    double best = -1;
    for (int idx = 0; idx < n; ++idx)
        best = (arr[idx] > best) ? arr[idx] : best;
    arr[0] = best;
}
// Strided partial max-reduction: each thread scans arr[index],
// arr[index+stride], ... and writes its partial max back to arr[index].
// The caller shrinks the active prefix between launches until arr[0]
// holds the global max. Starts from -1, so inputs are assumed >= -1.
__global__ void gpuProcess(int n, double *arr){
    double localMax = -1;
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = index; i < n; i += stride){
        if (arr[i] > localMax) localMax = arr[i];
    }
    // BUGFIX: guard the write-back — the original stored unconditionally,
    // an out-of-bounds write whenever more threads than elements are
    // launched (and it would clobber arr with -1 for empty slices).
    if (index < n)
        arr[index] = localMax;
}
// Compares a CPU and an iterative GPU max-reduction over N million random
// doubles and reports timings. argv[1]: problem size in millions.
int main(int argc, char *argv[]){
    // BUGFIX: guard against a missing size argument (the original
    // dereferenced argv[1] unconditionally).
    if (argc < 2) {
        fprintf(stderr, "usage: %s <millions of elements>\n", argv[0]);
        return 1;
    }
    clock_t start, diff;
    int N = atoi(argv[1]) * 1000000;
    double *h_arr = new double[N];
    for (int i = 0; i < N; i++) {
        double r = rand()/1000000.0;
        h_arr[i] = r;
    }
    start = clock();
    cpuProcess(N, h_arr);
    diff = (clock() - start) * 1000 / CLOCKS_PER_SEC;
    std::cout << "CPU MAX: " << h_arr[0] << std::endl;
    // BUGFIX: clock_t is not int; %d with a clock_t argument is undefined
    // behavior — cast to long and print with %ld.
    printf("Time taken for cpu: %ld milliseconds\n\n", (long)diff);
    start = clock();
    double *d_arr;
    cudaMalloc(&d_arr, sizeof(double)*N);
    cudaMemcpy(d_arr, h_arr, N*sizeof(double), cudaMemcpyHostToDevice);
    diff = (clock() - start) * 1000 / CLOCKS_PER_SEC;
    printf("Time taken to copy arr to device: %ld milliseconds\n", (long)diff);
    int numThreads = N;
    int threadsPerBlock = 256;
    start = clock();
    // Each pass shrinks the array by ~16x until one partial max remains
    // at arr[0].
    do {
        numThreads = N/16;
        if (numThreads == 0) numThreads = 1;
        if (numThreads < threadsPerBlock) threadsPerBlock = numThreads;
        int numBlocks = (numThreads + threadsPerBlock - 1)/threadsPerBlock;
        gpuProcess<<<numBlocks, threadsPerBlock>>>(N, d_arr);
        std::cout << "Launching " << numThreads << " threads: " << numBlocks << " blocks and " << threadsPerBlock << " threads/block" << std::endl;
        cudaDeviceSynchronize();
        N = numBlocks * threadsPerBlock;
    } while(numThreads > 1);
    cudaMemcpy(h_arr, d_arr, 1*sizeof(double), cudaMemcpyDeviceToHost);
    std::cout << "GPU MAX: " << h_arr[0] << std::endl;
    diff = (clock() - start) * 1000 / CLOCKS_PER_SEC;
    printf("Time taken for gpu: %ld milliseconds\n", (long)diff);
    // Free memory
    cudaFree(d_arr);
    delete[] h_arr;  // BUGFIX: the host buffer was never released
    return 0;
} |
3,547 | #include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include<stdlib.h>
#include<stdio.h>
#define min(a,b) (a<b?a:b)
const int threadsPerBlock = 256;
int blocksPerGrid = 32;
// Two-phase sum reduction. Phase 1: each thread accumulates a grid-strided
// slice of data[0..N) into a register. Phase 2: the block tree-reduces the
// per-thread partials in shared memory and thread 0 writes the block total
// to output[blockIdx.x]; the final sum over blocks is done by the caller.
__global__ void reduce(float *data, float *output, int N){
    __shared__ float scratch[threadsPerBlock];
    int global_index = threadIdx.x + blockIdx.x * blockDim.x;
    int local_index = threadIdx.x;
    //if(global_index >= N) return;
    float soma_bloco = 0;
    while (global_index < N) {
        soma_bloco += data[global_index];
        global_index += blockDim.x * gridDim.x;
    }
    // Threads whose slice is empty store 0, so every scratch slot is valid.
    scratch[local_index] = soma_bloco;
    __syncthreads();
    // Parallel tree reduction over the block's partials.
    // NOTE(review): the extra 'local_index+i < N' test compares a shared-
    // memory index with the global element count; it is harmless here
    // because unused scratch slots hold 0, but the conventional guard is
    // just 'local_index < i' — confirm before reusing this kernel.
    int i = blockDim.x/2;
    while (i != 0) {
        if (local_index < i && local_index+i < N )
            scratch[local_index] += scratch[local_index + i];
        __syncthreads();
        i /= 2;
    }
    if (local_index == 0)
        output[blockIdx.x] = scratch[0];
}
// Sequential reference sum of x[0..tamanho).
float soma_seq(float *x, int tamanho){
    float total = 0;
    for (int k = 0; k < tamanho; ++k)
        total += x[k];
    return total;
}
// Driver: sums N ones on the GPU (block partials + host second stage),
// times the whole transfer+reduce sequence with CUDA events, and validates
// against the sequential sum. argv[1]: number of elements.
int main(int argc, char * argv[])
{
    const int N = atoi(argv[1]);
    blocksPerGrid = min(32, (N+threadsPerBlock-1) / threadsPerBlock);
    float *a, *b;
    int size = N;
    float *dev_a, *dev_b; int *dev_size;
    a = (float*)malloc(N*sizeof(float));
    b = (float*)malloc(N*sizeof(float));
    cudaMalloc( (void**)&dev_a, N * sizeof(float) );
    cudaMalloc( (void**)&dev_b, N * sizeof(float) );
    cudaMalloc( (void**)&dev_size, sizeof(int) );
    for (int i=0; i<N; i++ ){
        a[i]= 1.0f; b[i] = 0;
    }
    // CPU reference value.
    float valor_seq = soma_seq(a, N);
    float time;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    cudaMemcpy (dev_a,a, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy (dev_b,b, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy (dev_size, &size, 1*sizeof(int), cudaMemcpyHostToDevice);
    // First reduction stage: one partial sum per block.
    reduce<<<blocksPerGrid, threadsPerBlock>>>(dev_a, dev_b, size);
    cudaMemcpy(b, dev_b, blocksPerGrid*sizeof(float), cudaMemcpyDeviceToHost);
    float soma = 0;
    // Second reduction stage on the host.
    for (int i=0; i<blocksPerGrid; i++) {
        soma += b[i];
    }
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("%.10f\n", time);
    // Exact float comparison is acceptable here because every input is
    // 1.0f, but it is fragile for general data.
    if(soma != valor_seq)
        printf("Soma incorreta\n");
    //printf("Soma: %f\n", soma);
    // BUGFIX: release everything that was allocated — dev_size and both
    // host buffers leaked in the original, and the events were never
    // destroyed.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_size);
    free(a);
    free(b);
    return 0;
}
|
3,548 | #include "memory.h"
#define USE_CUDA_MEMALLOC 0
#define USE_WC 1
// Allocate the host-side input (3*imgWidthF*imgHeightF bytes, RGB) and
// output (3*imgWidth*imgHeight bytes) buffers. Compile-time switches select
// plain malloc (USE_CUDA_MEMALLOC == 0) or pinned memory via cudaHostAlloc,
// optionally write-combined for the input (USE_WC).
// NOTE(review): cuda_ret is assigned but never checked — allocation
// failures go unnoticed; consider propagating the status.
void memalloc_host(int imgWidthF, int imgHeightF, unsigned char **input,
int imgWidth, int imgHeight, unsigned char **output)
{
    cudaError_t cuda_ret;
#if USE_CUDA_MEMALLOC == 0
    // Pageable host memory.
    *input = (unsigned char *)malloc(3*imgWidthF*imgHeightF*sizeof(unsigned char));
    *output = (unsigned char *)malloc(3*imgWidth*imgHeight*sizeof(unsigned char));
#else
#if USE_WC == 0
    cuda_ret = cudaHostAlloc(input, 3*imgWidthF*imgHeightF*sizeof(unsigned char), cudaHostAllocDefault);
#else
    // Write-combined pinned memory: faster H2D transfer, slow host reads.
    cuda_ret = cudaHostAlloc(input, 3*imgWidthF*imgHeightF*sizeof(unsigned char), cudaHostAllocWriteCombined);
#endif
    cuda_ret = cudaHostAlloc(output, 3*imgWidth*imgHeight*sizeof(unsigned char), cudaHostAllocDefault);
#endif
}
// Release the host buffers created by memalloc_host.
// BUGFIX (pinned path): memory obtained via cudaHostAlloc must be released
// with cudaFreeHost on the stored pointer. The original called cudaFree on
// the address of the pointer variable itself (a host stack address) with
// the wrong API, which both leaks the pinned allocation and passes an
// invalid pointer to the runtime.
void memfree_host(unsigned char **input, unsigned char **output)
{
    cudaError_t cuda_ret;
#if USE_CUDA_MEMALLOC == 0
    free(*input);
    free(*output);
#else
    cuda_ret = cudaFreeHost(*input);
    cuda_ret = cudaFreeHost(*output);
#endif
} |
3,549 | /*
# compile
$ nvcc -o matrix_dot_product matrix_dot_product.cu
# numpy counterpart
import numpy as np
m1 = np.array(((0, 1, 2), (3, 4, 5), (6, 7, 8)))
m2 = np.array(((3, 4), (5, 6), (7, 8)))
m1.dot(m2)
*/
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
// Kernel: dense product of two row-major matrices, output = m1 x m2.
//   m1: m1_rows x m1_columns, m2: m1_columns x m2_columns
//   nThreads = m1_rows * m2_columns, one output element per logical thread;
//   a grid-stride loop lets any launch configuration cover all elements.
__global__
void kDot(const int nThreads, const float *m1, const float *m2, float *output, const int m1_rows, const int m1_columns, const int m2_columns){
    int step = blockDim.x * gridDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x;
         idx < nThreads;
         idx += step)
    {
        int row = idx / m2_columns;  // output row
        int col = idx % m2_columns;  // output column
        // Dot product of m1's row with m2's column.
        float acc = 0.f;
        for (int k = 0; k < m1_columns; ++k)
            acc += m1[row * m1_columns + k] * m2[k * m2_columns + col];
        output[idx] = acc;
    }
}
// product of two matrices: m1 x m2
// output is m1_rows x m2_columns
// Host wrapper: launches kDot with m1_rows blocks of m2_columns threads
// (one logical thread per output element) and blocks until completion.
// NOTE(review): assumes m2_columns fits in a single block (<= 1024 threads)
// — confirm for larger matrices.
void dDot(const float *m1, const float *m2, float *output, const int m1_rows , const int m1_columns, const int m2_columns ){
    kDot <<< m1_rows, m2_columns >>> (m1_rows * m2_columns, m1, m2, output, m1_rows , m1_columns, m2_columns );
    cudaDeviceSynchronize();
}
// Driver: multiplies a 3x3 matrix (values 0..8) by a 3x2 matrix on the GPU
// and prints the 3x2 product — mirroring the numpy example in the file
// header comment.
int main(void)
{
    // host initialization
    const int M1_SIZE = 9; // 3x3 matrix
    const int M1_BYTES = M1_SIZE * sizeof(float);
    const int M2_SIZE = 6; // 3x2 matrix
    const int M2_BYTES = M2_SIZE * sizeof(float);
    const int PRODUCT_SIZE = 6;
    const int PRODUCT_BYTES = PRODUCT_SIZE * sizeof(float);
    float h_m1[M1_SIZE];
    for (int i = 0; i < M1_SIZE; i++)
    {
        h_m1[i] = float(i); // 0, 1, .. 8
    }
    float h_m2[M2_SIZE] = {3., 4., 5., 6., 7., 8.};
    float h_out[PRODUCT_SIZE];
    // Device buffers.
    float *d_m1, *d_m2;
    float *d_out;
    cudaMalloc((void**) &d_m1, M1_BYTES);
    cudaMalloc((void**) &d_m2, M2_BYTES);
    cudaMalloc((void**) &d_out, PRODUCT_BYTES);
    cudaMemcpy(d_m1, h_m1, M1_BYTES, cudaMemcpyHostToDevice);
    cudaMemcpy(d_m2, h_m2, M2_BYTES, cudaMemcpyHostToDevice);
    // Multiply on the device (dDot synchronizes internally).
    dDot(d_m1, d_m2, d_out, 3, 3, 2);
    cudaMemcpy(h_out, d_out, PRODUCT_BYTES, cudaMemcpyDeviceToHost);
    // print result
    for (int i = 0; i < PRODUCT_SIZE; i++)
    {
        printf("h_out[%d] = %f\n", i, h_out[i]);
    }
    // free device memory (h_m1/h_m2/h_out are stack arrays — nothing to
    // free on the host)
    cudaFree(d_m1);
    cudaFree(d_m2);
    cudaFree(d_out);
} |
3,550 | #include <stdio.h>
#include <iostream>
#include <math.h>
// STREAM triad: z[i] = scalar * x[i] + y[i] over n elements,
// using a grid-stride loop so any launch size covers the array.
__global__
void stream_triad(int n, int scalar, float *x, float *y, float *z)
{
    int stride = blockDim.x * gridDim.x;
    int first = blockIdx.x * blockDim.x + threadIdx.x;
    for (int i = first; i < n; i += stride)
        z[i] = scalar * x[i] + y[i];
}
// Driver: runs the STREAM triad on 1M unified-memory floats and appends
// the max error (expected 0 against the analytic value 5.0f) to a file.
int main(int argc, char **argv)
{
    int N = 1<<20;
    int scalar = 3;
    float *x, *y, *z;
    // Allocate Unified Memory -- accessible from CPU or GPU
    cudaMallocManaged(&x, N*sizeof(float));
    cudaMallocManaged(&y, N*sizeof(float));
    cudaMallocManaged(&z, N*sizeof(float));
    // initialize x and y arrays on the host
    for (int i = 0; i < N; i++) {
        x[i] = 1.0f;
        y[i] = 2.0f;
        z[i] = 0.0f;
    }
    // Launch kernel on 1M elements on the GPU
    int blockSize = 256;
    int numBlocks = (N + blockSize - 1) / blockSize;
    stream_triad<<<numBlocks, blockSize>>>(N, scalar, x, y, z);
    // Wait for GPU to finish before accessing on host
    cudaDeviceSynchronize();
    // Check for errors (all values should be 5.0f)
    float maxError = 0.0f;
    for (int i = 0; i < N; i++)
        maxError = fmax(maxError, fabs(z[i]-5.0f));
    // BUGFIX: fopen can fail (permissions, read-only filesystem); the
    // original dereferenced the handle unconditionally.
    FILE *fp = fopen("result_stream_triad.txt","a+");
    if (fp != NULL) {
        fprintf(fp, "StreamTriad: Max error = %f\n", maxError);
        fclose(fp);
    } else {
        fprintf(stderr, "StreamTriad: could not open result file\n");
    }
    // Free memory
    cudaFree(x);
    cudaFree(y);
    cudaFree(z);
    return 0;
}
|
3,551 | #include <cuda_runtime.h>
// No-op kernel; launching it forces module load / device code initialization.
__global__ void empty() {}
// Warm up the GPU before benchmarking: cudaFree(0) forces CUDA context
// creation, the empty<<<1,1>>> launch triggers module load, and the final
// synchronize waits both out so later measurements exclude startup cost.
void benchmark_gpu_init() {
    cudaFree(0);
    empty<<<1,1>>>();
    cudaDeviceSynchronize();
}
|
3,552 | #include "includes.h"
// One step of a naive in-place prefix-sum: every element at distance
// >= 2^d adds the value 2^d positions back.
__global__ void add(int* in, int d, int n){
    int gid = threadIdx.x + blockIdx.x * blockDim.x;
    if (gid >= n)
        return;
    // 1<<d equals the original (d==0 ? 1 : 2<<(d-1)) for all d >= 0.
    int offset = 1 << d;
    if (gid >= offset) {
        in[gid] += in[gid - offset];
    }
} |
3,553 | /*
nvcc flagg.cu -o flagg
file = open("noise.bin", "wb");
file.write(np.random.normal(0.,10.,2000000,dtype=np.float32));
file.close()
*/
#include <iostream>
#include <math.h>
#define NCHANS 1000 // # of channels -- assume no more than 2048 channels for now, see blinest call in main{}
#define NANTS 2000 // # of antennas
#define NFILT 101 // filter size -- need to be odd
// Exchange the contents of two floats (device helper for the sorts below).
__device__
void swap(float *p,float *q) {
    const float held = *p;
    *p = *q;
    *q = held;
}
/*median value for NCHANS-long array*/
// Median of a[0..NCHANS-1]: copies the spectrum into a thread-local buffer
// and bubble-sorts it (O(NCHANS^2) compares); `a` itself is not modified.
// Returns element (NCHANS+1)/2 - 1, i.e. the lower middle for even NCHANS.
// NOTE(review): tmp holds NCHANS (=1000) floats of per-thread local memory;
// with one thread per antenna (see flagg) this is slow but functional.
__device__
float medchans(float *a) {
int i,j;
float tmp[NCHANS] = {0};
for (i = 0; i < NCHANS; i++)
tmp[i] = a[i];
// Bubble sort the working copy.
for(i = 0; i < NCHANS-1; i++) {
for(j = 0; j < NCHANS-i-1; j++) {
if(tmp[j] > tmp[j+1])
swap(&tmp[j],&tmp[j+1]);
}
}
return tmp[(int)((NCHANS+1)/2-1)];
}
/*computes MAD for one spectrum*/
// Median absolute deviation of one NCHANS-long spectrum, scaled by 1.4826
// so it estimates the standard deviation of Gaussian-distributed channels.
// NOTE(review): abs() on a float -- fabsf would be unambiguous; confirm this
// resolves to a floating-point overload under nvcc rather than int abs.
__device__
float mad(float *a) {
int i;
float med;
float dev[NCHANS]; // per-thread local buffer of absolute deviations
med = medchans(a);
for (i = 0; i < NCHANS; i++)
dev[i] = abs(a[i]-med);
return 1.4826*medchans(dev);
}
/*median value for NFILT-long array + manages edge effects*/
// Median of the NFILT-wide window of a[0..n) centered at idx, for the running
// median filter in blinest. Near the edges the window is padded so it always
// holds NFILT values: with copies of a[0] at the left edge and copies of
// a[n-1] at the right edge. The window copy is bubble-sorted; `a` is intact.
__device__
float medval(float *a, int idx, int n) {
int i,j;
float tmp[NFILT] = {0};
// Left edge: copy a[0..idx+(NFILT-1)/2] and pad the rest with a[0].
if (idx < (NFILT-1)/2+1) {
for (i=idx+(NFILT-1)/2+1; i<NFILT; i++)
tmp[i] = a[0];
for (i = 0; i<idx+(NFILT-1)/2+1; i++)
tmp[i] = a[i];
}
// Right edge: copy the tail of `a` and pad the rest with a[n-1].
else if(idx > n - ((NFILT-1)/2+1)) {
for (i=n-(idx-(NFILT-1)/2); i<NFILT; i++)
tmp[i] = a[n-1];
for (i = idx-(NFILT-1)/2; i<n; i++)
tmp[i-(idx-(NFILT-1)/2)] = a[i];
}
// Interior: plain centered window.
else{
for (i = idx-(NFILT-1)/2; i<idx+(NFILT-1)/2+1; i++)
tmp[i-(idx-(NFILT-1)/2)] = a[i];
}
// Bubble sort the window, then take the middle element (NFILT is odd).
for(i = 0; i<NFILT-1;i++) {
for(j = 0;j < NFILT-i-1;j++) {
if(tmp[j] > tmp[j+1])
swap(&tmp[j],&tmp[j+1]);
}
}
return tmp[(NFILT+1)/2-1];
}
// Baseline estimation: running-median filter (window NFILT) over each
// antenna's NCHANS-long spectrum. Launched as <<<NANTS*4, NCHANS/4>>>:
// block b handles quarter (b % 4) of antenna b/4, one channel per thread.
__global__
void blinest(float *d_data, float *d_bline) {
if (threadIdx.x < NCHANS/4){
int idx = blockIdx.x * NCHANS/4 + threadIdx.x; // global output index
int nAnt = (int)(blockIdx.x/4); // antenna handled by this block
int nSam = (blockIdx.x % 4) * NCHANS/4 + threadIdx.x; // channel within the antenna
d_bline[idx] = medval(&d_data[nAnt*NCHANS], nSam, NCHANS);
}
// No shared state is exchanged; barrier kept from the original code.
__syncthreads();
}
// Baseline subtraction: d_data[i] -= d_bline[i], one element per thread.
// Launched as <<<NANTS*4, NCHANS/4>>>, mirroring blinest's layout.
__global__
void blincorr(float *d_data, float *d_bline) {
    if (threadIdx.x < NCHANS/4) {
        const int element = blockIdx.x * NCHANS/4 + threadIdx.x;
        d_data[element] -= d_bline[element];
    }
    // No shared state is exchanged; barrier retained from the original.
    __syncthreads();
}
// Outlier flagging: one block per antenna (launched <<<NANTS, 1>>>). The
// single thread computes a MAD-based sigma for its antenna's spectrum and
// sets d_flags[i] = 1 wherever |d_data[i]| exceeds dThres * sigma.
__global__
void flagg(float *d_data, float *d_flags, float dThres)
{
int i;
int nAnt = blockIdx.x; // one antenna per block
float mv;
mv = mad(&d_data[nAnt*NCHANS]); // robust sigma estimate for this antenna
//printf("antenns %d : sigma = %f\n", nAnt, mv);
for(i = nAnt*NCHANS; i < (nAnt+1)*NCHANS; i++) // possible to write kernel to compute flags over blocks and threads
if (d_data[i] > dThres*mv || d_data[i] < -dThres*mv)
d_flags[i] = 1;
}
/* Driver: read one time sample of autocorrelation spectra from noise.bin,
 * estimate and subtract the per-channel baseline on the GPU, flag samples
 * beyond dThres * (MAD-based sigma), and write corrected data, flags and
 * baseline to output.bin / flags.bin / bline.bin. */
int main(void)
{
int N = NANTS*NCHANS; // size of 1 time sample, autocorrelations only
float dThres = 6.; // flagging threshold, in units of the MAD-derived sigma
float *x = (float *)malloc(N*sizeof(float)); // data in (autocorrelations)
float *fl_data = (float *)malloc(N*sizeof(float)); // data out (corrected data)
float *d_data; // input data on device
cudaMalloc((void **)&d_data, N*sizeof(float));
float *h_bline = (float *)malloc(N*sizeof(float)); // baseline (host copy)
float *d_bline; // baseline (device)
cudaMalloc((void **)&d_bline, N*sizeof(float));
float *h_flags = (float *)malloc(N*sizeof(float)); // flags (host copy)
float *d_flags; // flags on device
cudaMalloc((void **)&d_flags, N*sizeof(float));
cudaMemset(d_flags, 0, N*sizeof(float)); // all-zero bytes == 0.0f
/*disk files management*/
FILE *ptr = fopen("noise.bin","rb");
FILE *write_ptr = fopen("output.bin","wb");
FILE *write_flg = fopen("flags.bin","wb");
FILE *write_bl = fopen("bline.bin","wb");
if (!ptr || !write_ptr || !write_flg || !write_bl) {
std::cerr << "cannot open input/output files" << std::endl;
return 1;
}
/* fread/fwrite take (buffer, element size, element count, stream); the
 * original calls swapped size and count -- same byte total, but the return
 * value could not be validated against N. */
size_t rd = fread(x, sizeof(float), N, ptr);
if (rd != (size_t)N) {
std::cerr << "short read from noise.bin" << std::endl;
return 1;
}
/*copy data onto GPU*/
cudaMemcpy(d_data, x, N*sizeof(float), cudaMemcpyHostToDevice);
/*FLAG DATA: baseline estimate -> baseline subtraction -> MAD flagging*/
blinest<<<NANTS*4, NCHANS/4>>>(d_data, d_bline);
cudaDeviceSynchronize();
blincorr<<<NANTS*4, NCHANS/4>>>(d_data, d_bline);
cudaDeviceSynchronize();
flagg<<<NANTS, 1>>>(d_data, d_flags, dThres);
cudaDeviceSynchronize();
/*copy back to CPU and write to disk*/
cudaMemcpy(fl_data, d_data, N*sizeof(float), cudaMemcpyDeviceToHost);
fwrite(fl_data, sizeof(float), N, write_ptr);
cudaMemcpy(h_flags, d_flags, N*sizeof(float), cudaMemcpyDeviceToHost);
fwrite(h_flags, sizeof(float), N, write_flg);
cudaMemcpy(h_bline, d_bline, N*sizeof(float), cudaMemcpyDeviceToHost);
fwrite(h_bline, sizeof(float), N, write_bl);
/*Free memory*/
free(x);
free(fl_data);
free(h_bline);
free(h_flags);
cudaFree(d_data);
cudaFree(d_bline);
cudaFree(d_flags);
fclose(ptr);
fclose(write_ptr);
fclose(write_flg);
fclose(write_bl);
return 0;
}
|
3,554 | #include <stdio.h>
// Transpose: coalesced reads along input rows, strided writes to the output.
// NOTE(review): the rowCount/columnCount factors in the two indices are only
// mutually consistent when the matrix is square (main uses 64x64); confirm
// the intended layout before using non-square inputs.
__global__
void transposeReadRow(float* out, float* in, int rowCount, int columnCount) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int idy = (blockIdx.y * blockDim.y) + threadIdx.y;
if (idx < columnCount && idy < rowCount) {
out[idx*columnCount+idy] = in[idy*rowCount+idx];
}
}
// Transpose: strided reads from the input, coalesced writes along output
// rows -- the mirror of transposeReadRow.
// NOTE(review): like transposeReadRow, the index math assumes a square matrix.
__global__
void transposeReadColumn(float* out, float* in, int rowCount, int columnCount) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int idy = (blockIdx.y * blockDim.y) + threadIdx.y;
if (idx < columnCount && idy < rowCount) {
out[idy*rowCount+idx] = in[idx*columnCount+idy];
}
}
// Transpose with the x-direction unrolled 8x: each thread moves 8 elements
// spaced blockDim.x apart in its input row. The guard skips any thread whose
// 8th element would fall outside the matrix, so columnCount must be at least
// 8*blockDim.x for the kernel to do work.
// NOTE(review): like the other kernels here, the strides assume a square matrix.
__global__
void transposeReadRowUnwrap8(float* out, float* in, int rowCount, int columnCount) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int idy = (blockIdx.y * blockDim.y) + threadIdx.y;
int x = idy*rowCount+idx; // linear input index
// Fix: the transposed output index pairs idx with idy; the original wrote
// idx*columnCount+idx, scattering results along the diagonal.
int y = idx*columnCount+idy;
if (idx + 7 * blockDim.x < columnCount && idy < rowCount) {
out[y] = in[x];
out[y+rowCount*blockDim.x] = in[x+blockDim.x];
out[y+2*rowCount*blockDim.x] = in[x+2*blockDim.x];
out[y+3*rowCount*blockDim.x] = in[x+3*blockDim.x];
out[y+4*rowCount*blockDim.x] = in[x+4*blockDim.x];
out[y+5*rowCount*blockDim.x] = in[x+5*blockDim.x];
out[y+6*rowCount*blockDim.x] = in[x+6*blockDim.x];
out[y+7*rowCount*blockDim.x] = in[x+7*blockDim.x];
}
}
// Mirror of transposeReadRowUnwrap8: 8x-unrolled transpose that reads along
// output columns and writes along input rows. Only does work when
// columnCount >= 8*blockDim.x (see the guard).
// NOTE(review): the strides assume a square matrix, as elsewhere in this file.
__global__
void transposeReadColumnUnwrap8(float* out, float* in, int rowCount, int columnCount) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int idy = (blockIdx.y * blockDim.y) + threadIdx.y;
int x = idy*rowCount+idx;
// Fix: pair idx with idy (the original's idx*columnCount+idx was a typo
// that addressed the diagonal).
int y = idx*columnCount+idy;
if (idx + 7 * blockDim.x < columnCount && idy < rowCount) {
out[x] = in[y];
out[x+blockDim.x] = in[y+rowCount*blockDim.x];
out[x+2*blockDim.x] = in[y+2*rowCount*blockDim.x];
out[x+3*blockDim.x] = in[y+3*rowCount*blockDim.x];
out[x+4*blockDim.x] = in[y+4*rowCount*blockDim.x];
out[x+5*blockDim.x] = in[y+5*rowCount*blockDim.x];
out[x+6*blockDim.x] = in[y+6*rowCount*blockDim.x];
out[x+7*blockDim.x] = in[y+7*rowCount*blockDim.x];
}
}
// Same transpose as transposeReadRow, but blocks are renumbered diagonally
// (blockX = (bx+by) % gridDim.x, blockY = bx) to spread writes across memory
// partitions and avoid partition camping.
// NOTE(review): the diagonal renumbering only covers the grid correctly when
// the grid is square; confirm launch configuration for non-square matrices.
__global__
void transposeDiagonalReadRow(float* out, float* in, int rowCount, int columnCount) {
int blockX = (blockIdx.x + blockIdx.y) % gridDim.x;
int blockY = blockIdx.x;
int idx = (blockX * blockDim.x) + threadIdx.x;
int idy = (blockY * blockDim.y) + threadIdx.y;
// Fix: bound idy by rowCount (the original compared it against columnCount,
// which only coincides for square matrices), matching transposeReadRow.
if (idx < columnCount && idy < rowCount) {
out[idx*columnCount+idy] = in[idy*rowCount+idx];
}
}
// Same transpose as transposeReadColumn with diagonal block renumbering to
// avoid partition camping (see transposeDiagonalReadRow).
__global__
void transposeDiagonalReadColumn(float* out, float* in, int rowCount, int columnCount) {
int blockX = (blockIdx.x + blockIdx.y) % gridDim.x;
int blockY = blockIdx.x;
int idx = (blockX * blockDim.x) + threadIdx.x;
int idy = (blockY * blockDim.y) + threadIdx.y;
// Fix: bound idy by rowCount to match transposeReadColumn's guard (the
// original used columnCount, which only coincides for square matrices).
if (idx < columnCount && idy < rowCount) {
out[idy*rowCount+idx] = in[idx*columnCount+idy];
}
}
/* Host reference transpose: element stored at in[y*rowCount+x] is written
   to out[x*columnCount+y]. Used as the expected result when validating the
   GPU kernels. */
void transpose(float* out, float* in, int rowCount, int columnCount) {
    for (int x = 0; x < rowCount; x++) {
        for (int y = 0; y < columnCount; y++) {
            out[x*columnCount+y] = in[y*rowCount+x];
        }
    }
}
/* Returns true iff the two matrices are element-wise identical. */
bool checkMatrix(float* matrixA, float* matrixB, int rowCount, int columnCount) {
    const int total = rowCount * columnCount;
    for (int i = 0; i < total; ++i) {
        if (matrixA[i] != matrixB[i]) {
            return false;
        }
    }
    return true;
}
/* Pretty-prints the matrix to stdout as left-aligned integers, one line of
   rowCount values per y index. */
void printMatrix(float* matrix, int rowCount, int columnCount) {
    for (int y = 0; y < columnCount; y++) {
        for (int x = 0; x < rowCount; x++)
            printf("%-3d ", int(matrix[y*rowCount+x]));
        printf("\n");
    }
}
// Test driver: fills a 64x64 matrix with 0..4095, transposes it with each
// kernel variant, and prints 1 (match vs CPU reference) or 0 per kernel.
int main(void) {
printf("\n");
int rowCount = 64;
int columnCount = 64;
dim3 block(32,32);
dim3 grid((columnCount+block.x-1)/block.x, (rowCount+block.y-1)/block.y);
// Unwrap8 grid: each thread covers 8 columns, so block.x/8 columns of blocks.
dim3 gridUnwrap8((columnCount+(block.x/8)-1)/(block.x/8), (rowCount+block.y-1)/block.y);
float* h_matrix = (float*)malloc(rowCount*columnCount*sizeof(float));
for (int x = 0; x < rowCount * columnCount; x++) { h_matrix[x] = x; }
float* h_transpose_matrix = (float*)malloc(rowCount*columnCount*sizeof(float));
float* expected_matrix = (float*)malloc(rowCount*columnCount*sizeof(float));
transpose(expected_matrix, h_matrix, rowCount, columnCount);
float* d_matrix;
cudaMalloc((float**)&d_matrix, rowCount*columnCount*sizeof(float));
cudaMemcpy(d_matrix, h_matrix, rowCount*columnCount*sizeof(float), cudaMemcpyHostToDevice);
float* d_transpose_matrix;
cudaMalloc((float**)&d_transpose_matrix, rowCount*columnCount*sizeof(float));
transposeReadRow<<<grid,block>>>(d_transpose_matrix, d_matrix, rowCount, columnCount);
cudaDeviceSynchronize();
cudaMemcpy(h_transpose_matrix, d_transpose_matrix, rowCount*columnCount*sizeof(float), cudaMemcpyDeviceToHost);
printf("%-30s: %d\n", "transposeReadRow", checkMatrix(expected_matrix, h_transpose_matrix, rowCount, columnCount));
transposeReadColumn<<<grid,block>>>(d_transpose_matrix, d_matrix, rowCount, columnCount);
cudaDeviceSynchronize();
cudaMemcpy(h_transpose_matrix, d_transpose_matrix, rowCount*columnCount*sizeof(float), cudaMemcpyDeviceToHost);
printf("%-30s: %d\n", "transposeReadColumn", checkMatrix(expected_matrix, h_transpose_matrix, rowCount, columnCount));
// NOTE(review): with columnCount=64 and block.x=32, the Unwrap8 kernels'
// guard (idx + 7*blockDim.x < columnCount) is never true, so they write
// nothing -- the checks below pass only because d_transpose_matrix still
// holds the previous kernel's correct result. Re-test with larger matrices.
transposeReadRowUnwrap8<<<gridUnwrap8,block>>>(d_transpose_matrix, d_matrix, rowCount, columnCount);
cudaDeviceSynchronize();
cudaMemcpy(h_transpose_matrix, d_transpose_matrix, rowCount*columnCount*sizeof(float), cudaMemcpyDeviceToHost);
printf("%-30s: %d\n", "transposeReadRowUnwrap8", checkMatrix(expected_matrix, h_transpose_matrix, rowCount, columnCount));
transposeReadColumnUnwrap8<<<gridUnwrap8,block>>>(d_transpose_matrix, d_matrix, rowCount, columnCount);
cudaDeviceSynchronize();
cudaMemcpy(h_transpose_matrix, d_transpose_matrix, rowCount*columnCount*sizeof(float), cudaMemcpyDeviceToHost);
printf("%-30s: %d\n", "transposeReadColumnUnwrap8", checkMatrix(expected_matrix, h_transpose_matrix, rowCount, columnCount));
// NOTE(review): the diagonal kernels are launched with gridUnwrap8 rather
// than grid; the in-kernel bounds checks mask the difference at this size.
transposeDiagonalReadRow<<<gridUnwrap8,block>>>(d_transpose_matrix, d_matrix, rowCount, columnCount);
cudaDeviceSynchronize();
cudaMemcpy(h_transpose_matrix, d_transpose_matrix, rowCount*columnCount*sizeof(float), cudaMemcpyDeviceToHost);
printf("%-30s: %d\n", "transposeDiagonalReadRow", checkMatrix(expected_matrix, h_transpose_matrix, rowCount, columnCount));
transposeDiagonalReadColumn<<<gridUnwrap8,block>>>(d_transpose_matrix, d_matrix, rowCount, columnCount);
cudaDeviceSynchronize();
cudaMemcpy(h_transpose_matrix, d_transpose_matrix, rowCount*columnCount*sizeof(float), cudaMemcpyDeviceToHost);
printf("%-30s: %d\n", "transposeDiagonalReadColumn", checkMatrix(expected_matrix, h_transpose_matrix, rowCount, columnCount));
cudaFree(d_matrix);
cudaFree(d_transpose_matrix);
free(h_matrix);
free(h_transpose_matrix);
free(expected_matrix);
cudaDeviceReset();
printf("\n");
return 0;
} |
3,555 | /**
File name: graph_to_csr.cu
Author: Yuede Ji
Last update: 15:52 10-09-2015
Description: convert current normal graph file to csr and begin position stored file
**/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define N 65536 // vertex number
//Using arrays to implement queue
/**
char filein[] = "/home/yuede/dataset/kron_16_16.dat";
char fileout[] = "/home/yuede/dataset/kron_16_16.cpu.as.result";
char file_beg_pos[] = "/home/yuede/dataset/kron_16_16.beg.pos";
char file_csr[] = "/home/yuede/dataset/kron_16_16.csr";
char file_v_e[] = "/home/yuede/dataset/kron_16_16.v_e";
**/
char filein[] = "/home/yuede/dataset/kron_10_4.dat";
char fileout[] = "/home/yuede/dataset/kron_10_4.cpu.as.result";
char file_beg_pos[] = "/home/yuede/dataset/kron_10_4.beg.pos";
char file_csr[] = "/home/yuede/dataset/kron_10_4.csr";
char file_v_e[] = "/home/yuede/dataset/kron_10_4.v_e";
const int INF = 0x7FFFFFFF;
int v_num = 0;
int e_num = 0;
// Truncates `filename` to zero length (creating it if missing).
void empty_file(char * filename)
{
FILE * fp = fopen(filename, "w");
// Guard against open failure: fclose(NULL) is undefined behavior.
if (fp != NULL)
fclose(fp);
}
//load from .dat files, and store in array csr[N*N], beg_pos[N]
//load from .dat files, and store in array csr[N*N], beg_pos[N]
// Reads the adjacency list in the global `filein` (pairs "vertex degree"
// followed by `degree` neighbor ids) and writes two text files:
// file_beg_pos gets the running offset of each vertex's edge list,
// file_csr gets the flattened neighbor ids; v_num/e_num go to file_v_e.
// NOTE(review): the `filename` parameter is ignored (the global is used);
// e_num is set to begin+1 although `begin` already equals the total edge
// count, and v_num assumes the last scanned vertex has the largest id --
// confirm both against downstream consumers.
void csr_begin(char *filename)
{
empty_file(file_beg_pos);
empty_file(file_csr);
FILE * fp_in = fopen(filein, "r");
FILE * fp_csr = fopen(file_csr, "a");
FILE * fp_beg_pos = fopen(file_beg_pos, "a");
int v, n;//v denotes current vertex, n denotes no. of adjacent node
int j = 0;// j denotes the index in csr[N*N];
int begin = 0;
fprintf(fp_beg_pos, "%d\n", begin);
while(fscanf(fp_in, "%d%d", &v, &n)!=EOF)
{
//printf("%d %d\n", v, n);
begin += n;
fprintf(fp_beg_pos, "%d\n", begin);
// Copy this vertex's n neighbor ids straight through to the CSR file.
for(int i=0; i<n; ++i)
{
fscanf(fp_in, "%d", &j);
fprintf(fp_csr, "%d\n", j);
}
}
fclose(fp_beg_pos);
fclose(fp_csr);
e_num = begin+1;
v_num = v+1;
printf("v_num = %d, e_num = %d\n", v_num, e_num);
FILE *fp_v_e = fopen(file_v_e, "w");
fprintf(fp_v_e, "%d %d\n", v_num, e_num);
fclose(fp_v_e);
}
// Entry point: converts the configured input graph file to CSR files on disk.
int main()
{
csr_begin(filein);
return 0;
}
|
3,556 | /* CSCI 563 Programming Assignment 2
Clayton Kramp
*/
#include <iostream>
#include <fstream>
using namespace std;
// Main Device Function to be used to count number of ones
// Counts the entries equal to 1 in the row-major `row` x `col` matrix A,
// accumulating into *count. One thread per element; atomicAdd serializes
// the concurrent increments so no updates are lost.
__global__ void countOnes(int* A, int* count, int row, int col) {
    const int c = blockIdx.x * blockDim.x + threadIdx.x; // column index
    const int r = blockIdx.y * blockDim.y + threadIdx.y; // row index
    if (r >= row || c >= col) return; // grid tail guard
    if (A[r * col + c] == 1) {
        atomicAdd(count, 1);
    }
}
// Reads a matrix from the file named in argv[1] (format: cols rows then
// rows*cols integers), counts the 1-entries on the GPU, prints the count.
int main(int argc, char* argv[]) {
if (argc != 2) {
cerr << "Arguments error" << endl;
return -1;
}
ifstream file(argv[1]);
if (!file.good()) {
cerr << "Bad input" << endl;
return -1;
}
int row, col;
file >> col >> row;
// One contiguous allocation with row pointers into it, so the whole matrix
// can be shipped to the device with a single cudaMemcpy of A[0].
int** A = new int*[row];
A[0] = new int[row*col];
for (int i = 1; i < row; i++) A[i] = A[i-1] + col;
// Fill in Host Array A
for (int i = 0; i < row; i++) {
for (int j = 0; j < col; j++) {
int element;
file >> element;
A[i][j] = element;
}
}
file.close();
int* count = new int;
*count = 0;
// Copy memory to device array deviceA
int* deviceA;
int bytes = row * col * sizeof(int);
cudaMalloc(&deviceA, bytes);
cudaMemcpy(deviceA, A[0], bytes, cudaMemcpyHostToDevice);
// Copy deviceCount (sizeof(int) instead of the original hard-coded 4)
int* deviceCount;
cudaMalloc(&deviceCount, sizeof(int));
cudaMemcpy(deviceCount, count, sizeof(int), cudaMemcpyHostToDevice);
dim3 threadsPerBlock(8, 8, 1);
dim3 numBlocks((col + threadsPerBlock.x-1) / threadsPerBlock.x,
(row + threadsPerBlock.y-1) / threadsPerBlock.y, 1);
// Launch the kernel; the blocking cudaMemcpy below synchronizes with it.
countOnes<<<numBlocks, threadsPerBlock>>>(deviceA, deviceCount, row, col);
// Copy back from device the deviceCount
cudaMemcpy(count, deviceCount, sizeof(int), cudaMemcpyDeviceToHost);
cout << *count << endl;
// Use array delete for array new (the original's scalar delete on these
// was undefined behavior), and release the counter as well.
delete[] A[0];
delete[] A;
delete count;
cudaFree(deviceA);
cudaFree(deviceCount);
return 0;
}
|
3,557 | #include "includes.h"
// Converts a diagonal-major ("marshalled") matrix back to row-major layout:
// output element (i,j) is read from anti-diagonal d = i+j of `tmp`. One
// thread per output cell.
// NOTE(review): for diagonals past the main anti-diagonal the column is
// shifted left by (i+j-max_rows+1) -- this mirrors the Needleman-Wunsch
// style diagonal marshalling step; confirm against the matching
// marshalling kernel before reuse.
__global__ void unmarshalling(int *input_itemsets, int *tmp, int max_rows, int max_cols)
{
int i, j;
i = blockIdx.y*blockDim.y+threadIdx.y;
j = blockIdx.x*blockDim.x+threadIdx.x;
// Guard the grid tail.
if( i >= max_rows || j >= max_cols) return;
if( (i+j) < max_rows) {
input_itemsets[i*max_cols+j] = tmp[(i+j)*max_cols+j];
}
else {
input_itemsets[i*max_cols+j] = tmp[(i+j)*max_cols+j-(i+j-max_rows+1)];
}
} |
3,558 | #include "includes.h"
// Arithmetic right-shift of every pixel of a width x height 16-bit Bayer
// image by bppMult bits, in place. One thread per pixel, 2D launch.
__global__ void shiftRightPixels(int16_t *bayImg, size_t width, size_t height, int bppMult)
{
    const int px = blockIdx.x * blockDim.x + threadIdx.x;
    const int py = blockIdx.y * blockDim.y + threadIdx.y;
    if (px < width && py < height)
    {
        bayImg[py * width + px] >>= bppMult;
    }
} |
3,559 | //
// Created by Sowmya Parameshwara on 11/10/16.
//
/**
*
* 1) Input is stored by transposing the matrix, so that the attributes of a column are stored in a single row. This
* will optimise the algorithm since all threads in a block will access nearby elements, while normalising.
* 2) Each row is normalised at a time for calculating standardscore, the calculated values are stored in output matrix by transposing.
* 3) Number of threads in a block is set as 16 (This value determined by checking performance for different values). The number of blocks
* is decided based on matrix size "N" and number of threads.
* 4) The contents of a row are divided among the blocks. In each block,Each thread populates one elements of the block into shared data.
* We then calculate partial sum without divergence, on the data stored in shared memory.
* 5) Once all blocks compute partial sum, we launch a kernel function on a single block by passing the calculated values from the previous step.
* This will calculate the final sum and final squared sum. To this final block we ensure the size of the partial sum array passed equals
the next nearest power of 2 of "the number of blocks", as partial sum algorithm works only for powers of 2.
* 6) The above data is used to calculate standard deviation for that row using the formula ((totalSquareSum + N*powf(mean, 2.0) - 2 * mean * totalSum)/(float)N)
* 7) The above value is used to calculate standard score for every element in that row.
* 8) The above step repeats for every row, calculating the standard score for all elements in the row.
*
* Steps to compile and execute on Jarvis :
* 1) qlogin -q interactive.q (Launches interactive session).
* 2) nvcc matrixNorm.cu -o matrixNorm (Compile code on jarvis).
* 3) cd hw4 (Code is available here).
* 4) ./matrixNorm 15000 4 <Argument 1 : Size of matrix, Argument 2 : Random seed value>
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <sys/types.h>
#include <sys/times.h>
#include <sys/time.h>
#include <time.h>
#include <cuda_runtime.h>
/* Program Parameters */
#define MAXN 15000 /* Max value of N */
int N; /* Matrix size */
/* Matrices */
volatile float A[MAXN][MAXN], B[MAXN][MAXN];
/* junk */
#define randm() 4|2[uid]&3
/* Prototype */
void matrixNorm();
/* returns a seed for srand based on the time */
// Derives a srand() seed from the microsecond field of the wall clock, so
// two runs started in different microseconds get different seeds.
unsigned int time_seed() {
  struct timeval now;
  struct timezone tz_unused;
  gettimeofday(&now, &tz_unused);
  return (unsigned int) now.tv_usec;
}
/* Set the program parameters from the command-line arguments */
// Parses the command line: argv[1] = matrix size N (required, 1..MAXN),
// argv[2] = RNG seed (optional; otherwise the clock seeds rand()).
// Exits via exit(0) with a message on missing/out-of-range arguments.
void parameters(int argc, char **argv) {
int seed = 0; /* Random seed */
char uid[32]; /* User name (unused) */
/* Read command-line arguments */
srand(time_seed()); /* Randomize */
if (argc == 3) {
seed = atoi(argv[2]);
srand(seed); // explicit seed overrides the clock: deterministic run
printf("Random seed = %i\n", seed);
}
if (argc >= 2) {
N = atoi(argv[1]);
if (N < 1 || N > MAXN) {
printf("N = %i is out of range.\n", N);
exit(0);
}
}
else {
printf("Usage: %s <matrix_dimension> [random seed]\n",
argv[0]);
exit(0);
}
/* Print parameters */
printf("\nMatrix dimension N = %i.\n", N);
}
/* Initialize A and B*/
/* Initialize A and B*/
// Fills the global N x N matrix A with pseudo-random floats (rand()/32768,
// roughly uniform in [0, ~1) for 15-bit rand) and zeroes B.
void initialize_inputs() {
int row, col;
printf("\nInitializing...\n");
for (row = 0; row < N; row++) {
for (col = 0; col < N; col++) {
A[row][col] = (float)rand() / 32768.0;
B[row][col] = 0.0;
}
}
}
/* Print input matrices */
/* Print input matrices */
// Prints A only for small runs (N < 10) to keep output readable.
// NOTE(review): the inner loop varies `row` while `col` is fixed, so A is
// printed column by column -- confirm whether that transposition is intended.
void print_inputs() {
int row, col;
if (N < 10) {
printf("\nA =\n\t");
for (col = 0; col < N; col++) {
for (row = 0; row < N; row++) {
printf("%5.2f%s", A[row][col], (row < N-1) ? ", " : ";\n\t");
}
}
}
}
// Prints the result matrix B row by row, only for small runs (N < 10).
void print_B() {
int row, col;
if (N < 10) {
printf("\nB =\n\t");
for (row = 0; row < N; row++) {
for (col = 0; col < N; col++) {
printf("%1.10f%s", B[row][col], (col < N-1) ? ", " : ";\n\t");
}
}
}
}
// Driver: parse arguments, fill A, time matrixNorm() with both wall-clock
// (gettimeofday) and process CPU time (times), then print B and the timings.
int main(int argc, char **argv) {
/* Timing variables */
struct timeval etstart, etstop; /* Elapsed times using gettimeofday() */
struct timezone tzdummy;
clock_t etstart2, etstop2; /* Elapsed times using times() */
unsigned long long usecstart, usecstop;
struct tms cputstart, cputstop; /* CPU times for my processes */
/* Process program parameters */
parameters(argc, argv);
initialize_inputs();
/* Print input matrices */
print_inputs();
/* Start Clock */
printf("\nStarting clock.\n");
gettimeofday(&etstart, &tzdummy);
etstart2 = times(&cputstart);
/* Matrix normalization (the stale "Gaussian Elimination" comment here came
 * from the assignment template) */
matrixNorm();
/* Stop Clock */
gettimeofday(&etstop, &tzdummy);
etstop2 = times(&cputstop);
printf("Stopped clock.\n");
usecstart = (unsigned long long)etstart.tv_sec * 1000000 + etstart.tv_usec;
usecstop = (unsigned long long)etstop.tv_sec * 1000000 + etstop.tv_usec;
/* Display output */
print_B();
/* Display timing results */
printf("\nElapsed time = %g ms.\n",
(float)(usecstop - usecstart)/(float)1000);
printf("(CPU times are accurate to the nearest %g ms)\n",
1.0/(float)CLOCKS_PER_SEC * 1000.0);
printf("My total CPU time for parent = %g ms.\n",
(float)( (cputstop.tms_utime + cputstop.tms_stime) -
(cputstart.tms_utime + cputstart.tms_stime) ) /
(float)CLOCKS_PER_SEC * 1000);
printf("My system CPU time for parent = %g ms.\n",
(float)(cputstop.tms_stime - cputstart.tms_stime) /
(float)CLOCKS_PER_SEC * 1000);
printf("My total CPU time for child processes = %g ms.\n",
(float)( (cputstop.tms_cutime + cputstop.tms_cstime) -
(cputstart.tms_cutime + cputstart.tms_cstime) ) /
(float)CLOCKS_PER_SEC * 1000);
/* Contrary to the man pages, this appears not to include the parent */
printf("--------------------------------------------\n");
exit(0);
}
/**
* Method to calculate the partial sum without divergence in all the blocks.
*/
// Each block reduces its slice of hostInput (a device pointer, despite the
// name) into one partial sum and one partial sum-of-squares, written to
// sumResults[blockIdx.x] / squareResults[blockIdx.x].
// NOTE(review): the tree reduction assumes blockDim.x is a power of two;
// the shared arrays are sized 1024 but only blockDim.x entries are used
// (the caller launches 16 threads per block).
__global__ void block_sum(const float *hostInput, float *sumResults, float *squareResults, const size_t n)
{
__shared__ float sharedSumData[1024];
__shared__ float sharedSquareData[1024];
int i = blockIdx.x * blockDim.x + threadIdx.x;
int tx = threadIdx.x;
// Threads past n contribute 0 so the tail block reduces correctly.
float x = 0;
if(i < n) {
x = hostInput[i];
}
sharedSumData[tx] = x;
sharedSquareData[tx] = x*x;
__syncthreads();
// block-wide reduction in _shared_ mem
for(int offset = blockDim.x / 2;
offset > 0;
offset >>= 1)
{
if(tx < offset)
{
sharedSumData[tx] += sharedSumData[tx + offset];
sharedSquareData[tx] += sharedSquareData[tx + offset];
}
// Barrier outside the divergent if: every thread must reach it.
__syncthreads();
}
// finally, thread 0 writes the calculated result of this block
if(threadIdx.x == 0)
{
// note that the result is per-block
// not per-thread
sumResults[blockIdx.x] = sharedSumData[0];
squareResults[blockIdx.x] = sharedSquareData[0];
}
}
/**
* Method to calculate the sum of the results calculated from all the blocks in the previous step.
*/
// Reduces the per-block partials (from block_sum) to a single total in
// sumResults[0] / squareResults[0]. Must be launched with a single block.
// NOTE(review): n must be a power of two and at most 256 (the shared array
// size). The caller launches numOfBlocks threads but reduces over n=next
// slots; when next > numOfBlocks the padding slots of shared memory are
// never written and are read uninitialized -- verify for non-power-of-two
// block counts and for N large enough that numOfBlocks > 256.
__global__ void single_block_reduction(float *sumResults, float *squareResults, const size_t n)
{
__shared__ float sharedSumData[256];
__shared__ float sharedSquareData[256];
int tx = threadIdx.x;
if(tx < n) {
sharedSumData[tx] = sumResults[tx];
sharedSquareData[tx] = squareResults[tx];
}
__syncthreads();
// block-wide reduction in _shared_ mem
for(int offset = n/2;
offset > 0;
offset >>= 1)
{
if(tx < offset)
{
sharedSumData[tx] += sharedSumData[tx + offset];
sharedSquareData[tx] += sharedSquareData[tx + offset];
}
__syncthreads();
}
// finally, thread 0 writes the calculated result
if(threadIdx.x == 0)
{
// note that the result is per-block
// not per-thread
sumResults[0] = sharedSumData[0];
squareResults[0] = sharedSquareData[0];
}
}
/* Copies one row (n floats) to the device, reduces it with per-block partial
 * sums followed by a single-block reduction, and returns a malloc'd
 * 2-element host array: results[0] = sum, results[1] = sum of squares.
 * The caller owns (and must free) the returned buffer.
 * NOTE(review): single_block_reduction loads only numOfBlocks partials but
 * reduces over `next` slots of 256-entry shared arrays; for numOfBlocks not
 * a power of two, or > 256, the result is unreliable -- needs a redesign
 * before scaling N up. */
float* sum (float *hostInput, size_t n, float *deviceMatrixA, float *sumResults, float *squareResults,int numOfThreadsPerBlock,int numOfBlocks,int next) {
// Use the row-length parameter n (the original used the global N; the two
// are equal as called from matrixNorm, but the parameter is the contract).
cudaMemcpy( deviceMatrixA, hostInput, sizeof(float) * n, cudaMemcpyHostToDevice );
// Stage 1: per-block partial sums / sums of squares.
block_sum<<<numOfBlocks,numOfThreadsPerBlock>>> (deviceMatrixA, sumResults, squareResults, n);
// Stage 2: fold the partials (zero-padded to `next`, a power of two).
single_block_reduction<<<1,numOfBlocks>>>(sumResults,squareResults,next);
float * results = (float *)malloc(sizeof(float) * 2);
// These blocking copies also synchronize with the kernels above.
cudaMemcpy(&results[0], &sumResults[0], sizeof(float) , cudaMemcpyDeviceToHost );
cudaMemcpy(&results[1], &squareResults[0], sizeof(float) , cudaMemcpyDeviceToHost );
return results;
}
/* Normalizes each row of the global matrix A into a column of B: the GPU
 * computes the row's sum and sum of squares, and the host derives the mean
 * and spread used for the standard score. */
void matrixNorm() {
int row, col;
float mu, sigma;
printf("Computing Parallely.\n");
size_t numOfThreadsPerBlock = 16;
size_t numOfBlocks = N/numOfThreadsPerBlock + (((N)%numOfThreadsPerBlock) ? 1 : 0);
// Reduction buffers are padded to the next power of two so the tree-style
// reduction in single_block_reduction can halve cleanly.
int next = pow(2, ceil(log(numOfBlocks)/log(2)));
float *sumResults = 0;
cudaMalloc((void**)&sumResults, sizeof(float) * (next));
cudaMemset(sumResults, 0, sizeof(float) * next);
float *squareResults = 0;
cudaMalloc((void**)&squareResults, sizeof(float) * (next));
cudaMemset(squareResults, 0, sizeof(float) * next);
float *deviceMatrixA = 0;
cudaMalloc( (void**)&deviceMatrixA, sizeof(float) * N);
for (row=0; row < N; row++) {
mu = 0.0;
float *hostResults = sum ((float *)A[row], N, deviceMatrixA, sumResults, squareResults,numOfThreadsPerBlock,numOfBlocks,next);
mu = hostResults[0] / (float) N;
// NOTE(review): this is the variance (E[(x-mu)^2] in expanded form), not
// its square root; kept as-is to preserve the program's output.
sigma = (hostResults[1] + N*powf(mu, 2.0) - 2 * mu * hostResults[0])/(float)N;
// sum() returns a malloc'd buffer; the original leaked it on every row.
free(hostResults);
for (col=0; col < N; col++) {
if (sigma == 0.0) {
B[col][row] = 0.0;
} else {
// Write transposed: row `row` of A becomes column `row` of B.
B[col][row] = (A[row][col] - mu) / sigma;
}
}
}
cudaFree(sumResults);
cudaFree(squareResults);
cudaFree(deviceMatrixA);
}
|
3,560 | #include <cuda_runtime.h>
#include <stdio.h>
#include <assert.h>
#define ROUND8_(x) (((x)+7)&~7)
#define panic(fmt, ...) { printf(fmt, __VA_ARGS__); asm("trap;"); }
#ifdef __cplusplus
#define __BEGIN_DECLS extern "C" {
#define __END_DECLS }
#else
#define __BEGIN_DECLS
#define __END_DECLS
#endif
__BEGIN_DECLS;
#ifndef _WIN64
#define MALLOCSIZETYPE long long int // must be x64 size for malloc alignment
#else
#define MALLOCSIZETYPE long long int
#endif
// Device-side malloc with a hidden size header: allocates `size` bytes
// (rounded up to a multiple of 8) preceded by one MALLOCSIZETYPE slot that
// records the rounded size, and returns a pointer just past the header.
// Traps via panic() on allocation failure.
__device__ void *malloc_(size_t size) {
assert(size > 0);
size = ROUND8_(size);
MALLOCSIZETYPE *p = (MALLOCSIZETYPE *)malloc(sizeof(MALLOCSIZETYPE) + size);
if (p)
p[0] = size;
// %llu with an explicit cast: size_t does not match the original %u.
else panic("failed to allocate %llu bytes of memory", (unsigned long long)size);
return (void *)(p + 1);
}
// Releases a block previously obtained from malloc_/realloc_. The size
// header sits one MALLOCSIZETYPE slot before the user pointer; NULL is a
// no-op, matching standard free semantics.
__device__ void free_(void *ptr) {
if (!ptr)
return;
free((MALLOCSIZETYPE *)ptr - 1);
}
// Device-side realloc for malloc_ blocks: allocates a fresh block of the
// (rounded-up) requested size, copies the payload, and frees the old block.
// Traps via panic() on allocation failure. ptr == NULL behaves like malloc_.
__device__ void *realloc_(void *ptr, size_t size) {
assert(size > 0);
size = ROUND8_(size);
MALLOCSIZETYPE *p = (MALLOCSIZETYPE *)malloc(sizeof(MALLOCSIZETYPE) + size);
if (p)
p[0] = size;
// %llu with an explicit cast: size_t does not match the original %u.
else panic("failed to allocate %llu bytes of memory", (unsigned long long)size);
if (ptr) {
MALLOCSIZETYPE *p2 = (MALLOCSIZETYPE *)ptr;
size_t ptrSize = (size_t)*(p2 - 1);
// Copy only what fits in the new block: the original copied the full old
// size, overflowing the new allocation whenever the block was shrunk.
if (ptrSize > size) ptrSize = size;
if (ptrSize) memcpy(p + 1, p2, ptrSize);
free(p2 - 1);
}
return (void *)(p + 1);
}
// Reports the usable (rounded-up) size recorded in the hidden header of a
// block returned by malloc_/realloc_.
__device__ size_t _msize_(void *ptr) {
return (size_t)((MALLOCSIZETYPE *)ptr)[-1];
}
__END_DECLS;
|
3,561 | #include <stdio.h>
#include <stdexcept>
#include <algorithm>
#include <cuda_runtime.h>
#include <iostream>
#include <vector>
#include <stdlib.h>
#include <time.h>
// CPU reference SAXPY: y[i] = alpha * x[i] + y[i] for every i in [0, n).
void saxpySerial(int n, float alpha, float *x, float *y) {
    for (int idx = 0; idx < n; ++idx) {
        y[idx] += alpha * x[idx];
    }
}
// GPU SAXPY: one thread per element, guarded against the grid tail.
__global__
void saxpyParallel(int n, float alpha, float *x, float *y) {
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < n) {
        y[idx] = alpha * x[idx] + y[idx];
    }
}
// Compares serial vs GPU SAXPY timing on N elements (N from argv[1],
// default 10). Note the GPU result is never copied back; only timing is
// reported.
int main(int argc, char* argv[]){
std::cout<< "> Starting "<<std::endl;
float *x, *y, *d_x, *d_y;
int N = 10;
if (argc>1) {
N = atoi(argv[1]);
}
std::cout<<"> N = "<<N<<std::endl;
x = (float *) malloc(N*sizeof(float));
y = (float *) malloc(N*sizeof(float));
cudaMalloc(&d_x, N*sizeof(float));
cudaMalloc(&d_y, N*sizeof(float));
for (int i = 0 ; i<N ; i++){
x[i]=3.0;
y[i]=4.0;
}
cudaMemcpy(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, N*sizeof(float), cudaMemcpyHostToDevice);
int nblocks = (N+255) / 256; // ceil-div so the last partial block is covered
clock_t tiempoInicio, tiempoFinal;
double resultado;
tiempoInicio = clock();
saxpyParallel<<<nblocks, 256>>>(N, 2.0f, d_x, d_y);
// Kernel launches are asynchronous: without this synchronization the timer
// measured only the launch overhead, not the kernel execution.
cudaDeviceSynchronize();
tiempoFinal = clock();
resultado = (double) (tiempoFinal-tiempoInicio)/ CLOCKS_PER_SEC;
printf("> Total time Paralell: %lf s\n", resultado);
tiempoInicio = clock();
saxpySerial(N, 2.0, x, y);
tiempoFinal = clock();
std::cout<< "> Finished <"<<std::endl;
resultado = (double) (tiempoFinal-tiempoInicio)/ CLOCKS_PER_SEC;
printf("> Total time Serial: %lf s\n", resultado);
cudaFree(d_x);
cudaFree(d_y);
free(x);
free(y);
return 0;
}
|
3,562 | /**
* @file processQuatJulEle_linux.cu
*
* Calculate julia sets for quaternions.
*
*/
/** Work out which piece of the global array this thread should operate on */
/** Work out which piece of the global array this thread should operate on:
 *  linearizes a 2D grid of 2D blocks into a single flat thread index. */
__device__ size_t calculateGlobalIndex() {
    size_t const blockId = blockIdx.x + blockIdx.y * gridDim.x;          // linear block id in the grid
    size_t const threadInBlock = threadIdx.x + blockDim.x * threadIdx.y; // linear thread id in the block
    size_t const blockSize = blockDim.x * blockDim.y;                    // threads per block
    return threadInBlock + blockId * blockSize;
}
/**
Iterate for one point and return its count
*/
// Quaternion Julia iteration z <- z*z + c, starting at z0 = (xPart0..wPart0)
// with constant c = (cx, cy, cz, cw). Iterates until the squared magnitude
// exceeds 16 (i.e. |z| > 4) or the count passes maxIters; because the loop
// tests count <= maxIters, the returned count lies in 0..maxIters+1.
__device__ unsigned int doIterations( double const xPart0,
double const yPart0,
double const zPart0,
double const wPart0,
double const cx,
double const cy,
double const cz,
double const cw,
unsigned int const maxIters ) {
// Initialise: z = z0
double xPart = xPart0;
double yPart = yPart0;
double zPart = zPart0;
double wPart = wPart0;
unsigned int count = 0;
// Loop until escaped: quaternion squared magnitude must stay <= 16
while ( ( count <= maxIters )
&& ((xPart*xPart + yPart*yPart + zPart*zPart + wPart*wPart) <= 16.0) ) {
++count;
// Update: z = z*z + z0;
double const oldXPart = xPart;
double const oldYPart = yPart;
double const oldZPart = zPart;
double const oldWPart = wPart;
// Quat mult and add constant: Hamilton product of z with itself expanded
// per component; symmetric cross terms (e.g. -zw + wz in y) cancel, so
// e.g. the y component reduces to 2*x*y + cy.
xPart = oldXPart*oldXPart-oldYPart*oldYPart-oldZPart*oldZPart-oldWPart*oldWPart + cx;
yPart = oldXPart*oldYPart+oldYPart*oldXPart-oldZPart*oldWPart+oldWPart*oldZPart + cy;
zPart = oldXPart*oldZPart+oldYPart*oldWPart+oldZPart*oldXPart-oldWPart*oldYPart + cz;
wPart = oldXPart*oldWPart-oldYPart*oldZPart+oldZPart*oldYPart+oldWPart*oldXPart + cw;
//xPart = xPart*xPart - yPart*yPart + xPart0;
//yPart = 2.0*oldRealPart*yPart + yPart0;
}
return count;
}
/** Main entry point.
* Read data from arraybuffer and call doIterations for every point.
*/
// Kernel entry point: each thread reads its starting quaternion from the
// x/y/z/w arrays, iterates the Julia map with constant (cx,cy,cz,cw) via
// doIterations, and stores the escape count (as a double) in out.
__global__ void processMandelbrotElement(
double * out,
const double * x,
const double * y,
const double * z,
const double * w,
const double cx,
const double cy,
const double cz,
const double cw,
const unsigned int maxIters,
const unsigned int numel ) {
// Work out which thread we are
size_t const globalThreadIdx = calculateGlobalIndex();
// If we're off the end, return now (grid tail guard)
if (globalThreadIdx >= numel) {
return;
}
// Get our X, Y, Z, W coords
double const xPart0 = x[globalThreadIdx];
double const yPart0 = y[globalThreadIdx];
double const zPart0 = z[globalThreadIdx];
double const wPart0 = w[globalThreadIdx];
// Run the iterations on this location
unsigned int const count = doIterations( xPart0, yPart0, zPart0, wPart0, cx, cy, cz, cw, maxIters );
out[globalThreadIdx] = double( count );
}
|
3,563 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include<iostream>
using namespace std;
// Prints basic properties of CUDA device 0, then pauses the console.
int main()
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, 0);
printf("Device name:%s\n", deviceProp.name);
printf("Multiprocessor count:%d\n", deviceProp.multiProcessorCount);
// totalGlobalMem is a size_t; the original %u truncated/garbled it on
// 64-bit builds.
printf("Total global memory:%zu\n", deviceProp.totalGlobalMem);
printf("Memory clock rate:%d\n", deviceProp.memoryClockRate);
printf("Clock rate:%d\n", deviceProp.clockRate);
printf("Memory bus width:%d\n", deviceProp.memoryBusWidth);
// Keep the console window open (Windows-style pause); the no-op scanf("")
// from the original served no purpose and was dropped.
system("pause");
return 0;
} |
3,564 |
// Babak Poursartip
// 09/15/2020
// udemy CUDA
//
#include <cstdio>
// =================================
// Empty placeholder kernel -- defined but never launched in this file.
__global__ void func(){
}
// =================================
// Minimal host program: prints markers around a device reset. No kernel is
// launched; the return value of cudaDeviceReset is ignored.
int main(){
printf(" starts ... \n");
cudaDeviceReset();
printf(" finished. \n");
return 0;
}
|
3,565 | #include "includes.h"
// Empty placeholder kernel (no-op body).
__global__ void kernel( void ) {
} |
3,566 |
#ifndef __CUDACC__
#define __CUDACC__
#endif
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <device_functions.h>
#include <cuda_runtime_api.h>
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include <string.h>
#define WEI 11
#define ITEN 5
void inserirPeso(int vet[]);
void inserirValor(int vet[]);
void info(int tam, int pes[], int val[], int n);
cudaError_t mochilaWithCuda(int *mochila, const int *peso, const int *valor);
// 0/1-knapsack dynamic program. Each thread owns one capacity column
// w = threadIdx.x + 1 (1..wei); row 0 and column 0 of `mochila` must be
// zero-initialised by the caller. peso/valor are indexed 1..iten, so the
// device buffers must hold at least iten + 1 ints (see mochilaWithCuda).
// Must be launched as a single block so __syncthreads() orders the rows.
__global__ void mochilaKernel(int *mochila, const int *peso, const int *valor,const int wei,const int iten)
{
    int w = threadIdx.x + 1;
    bool active = (w < wei + 1);
    for (int i = 1; i < iten + 1; i++) {
        if (active) {
            if (peso[i] > w) {
                // Item i does not fit at capacity w: carry previous row.
                mochila[i*(wei + 1) + w] = mochila[(i - 1)*(wei + 1) + w];
            } else {
                int skip = mochila[(i - 1)*(wei + 1) + w];
                int take = valor[i] + mochila[(i - 1)*(wei + 1) + w - peso[i]];
                mochila[i*(wei + 1) + w] = (skip > take) ? skip : take;
            }
        }
        // Row i reads row i-1 entries written by OTHER threads; without this
        // barrier between rows the DP races. It sits outside the `active`
        // branch so every thread in the block reaches it.
        __syncthreads();
    }
}
// Builds a small knapsack instance, solves it on the GPU and prints the
// full DP table plus the optimal value.
int main()
{
    int i, j;
    // Item weights/values are indexed 1..ITEN (slot 0 unused), so the
    // arrays need ITEN + 1 elements; ITEN-sized arrays overflow when
    // inserirPeso/inserirValor write vet[ITEN].
    int peso[ITEN + 1] = {0}, valor[ITEN + 1] = {0};
    // DP table: (ITEN+1) rows by (WEI+1) capacity columns, zero-initialised.
    int *mochila = (int*)malloc((ITEN + 1)*(WEI + 1)*sizeof(int));
    if (mochila == NULL) {
        fprintf(stderr, "malloc failed!");
        return 1;
    }
    for (i = 0; i < (ITEN + 1)*(WEI + 1); i++) mochila[i] = 0;
    // Load the items and print the problem description.
    inserirPeso(peso);
    inserirValor(valor);
    info(WEI, peso, valor, ITEN);
    // Run the knapsack DP on the GPU.
    cudaError_t cudaStatus = mochilaWithCuda(mochila, peso, valor);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addWithCuda failed!");
        free(mochila); // was leaked on this path in the original
        return 1;
    }
    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        free(mochila);
        return 1;
    }
    printf("\n");
    // Print the DP table, one row per item.
    for (i = 0; i < ITEN + 1; i++) {
        for (j = 0; j < WEI + 1; j++) {
            printf("%d ", mochila[(i*(WEI + 1)) + j]);
        }
        printf("\n");
    }
    printf("\n");
    printf("\n");
    // Bottom-right cell holds the optimal value.
    printf("Valor maximo da mochila: %d\n", mochila[ITEN*(WEI + 1) + WEI]);
    free(mochila);
    system("pause");
    return 0;
}
// Runs the 0/1-knapsack dynamic program on the GPU and copies the finished
// DP table back into host_mochila.
//   host_mochila : (ITEN+1)*(WEI+1) table, zero-initialised by the caller,
//                  overwritten with the completed table on success.
//   host_peso    : item weights, 1-based (needs ITEN + 1 ints, slot 0 unused).
//   host_valor   : item values, 1-based (needs ITEN + 1 ints, slot 0 unused).
// Returns the first CUDA error encountered, or cudaSuccess.
cudaError_t mochilaWithCuda(int *host_mochila, const int *host_peso, const int *host_valor)
{
    int *dev_mochila = 0;
    int *dev_peso = 0;
    int *dev_valor = 0;
    cudaError_t cudaStatus;
    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error;
    }
    // DP table: (ITEN+1) rows x (WEI+1) columns.
    cudaStatus = cudaMalloc((void**)&dev_mochila, (ITEN + 1)*(WEI + 1)*sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    // The kernel indexes peso[1..ITEN] and valor[1..ITEN], so the device
    // buffers need ITEN + 1 ints; the original ITEN-sized allocations made
    // peso[ITEN]/valor[ITEN] out-of-bounds reads on the device.
    cudaStatus = cudaMalloc((void**)&dev_peso, (ITEN + 1) * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_valor, (ITEN + 1) * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    // Copy input data from host memory to GPU buffers.
    cudaStatus = cudaMemcpy(dev_mochila, host_mochila, (ITEN + 1)*(WEI + 1)* sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(dev_valor, host_valor, (ITEN + 1) * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(dev_peso, host_peso, (ITEN + 1) * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    // One thread per capacity 1..WEI, single block (WEI is small); the
    // kernel relies on a single block for its __syncthreads() row ordering.
    mochilaKernel<<<1, WEI>>>(dev_mochila, dev_peso, dev_valor, WEI, ITEN);
    // Check for any errors launching the kernel.
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }
    // Copy the finished DP table back to host memory.
    cudaStatus = cudaMemcpy(host_mochila, dev_mochila, (ITEN+1)*(WEI+1)* sizeof(int), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
Error:
    cudaFree(dev_mochila);
    cudaFree(dev_valor);
    cudaFree(dev_peso);
    return cudaStatus;
}
// Fills the item weight table using 1-based indexing (vet[0] is unused).
// NOTE(review): writes vet[1]..vet[ITEN], so the caller must pass an array
// of at least ITEN + 1 ints; an int[ITEN] array overflows here.
void inserirPeso(int vet[]){
vet[1] = 1;
vet[2] = 2;
vet[3] = 5;
vet[4] = 6;
vet[5] = 7;
}
// Fills the item value table using 1-based indexing (vet[0] is unused).
// NOTE(review): writes vet[1]..vet[ITEN], so the caller must pass an array
// of at least ITEN + 1 ints; an int[ITEN] array overflows here.
void inserirValor(int vet[]){
vet[1] = 1;
vet[2] = 6;
vet[3] = 18;
vet[4] = 22;
vet[5] = 28;
}
// Prints a summary of the knapsack instance: capacity, item count and the
// value/weight of every item. Items use 1-based indexing (1..n).
void info(int tam, int pes[], int val[], int n){
    int i;
    printf("=========================================================\n");
    printf(" *Dados da mochila* \n");
    printf("\n");
    printf("Capacidade total da mochila: %d\n", tam);
    printf("Numero de itens: %d itens", n);
    printf("\n");
    printf("Valor de cada item: ");
    for (i = 1; i <= n; i++)
        printf("%d ", val[i]);
    printf("\n");
    printf("Peso de cada item: ");
    for (i = 1; i <= n; i++)
        printf("%d ", pes[i]);
    printf("\n=========================================================\n");
}
|
3,567 | #include "includes.h"
// Tiled matrix multiply C = A * B using shared-memory tiles.
// A is (? x n) row-major, B is (n x m) row-major, C is (? x m).
// NOTE(review): the shared tiles are BLOCK_SIZE x BLOCK_SIZE (macro from
// includes.h, not visible here) while the loads index with blockDim --
// presumably the kernel must be launched with blockDim == (BLOCK_SIZE,
// BLOCK_SIZE) and with m, n multiples of BLOCK_SIZE; confirm at call site.
__global__ void matrix_multiply_tiling_cuda(int* A, int* B, int* C, int m, int n) {
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = n * blockDim.y * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + n - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = blockDim.x;
// Index of the first sub-matrix of B processed by the block
int bBegin = blockDim.x * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = blockDim.y * m;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
int Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep)
{
// Declaration of the shared memory array As used to
// store the sub-matrix of A
// Suppose to be As[blockDim.y][blockDim.x] but need dynamic allocation
// For simplicity, use a macro here
__shared__ int As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
// Suppose to be Bs[blockDim.x][blockDim.y] but need dynamic allocation
// For simplicity, use a macro here
__shared__ int Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = A[a + n * ty + tx];
Bs[ty][tx] = B[b + m * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
// NOTE(review): #pragma unroll cannot fully unroll here because the
// trip count (blockDim.x) is only known at runtime.
#pragma unroll
for (int k = 0; k < blockDim.x; ++k)
{
Csub += As[ty][k] * Bs[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = m * blockDim.y * by + blockDim.x * bx;
C[c + m * ty + tx] = Csub;
} |
3,568 | // Corresponding header file: /include/square_ops.h
#include <cuda_runtime.h>
#include <float.h>
float *d_x, *d_y, *d_logY;
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
float* const redChannel,
float* const greenChannel,
float* const blueChannel)
{
    // Scatter the packed RGBA image into three separate float channels.
    // One thread per pixel; x indexes columns, y indexes rows.
    const int col = blockDim.x * blockIdx.x + threadIdx.x;
    const int row = blockDim.y * blockIdx.y + threadIdx.y;
    if (col < numCols && row < numRows) {
        const int idx = row * numCols + col;
        const uchar4 pixel = inputImageRGBA[idx];
        redChannel[idx] = pixel.x;
        greenChannel[idx] = pixel.y;
        blueChannel[idx] = pixel.z;
    }
}
// Converts per-pixel linear RGB into the xyY colour space:
//   X/Y/Z via the sRGB->XYZ matrix, then chromaticity x = X/L, y = Y/L
//   (L = X+Y+Z) and log-luminance log10(delta + Y).
// One thread per pixel; x indexes columns (num_pixels_x), y rows.
// NOTE(review): a fully black pixel gives L == 0 and x, y become NaN --
// presumably inputs are HDR values where this does not occur; confirm.
__global__
void rgb_to_xyY(
float* d_r,
float* d_g,
float* d_b,
float* d_x,
float* d_y,
float* d_log_Y,
float delta,
int num_pixels_y,
int num_pixels_x )
{
int ny = num_pixels_y;
int nx = num_pixels_x;
int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y );
int image_index_1d = ( nx * image_index_2d.y ) + image_index_2d.x;
if ( image_index_2d.x < nx && image_index_2d.y < ny )
{
float r = d_r[ image_index_1d ];
float g = d_g[ image_index_1d ];
float b = d_b[ image_index_1d ];
// RGB -> CIE XYZ (standard sRGB primaries).
float X = ( r * 0.4124f ) + ( g * 0.3576f ) + ( b * 0.1805f );
float Y = ( r * 0.2126f ) + ( g * 0.7152f ) + ( b * 0.0722f );
float Z = ( r * 0.0193f ) + ( g * 0.1192f ) + ( b * 0.9505f );
float L = X + Y + Z;
float x = X / L;
float y = Y / L;
// delta keeps the logarithm finite for Y == 0.
float log_Y = log10f( delta + Y );
d_x[ image_index_1d ] = x;
d_y[ image_index_1d ] = y;
d_log_Y[ image_index_1d ] = log_Y;
}
}
// In-place exclusive prefix sum (Hillis-Steele) over d_bins[0..size-1].
// NOTE(review): __syncthreads() only synchronises one block, so the scan is
// only correct when all `size` elements live in a single active block (true
// for the numBins == 1024, blockDim == 1024 launch in toneMap).
__global__ void kernel_scan(int* d_bins, int size)
{
    int index = blockDim.x*blockIdx.x+threadIdx.x;
    // Out-of-range threads must NOT return early: every thread has to reach
    // the __syncthreads() barriers below, otherwise behaviour is undefined.
    // They simply skip the loads/stores instead.
    bool active = (index < size);
    // Shift each element one slot right (element 0 becomes 0), turning the
    // inclusive scan below into an exclusive scan.
    int temp = 0;
    if (active && index > 0)
    {
        temp = d_bins[index - 1];
    }
    __syncthreads();
    if (active)
    {
        d_bins[index] = temp;
    }
    __syncthreads();
    // Hillis-Steele: each pass adds the element `s` positions to the left.
    for (int s = 1; s <= size; s *= 2)
    {
        int a = index - s;
        int val = 0;
        if (active && a >= 0)
            val = d_bins[a];
        __syncthreads();
        if (active && a >= 0)
            d_bins[index] += val;
        __syncthreads();
    }
}
// Builds a numBins-bucket histogram of d_in over the range [min, max],
// one atomic increment per input element.
__global__ void kernel_histo(const float* d_in, int* d_bins, float min,float max,int size, int numBins)
{
    int index = blockDim.x*blockIdx.x+threadIdx.x;
    if(index<size)
    {
        int a = ((d_in[index] - min)/(max-min))* numBins;
        // Clamp: d_in[index] == max mapped to bucket `numBins`, one past the
        // end of d_bins, in the original code.
        if (a > numBins - 1) a = numBins - 1;
        if (a < 0) a = 0; // guard against values below min / rounding
        atomicAdd(&d_bins[a], 1);
    }
}
// Per-block reduction: writes the min (maxmin == 0) or max (maxmin != 0) of
// this block's slice of d_in to d_out[blockIdx.x].
// Dynamic shared memory must be blockDim.x floats (passed at launch).
__global__ void kernel_maxmin(float* d_in, float*d_out, int size, int maxmin)
{
    int tid = threadIdx.x;
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    extern __shared__ float shared[];
    // Out-of-range threads must NOT return early: they still have to fill
    // their shared slot (the original early return left it uninitialised and
    // the reduction read garbage in the last block) and reach the
    // __syncthreads() barriers below. They load the identity element instead.
    if (x < size)
        shared[tid] = d_in[x];
    else
        shared[tid] = (maxmin == 0) ? FLT_MAX : -FLT_MAX;
    __syncthreads();
    // Tree reduction in shared memory (s doubles each pass; the original
    // incremented s by 1, doing O(blockDim) redundant passes).
    for (int s = 1; s < blockDim.x; s *= 2)
    {
        if (tid % (2*s) == 0 && s + tid < blockDim.x)
        {
            if (maxmin == 0)
                shared[tid] = min(shared[tid], shared[tid+s]);
            else
                shared[tid] = max(shared[tid], shared[tid+s]);
        }
        __syncthreads();
    }
    if (tid == 0)
        d_out[blockIdx.x] = shared[0];
}
// Tone-mapping: replaces each pixel's luminance Y with the normalised CDF
// value of its log-luminance bin, then converts xyY back to linear RGB.
// One thread per pixel; x indexes columns, y rows.
// NOTE(review): divides by the stored chromaticity y; y == 0 (black pixel)
// would produce NaN/Inf -- presumably excluded upstream, confirm.
__global__ void tonemap(
float* d_x,
float* d_y,
float* d_log_Y,
float* d_cdf_norm,
float* d_r_new,
float* d_g_new,
float* d_b_new,
float min_log_Y,
float max_log_Y,
float log_Y_range,
int num_bins,
int num_pixels_y,
int num_pixels_x )
{
int ny = num_pixels_y;
int nx = num_pixels_x;
int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y );
int image_index_1d = ( nx * image_index_2d.y ) + image_index_2d.x;
if ( image_index_2d.x < nx && image_index_2d.y < ny )
{
float x = d_x[ image_index_1d ];
float y = d_y[ image_index_1d ];
float log_Y = d_log_Y[ image_index_1d ];
// Map log-luminance to a histogram bin, clamped to the last bin.
int bin_index = min( num_bins - 1, int( (num_bins * ( log_Y - min_log_Y ) ) / log_Y_range ) );
float Y_new = d_cdf_norm[ bin_index ];
// Rebuild X and Z from the (unchanged) chromaticity and the new Y.
float X_new = x * ( Y_new / y );
float Z_new = ( 1 - x - y ) * ( Y_new / y );
// XYZ -> linear RGB (inverse sRGB matrix).
float r_new = ( X_new * 3.2406f ) + ( Y_new * -1.5372f ) + ( Z_new * -0.4986f );
float g_new = ( X_new * -0.9689f ) + ( Y_new * 1.8758f ) + ( Z_new * 0.0415f );
float b_new = ( X_new * 0.0557f ) + ( Y_new * -0.2040f ) + ( Z_new * 1.0570f );
d_r_new[ image_index_1d ] = r_new;
d_g_new[ image_index_1d ] = g_new;
d_b_new[ image_index_1d ] = b_new;
}
}
__global__
void recombineChannels(const float* const redChannel,
const float* const greenChannel,
const float* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
    // Gather three float channels back into a packed uchar4 image.
    // One thread per pixel; x indexes columns, y rows.
    const int2 pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
                               blockIdx.y * blockDim.y + threadIdx.y);
    const int idx = pos.y * numCols + pos.x;
    if (pos.x >= numCols || pos.y >= numRows)
        return;
    const unsigned char r = redChannel[idx];
    const unsigned char g = greenChannel[idx];
    const unsigned char b = blueChannel[idx];
    // Alpha is forced to 255 (fully opaque).
    outputImageRGBA[idx] = make_uchar4(r, g, b, 255);
}
// Divides every CDF entry by the final (largest) entry so the output lies
// in [0, 1]. Every thread reads d_input_cdf[n - 1]; n must be >= 1.
__global__ void normalize_cdf(
unsigned int* d_input_cdf,
float* d_output_cdf,
int n
)
{
    const float scale = 1.f / d_input_cdf[n - 1];
    const int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (i < n)
    {
        d_output_cdf[i] = d_input_cdf[i] * scale;
    }
}
// Iteratively reduces d_in (size floats on the device) to a single min
// (maxmin == 0) or max (maxmin != 0) value, returned in `ans`.
// Each pass shrinks the array by a factor of block_size until one element
// remains. The original version leaked every intermediate device buffer
// (cudaMalloc in a loop with no cudaFree); this version frees them.
void findmaxmin(const float* d_in, float &ans,int size, int maxmin)
{
    const int block_size = 1024;
    int curr_size = size;
    float *d_curr_in = NULL;
    cudaMalloc(&d_curr_in, size * sizeof(float));
    cudaMemcpy(d_curr_in, d_in, size*sizeof(float), cudaMemcpyDeviceToDevice);
    while(1)
    {
        int newSize = curr_size/block_size + 1;
        float *d_out = NULL;
        cudaMalloc(&d_out, sizeof(float) * newSize);
        dim3 threads(block_size);
        dim3 blocks(newSize);
        // One partial result per block; identity padding handles the tail.
        kernel_maxmin<<<blocks, threads, sizeof(float)*block_size>>>(d_curr_in, d_out, curr_size, maxmin);
        // The output of this pass becomes the input of the next; release
        // the now-unneeded input buffer instead of leaking it.
        cudaFree(d_curr_in);
        d_curr_in = d_out;
        if(newSize == 1)
            break;
        curr_size = newSize;
    }
    cudaMemcpy(&ans, d_curr_in, sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(d_curr_in);
}
// Splits the image into channels, converts to xyY (storing x, y and
// log-luminance in the file globals d_x/d_y/d_logY), hands the luminance
// buffer back via *d_luminance and allocates a zeroed CDF buffer for the
// caller via *d_cdf. *numberOfBins receives the bin count.
void preprocess(uchar4* d_image, float** d_luminance, unsigned int** d_cdf, size_t &numRows, size_t &numCols, unsigned int *numberOfBins)
{
    float *d_red, *d_blue, *d_green;
    int numBins = 1024;
    dim3 blockSize(32,32,1);
    // x indexes columns and y indexes rows in both kernels below, so the
    // grid must be (cols, rows); the original (rows, cols) order left part
    // of a non-square image unprocessed.
    const dim3 gridSize(numCols/blockSize.x+1, numRows/blockSize.y+1,1);
    cudaMalloc(&d_red, sizeof( float) * numRows * numCols);
    cudaMalloc(&d_blue, sizeof(float) * numRows * numCols);
    cudaMalloc(&d_green, sizeof( float) * numRows * numCols);
    // NOTE(review): the (red, blue, green) argument order is swapped versus
    // the kernel's (red, green, blue) parameters, but the identical swapped
    // order is used for rgb_to_xyY below, so the two swaps cancel out.
    separateChannels<<<gridSize, blockSize>>>(d_image, numRows, numCols, d_red, d_blue, d_green);
    cudaMalloc(&d_x, sizeof( float) * numRows * numCols);
    cudaMalloc(&d_y, sizeof(float) * numRows * numCols);
    cudaMalloc(&d_logY, sizeof(float) * numRows * numCols);
    rgb_to_xyY<<<gridSize, blockSize>>>(d_red, d_blue, d_green, d_x, d_y, d_logY, .0001f, numRows, numCols) ;
    *d_luminance = d_logY;
    *numberOfBins = numBins;
    // Allocate and zero the caller's CDF buffer. The original passed &d_cdf
    // (the address of the parameter itself) to cudaMalloc/cudaMemset,
    // leaving *d_cdf uninitialised for the caller.
    cudaMalloc(d_cdf, sizeof(unsigned int) * numBins);
    cudaMemset(*d_cdf, 0, sizeof(unsigned int) * numBins);
    // The per-channel buffers are no longer needed once rgb_to_xyY has run
    // (cudaFree synchronises with the preceding launches); they were leaked
    // in the original version.
    cudaFree(d_red);
    cudaFree(d_blue);
    cudaFree(d_green);
}
// Computes the min/max log-luminance, histograms the luminance into numBins
// buckets, scans the histogram, and writes the (unnormalised) CDF into the
// caller-provided d_cdf. min_logLum/max_logLum are output parameters.
void toneMap(const float* const d_logLuminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
    findmaxmin(d_logLuminance, min_logLum, numRows*numCols, 0);
    findmaxmin(d_logLuminance, max_logLum, numRows*numCols, 1);
    int *d_bins;
    cudaMalloc(&d_bins, sizeof(int)*numBins);
    cudaMemset(d_bins, 0, sizeof(int)*numBins);
    int size = numRows*numCols;
    dim3 threads(1024,1,1);
    dim3 blocks((size/threads.x)+1,1,1);
    kernel_histo<<<blocks, threads>>>(d_logLuminance, d_bins, min_logLum, max_logLum, size, numBins);
    // kernel_scan is only correct within one block; numBins (1024) fits in a
    // single 1024-thread block and the extra block is entirely out of range.
    dim3 scan_blocks(numBins/threads.x +1);
    kernel_scan<<<scan_blocks, threads>>>(d_bins, numBins);
    cudaMemcpy(d_cdf, d_bins, sizeof(int) * numBins, cudaMemcpyDeviceToDevice);
    // d_bins was leaked in the original version.
    cudaFree(d_bins);
}
// Normalises the CDF, tone-maps the luminance back to RGB and recombines
// the channels into the output image d_out.
void postProcess(uchar4* d_out, const float* const d_logLuminance,
unsigned int* const d_cdf,
size_t numRows, size_t numCols,
float min_log_Y, float max_log_Y)
{
    int numBins = 1024;
    const int numThreads = 192;
    float *d_cdf_normalized;
    cudaMalloc(&d_cdf_normalized, sizeof(float) * numBins);
    normalize_cdf<<< (numBins + numThreads - 1) / numThreads,
    numThreads>>>(d_cdf,
    d_cdf_normalized,
    numBins);
    float log_Y_range = max_log_Y - min_log_Y;
    const dim3 blockSize(32, 16, 1);
    const dim3 gridSize( (numCols + blockSize.x - 1) / blockSize.x,
    (numRows + blockSize.y - 1) / blockSize.y );
    // Map each luminance value through the CDF and transform back to RGB.
    float *d_red, *d_blue, *d_green;
    cudaMalloc(&d_red, sizeof( float) * numRows * numCols);
    cudaMalloc(&d_blue, sizeof(float) * numRows * numCols);
    cudaMalloc(&d_green, sizeof(float) * numRows * numCols);
    // tonemap writes the new red/green/blue values into d_red/d_green/d_blue.
    tonemap<<<gridSize, blockSize>>>(d_x, d_y, d_logY,
    d_cdf_normalized,
    d_red, d_green, d_blue,
    min_log_Y, max_log_Y,
    log_Y_range, numBins,
    numRows, numCols);
    // Pass the channels in (red, green, blue) order to match tonemap's
    // outputs; the original (d_red, d_blue, d_green) order swapped the
    // green and blue channels in the final image.
    recombineChannels<<<gridSize, blockSize>>>(d_red, d_green, d_blue, d_out, numRows, numCols);
    // Temporary buffers were leaked in the original version.
    cudaFree(d_cdf_normalized);
    cudaFree(d_red);
    cudaFree(d_green);
    cudaFree(d_blue);
}
// Full HDR tone-mapping pipeline: preprocess to xyY / log-luminance, build
// the luminance CDF, then tone-map into a freshly allocated RGBA image.
// Returns a device pointer the caller owns (numRows * numCols uchar4s).
uchar4* hdr_ops(uchar4* d_image, size_t numRows, size_t numCols)
{
    float *d_luminance = NULL;
    unsigned int *d_cdf = NULL;
    unsigned int numBins = 0;
    // Placeholders; toneMap overwrites both with the measured extrema.
    float min_logLum = 0.f;
    float max_logLum = 1.f;
    preprocess(d_image, &d_luminance, &d_cdf, numRows, numCols, &numBins);
    toneMap(d_luminance, d_cdf, min_logLum, max_logLum, numRows, numCols, numBins);
    uchar4* d_out = NULL;
    cudaMalloc(&d_out, sizeof(uchar4) * numRows * numCols);
    postProcess(d_out, d_luminance, d_cdf, numRows, numCols, min_logLum, max_logLum);
    return d_out;
}
|
3,569 | #include "cuda_runtime.h"
#include <stdio.h>
#include <time.h>
const int Row = 2 * 2;
const int Col = 2 * 2;
// 2D grid of 2D blocks -- the most common launch configuration.
// Element-wise matrix add c = a + b, where each argument is a device-side
// array of Row pointers into a flat Row*Col float buffer.
__global__ void VectorAdd(float** a, float** b, float** c) {
    // Column from the x dimension, row from the y dimension.
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col < Col && row < Row) {
        c[row][col] = a[row][col] + b[row][col];
    }
}
// Times a small 2D matrix addition demonstrating how to pass a device-side
// array of row pointers (float**) to a kernel.
int main() {
    int start = clock();
    // Host-side tables of *device* row pointers, plus flat host data arrays.
    float* a[Row] = { NULL };
    float* b[Row] = { NULL };
    float* c[Row] = { NULL };
    float data_a[Row * Col] = { 0.0 };
    float data_b[Row * Col] = { 0.0 };
    float data_c[Row * Col] = { 0.0 };
    // Device-side pointer tables and flat device data buffers.
    float** device_a = NULL;
    float** device_b = NULL;
    float** device_c = NULL;
    float* device_data_a = NULL;
    float* device_data_b = NULL;
    float* device_data_c = NULL;
    // Allocate device memory.
    cudaMalloc((void**)&device_a, sizeof(float*) * Row);
    cudaMalloc((void**)&device_b, sizeof(float*) * Row);
    cudaMalloc((void**)&device_c, sizeof(float*) * Row);
    cudaMalloc((void**)&device_data_a, sizeof(float) * Row * Col);
    cudaMalloc((void**)&device_data_b, sizeof(float) * Row * Col);
    cudaMalloc((void**)&device_data_c, sizeof(float) * Row * Col);
    for (int i = 0; i < Row * Col; i++) {
        data_a[i] = i;
        data_b[i] = i;
    }
    // Fill the host pointer tables with addresses *inside the device data
    // buffers* (one pointer per row), then copy the tables to the device so
    // the kernel can use a[row][col] indexing.
    for (int i = 0; i < Row; i++) {
        a[i] = device_data_a + i * Col;
        b[i] = device_data_b + i * Col;
        c[i] = device_data_c + i * Col;
    }
    cudaMemcpy(device_a, a, sizeof(float*) * Row, cudaMemcpyHostToDevice);
    cudaMemcpy(device_b, b, sizeof(float*) * Row, cudaMemcpyHostToDevice);
    cudaMemcpy(device_c, c, sizeof(float*) * Row, cudaMemcpyHostToDevice);
    cudaMemcpy(device_data_a, data_a, sizeof(float) * Row * Col, cudaMemcpyHostToDevice);
    cudaMemcpy(device_data_b, data_b, sizeof(float) * Row * Col, cudaMemcpyHostToDevice);
    // Launch enough 16x32 blocks to cover the Col x Row element grid.
    dim3 dim_block(16, 32);
    dim3 dim_grid((Col + dim_block.x - 1) / dim_block.x, (Row + dim_block.y - 1) / dim_block.y);
    VectorAdd<<<dim_grid, dim_block>>>(device_a, device_b, device_c);
    // Blocking copy of the result back to the host (also synchronises).
    cudaMemcpy(data_c, device_data_c, sizeof(float) * Row * Col, cudaMemcpyDeviceToHost);
    for (int i = 0; i < Row * Col; i++)
        printf("%.0f + %.0f = %.0f\t", data_a[i], data_b[i], data_c[i]);
    int end = clock();
    // clock() returns ticks; divide by CLOCKS_PER_SEC (not a hard-coded
    // 1000, which is wrong on platforms where CLOCKS_PER_SEC != 1000).
    printf("\n程序耗时:%ds\n", (int)((end - start) / CLOCKS_PER_SEC));
    // Release device memory.
    cudaFree(device_a);
    cudaFree(device_b);
    cudaFree(device_c);
    cudaFree(device_data_a);
    cudaFree(device_data_b);
    cudaFree(device_data_c);
    return 0;
}
|
3,570 | #include <stdio.h>
#include "cuda.h"
#include <assert.h>
#define N 2 //16
// Device helper: returns its argument incremented by one.
__device__ int bar(int x) {
    return 1 + x;
}
// Writes bar(i) into A[i] for each thread index i; launch with one block
// of at least as many threads as A has elements.
__global__ void foo(int *A) {
    const int lane = threadIdx.x;
    A[lane] = bar(lane);
}
|
3,571 | #include <math.h>
#include <stdio.h>
#include <cuda_runtime.h>
#define f_(i,j) f_[(i) + (j)*(m)]
#define Z(i,j) Z[(i) + (j)*m]
__constant__ float s_H[121];
//B is a partition of the images with dimensions thread number + patchSize
//A our extended array with padding
// Non-local-means normalisation pass: accumulates into Z(x+pad, y+pad) the
// sum of exp(-||patch difference||^2 / filtsigma) between the pixel's own
// patch (from g_A, the padded global image A) and every candidate patch in
// the block's tile (from s_A, loaded from the per-block partition B).
// Dynamic shared memory must hold 2 * dimension^2 floats, where
// dimension = blockDim.x + patchSize - 1. s_H holds the patch kernel
// weights in constant memory.
// NOTE(review): the __syncthreads() below sits inside a divergent bounds
// check -- safe only if the launch guarantees whole blocks pass or fail the
// check together; confirm the grid configuration at the call site.
__global__ void Zcalc(float const * const A,float const * const B, float *Z,float const * const H,int patchSize,float filtsigma, int m, int n)
{
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
int x_local=threadIdx.x;
int y_local = threadIdx.y;
int pad =(patchSize-1)/2;
int dimension=blockDim.x+patchSize-1;
extern __shared__ float Memory[];
float *s_A=&Memory[0];//local block position in s_A
float *g_A=&Memory[dimension*dimension];//blocks global potition in g_A
if(x<m-2*pad && y<n-2*pad){
// Stage the block's tile plus its patchSize-1 halo into shared memory;
// threads near the block edge load the extra halo rows/columns.
s_A[x_local +y_local*dimension]=B[x_local+y_local*dimension];
if(x_local>blockDim.x-patchSize){
s_A[(x_local+patchSize-1) + y_local*dimension]=B[x_local+patchSize-1 +y_local*dimension];
}
if(y_local>blockDim.y-patchSize){
s_A[x_local + (y_local+patchSize-1)*dimension]=B[x_local + (y_local+patchSize-1)*dimension];
}
if(x_local>blockDim.x-patchSize && y_local>blockDim.y-patchSize ){
s_A[x_local+patchSize-1 + (y_local+patchSize-1)*dimension]=B[x_local+patchSize-1 + (y_local+patchSize-1)*dimension];
}
g_A[x_local +y_local*dimension]=A[x+y*m];
if(x_local>blockDim.x-patchSize){
g_A[(x_local+patchSize-1) + y_local*dimension]=A[x+patchSize-1 +y*m];
}
if(y_local>blockDim.y-patchSize){
g_A[x_local + (y_local+patchSize-1)*dimension]=A[x + (y+patchSize-1)*m];
}
if(x_local>blockDim.x-patchSize && y_local>blockDim.y-patchSize ){
g_A[x_local+patchSize-1 + (y_local+patchSize-1)*dimension]=A[x+patchSize-1 + (y+patchSize-1)*m];
}
__syncthreads();
}
if(x<m-2*pad && y<n-2*pad){
int counter=0;
// Weighted squared distance between the thread's patch and each
// candidate patch centre (i, j) in the tile.
float temp=0,FNij=0,z_local=Z(x+pad,y+pad);
for(int i=pad;i<dimension-pad;i++){
for(int j=pad;j<dimension-pad;j++){
for(int p=-pad;p<=pad;p++){
for(int l=-pad;l<=pad;l++){
temp=(g_A[(x_local+pad +l)+(y_local+pad + p)*dimension]-s_A[(i+l) + (j+p)*dimension])*s_H[counter];
FNij=FNij+temp*temp;
counter++;
}
}
z_local=z_local+expf(-(FNij/filtsigma));
FNij=0;
counter=0;
}
}
Z[x+pad + (y+pad)*m]=z_local;
}
}
// Non-local-means filtering pass: with the normalisation constants Z already
// computed by Zcalc, accumulates into f_(x+pad, y+pad) the weighted average
// (1/Z) * sum over tile patches of exp(-||patch diff||^2 / filtsigma) times
// the candidate centre pixel. Shared-memory staging is identical to Zcalc:
// 2 * dimension^2 floats, dimension = blockDim.x + patchSize - 1.
// NOTE(review): the __syncthreads() below sits inside a divergent bounds
// check -- safe only if whole blocks pass or fail the check together;
// confirm the grid configuration at the call site.
__global__ void fCalc(float const * const A,float const * const B,float const * const Z,float const * const H,float *f_,int patchSize, float filtsigma, int m, int n){
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
int x_local=threadIdx.x;
int y_local = threadIdx.y;
int pad =(patchSize-1)/2;
int dimension=blockDim.x+patchSize-1;
extern __shared__ float Memory[];
float *s_A=&Memory[0];//local block position in s_A
float *g_A=&Memory[dimension*dimension];//blocks global position g_A
if(x<m-2*pad && y<n-2*pad){
// Stage the block's tile plus halo into shared memory (same pattern as
// Zcalc); edge threads load the extra halo rows/columns.
s_A[x_local +y_local*dimension]=B[x_local+y_local*dimension];
if(x_local>blockDim.x-patchSize){
s_A[(x_local+patchSize-1) + y_local*dimension]=B[x_local+patchSize-1 +y_local*dimension];
}
if(y_local>blockDim.y-patchSize){
s_A[x_local + (y_local+patchSize-1)*dimension]=B[x_local + (y_local+patchSize-1)*dimension];
}
if(x_local>blockDim.x-patchSize && y_local>blockDim.y-patchSize ){
s_A[x_local+patchSize-1 + (y_local+patchSize-1)*dimension]=B[x_local+patchSize-1 + (y_local+patchSize-1)*dimension];
}
g_A[x_local +y_local*dimension]=A[x+y*m];
if(x_local>blockDim.x-patchSize){
g_A[(x_local+patchSize-1) + y_local*dimension]=A[x+patchSize-1 +y*m];
}
if(y_local>blockDim.y-patchSize){
g_A[x_local + (y_local+patchSize-1)*dimension]=A[x + (y+patchSize-1)*m];
}
if(x_local>blockDim.x-patchSize && y_local>blockDim.y-patchSize ){
g_A[x_local+patchSize-1 + (y_local+patchSize-1)*dimension]=A[x+patchSize-1 + (y+patchSize-1)*m];
}
__syncthreads();
}
if(x<m-2*pad && y<n-2*pad){
int i,j,k,l,counter=0;
float temp=0,FNij=0,Z_local=Z(x+pad,y+pad),f_local=f_(x+pad,y+pad);
for(i=pad;i<dimension-pad;i++){
for(j=pad;j<dimension-pad;j++){
for(k=-pad;k<=pad;k++){
for(l=-pad;l<=pad;l++){
temp=(g_A[(x_local+pad +l)+(y_local+pad + k)*dimension]-s_A[(i+l) + (j+k)*dimension])*s_H[counter];
FNij=FNij+temp*temp;
counter++;
}
}
f_local=f_local+(1/Z_local)*(expf(-(FNij/filtsigma)))*s_A[i+j*dimension];
FNij=0;
counter=0;
}
}
f_[x+pad + (y+pad)*m]=f_local;
}
}
/*
__global__ void Zcalc(float const *const A,float const * const B,float const * const H,float *Z,int patchSize,float filtSigma,int m , int n)
{ //global coordinates
int x=blockIdx.x*blockDim.x + threadIdx.x;
int y=blockIdx.y*blockDim.y + threadIdx.y;
//block coordinates
int xblock=threadIdx.x;
int yblock=threadIdx.y;
extern __shared__ float Memory[];
int dimension=blockDim.x + patchSize-1;
float *s_A=&Memory[0];
float *g_A=&Memory[dimension*dimension];
if(x<m-patchSize+1 && y<n-patchSize+1)
{
s_A[xblock + yblock*dimension]=B[xblock+yblock*dimension];
__syncthreads();
if(xblock>blockDim.x-patchSize){
s_A[xblock+patchSize-1+ yblock*dimension]=B[xblock+patchSize-1 + yblock*dimension];
}
__syncthreads();
if(yblock>blockDim.y-patchSize){
s_A[xblock+ (yblock+patchSize-1)*dimension ]=B[xblock+ (yblock+patchSize-1)*dimension ];
}
__syncthreads();
if(xblock>blockDim.x-patchSize && yblock>blockDim.y-patchSize){
s_A[xblock+patchSize-1 + (yblock+patchSize-1)*dimension]=B[xblock+patchSize-1 + (yblock+patchSize-1)*dimension];
}
__syncthreads();
//global coordinates of block
g_A[xblock + yblock*dimension]=A[x + y*m];
__syncthreads();
if(xblock>blockDim.x-patchSize){
g_A[xblock+patchSize-1 + yblock*dimension]=A[x+patchSize-1 + y*m];
}
__syncthreads();
if(yblock>blockDim.y-patchSize){
g_A[xblock+ (yblock+patchSize-1)*dimension ]=A[x+ (y+patchSize-1)*m ];
}
__syncthreads();
if(xblock>blockDim.x-patchSize && yblock>blockDim.y-patchSize){
g_A[xblock+patchSize-1 + (yblock+patchSize-1)*dimension]=A[x+patchSize-1 + (y+patchSize-1)*m];
}
}
__syncthreads();
patchSize=(patchSize-1)/2;
if(x<m-2*patchSize && y<n-2*patchSize)
{
int i,j,k,l,counter=0;
float FNij=0,temp=0;
float Z_local=Z(x+patchSize,y+patchSize);
for(i=patchSize;i<dimension-patchSize;i++){
for(j=patchSize;j<dimension-patchSize;j++){
for(k=-patchSize;k<=patchSize;k++){
for(l=-patchSize;l<=patchSize;l++){
temp=(g_A[xblock+patchSize+l + (yblock+patchSize+k)*dimension]-s_A[i+l + (j+k)*dimension])*H[counter];
temp=temp*temp;
FNij=FNij+temp;
counter++;
}
}
Z_local=Z_local+expf(- (FNij/filtSigma));
FNij=0;
counter=0;
}
}
Z(x+patchSize,y+patchSize)=Z_local;
}
}
__global__ void fCalc(float const *const A,float const * const B,float const * const H,float const * const Z,float *f_,int patchSize,float filtSigma,int m , int n)
{ //global coordinates
int x=blockIdx.x*blockDim.x + threadIdx.x;
int y=blockIdx.y*blockDim.y + threadIdx.y;
//block coordinates
int xblock=threadIdx.x;
int yblock=threadIdx.y;
extern __shared__ float Memory[];
int dimension=blockDim.x + patchSize-1;
float *s_A=&Memory[0];
float *g_A=&Memory[dimension*dimension];
if(x<m-patchSize+1 && y<n-patchSize+1)
{
s_A[xblock + yblock*dimension]=B[xblock+yblock*dimension];
__syncthreads();
if(xblock>blockDim.x-patchSize){
s_A[xblock+patchSize-1+ yblock*dimension]=B[xblock+patchSize-1 + yblock*dimension];
}
__syncthreads();
if(yblock>blockDim.y-patchSize){
s_A[xblock+ (yblock+patchSize-1)*dimension ]=B[xblock+ (yblock+patchSize-1)*dimension ];
}
__syncthreads();
if(xblock>blockDim.x-patchSize && yblock>blockDim.y-patchSize){
s_A[xblock+patchSize-1 + (yblock+patchSize-1)*dimension]=B[xblock+patchSize-1 + (yblock+patchSize-1)*dimension];
}
__syncthreads();
//global coordinates of block
g_A[xblock + yblock*dimension]=A[x + y*m];
__syncthreads();
if(xblock>blockDim.x-patchSize){
g_A[xblock+patchSize-1 + yblock*dimension]=A[x+patchSize-1 + y*m];
}
__syncthreads();
if(yblock>blockDim.y-patchSize){
g_A[xblock+ (yblock+patchSize-1)*dimension ]=A[x+ (y+patchSize-1)*m ];
}
__syncthreads();
if(xblock>blockDim.x-patchSize && yblock>blockDim.y-patchSize){
g_A[xblock+patchSize-1 + (yblock+patchSize-1)*dimension]=A[x+patchSize-1 + (y+patchSize-1)*m];
}
}
__syncthreads();
patchSize=(patchSize-1)/2;
if(x<m-2*patchSize && y<n-2*patchSize)
{
int i,j,k,l,counter=0;
float FNij=0,temp=0;
float Z_local=Z(x+patchSize,y+patchSize),f_local=f_(x+patchSize,x+patchSize);
for(i=patchSize;i<dimension-patchSize;i++){
for(j=patchSize;j<dimension-patchSize;j++){
for(k=-patchSize;k<=patchSize;k++){
for(l=-patchSize;l<=patchSize;l++){
temp=(g_A[xblock+patchSize+l + (yblock+patchSize+k)*dimension]-s_A[i+l + (j+k)*dimension])*H[counter];
temp=temp*temp;
FNij=FNij+temp;
counter++;
}
}
f_local=f_local+(1/Z_local)*(expf(- (FNij/filtSigma)))*(s_A[i+(j)*dimension]);
FNij=0;
counter=0;
}
}
f_(x+patchSize,y+patchSize)=f_local;
}
}
*/
|
3,572 | // m0 m1 m2
// m3 m4 m5
// m6 m7 m8
// 3x3 convolution (filter coefficients m0..m8, row-major as in the comment
// above) of src into dst; ldc is the row stride of both images.
// Uses only blockIdx, so it must be launched with a (width x height) grid of
// single-thread blocks -- one block per output pixel.
// NOTE(review): reads x-1/x+1 and y-1/y+1 without bounds checks, so border
// pixels read out of bounds -- presumably the caller pads src or launches a
// grid that excludes the border; confirm at the call site.
__global__ void sfilter(float *src, float *dst, long ldc,
float m0, float m1, float m2, float m3, float m4, float m5, float m6, float m7, float m8)
{
long x = blockIdx.x;
long y = blockIdx.y;
float i0 = src[(x-1)+(y-1)*ldc]*m0;
float i1 = src[(x) +(y-1)*ldc]*m1;
float i2 = src[(x+1)+(y-1)*ldc]*m2;
float i3 = src[(x-1)+(y) *ldc]*m3;
float i4 = src[(x) + y * ldc]*m4;
float i5 = src[(x+1)+(y) *ldc]*m5;
float i6 = src[(x-1)+(y+1)*ldc]*m6;
float i7 = src[(x) +(y+1)*ldc]*m7;
float i8 = src[(x+1)+(y+1)*ldc]*m8;
dst[x+y*ldc] = i0 + i1 + i2 + i3 + i4 + i5 + i6 + i7 + i8;
}
|
3,573 |
#include "main.cuh"
#include <stdio.h>
// Smoke-test helper: prints a single newline to stdout.
void test(){
printf("\n");
} |
3,574 |
#include <stdio.h>
const int N = 128;
const int blocksize = 2;
__global__
void matrix_add(float *a, float *b, float* c)
{
    // Element-wise sum c = a + b over an N*N matrix. The row coordinate
    // comes from the x launch dimension and the column from y (matching the
    // original mapping), flattened into a linear offset.
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    const int col = blockIdx.y * blockDim.y + threadIdx.y;
    const int idx = row * N + col;
    c[idx] = a[idx] + b[idx];
}
// Adds two NxN matrices on the GPU and times the whole
// allocate / copy / compute / copy-back sequence with CUDA events.
int main()
{
    const int size = N*N*sizeof(float);
    float* a = new float[N*N];
    float* b = new float[N*N];
    float* result = new float[N*N];
    float theTime = 0;
    float* c;
    float* aa;
    float* bb;
    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++)
        {
            a[i+j*N] = 10 + i;
            b[i+j*N] = (float)j / N;
        }
    cudaError_t err;
    cudaEvent_t myEvent;
    cudaEvent_t mySecondEvent;
    cudaEventCreate(&myEvent);
    cudaEventCreate(&mySecondEvent);
    // (The original synchronised on myEvent before it was ever recorded,
    // which is a no-op; removed.)
    cudaEventRecord(myEvent, 0);
    cudaMalloc( (void**)&c, size );
    cudaMalloc( (void**)&aa, size );
    cudaMalloc( (void**)&bb, size );
    cudaMemcpy(aa, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(bb, b, size, cudaMemcpyHostToDevice);
    dim3 dimBlock( blocksize, blocksize );
    dim3 dimGrid( N/blocksize, N/blocksize ); // N is a multiple of blocksize
    matrix_add<<<dimGrid, dimBlock>>>(aa,bb,c);
    err = cudaPeekAtLastError();
    if(err) printf("cudaPeekAtLastError %d %s\n", err, cudaGetErrorString(err));
    // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
    // drop-in replacement.
    cudaDeviceSynchronize();
    cudaMemcpy( result, c, size, cudaMemcpyDeviceToHost );
    cudaFree( c );
    cudaFree( aa );
    cudaFree( bb );
    cudaEventRecord(mySecondEvent, 0);
    cudaEventSynchronize(mySecondEvent);
    cudaEventElapsedTime(&theTime, myEvent, mySecondEvent);
    printf("Elapsed time: %f \n", theTime/1000);
    // Events and host buffers were leaked in the original version.
    cudaEventDestroy(myEvent);
    cudaEventDestroy(mySecondEvent);
    delete[] a;
    delete[] b;
    delete[] result;
    printf("done\n");
    return EXIT_SUCCESS;
}
|
3,575 | #include <cstdio>
#include <string>
#include <cassert>
#include <iostream>
#include <cstddef>
#include <vector>
#include <thrust/device_vector.h>
#include <thrust/extrema.h>
#include <limits>
using namespace std;
#define int8_t char
#define int16_t short
#define int32_t int
#define int64_t long long
//
// DEFAULt functions for work with cuda
//
#define CSC(call) do { \
cudaError_t res = call; \
if (res != cudaSuccess) { \
fprintf(stderr, "CUDA Error in %s:%d: %s\n", __FILE__, __LINE__, cudaGetErrorString(res)); \
exit(0); \
} \
} while (0)
cudaEvent_t start, stop;
float t;
// Creates the global start/stop events and records the start timestamp on
// the default stream. Must be paired with time_end().
void time_start() {
CSC(cudaEventCreate(&start));
CSC(cudaEventCreate(&stop));
CSC(cudaEventRecord(start, 0));
}
// Close the interval opened by time_start(): records `stop`, waits for
// it, stores the elapsed time (milliseconds, per cudaEventElapsedTime)
// into the global `t`, prints it, and destroys both events.
void time_end() {
// Surfaces any pending async error (e.g. from a kernel launched since
// time_start) before timing is read.
CSC(cudaGetLastError());
CSC(cudaEventRecord(stop, 0));
CSC(cudaEventSynchronize(stop));
CSC(cudaEventElapsedTime(&t, start, stop));
printf("time = %f\n", t);
CSC(cudaEventDestroy(start));
CSC(cudaEventDestroy(stop));
}
const int BLOCK_SIZE = 1024;
const int GRID_SIZE = 16384;
//const int64_t INF = 2100000000;
const int64_t INF = 2147483647;
// Sorts each BLOCK_SIZE-element chunk of `arr` in place using an
// odd-even transposition sort in shared memory.
// Launch: <<<GRID_SIZE, BLOCK_SIZE/2>>>; thread tid owns the element
// pair (2*tid, 2*tid+1) of its block's chunk.
// Precondition: len is a multiple of BLOCK_SIZE (caller pads with INF).
__global__ void k_blocksort(int64_t *arr, int len) {
// Surplus blocks (the grid is fixed at GRID_SIZE) have no chunk.
if (blockIdx.x * BLOCK_SIZE >= len) {
return;
}
__shared__ int64_t block[BLOCK_SIZE];
int idx = threadIdx.x * 2;
int offset = blockIdx.x * BLOCK_SIZE;
int nstep = BLOCK_SIZE / 2;
// printf("offset %d", offset);
// Stage the chunk into shared memory, two elements per thread.
block[idx] = arr[offset + idx];
block[idx+1] = arr[offset + idx+1];
int64_t tmp;
// __syncthreads();
// if (idx < 10)
// printf("%d, %d\n", idx, block[idx]);
// BLOCK_SIZE/2 passes; each pass runs the odd phase (i = idx+1) then
// the even phase (i = idx+2) of neighbour compare-and-swap. The
// barrier executes the same number of times in every thread of the
// block (loop bounds are thread-uniform in trip count), so placing it
// inside the loop is safe.
for (int k = 0; k < nstep; k++) {
for (int i = idx+1; i < idx+3; ++i){
__syncthreads();
if (i < BLOCK_SIZE) {
if (block[i-1] > block[i]) {
// printf("swap %d %d\n", i-1, i);
tmp = block[i-1];
block[i-1] = block[i];
block[i] = tmp;
}
}
}
}
__syncthreads();
// Write the now-sorted chunk back to global memory.
arr[offset + idx] = block[idx];
arr[offset + idx+1] = block[idx+1];
}
// Merges pairs of adjacent sorted BLOCK_SIZE runs into sorted
// 2*BLOCK_SIZE windows using a bitonic-style compare-exchange network
// in shared memory. When `odd` is set the pairing is shifted by one
// run; alternating calls thus implement odd-even merging across the
// whole array.
// Launch: <<<GRID_SIZE, BLOCK_SIZE>>>.
__global__ void k_merge(int64_t *arr, int len, bool odd) {
int offset = blockIdx.x * BLOCK_SIZE * 2;
int idx = threadIdx.x;
int64_t tmp;
if (odd) offset += BLOCK_SIZE;
// Skip windows that would run past the end (incl. the shifted tail).
if (offset + BLOCK_SIZE * 2 > len) return;
__shared__ int64_t block[BLOCK_SIZE * 2];
int idt = 2 * idx;
// Stage both runs, two elements per thread.
block[idt] = arr[offset + idt];
block[idt+1] = arr[offset + idt+1];
__syncthreads();
// First stage: compare each element with its mirror from the far end.
// (idx < BLOCK_SIZE is always true at the documented launch config.)
idt = BLOCK_SIZE * 2 - idx - 1;
if (idx < BLOCK_SIZE && block[idx] > block[idt]) {
tmp = block[idx];
block[idx] = block[idt];
block[idt] = tmp;
}
// Halving clean-up stages: each thread compare-exchanges one pair
// `step` apart inside its 2*step-wide sub-window.
int step = BLOCK_SIZE / 2;
while (step != 0) {
__syncthreads();
idt = idx;
if ((idx / step) % 2 == 1) {
// Upper half of a sub-window: shift the pair base into the next
// window (equivalent to idt + BLOCK_SIZE - step).
idt -= step - BLOCK_SIZE;
}
if (block[idt] > block[idt + step]) {
tmp = block[idt];
block[idt] = block[idt+step];
block[idt+step] = tmp;
}
step /= 2;
}
idt = 2 * idx;
__syncthreads();
// Write the merged window back to global memory.
arr[offset + idt] = block[idt];
arr[offset + idt+1] = block[idt+1];
}
// Reads a binary stream from stdin (count n as a 32-bit int, then n
// 32-bit ints), sorts the values on the GPU, and writes the sorted
// 32-bit ints to stdout. Values are widened to int64_t and padded with
// INF up to a multiple of BLOCK_SIZE; the padding sorts to the tail and
// is never written back.
int main() {
    int n;
    // BUG FIX: the original never checked this read; on empty input `n`
    // was used uninitialized.
    if (fread(&n, sizeof(int), 1, stdin) != 1) {
        fprintf(stderr, "failed to read element count\n");
        return 1;
    }
    fprintf(stderr, "n=%d\t", n);
    int rn = n;  // real (unpadded) element count
    if (n % BLOCK_SIZE != 0) {
        n += BLOCK_SIZE - n % BLOCK_SIZE;
    }
    int64_t *arr = new int64_t[n];
    int l;
    for (int i = 0; i < n; i++) {
        l = INF;  // padding sentinel; kept if the read below fails short
        if (i < rn) {
            fread(&l, sizeof(int), 1, stdin);
        }
        arr[i] = l;
    }
    int64_t *darr;
    CSC(cudaMalloc(&darr, sizeof(int64_t) * n));
    CSC(cudaMemcpy(darr, arr, sizeof(int64_t) * n, cudaMemcpyHostToDevice));
    // Phase 1: sort every BLOCK_SIZE-sized chunk independently.
    k_blocksort<<<GRID_SIZE, BLOCK_SIZE / 2>>>(darr, n);
    CSC(cudaGetLastError());
    fprintf(stderr, "num of steps %d\n", n / BLOCK_SIZE);
    // Phase 2: odd-even merge passes across neighbouring chunks.
    if (n > BLOCK_SIZE) {
        for (int step = 0; step < n / BLOCK_SIZE; step++) {
            k_merge<<<GRID_SIZE, BLOCK_SIZE>>>(darr, n, step & 1);
            CSC(cudaGetLastError());
        }
    }
    // Only the first rn (unpadded) elements are needed on the host.
    CSC(cudaMemcpy(arr, darr, sizeof(int64_t) * rn, cudaMemcpyDeviceToHost));
    CSC(cudaFree(darr));
    for (int i = 0; i < rn; i++) {
        l = (int)arr[i];
        fwrite(&l, sizeof(int), 1, stdout);
    }
    // BUG FIX: the original leaked the host buffer.
    delete[] arr;
    return 0;
}
|
3,576 | #include "Utils.cuh"
#include "cp4Conv2d.cuh"
#include "cp4Conv2dBackwardData.cuh"
#include "cp4Conv2dBackwardFilter.cuh"
#include "cp4Conv2dForward.cuh"
#include <iostream>
#include <stdlib.h>
using namespace std;
/*******************************************************************************
* Unified memory Tensorized call of Convolution in GPU
******************************************************************************/
// Forward pass of a CP4-decomposed 2-D convolution.
// Input is NCHW; FT/FC/FY/FX are the rank-R factor matrices of the
// decomposed filter (shape[1] of FT is the rank; shape[0] of FT/FY/FX
// gives T/Y/X respectively). Returns a freshly allocated N x T x H x W
// output tensor (spatial size preserved, consistent with `pad`).
// NOTE(review): FC's shape is never read here; C is taken from Input --
// presumably FC.shape[0] == C. Confirm against cp4Conv2dForward.cuh.
Tensor CP::Conv2dForward(Tensor const Input,
Tensor const FT,
Tensor const FC,
Tensor const FY,
Tensor const FX,
unsigned pad) {
tensor_shape params;
params.N = Input.shape[0];
params.C = Input.shape[1];
params.H = Input.shape[2];
params.W = Input.shape[3];
params.pad = pad;
params.Rank = FT.shape[1];
params.T = FT.shape[0];
params.Y = FY.shape[0];
params.X = FX.shape[0];
Tensor Out{ params.N, params.T, params.H, params.W };
// Delegates the actual kernel launch to the forward implementation.
cp4_conv2d_forward_gpu(params,
Input.m_data,
FT.m_data,
FC.m_data,
FY.m_data,
FX.m_data,
Out.m_data);
return Out;
}
/*******************************************************************************
* Unified memory Tensorized call of Convolution Backward Data in GPU
******************************************************************************/
/* Tensor CP::Conv2dBackwardData(Tensor const Upstream, */
/* Tensor const FT, */
/* Tensor const FC, */
/* Tensor const FY, */
/* Tensor const FX, */
/* unsigned pad) { */
/* tensor_shape params; */
/* params.N = Upstream.shape[0]; */
/* params.T = Upstream.shape[1]; */
/* params.H = Upstream.shape[2]; */
/* params.W = Upstream.shape[3]; */
/* params.pad = pad; */
/* params.Rank = FT.shape[1]; */
/* params.C = FC.shape[0]; */
/* params.Y = FY.shape[0]; */
/* params.X = FX.shape[0]; */
/* Tensor Out{ params.N, params.C, params.H, params.W }; */
/* cp4_conv2d_backward_data_gpu(params, */
/* Upstream.m_data, */
/* FT.m_data, */
/* FC.m_data, */
/* FY.m_data, */
/* FX.m_data, */
/* Out.m_data); */
/* return Out; */
/* } */
/*******************************************************************************
* Unified memory Tensorized call of Convolution Backward Filter in GPU
******************************************************************************/
/* Tensor CP::Conv2dBackwardFilter(Tensor const dLdO, */
/* Tensor const In, */
/* Tensor const FT, */
/* Tensor const FC, */
/* Tensor const FY, */
/* Tensor const FX, */
/* unsigned pad) { */
/* tensor_shape s; */
/* s.N = dLdO.shape[0]; */
/* s.T = dLdO.shape[1]; */
/* s.H = dLdO.shape[2]; */
/* s.W = dLdO.shape[3]; */
/* s.pad = pad; */
/* s.Rank = FT.shape[1]; */
/* s.C = FC.shape[0]; */
/* s.Y = FY.shape[0]; */
/* s.X = FX.shape[0]; */
/* Tensor dFT{ s.T, s.Rank }; */
/* Tensor dFC{ s.C, s.Rank }; */
/* Tensor dFY{ s.Y, s.Rank }; */
/* Tensor dFX{ s.X, s.Rank }; */
/* /1* Tensor dFF{ s.T, s.C, s.Y, s.X }; *1/ */
/* /1* cp4_conv2d_backward_filter_full_gpu( *1/ */
/* /1* s, dFF.m_data, In.m_data, dLdO.m_data); *1/ */
/* /1* return dFF; *1/ */
/* cp4_conv2d_backward_filter_t_gpu(s, */
/* dFT.m_data, */
/* In.m_data, */
/* dLdO.m_data, */
/* FC.m_data, */
/* FY.m_data, */
/* FX.m_data); */
/* /1* cout << dFT.m_data[0] << endl; *1/ */
/* /1* cout << dFC.m_data[0] << endl; *1/ */
/* /1* cout << dFY.m_data[0] << endl; *1/ */
/* /1* cout << dFX.m_data[0] << endl; *1/ */
/* cp4_conv2d_backward_filter_c_gpu(s, */
/* dFC.m_data, */
/* In.m_data, */
/* dLdO.m_data, */
/* FT.m_data, */
/* FY.m_data, */
/* FX.m_data); */
/* cp4_conv2d_backward_filter_y_gpu(s, */
/* dFY.m_data, */
/* In.m_data, */
/* dLdO.m_data, */
/* FT.m_data, */
/* FC.m_data, */
/* FX.m_data); */
/* cp4_conv2d_backward_filter_x_gpu(s, */
/* dFX.m_data, */
/* In.m_data, */
/* dLdO.m_data, */
/* FT.m_data, */
/* FC.m_data, */
/* FY.m_data); */
/* return cp4recom(dFT, dFC, dFY, dFX); */
/* } */
/*******************************************************************************
* Run_convolution operation with a profile count loop
******************************************************************************/
// Allocates throwaway device buffers for one CP4 forward convolution,
// times it over PROFCOUNT runs, and returns {elapsed_time, bytes_moved}.
// Buffers are deliberately left uninitialized: only timing matters.
std::pair<float, unsigned>
CP::run_convolution(tensor_shape p, unsigned PROFCOUNT) {
  float* In;
  float* Out;
  float* FT;
  float* FC;
  float* FX;
  float* FY;
  cudaMalloc(&In, p.N * p.C * p.H * p.W * sizeof(float));
  cudaMalloc(&FT, p.T * p.Rank * sizeof(float));
  cudaMalloc(&FC, p.C * p.Rank * sizeof(float));
  cudaMalloc(&FY, p.Y * p.Rank * sizeof(float));
  cudaMalloc(&FX, p.X * p.Rank * sizeof(float));
  cudaMalloc(&Out, p.N * p.T * p.H * p.W * sizeof(float));
  // BUG FIX: compute the byte count in size_t; the original evaluated
  // the whole expression in 32-bit unsigned, which overflows for large
  // N*C*H*W shapes (e.g. the defaults in main already approach 2^31).
  size_t bytes =
      sizeof(float) * (((size_t)p.N * p.T * p.H * p.W)
                       + ((size_t)p.N * p.C * p.H * p.W)
                       + (size_t)p.Rank * (p.T + p.C + p.X + p.Y));
  // Return type is pair<float, unsigned> (interface preserved); the
  // cast can still truncate for >4 GiB workloads.
  unsigned mem = (unsigned)bytes;
  float us = cp4_conv2d_forward_gpu(p, In, FT, FC, FY, FX, Out, PROFCOUNT);
  cudaFree(In);
  cudaFree(FT);
  cudaFree(FC);
  cudaFree(FY);
  cudaFree(FX);
  cudaFree(Out);
  return std::make_pair(us, mem);
}
/*******************************************************************************
* Main function. call 1 instance of kernel execution
******************************************************************************/
int main(int argc, char** argv) {
  // Profile one CP4 forward convolution. Either all ten shape arguments
  // are supplied (N C H W pad T Y X Rank device) or built-in defaults
  // are used on device 0.
  tensor_shape params;
  params.N    = 5;
  params.C    = 32;
  params.H    = 1024;
  params.W    = 1024;
  params.pad  = 1;
  params.T    = 32;
  params.Y    = 3;
  params.X    = 3;
  params.Rank = 8;
  if (argc != 11) {
    cerr << "Using Default shape" << endl;
    cudaSetDevice(0);
  } else {
    params.N    = atoi(argv[1]);
    params.C    = atoi(argv[2]);
    params.H    = atoi(argv[3]);
    params.W    = atoi(argv[4]);
    params.pad  = atoi(argv[5]);
    params.T    = atoi(argv[6]);
    params.Y    = atoi(argv[7]);
    params.X    = atoi(argv[8]);
    params.Rank = atoi(argv[9]);
    cudaSetDevice(atoi(argv[10]));
  }
  CP::run_convolution(params, 1);
}
|
3,577 | /*
* vim: ts=8:sw=8:tw=79:noet
*
* Copyright (c) 2013, Colin Patrick McCabe
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#define ARR_SIZE 5
/*
 * Check a CUDA runtime call and abort with a diagnostic on failure.
 * BUG FIX: the argument is now evaluated exactly once -- the original
 * compared `x != cudaSuccess`, re-issuing the CUDA call a second time
 * on the error path. Also dropped the trailing semicolon after
 * `while (0)` so call sites ending in `;` remain safe inside if/else.
 */
#define EXPECT_CUDASUCCESS(x) \
	do { \
		cudaError_t err = (x); \
		if (err != cudaSuccess) { \
			fprintf(stderr, "error %d (%s) on line %d of %s\n", \
				err, cudaGetErrorString(err), \
				__LINE__, __FILE__); \
			exit(1); \
		} \
	} while (0)
static int *gd_a, *gd_b, *gd_c;
__global__ void add(int *a, int *b, int *c)
{
int tid = blockIdx.x;
if (tid < ARR_SIZE) {
c[tid] = a[tid] + b[tid];
}
}
/* Print `len` ints as a comma-separated list ("v0, v1, ..."), with no
 * trailing separator or newline. */
static void print_vector(const int * const a, size_t len)
{
	for (size_t i = 0; i < len; i++) {
		if (i != 0)
			printf(", ");
		printf("%d", a[i]);
	}
}
/* a = {0..4}, b = {1,1,1,1,1}; compute c = a + b on the GPU with one
 * block per element, then print all three vectors. */
int main(void)
{
	int i, a[ARR_SIZE], b[ARR_SIZE], c[ARR_SIZE];
	for (i = 0; i < ARR_SIZE; i++) {
		a[i] = i;
	}
	for (i = 0; i < ARR_SIZE; i++) {
		b[i] = 1;
	}
	EXPECT_CUDASUCCESS(cudaMalloc((void**)&gd_a,
		sizeof(int) * ARR_SIZE));
	EXPECT_CUDASUCCESS(cudaMemcpy(gd_a, &a,
		sizeof(int) * ARR_SIZE, cudaMemcpyHostToDevice));
	EXPECT_CUDASUCCESS(cudaMalloc((void**)&gd_b,
		sizeof(int) * ARR_SIZE));
	EXPECT_CUDASUCCESS(cudaMemcpy(gd_b, &b,
		sizeof(int) * ARR_SIZE, cudaMemcpyHostToDevice));
	EXPECT_CUDASUCCESS(cudaMalloc((void**)&gd_c,
		sizeof(int) * ARR_SIZE));
	add<<<ARR_SIZE, 1>>>(gd_a, gd_b, gd_c);
	/* BUG FIX: kernel launches do not return an error status; fetch any
	 * launch-configuration error explicitly. */
	EXPECT_CUDASUCCESS(cudaGetLastError());
	/* This blocking copy also synchronizes with the kernel. */
	EXPECT_CUDASUCCESS(cudaMemcpy(c, gd_c,
		sizeof(int) * ARR_SIZE, cudaMemcpyDeviceToHost));
	printf("initial vector a: ");
	print_vector(a, ARR_SIZE);
	printf("\ninitial vector b: ");
	print_vector(b, ARR_SIZE);
	printf("\nfinal vector c: ");
	print_vector(c, ARR_SIZE);
	printf("\n");
	cudaFree(gd_a);
	cudaFree(gd_b);
	cudaFree(gd_c);
	/* BUG FIX: return 0 directly -- EXIT_SUCCESS lives in <stdlib.h>,
	 * which this file never includes. */
	return 0;
}
|
3,578 | #include <iostream>
// C = A * B for len x len row-major int matrices. One block per output
// row (grid-strided over rows); the block's threads cooperatively
// compute each dot product, then tree-reduce in shared memory.
// Preconditions: blockDim.x <= 256 and blockDim.x is a power of two
// (required by the halving reduction below).
__global__ void normal(int *a, int *b, int *c, int len)
{
	int myrow = blockIdx.x;
	__shared__ int smem[256];
	while (myrow < len)
	{
		for (int i = 0; i < len; i++) // column of the right matrix
		{
			int tid = threadIdx.x;
			int res = 0;
			// Strided partial dot product (stride-len access on b).
			while (tid < len)
			{
				res += a[myrow*len + tid] * b[tid*len + i];
				tid += blockDim.x;
			}
			smem[threadIdx.x] = res;
			__syncthreads();
			for (int idx = blockDim.x/2; idx > 0; idx = idx/2)
			{
				if (threadIdx.x < idx)
				{
					smem[threadIdx.x] += smem[threadIdx.x + idx];
				}
				__syncthreads();
			}
			c[myrow*len + i] = smem[0];
			// BUG FIX: barrier before the next column overwrites smem;
			// without it a fast thread could store its new partial into
			// smem while a slower thread is still reading smem[0]
			// (shared-memory data race).
			__syncthreads();
		}
		myrow += gridDim.x;
	}
}
// Like `normal`, but the right operand is indexed b[col*len + k], i.e.
// b is treated as already transposed so both operands are read with
// unit stride along k (coalesced loads).
// Preconditions: blockDim.x <= 256 and a power of two.
__global__ void transpose(int *a, int *b, int *c, int len)
{
	int myrow = blockIdx.x;
	__shared__ int smem[256];
	while (myrow < len)
	{
		for (int col = 0; col < len; col++) // output column
		{
			int tid = threadIdx.x;
			int res = 0;
			while (tid < len)
			{
				res += a[myrow*len + tid] * b[col*len + tid];
				tid += blockDim.x;
			}
			smem[threadIdx.x] = res;
			__syncthreads();
			// Tree reduction. (The original reused `i` here, shadowing
			// the column loop variable; renamed for clarity.)
			for (int stride = blockDim.x/2; stride > 0; stride = stride/2)
			{
				if (threadIdx.x < stride)
				{
					smem[threadIdx.x] += smem[threadIdx.x + stride];
				}
				__syncthreads();
			}
			c[myrow*len + col] = smem[0];
			// BUG FIX: barrier before the next column's smem writes;
			// otherwise smem[0] can be overwritten while other threads
			// are still reading it (shared-memory data race).
			__syncthreads();
		}
		myrow += gridDim.x;
	}
}
|
3,579 | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// b[i] = sin(a[i]); one single-thread block per element, so blockIdx.x
// selects the element (launched <<<MAX, 1>>> by main).
__global__ void sinw(float *a, float *b) {
	int idx = blockIdx.x;
	b[idx] = sinf(a[idx]);
}
// Fills a[] with multiples of ~pi/4, computes sin on the GPU (one block
// per element), and prints the input and output arrays.
int main(void)
{
	// const so the arrays below are proper fixed-size arrays, not VLAs.
	const int MAX = 10;
	float a[MAX], b[MAX];
	float *d_a, *d_b;
	// BUG FIX: the element type is float, not int (same size on common
	// platforms, but wrong if either type ever changes).
	int size = sizeof(float)*MAX;
	cudaMalloc((void**)&d_a, size);
	cudaMalloc((void**)&d_b, size);
	for (int i = 0; i < MAX; ++i)
	{
		a[i] = (3.14/4)*i;
	}
	printf("Array A:\n");
	for (int i = 0; i < MAX; ++i)
		printf("%f\t", a[i]);
	cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice);
	// (The original also copied the uninitialized b[] to the device;
	// that transfer was useless and has been dropped.)
	sinw<<<MAX,1>>>(d_a, d_b);
	cudaMemcpy(&b, d_b, size, cudaMemcpyDeviceToHost);
	printf("\nFinal result:\n");
	for (int i = 0; i < MAX; ++i)
		printf("%f\t", b[i]);
	printf("\n");
	cudaFree(d_a);
	cudaFree(d_b);
	return 0;
}
|
3,580 | #include <stdio.h>
#include <cuda_runtime.h>
#include <iostream>
#include <fstream>
// Benchmarks a serial CPU reduction of 2^i ones for i = 0..29, timed
// (somewhat unusually) with CUDA events, averaged over `times` runs,
// and appended to seq_reduce.csv.
int main(int argc,char **argv)
{
	std::ofstream myfile;
	myfile.open ("seq_reduce.csv");
	const unsigned int times = 10;
	for (unsigned int i = 0; i<30; i++)
	{
		const unsigned int IN_SIZE = 1<<i;
		const unsigned int IN_BYTES = sizeof(unsigned int)*IN_SIZE;
		const unsigned int OUT_SIZE = 1;
		const unsigned int OUT_BYTES = sizeof(unsigned int)*OUT_SIZE;
		printf("\ni = %d\n", i);
		printf("\n ARRAY_SIZE = %d\n", IN_SIZE);
		printf(" ARRAY_BYTES = %d\n", IN_BYTES);
		unsigned int * h_in = (unsigned int*)malloc(IN_BYTES);
		unsigned int * h_out = (unsigned int*)malloc(OUT_BYTES);
		for (unsigned int j = 0; j<IN_SIZE; j++) {h_in[j] = 1;}
		// setting up time
		cudaEvent_t start, stop;
		cudaEventCreate(&start);
		cudaEventCreate(&stop);
		cudaEventRecord(start, 0);
		// running the code on the CPU $times times
		for (unsigned int k = 0; k<times; k++)
		{
			h_out[0] = 0;
			for (unsigned int l = 0; l < IN_SIZE; ++l)
			{
				h_out[0] += h_in[l];
			}
		}
		cudaEventRecord(stop, 0);
		cudaEventSynchronize(stop);
		// calculating the per-run average time
		float elapsedTime = .0f;
		cudaEventElapsedTime(&elapsedTime, start, stop);
		elapsedTime = elapsedTime / ((float) times);
		printf(" time: %.5f\n", elapsedTime);
		myfile << elapsedTime << ",";
		// BUG FIX: the original leaked both host buffers (up to 2 GiB
		// at i = 29) and both CUDA events on every iteration.
		free(h_in);
		free(h_out);
		cudaEventDestroy(start);
		cudaEventDestroy(stop);
	}
	myfile.close();
	return 0;
}
|
3,581 | #include "includes.h"
// Manhattan (L1) distance from each of the n dim-dimensional rows of
// `data` to `point`, written to distance[i]. One thread per row;
// surplus tail threads exit early.
__global__ void gpu_distance(int* data, float* distance, int* point, int n, int dim) {
	int row = blockIdx.x * blockDim.x + threadIdx.x;
	if (row >= n)
		return;
	float acc = 0;
	for (int k = 0; k < dim; k++)
		acc += abs(data[row*dim + k] - point[k]);
	distance[row] = acc;
} |
3,582 | #include <stdio.h>
#include <math.h>
#define N 2047
#define T 1024
__global__ void vecAdd(int *a, int *b, int *c);
// Element-wise add of two N-length int vectors on the GPU, then print
// the first and last result.
int main() {
	int *a, *b, *c;
	int *d_a, *d_b, *d_c;
	int size = N * sizeof(int);
	cudaMalloc((void**)&d_a, size);
	cudaMalloc((void**)&d_b, size);
	cudaMalloc((void**)&d_c, size);
	a = (int *)malloc(size);
	b = (int *)malloc(size);
	c = (int *)malloc(size);
	for (int i = 0; i < N; i++) {
		a[i] = b[i] = i + 1;
		c[i] = 0;
	}
	cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
	cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
	// ceil(N/T) blocks of T threads; vecAdd bounds-checks the tail.
	vecAdd<<<(int)ceil(N/(double)T),T>>>(d_a, d_b, d_c);
	// Blocking copy: also synchronizes with the kernel.
	cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
	cudaFree(d_a);
	cudaFree(d_b);
	cudaFree(d_c);
	printf("c[%*d] = %d\n", (int)log10(N+1), 0, c[0]);
	printf("c[%*d] = %d\n", (int)log10(N+1), N-1, c[N-1]);
	// BUG FIX: release the host buffers (the original leaked all three).
	free(a);
	free(b);
	free(c);
	exit(0);
}
// c[i] = a[i] + b[i] over N elements; extra threads in the tail block
// are masked off by the bounds check.
__global__ void vecAdd(int *a, int *b, int *c) {
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx >= N)
		return;
	c[idx] = a[idx] + b[idx];
} |
3,583 | #include "ParamsCarrier.cuh"
// Storage for the class-level singleton pointer declared in the header.
// NOTE(review): never initialized here -- presumably assigned by an
// instance()/factory method elsewhere; confirm against ParamsCarrier.cuh.
ParamsCarrier* ParamsCarrier::INSTANCE;
// Default constructor: intentionally empty (no members set up here).
ParamsCarrier::ParamsCarrier(){
} |
3,584 | #include <math.h>
#include <stdio.h>
// Array access macros
#define b(i,j) B[(i) + (j)*m*m]
// Non-local-means style denoising: each pixel becomes the weighted
// average of all pixels, weighted by exp(-D/filtSigma) where D is the
// squared Euclidean distance between the two pixels' patches (rows of
// the neighbor cube B, patchSize*patchSize entries each).
// I: image as a vector (m*n pixels); B: neighbor cube; one thread per
// pixel on a 2-D grid.
// NOTE(review): I is read (I[l]) and written (I[linearPixelId]) in
// place by concurrent threads -> data race; the output should go to a
// separate buffer. Left as-is to preserve the interface.
__global__ void Denoising(double *I,double *B, int m, int n, int patchSize, double filtSigma) {
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	int j = blockIdx.y * blockDim.y + threadIdx.y;
	int linearPixelId = i+j*m;
	double D = 0;
	if(linearPixelId<m*n) {
		double sumI = 0;  // weighted intensity sum
		double Z = 0;     // normalization factor
		// Iterate over every pixel l as a candidate neighborhood.
		for(int l=0; l<m*n; l++) {
			D = 0;
			// Patch dissimilarity: squared distance between patches.
			for(int k=0; k<patchSize*patchSize; k++) {
				D+=(b(linearPixelId,k)-b(l,k))*(b(linearPixelId,k)-b(l,k));
			}
			// BUG FIX (perf): compute the weight once -- the original
			// evaluated the identical exp(...) twice per neighbor.
			// sqrt(D)*sqrt(D) is kept verbatim to preserve rounding.
			double w = exp(-sqrt(D)*sqrt(D)/filtSigma);
			Z += w;
			sumI += w*I[l];
		}
		I[linearPixelId] = sumI/Z;
	}
}
|
3,585 | #include <stdio.h>
#include <cuda.h>
// Minimal probe type: the constructor prints whether it is running on
// the GPU (default flag = 1) or the CPU, per the caller-supplied flag.
class A {
public:
__host__ __device__ A(unsigned ongpu = 1) { printf("in A's constructor: on %s.\n", (ongpu ? "GPU" : "CPU")); }
};
// Each kernel thread default-constructs a local A, so the device-side
// constructor (with ongpu = 1) runs once per thread. `n` is unused.
__global__ void dkernel(unsigned n) {
A a;
//printf("in dkernel %d\n", blockIdx.x * blockDim.x + threadIdx.x);
}
#define BLOCKSIZE 32
// Constructs A once on the host, then BLOCKSIZE more times on the
// device (one per kernel thread); the constructor reports where it ran.
int main() {
A b(0);
unsigned N = BLOCKSIZE;
dkernel<<<1, BLOCKSIZE>>>(N);
// BUG FIX: cudaThreadSynchronize() is deprecated; use the supported
// cudaDeviceSynchronize(). The sync is still required so device-side
// printf output is flushed before the process exits.
cudaDeviceSynchronize();
return 0;
}
|
3,586 | #include "includes.h"
// Column-wise (vertical) 1-D convolution of one blockDim.x x blockDim.y
// tile with a (2*filterR+1)-tap filter; out-of-tile taps are treated as
// zero. One thread per output element.
__global__ void ConvolutionColGPU(float *d_Dst,float *d_Src,float *d_Filter,int filterR){
	int x = threadIdx.x;
	int y = threadIdx.y;
	float sum = 0;
	for (int k = -filterR; k <= filterR; k++) {
		int d = y + k;
		if (d >= 0 && d < blockDim.y) {
			sum += d_Src[d * blockDim.x + x] * d_Filter[filterR - k];
		}
	}
	// BUG FIX: store once after accumulation; the original wrote the
	// running partial sum to global memory on every loop iteration.
	// The final value is unchanged, but the redundant global stores
	// were pure waste.
	d_Dst[y * blockDim.x + x] = sum;
} |
3,587 | #include <cuda.h>
// No-op kernel: exists only to exercise launch plumbing.
__global__ void kernel() {}
// C-linkage wrapper so non-CUDA translation units can trigger a 1x1 launch.
extern "C" void cuda_kernel() { kernel<<<1, 1>>>(); }
|
3,588 | #include <iostream>
using namespace std;
__global__
void lambdaTestKernel(int *num)
{
    // Device-side lambda capturing `num` by reference; invoking it
    // stores 5 through the pointer.
    auto writeFive = [&] () { *num = 5; };
    writeFive();
}
// Launches a kernel whose device lambda writes 5 through d_num, copies
// the value back, and prints it (expected output: "5").
void testDevice()
{
    int num = 0;
    int *d_num;
    cudaMalloc(&d_num, sizeof(int));
    cudaMemcpy(d_num, &num, sizeof(int), cudaMemcpyHostToDevice);
    lambdaTestKernel <<<1, 1>>> (d_num);
    // Blocking copy: also synchronizes with the kernel.
    cudaMemcpy(&num, d_num, sizeof(int), cudaMemcpyDeviceToHost);
    // BUG FIX: the original leaked d_num.
    cudaFree(d_num);
    cout << num << endl;
}
// Exercises three host-side lambda flavours; output is identical to
// the original demo (two "Hello world" lines, then the captured text).
void testHost()
{
    char message[] = "Another Hello World!";
    // 1) A capture-less lambda decays to a plain function pointer.
    void (*sayHello)() = [] () { cout << "Hello world" << endl; };
    sayHello();
    // 2) The same lambda held via auto (its closure type).
    auto sayHelloAgain = [] () { cout << "Hello world" << endl; };
    sayHelloAgain();
    // 3) Reference capture of a local buffer.
    auto sayMessage = [&] () { cout << message << endl; };
    sayMessage();
}
// Runs the host-side lambda demos first, then the device round trip.
int main()
{
testHost();
testDevice();
return 0;
} |
3,589 | #include<stdio.h>
#include<cuda.h>
#include<math.h>
#include<sys/time.h>
// Intentionally empty kernel: the benchmark in main() measures only the
// host<->device copy time, so the kernel body does no work. Both
// parameters are unused.
__global__
void Matadd(char* A,int N)
{
}
// Measures host->device transfer time for buffer sizes 2^0 .. 2^30
// bytes using pinned host memory and CUDA events, printing the elapsed
// time per size. (The printed unit label says microsecs, but
// cudaEventElapsedTime reports milliseconds; message kept as-is.)
int main()
{
	for(int j=0;j<=30;j++)
	{
		cudaEvent_t start1,stop1;
		float time1;
		int i;
		int N = pow(2,j);
		size_t size = N;
		printf ("\n The value of N is %d",N);
		cudaEventCreate(&start1);
		cudaEventCreate(&stop1);
		// Pinned host allocation (required for fast DMA transfers).
		char* hA; cudaMallocHost(&hA, size);
		for(i=0;i<N;i++)
		{
			hA[i] = rand()%20-10;
		}
		// Device allocation of the same size.
		char* dA;
		cudaMalloc((void**) &dA,size);
		// Inclusive timing window: covers only the H2D copy below.
		cudaEventRecord(start1, 0);
		cudaMemcpy(dA, hA, size, cudaMemcpyHostToDevice);
		cudaEventRecord(stop1, 0);
		cudaEventSynchronize(stop1);
		int threadsperblock = 16;
		int blockspergrid = (N + threadsperblock - 1)/ threadsperblock;
		// No-op kernel launch plus the D2H copy back (untimed).
		Matadd<<<blockspergrid,threadsperblock>>>(dA,N);
		cudaMemcpy(hA, dA, size, cudaMemcpyDeviceToHost);
		cudaEventElapsedTime(&time1,start1,stop1);
		printf("\n The data transfer time(in microsecs) for 2 to power %d is %f respectively \n",j,time1);
		// BUG FIX: pinned memory allocated with cudaMallocHost must be
		// released with cudaFreeHost, not cudaFree; also destroy the
		// per-iteration events (the original leaked both).
		cudaFreeHost(hA);
		cudaFree(dA);
		cudaEventDestroy(start1);
		cudaEventDestroy(stop1);
	}
	return 0;
}
|
3,590 | #include <stdio.h>
#include <sys/time.h>
#define SAMPLE_TEST_LEN 2048
#define SCALER 4096
#define LUT_SIZE 1024
__device__
short lut_sigmoid[LUT_SIZE] = {
2048,2056,2064,2072,2080,2088,2096,2104,
2112,2120,2128,2136,2144,2152,2160,2168,
2176,2184,2192,2200,2208,2216,2224,2232,
2239,2247,2255,2263,2271,2279,2287,2295,
2303,2311,2318,2326,2334,2342,2350,2358,
2365,2373,2381,2389,2397,2404,2412,2420,
2428,2435,2443,2451,2458,2466,2474,2481,
2489,2497,2504,2512,2519,2527,2535,2542,
2550,2557,2565,2572,2580,2587,2594,2602,
2609,2617,2624,2631,2639,2646,2653,2661,
2668,2675,2682,2690,2697,2704,2711,2718,
2726,2733,2740,2747,2754,2761,2768,2775,
2782,2789,2796,2803,2810,2817,2823,2830,
2837,2844,2851,2857,2864,2871,2878,2884,
2891,2898,2904,2911,2917,2924,2930,2937,
2943,2950,2956,2963,2969,2975,2982,2988,
2994,3001,3007,3013,3019,3026,3032,3038,
3044,3050,3056,3062,3068,3074,3080,3086,
3092,3098,3104,3110,3116,3121,3127,3133,
3139,3144,3150,3156,3161,3167,3173,3178,
3184,3189,3195,3200,3206,3211,3217,3222,
3227,3233,3238,3243,3249,3254,3259,3264,
3269,3275,3280,3285,3290,3295,3300,3305,
3310,3315,3320,3325,3330,3334,3339,3344,
3349,3354,3358,3363,3368,3372,3377,3382,
3386,3391,3395,3400,3404,3409,3413,3418,
3422,3427,3431,3435,3440,3444,3448,3452,
3457,3461,3465,3469,3473,3477,3481,3486,
3490,3494,3498,3502,3506,3510,3513,3517,
3521,3525,3529,3533,3536,3540,3544,3548,
3551,3555,3559,3562,3566,3570,3573,3577,
3580,3584,3587,3591,3594,3598,3601,3604,
3608,3611,3614,3618,3621,3624,3628,3631,
3634,3637,3640,3644,3647,3650,3653,3656,
3659,3662,3665,3668,3671,3674,3677,3680,
3683,3686,3689,3691,3694,3697,3700,3703,
3705,3708,3711,3714,3716,3719,3722,3724,
3727,3730,3732,3735,3737,3740,3742,3745,
3747,3750,3752,3755,3757,3760,3762,3764,
3767,3769,3772,3774,3776,3778,3781,3783,
3785,3788,3790,3792,3794,3796,3798,3801,
3803,3805,3807,3809,3811,3813,3815,3817,
3819,3821,3823,3825,3827,3829,3831,3833,
3835,3837,3839,3841,3843,3844,3846,3848,
3850,3852,3853,3855,3857,3859,3861,3862,
3864,3866,3867,3869,3871,3872,3874,3876,
3877,3879,3880,3882,3884,3885,3887,3888,
3890,3891,3893,3894,3896,3897,3899,3900,
3902,3903,3905,3906,3907,3909,3910,3912,
3913,3914,3916,3917,3918,3920,3921,3922,
3924,3925,3926,3927,3929,3930,3931,3932,
3934,3935,3936,3937,3938,3940,3941,3942,
3943,3944,3945,3947,3948,3949,3950,3951,
3952,3953,3954,3955,3956,3957,3958,3959,
3960,3961,3963,3964,3965,3966,3966,3967,
3968,3969,3970,3971,3972,3973,3974,3975,
3976,3977,3978,3979,3980,3980,3981,3982,
3983,3984,3985,3986,3986,3987,3988,3989,
3990,3990,3991,3992,3993,3994,3994,3995,
3996,3997,3997,3998,3999,4000,4000,4001,
4002,4003,4003,4004,4005,4005,4006,4007,
4007,4008,4009,4009,4010,4011,4011,4012,
4013,4013,4014,4015,4015,4016,4016,4017,
4018,4018,4019,4019,4020,4021,4021,4022,
4022,4023,4023,4024,4025,4025,4026,4026,
4027,4027,4028,4028,4029,4029,4030,4030,
4031,4031,4032,4032,4033,4033,4034,4034,
4035,4035,4036,4036,4037,4037,4038,4038,
4038,4039,4039,4040,4040,4041,4041,4041,
4042,4042,4043,4043,4043,4044,4044,4045,
4045,4045,4046,4046,4047,4047,4047,4048,
4048,4048,4049,4049,4050,4050,4050,4051,
4051,4051,4052,4052,4052,4053,4053,4053,
4054,4054,4054,4055,4055,4055,4056,4056,
4056,4057,4057,4057,4057,4058,4058,4058,
4059,4059,4059,4059,4060,4060,4060,4061,
4061,4061,4061,4062,4062,4062,4062,4063,
4063,4063,4063,4064,4064,4064,4064,4065,
4065,4065,4065,4066,4066,4066,4066,4067,
4067,4067,4067,4068,4068,4068,4068,4068,
4069,4069,4069,4069,4069,4070,4070,4070,
4070,4070,4071,4071,4071,4071,4071,4072,
4072,4072,4072,4072,4073,4073,4073,4073,
4073,4073,4074,4074,4074,4074,4074,4074,
4075,4075,4075,4075,4075,4075,4076,4076,
4076,4076,4076,4076,4077,4077,4077,4077,
4077,4077,4077,4078,4078,4078,4078,4078,
4078,4078,4079,4079,4079,4079,4079,4079,
4079,4079,4080,4080,4080,4080,4080,4080,
4080,4080,4081,4081,4081,4081,4081,4081,
4081,4081,4082,4082,4082,4082,4082,4082,
4082,4082,4082,4082,4083,4083,4083,4083,
4083,4083,4083,4083,4083,4084,4084,4084,
4084,4084,4084,4084,4084,4084,4084,4084,
4085,4085,4085,4085,4085,4085,4085,4085,
4085,4085,4085,4085,4086,4086,4086,4086,
4086,4086,4086,4086,4086,4086,4086,4086,
4086,4087,4087,4087,4087,4087,4087,4087,
4087,4087,4087,4087,4087,4087,4087,4088,
4088,4088,4088,4088,4088,4088,4088,4088,
4088,4088,4088,4088,4088,4088,4088,4089,
4089,4089,4089,4089,4089,4089,4089,4089,
4089,4089,4089,4089,4089,4089,4089,4089,
4089,4090,4090,4090,4090,4090,4090,4090,
4090,4090,4090,4090,4090,4090,4090,4090,
4090,4090,4090,4090,4090,4090,4090,4091,
4091,4091,4091,4091,4091,4091,4091,4091,
4091,4091,4091,4091,4091,4091,4091,4091,
4091,4091,4091,4091,4091,4091,4091,4091,
4091,4092,4092,4092,4092,4092,4092,4092,
4092,4092,4092,4092,4092,4092,4092,4092,
4092,4092,4092,4092,4092,4092,4092,4092,
4092,4092,4092,4092,4092,4092,4092,4092,
4092,4093,4093,4093,4093,4093,4093,4093,
4093,4093,4093,4093,4093,4093,4093,4093,
4093,4093,4093,4093,4093,4093,4093,4093,
4093,4093,4093,4093,4093,4093,4093,4093,
4093,4093,4093,4093,4093,4093,4093,4093,
4093,4093,4093,4093,4094,4094,4094,4094,
4094,4094,4094,4094,4094,4094,4094,4094,
4094,4094,4094,4094,4094,4094,4094,4094,
4094,4094,4094,4094,4094,4094,4094,4094,
4094,4094,4094,4094,4094,4094,4094,4094,
4094,4094,4094,4094,4094,4094,4094,4094,
4094,4094,4094,4094,4094,4094,4094,4094,
4094,4094,4094,4094,4094,4094,4094,4094,
4094,4094,4094,4094,4094,4095,4095,4095,
4095,4095,4095,4095,4095,4095,4095,4095};
__device__
short lut_tanh[LUT_SIZE] = {
0,32,64,96,128,160,192,224,
256,288,319,351,383,415,446,478,
509,541,572,604,635,666,697,728,
759,790,821,851,882,912,943,973,
1003,1033,1063,1093,1123,1152,1181,1211,
1240,1269,1298,1326,1355,1383,1412,1440,
1468,1496,1523,1551,1578,1605,1632,1659,
1686,1712,1739,1765,1791,1817,1842,1868,
1893,1918,1943,1968,1992,2016,2041,2064,
2088,2112,2135,2158,2181,2204,2227,2249,
2272,2294,2316,2337,2359,2380,2401,2422,
2443,2463,2484,2504,2524,2543,2563,2582,
2602,2621,2639,2658,2676,2695,2713,2731,
2748,2766,2783,2800,2817,2834,2851,2867,
2883,2899,2915,2931,2946,2962,2977,2992,
3007,3021,3036,3050,3064,3078,3092,3106,
3119,3133,3146,3159,3172,3185,3197,3210,
3222,3234,3246,3258,3270,3281,3293,3304,
3315,3326,3337,3347,3358,3368,3379,3389,
3399,3409,3419,3428,3438,3447,3456,3466,
3475,3483,3492,3501,3510,3518,3526,3535,
3543,3551,3559,3566,3574,3582,3589,3596,
3604,3611,3618,3625,3632,3639,3645,3652,
3659,3665,3671,3678,3684,3690,3696,3702,
3707,3713,3719,3724,3730,3735,3741,3746,
3751,3756,3761,3766,3771,3776,3781,3786,
3790,3795,3799,3804,3808,3812,3817,3821,
3825,3829,3833,3837,3841,3845,3848,3852,
3856,3859,3863,3867,3870,3873,3877,3880,
3883,3887,3890,3893,3896,3899,3902,3905,
3908,3911,3913,3916,3919,3922,3924,3927,
3929,3932,3934,3937,3939,3942,3944,3946,
3949,3951,3953,3955,3957,3960,3962,3964,
3966,3968,3970,3972,3973,3975,3977,3979,
3981,3983,3984,3986,3988,3989,3991,3993,
3994,3996,3997,3999,4000,4002,4003,4005,
4006,4007,4009,4010,4011,4013,4014,4015,
4016,4018,4019,4020,4021,4022,4024,4025,
4026,4027,4028,4029,4030,4031,4032,4033,
4034,4035,4036,4037,4038,4039,4039,4040,
4041,4042,4043,4044,4044,4045,4046,4047,
4048,4048,4049,4050,4050,4051,4052,4053,
4053,4054,4055,4055,4056,4056,4057,4058,
4058,4059,4059,4060,4061,4061,4062,4062,
4063,4063,4064,4064,4065,4065,4066,4066,
4067,4067,4067,4068,4068,4069,4069,4070,
4070,4070,4071,4071,4072,4072,4072,4073,
4073,4073,4074,4074,4074,4075,4075,4075,
4076,4076,4076,4077,4077,4077,4078,4078,
4078,4078,4079,4079,4079,4079,4080,4080,
4080,4080,4081,4081,4081,4081,4082,4082,
4082,4082,4082,4083,4083,4083,4083,4084,
4084,4084,4084,4084,4084,4085,4085,4085,
4085,4085,4085,4086,4086,4086,4086,4086,
4086,4087,4087,4087,4087,4087,4087,4087,
4088,4088,4088,4088,4088,4088,4088,4088,
4089,4089,4089,4089,4089,4089,4089,4089,
4089,4090,4090,4090,4090,4090,4090,4090,
4090,4090,4090,4090,4091,4091,4091,4091,
4091,4091,4091,4091,4091,4091,4091,4091,
4091,4092,4092,4092,4092,4092,4092,4092,
4092,4092,4092,4092,4092,4092,4092,4092,
4092,4093,4093,4093,4093,4093,4093,4093,
4093,4093,4093,4093,4093,4093,4093,4093,
4093,4093,4093,4093,4093,4093,4093,4094,
4094,4094,4094,4094,4094,4094,4094,4094,
4094,4094,4094,4094,4094,4094,4094,4094,
4094,4094,4094,4094,4094,4094,4094,4094,
4094,4094,4094,4094,4094,4094,4094,4095,
4095,4095,4095,4095,4095,4095,4095,4095,
4095,4095,4095,4095,4095,4095,4095,4095,
4095,4095,4095,4095,4095,4095,4095,4095,
4095,4095,4095,4095,4095,4095,4095,4095,
4095,4095,4095,4095,4095,4095,4095,4095,
4095,4095,4095,4095,4095,4095,4095,4095,
4095,4095,4095,4095,4095,4095,4095,4095,
4095,4095,4095,4095,4095,4095,4095,4095,
4095,4095,4095,4095,4095,4095,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096,
4096,4096,4096,4096,4096,4096,4096,4096};
// Fixed-point inference for a hard-coded 5-hidden-unit LSTM (model "o1").
// Quantizes the float weights below to 16-bit integers with SCALER, then runs
// the SAMPLE_TEST_LEN-sample sequence through the cell, evaluating the gate
// nonlinearities via the lut_sigmoid / lut_tanh tables (LUT_SIZE entries,
// values on a 0..4096 fixed-point scale).
// NOTE(review): the weights are constants but are re-quantized on every call;
// the *F tables could be precomputed once — confirm before changing.
__device__
void lstm_n5_o1(int input[SAMPLE_TEST_LEN], short output[SAMPLE_TEST_LEN])
{
int i, j, t;
// Input-to-gate weights, one row per gate; rows feed, in order, the
// i (input), f (forget), o (output) and g (candidate) computations below.
float inW[4][5] = {
-0.00902497, 0.0130347, -0.305604, 0.0103134, -0.00143173,
-0.00892103, -0.00877193, -0.0158959, -0.00261989, 0.00238156,
-1.17159, -1.05888, -0.0252563, 1.32337, 0.896013,
0.112793, 0.107382, 0.459561, 0.112837, -0.0858938};
// Recurrent (hidden-to-gate) weights, indexed [gate][unit][previous hidden unit].
float intW[4][5][5] = {
{{0.01465, 0.017885, 0.00462623, -0.00366126, 0.00414583},
{0.00709106, 0.00612325, 0.00509018, 0.00629193, -0.00820282},
{0.0594903, 0.0594652, 0.0879106, -0.202968, 0.146663},
{0.0173266, -0.00258213, -0.00156304, -0.0161799, 0.0206139},
{0.00378391, 0.0190192, 0.0140174, 0.0183843, -0.00042357}},
{{-0.007224, -2.52633e-05, -0.00375626, 0.0171819, -0.0146835},
{0.0095475, 0.0111485, 0.00723207, -0.00279432, -0.00130744},
{-0.00358937, -0.0211212, -0.0445563, -0.0203464, 0.0123881},
{-0.00648264, -0.00841806, 0.00112013, 0.00435087, -0.0138258},
{0.00533612, -0.00909088, 0.00789575, 0.00117046, 0.00834566}},
{{0.74772, 0.635634, 0.730541, -1.11435, 0.814002},
{0.623608, 0.53032, 0.652992, -1.01461, 0.768323},
{0.120079, 0.113368, 0.0824013, -0.000308211, -0.0182162},
{-1.28265, -1.18123, -0.480213, 0.984297, -0.576107},
{-1.00799, -0.944089, -0.355751, 0.536079, -0.27723}},
{{0.0134795, 0.0447042, 0.015088, 0.0920375, -0.0777375},
{0.0384587, 0.0330071, 0.0205698, 0.0858556, -0.0671409},
{-0.63912, -0.570696, -0.0891825, 0.706698, -0.5},
{0.0172945, 0.0240723, 0.00149645, 0.0341813, -0.0418003},
{-0.0122831, -0.0280598, -0.00341253, -0.0265756, 0.0246845}}
};
// Gate biases, same gate order as inW.
float intB[4][5] = {
0.016, 0.0139732, -0.183891, 0.0139634, 0.00864378,
5.00094, 5.00059, 4.97023, 5.0002, 5.00032,
0.0676543, -0.0445895, 0.248995, -0.978814, -1.0258,
0.204404, 0.190113, -0.156202, 0.219446, -0.179526};
// Final projection from the 5 hidden units to the scalar per-timestep output.
float outW[5] = {-0.4272, -0.33769, 0.167592, 0.50495, -0.502329};
float outB = -0.0394433;
// Fixed-point (short) copies of all of the above, filled by the loops below.
short inWF[4][5] = {0};
short intWF[4][5][5] = {0};
short intBF[4][5] = {0};
short outWF[5] = {0};
short outBF = 0;
// Quantize every float parameter to a 16-bit integer via SCALER.
for (i = 0; i < 4; ++i) {
for (j = 0; j < 5; ++j) {
inWF[i][j] = (short) (inW[i][j] * SCALER);
}
}
for (i = 0; i < 4; ++i) {
for (j = 0; j < 5; ++j) {
for (t = 0; t < 5; ++t) {
intWF[i][j][t] = (short) (intW[i][j][t] * SCALER);
}
}
}
for (i = 0; i < 4; ++i) {
for (j = 0; j < 5; ++j) {
intBF[i][j] = (short) (intB[i][j] * SCALER);
}
}
for (i = 0; i < 5; ++i) {
outWF[i] = (short) (outW[i] * SCALER);
}
outBF = (short) (outB * SCALER);
// Per-unit LSTM state, all fixed-point, zeroed at the start of the sequence.
short h_stateF[5] = {0};
short c_stateF[5] = {0};
short i_stateF[5] = {0};
short f_stateF[5] = {0};
short o_stateF[5] = {0};
short g_stateF[5] = {0};
short sampleinput_16b;
// Sequential pass over the input window; state carries across timesteps.
for (t = 0; t < SAMPLE_TEST_LEN; ++t) {
// Map the raw sample into 16-bit range. NOTE(review): the (short) cast is
// applied to (input[t] + 120000) BEFORE the *256/1875 scaling, so any
// value of input[t] + 120000 outside short range wraps — confirm inputs
// are bounded so this cannot overflow.
sampleinput_16b = (short) (input[t] + 120000) * 256 / 1875;
// Input gate: weighted sum (input term >>15, recurrent terms >>12) plus
// bias, rescaled (>>5) to an index into the sigmoid LUT; negative
// arguments use the symmetric extension 4096 - lut_sigmoid[-x] and the
// tails saturate to 4095 / 1.
for (j = 0; j < 5; ++j) {
i_stateF[j] = (inWF[0][j] * sampleinput_16b) >> 15;
for (i = 0; i < 5; ++i)
i_stateF[j] += ((h_stateF[i] * intWF[0][j][i]) >> 12);
i_stateF[j] += intBF[0][j];
i_stateF[j] = i_stateF[j] >> 5;
if (i_stateF[j] >= LUT_SIZE)
i_stateF[j] = 4095;
else if (i_stateF[j] >= 0)
i_stateF[j] = lut_sigmoid[i_stateF[j]];
else if (i_stateF[j] > -LUT_SIZE)
i_stateF[j] = 4096 - lut_sigmoid[-i_stateF[j]];
else
i_stateF[j] = 1;
}
// Forget gate: same sigmoid pipeline with the [1] weight row.
for (j = 0; j < 5; ++j) {
f_stateF[j] = (inWF[1][j] * sampleinput_16b) >> 15;
for (i = 0; i < 5; ++i)
f_stateF[j] += ((h_stateF[i] * intWF[1][j][i]) >> 12);
f_stateF[j] += intBF[1][j];
f_stateF[j] = f_stateF[j] >> 5;
if (f_stateF[j] >= LUT_SIZE)
f_stateF[j] = 4095;
else if (f_stateF[j] >= 0)
f_stateF[j] = lut_sigmoid[f_stateF[j]];
else if (f_stateF[j] > -LUT_SIZE)
f_stateF[j] = 4096 - lut_sigmoid[-f_stateF[j]];
else
f_stateF[j] = 1;
}
// Output gate: same sigmoid pipeline with the [2] weight row.
for (j = 0; j < 5; ++j) {
o_stateF[j] = (inWF[2][j] * sampleinput_16b) >> 15;
for (i = 0; i < 5; ++i)
o_stateF[j] += ((h_stateF[i] * intWF[2][j][i]) >> 12);
o_stateF[j] += intBF[2][j];
o_stateF[j] = o_stateF[j] >> 5;
if (o_stateF[j] >= LUT_SIZE)
o_stateF[j] = 4095;
else if (o_stateF[j] >= 0)
o_stateF[j] = lut_sigmoid[o_stateF[j]];
else if (o_stateF[j] > -LUT_SIZE)
o_stateF[j] = 4096 - lut_sigmoid[-o_stateF[j]];
else
o_stateF[j] = 1;
}
// Candidate gate: tanh LUT with odd extension -lut_tanh[-x], saturating
// at +/-4096. NOTE(review): the positive saturation here is 4096 while
// the sigmoid branches use 4095 — looks intentional (tanh scale) but
// confirm against the LUT generation code.
for (j = 0; j < 5; ++j) {
g_stateF[j] = (inWF[3][j] * sampleinput_16b) >> 15;
for (i = 0; i < 5; ++i)
g_stateF[j] += ((h_stateF[i] * intWF[3][j][i]) >> 12);
g_stateF[j] += intBF[3][j];
g_stateF[j] = g_stateF[j] >> 5;
if (g_stateF[j] >= LUT_SIZE)
g_stateF[j] = 4096;
else if (g_stateF[j] >= 0)
g_stateF[j] = lut_tanh[g_stateF[j]];
else if (g_stateF[j] > -LUT_SIZE)
g_stateF[j] = -lut_tanh[-g_stateF[j]];
else
g_stateF[j] = -4096;
}
// State update: c = f*c + i*g (with per-product rescales), then
// h = tanh(c >> 1) * o, all kept within short range by the shifts.
for (j = 0; j < 5; ++j) {
c_stateF[j] = (((c_stateF[j] * f_stateF[j]) >> 8) + ((g_stateF[j] * i_stateF[j]) >> 12)) >> 4;
h_stateF[j] = c_stateF[j] >> 1;
if (h_stateF[j] >= LUT_SIZE)
h_stateF[j] = 4096;
else if (h_stateF[j] >= 0)
h_stateF[j] = lut_tanh[h_stateF[j]];
else if (h_stateF[j] > -LUT_SIZE)
h_stateF[j] = -lut_tanh[-h_stateF[j]];
else
h_stateF[j] = -4096;
h_stateF[j] = (h_stateF[j] * o_stateF[j]) >> 12;
}
// Output projection: out[t] = outB + sum_j h[j] * outW[j] (fixed point).
output[t] = outBF;
for (j = 0; j < 5; ++j)
output[t] += ((h_stateF[j] * outWF[j]) >> 12);
}
}
// Fixed-point inference for the second hard-coded 5-hidden-unit LSTM
// (model "o2"). Identical pipeline to lstm_n5_o1 — quantize the float
// parameters with SCALER, then run the sequence through gate computations
// backed by the lut_sigmoid / lut_tanh tables — only the weight values differ.
__device__
void lstm_n5_o2(int input[SAMPLE_TEST_LEN], short output[SAMPLE_TEST_LEN])
{
int i, j, t;
// Input-to-gate weights, rows in gate order i, f, o, g (per the usage below).
float inW[4][5] = {
-0.133907, 0.0967799, -0.0249856, -0.0482016, 0.000138663,
-0.0025821, -0.0107074, -0.0135626, -0.0265616, -0.00990482,
0.0279149, 0.29944, 0.00367669, -0.0406378, -0.122106,
0.305937, -1.54966, 0.108542, -0.086096, -0.278674};
// Recurrent weights, indexed [gate][unit][previous hidden unit].
float intW[4][5][5] = {
{{0.0465599, -0.0784586, 0.0703757, -0.0961503, 0.103885},
{0.137839, 0.0785531, 0.172321, 0.00198996, 0.115174},
{0.0896546, -0.00207286, 0.0280649, 0.0300854, 0.0549556},
{0.0952124, 0.011873, 0.0253059, -0.00619738, 0.10025},
{-0.0796523, -0.0310471, 0.0336561, -0.0999846, -0.00944991}},
{{-0.00558139, -0.0249531, -0.0196812, -0.0283953, -0.00538974},
{0.0124158, 0.00739093, 0.00918819, -0.00951965, 0.00634635},
{-0.008908, 0.0113348, -0.00387874, 0.00339979, -0.000628876},
{-0.00832763, 0.0040069, 0.00346749, -0.0256792, 0.00539768},
{0.00337389, -0.0148225, -0.0283464, 0.00277652, 0.000571859}},
{{-0.00736941, 0.0578041, 0.141176, 0.00565979, -0.079775},
{-0.140356, 0.0521767, 0.0813636, -0.0342324, -0.0847605},
{0.0534741, 0.0335436, 0.0464466, 0.0670157, 0.0266309},
{0.0142565, -0.0397183, -0.0116136, -0.0507669, 0.0575363},
{-0.0518841, 0.0358612, 0.0333015, -0.119254, 0.0368938}},
{{-0.40111, 1.17447, 0.172804, 0.197255, 0.0786499},
{-0.307048, -0.923395, -0.362905, 0.194527, -0.438387},
{-0.671133, 0.728081, -0.520196, 0.0108215, -0.139992},
{-0.600645, 0.151967, 0.0101909, -0.235608, -0.367466},
{0.262652, 0.84919, -0.131239, 0.0756875, -0.261777}}
};
// Gate biases, same gate order.
float intB[4][5] = {
-0.0421559, 0.246112, 0.0348797, -0.0619016, 0.0988568,
4.98184, 4.97131, 4.98673, 4.97446, 4.96925,
0.255813, 0.527195, 0.120779, -0.0979445, 0.02733,
0.0091722, 0.551458, -0.0521645, 0.0113755, 0.2287};
// Hidden-to-output projection.
float outW[5] = {-0.592906, 0.576557, -0.38704, 0.0146919, -0.35076};
float outB = -0.0191289;
// Fixed-point copies of the parameters, quantized below.
short inWF[4][5] = {0};
short intWF[4][5][5] = {0};
short intBF[4][5] = {0};
short outWF[5] = {0};
short outBF = 0;
// Quantize all float parameters to shorts via SCALER.
for (i = 0; i < 4; ++i) {
for (j = 0; j < 5; ++j) {
inWF[i][j] = (short) (inW[i][j] * SCALER);
}
}
for (i = 0; i < 4; ++i) {
for (j = 0; j < 5; ++j) {
for (t = 0; t < 5; ++t) {
intWF[i][j][t] = (short) (intW[i][j][t] * SCALER);
}
}
}
for (i = 0; i < 4; ++i) {
for (j = 0; j < 5; ++j) {
intBF[i][j] = (short) (intB[i][j] * SCALER);
}
}
for (i = 0; i < 5; ++i) {
outWF[i] = (short) (outW[i] * SCALER);
}
outBF = (short) (outB * SCALER);
// Per-unit fixed-point LSTM state, zeroed at sequence start.
short h_stateF[5] = {0};
short c_stateF[5] = {0};
short i_stateF[5] = {0};
short f_stateF[5] = {0};
short o_stateF[5] = {0};
short g_stateF[5] = {0};
short sampleinput_16b;
for (t = 0; t < SAMPLE_TEST_LEN; ++t) {
// NOTE(review): the (short) cast binds to (input[t] + 120000) before the
// scaling; values outside short range wrap — confirm input bounds.
sampleinput_16b = (short) (input[t] + 120000) * 256 / 1875;
// Input gate (sigmoid LUT; negative side via 4096 - lut_sigmoid[-x]).
for (j = 0; j < 5; ++j) {
i_stateF[j] = (inWF[0][j] * sampleinput_16b) >> 15;
for (i = 0; i < 5; ++i)
i_stateF[j] += ((h_stateF[i] * intWF[0][j][i]) >> 12);
i_stateF[j] += intBF[0][j];
i_stateF[j] = i_stateF[j] >> 5;
if (i_stateF[j] >= LUT_SIZE)
i_stateF[j] = 4095;
else if (i_stateF[j] >= 0)
i_stateF[j] = lut_sigmoid[i_stateF[j]];
else if (i_stateF[j] > -LUT_SIZE)
i_stateF[j] = 4096 - lut_sigmoid[-i_stateF[j]];
else
i_stateF[j] = 1;
}
// Forget gate.
for (j = 0; j < 5; ++j) {
f_stateF[j] = (inWF[1][j] * sampleinput_16b) >> 15;
for (i = 0; i < 5; ++i)
f_stateF[j] += ((h_stateF[i] * intWF[1][j][i]) >> 12);
f_stateF[j] += intBF[1][j];
f_stateF[j] = f_stateF[j] >> 5;
if (f_stateF[j] >= LUT_SIZE)
f_stateF[j] = 4095;
else if (f_stateF[j] >= 0)
f_stateF[j] = lut_sigmoid[f_stateF[j]];
else if (f_stateF[j] > -LUT_SIZE)
f_stateF[j] = 4096 - lut_sigmoid[-f_stateF[j]];
else
f_stateF[j] = 1;
}
// Output gate.
for (j = 0; j < 5; ++j) {
o_stateF[j] = (inWF[2][j] * sampleinput_16b) >> 15;
for (i = 0; i < 5; ++i)
o_stateF[j] += ((h_stateF[i] * intWF[2][j][i]) >> 12);
o_stateF[j] += intBF[2][j];
o_stateF[j] = o_stateF[j] >> 5;
if (o_stateF[j] >= LUT_SIZE)
o_stateF[j] = 4095;
else if (o_stateF[j] >= 0)
o_stateF[j] = lut_sigmoid[o_stateF[j]];
else if (o_stateF[j] > -LUT_SIZE)
o_stateF[j] = 4096 - lut_sigmoid[-o_stateF[j]];
else
o_stateF[j] = 1;
}
// Candidate gate (tanh LUT, odd extension, saturating at +/-4096).
for (j = 0; j < 5; ++j) {
g_stateF[j] = (inWF[3][j] * sampleinput_16b) >> 15;
for (i = 0; i < 5; ++i)
g_stateF[j] += ((h_stateF[i] * intWF[3][j][i]) >> 12);
g_stateF[j] += intBF[3][j];
g_stateF[j] = g_stateF[j] >> 5;
if (g_stateF[j] >= LUT_SIZE)
g_stateF[j] = 4096;
else if (g_stateF[j] >= 0)
g_stateF[j] = lut_tanh[g_stateF[j]];
else if (g_stateF[j] > -LUT_SIZE)
g_stateF[j] = -lut_tanh[-g_stateF[j]];
else
g_stateF[j] = -4096;
}
// Cell/hidden update: c = f*c + i*g; h = tanh(c >> 1) * o.
for (j = 0; j < 5; ++j) {
c_stateF[j] = (((c_stateF[j] * f_stateF[j]) >> 8) + ((g_stateF[j] * i_stateF[j]) >> 12)) >> 4;
h_stateF[j] = c_stateF[j] >> 1;
if (h_stateF[j] >= LUT_SIZE)
h_stateF[j] = 4096;
else if (h_stateF[j] >= 0)
h_stateF[j] = lut_tanh[h_stateF[j]];
else if (h_stateF[j] > -LUT_SIZE)
h_stateF[j] = -lut_tanh[-h_stateF[j]];
else
h_stateF[j] = -4096;
h_stateF[j] = (h_stateF[j] * o_stateF[j]) >> 12;
}
// Output projection.
output[t] = outBF;
for (j = 0; j < 5; ++j)
output[t] += ((h_stateF[j] * outWF[j]) >> 12);
}
}
__global__
void lstm_task(int n, int *x, short *y1, short *y2)
{
    // One thread per sequence: thread `tid` copies its SAMPLE_TEST_LEN-sample
    // window out of `x`, runs both fixed-point LSTM models on it, and writes
    // the two output streams back to y1 / y2 at the same offset.
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= n)
        return;

    int seq[SAMPLE_TEST_LEN];     // thread-private copy of the input window
    short out1[SAMPLE_TEST_LEN];  // model "o1" output
    short out2[SAMPLE_TEST_LEN];  // model "o2" output

    const int base = tid * SAMPLE_TEST_LEN;
    for (int k = 0; k < SAMPLE_TEST_LEN; ++k)
        seq[k] = x[base + k];

    lstm_n5_o1(seq, out1);
    lstm_n5_o2(seq, out2);

    for (int k = 0; k < SAMPLE_TEST_LEN; ++k) {
        y1[base + k] = out1[k];
        y2[base + k] = out2[k];
    }
}
int main(void)
{
    // Host driver: replicates one SAMPLE_TEST_LEN-sample input window across N
    // sequences, runs both fixed-point LSTMs on the GPU (timed), and dumps one
    // sequence's outputs to output.txt.
    int N = 1<<19;
    int *x, *d_x;
    short *y1, *y2, *d_y1, *d_y2;
    FILE *ifp, *ofp;
    struct timeval t1, t2, tr;
    int i, j;
    int sampleinput[SAMPLE_TEST_LEN];
    short test_out1[SAMPLE_TEST_LEN];
    short test_out2[SAMPLE_TEST_LEN];
    cudaError_t err;
    // Read in sample input from "converted-lstm-in.txt" file
    if (!(ifp = fopen("converted-lstm-in.txt", "r"))) {
        printf("File converted-lstm-in.txt cannot be opened for read.\n");
        return -1;
    }
    for (i = 0; i < SAMPLE_TEST_LEN; ++i) {
        // Fail fast on a short or malformed file instead of computing on garbage.
        if (fscanf(ifp, "%d", &sampleinput[i]) != 1) {
            printf("File converted-lstm-in.txt is truncated or malformed.\n");
            fclose(ifp);
            return -1;
        }
    }
    fclose(ifp);
    // Open output.txt for output data write back.
    if (!(ofp = fopen("output.txt", "w"))) {
        printf("File output.txt cannot be opened for write.\n");
        return -1;
    }
    // size_t arithmetic: N * SAMPLE_TEST_LEN * sizeof(T) can overflow int math.
    x = (int*)malloc((size_t)N * SAMPLE_TEST_LEN * sizeof(int));
    y1 = (short*)malloc((size_t)N * SAMPLE_TEST_LEN * sizeof(short));
    y2 = (short*)malloc((size_t)N * SAMPLE_TEST_LEN * sizeof(short));
    if (!x || !y1 || !y2) {
        printf("Host allocation failed.\n");
        return -1;
    }
    if (cudaMalloc(&d_x, (size_t)N * SAMPLE_TEST_LEN * sizeof(int)) != cudaSuccess ||
        cudaMalloc(&d_y1, (size_t)N * SAMPLE_TEST_LEN * sizeof(short)) != cudaSuccess ||
        cudaMalloc(&d_y2, (size_t)N * SAMPLE_TEST_LEN * sizeof(short)) != cudaSuccess) {
        printf("cudaMalloc failed.\n");
        return -1;
    }
    // Replicate the single input window across all N sequences.
    for (i = 0; i < N; i++) {
        for (j = 0; j < SAMPLE_TEST_LEN; ++j) {
            x[i * SAMPLE_TEST_LEN + j] = sampleinput[j];
        }
    }
    for (int k = 9; k < 10; ++k)
    {
        gettimeofday(&t1, NULL);
        for (j = 0; j < k; ++j)
        {
            cudaMemcpy(d_x, x, (size_t)N * SAMPLE_TEST_LEN * sizeof(int), cudaMemcpyHostToDevice);
            // (The old code also uploaded the uninitialized y1/y2 host buffers
            // here; the kernel fully overwrites d_y1/d_y2, so those copies
            // were pure overhead and have been dropped.)
            lstm_task<<<(N+63)/64, 64>>>(N, d_x, d_y1, d_y2);
            // Launch-configuration errors only surface via cudaGetLastError().
            if ((err = cudaGetLastError()) != cudaSuccess) {
                printf("Kernel launch failed: %s\n", cudaGetErrorString(err));
                return -1;
            }
            cudaMemcpy(y1, d_y1, (size_t)N * SAMPLE_TEST_LEN * sizeof(short), cudaMemcpyDeviceToHost);
            cudaMemcpy(y2, d_y2, (size_t)N * SAMPLE_TEST_LEN * sizeof(short), cudaMemcpyDeviceToHost);
        }
        gettimeofday(&t2, NULL);
        // timersub(a, b, r) computes r = a - b; subtracting start from stop
        // yields a positive elapsed time directly (the old code subtracted in
        // the wrong order and negated at print time).
        timersub(&t2, &t1, &tr);
        printf("Round %d Excute time: %.2f sec\n", k, tr.tv_sec+(double)tr.tv_usec/1000000.0);
    }
    // Spot-check one sequence (index 10000) and write its outputs out.
    for (i = 0; i < SAMPLE_TEST_LEN; ++i) {
        test_out1[i] = y1[10000 * SAMPLE_TEST_LEN + i];
        test_out2[i] = y2[10000 * SAMPLE_TEST_LEN + i];
    }
    for (i = 0; i < SAMPLE_TEST_LEN; ++i)
        fprintf(ofp, "%d,%d\n", test_out1[i], test_out2[i]);
    fclose(ofp);
    cudaFree(d_x);
    cudaFree(d_y1);
    cudaFree(d_y2);
    free(x);
    free(y1);
    free(y2);
    return 0;
}
#include<stdio.h>
#include<sys/time.h>
#include<stdlib.h>
#include<iostream>
using namespace std;
//----------------------------------- Structures and Globals---------------------------------------------
// Width/height descriptor for a dense row-major 2D float array.
// NOTE(review): this file's loops treat `width` as the row count and `height`
// as the row length (see the i*height+j indexing below) — confirm before
// reusing with non-square matrices.
typedef struct {
int width;
int height;
} ArrayMetadata2D;
// metadata variables describing dimensionalities of all data structures involved in the computation
ArrayMetadata2D A_MD, B_MD, C_MD;
// pointers for input and output arrays in the host memory
float *A, *B, *C, *C_CPU;
// pointers for input and output arrays in the device memory (NVIDIA DRAM)
float *A_GPU, *B_GPU, *C_GPU;
//----------------------------------- host function definitions -----------------------------------------
void allocateAndInitializeAB();
void computeCpuMMM();
void copyMatricesToGPU();
void copyResultFromGPU();
void compareHostAndGpuOutput();
void die(const char *error);
void check_error(cudaError e);
__global__ void kernel(float * A_GPU, float * B_GPU, float * C_GPU, ArrayMetadata2D A_gpu_md, ArrayMetadata2D B_gpu_md);
//----------------------------------- CUDA function definitions -----------------------------------------
#define BLOCK_SIZE 10
//-------------------------------------------------------------------------------------------------------
int main(int argc, char **argv) {
    // Usage: prog [A_width [A_height [B_width [B_height]]]] — defaults to
    // 100x100 square matrices. Runs a CPU reference multiply, then the tiled
    // GPU kernel, compares results and reports timings.
    A_MD.width = (argc > 1) ? atoi(argv[1]) : 100;
    A_MD.height = (argc > 2) ? atoi(argv[2]) : A_MD.width;
    B_MD.width = (argc > 3) ? atoi(argv[3]) : A_MD.height;
    B_MD.height = (argc > 4) ? atoi(argv[4]) : B_MD.width;
    C_MD.width = A_MD.width;
    C_MD.height = B_MD.height;
    printf("Matrix A is %d-by-%d\n", A_MD.width, A_MD.height);
    printf("Matrix B is %d-by-%d\n", B_MD.width, B_MD.height);
    printf("Matrix C is %d-by-%d\n", C_MD.width, C_MD.height);
    allocateAndInitializeAB();
    // matrix matrix multiplication in the CPU
    clock_t start = clock();
    computeCpuMMM();
    clock_t end = clock();
    double elapsed = (end - start) / (double) CLOCKS_PER_SEC;
    printf("Computation time in the CPU: %f seconds\n", elapsed);
    dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE);
    // One block per BLOCK_SIZE x BLOCK_SIZE tile of C.
    // NOTE: integer division truncates, so dimensions that are not multiples
    // of BLOCK_SIZE leave a fringe of C uncomputed (the kernel has no bounds
    // guard) — compareHostAndGpuOutput will report those as mismatches.
    dim3 dimGrid(B_MD.width/ BLOCK_SIZE, A_MD.height / BLOCK_SIZE);
    clock_t start_gpu = clock();
    clock_t start_copy = clock();
    copyMatricesToGPU();
    clock_t stop_copy = clock();
    clock_t start_calc = clock();
    kernel <<< dimGrid, dimBlock >>> (A_GPU, B_GPU, C_GPU, A_MD, B_MD);
    // Kernel launches are asynchronous: without a synchronize here the timer
    // below only measured launch overhead, and launch/execution errors were
    // silently ignored.
    check_error(cudaGetLastError());
    check_error(cudaDeviceSynchronize());
    clock_t stop_calc = clock();
    clock_t start_copy2 = clock();
    copyResultFromGPU();
    clock_t stop_copy2 = clock();
    clock_t end_gpu = clock();
    double elapsed_gpu = (end_gpu - start_gpu) / (double) CLOCKS_PER_SEC;
    printf("Total computation time: %f seconds\n", elapsed_gpu);
    printf("Copy to GPU time : %f seconds\n", (stop_copy - start_copy)/ (double) CLOCKS_PER_SEC);
    printf("Computation time : %f seconds\n", (stop_calc - start_calc)/ (double) CLOCKS_PER_SEC);
    printf("Copy from GPU time : %f seconds\n", (stop_copy2 - start_copy2)/ (double) CLOCKS_PER_SEC);
    compareHostAndGpuOutput();
    printf("Speedup %f\n", (elapsed/elapsed_gpu));
    // Release device and host buffers (previously leaked).
    cudaFree(A_GPU);
    cudaFree(B_GPU);
    cudaFree(C_GPU);
    free(A);
    free(B);
    free(C);
    free(C_CPU);
    return 0;
}
// allocate and initialize A and B using a random number generator
// Allocate A and B on the host and populate them with pseudo-random values in
// [0.0, 0.999] (row-major, `height` floats per row, `width` rows).
void allocateAndInitializeAB() {
    size_t bytesA = A_MD.width * A_MD.height * sizeof(float);
    A = (float*) malloc(bytesA);
    srand(time(NULL));
    for (int row = 0; row < A_MD.width; row++)
        for (int col = 0; col < A_MD.height; col++)
            A[row * A_MD.height + col] = (rand() % 1000) * 0.001;

    size_t bytesB = B_MD.width * B_MD.height * sizeof(float);
    B = (float*) malloc(bytesB);
    for (int row = 0; row < B_MD.width; row++)
        for (int col = 0; col < B_MD.height; col++)
            B[row * B_MD.height + col] = (rand() % 1000) * 0.001;
}
// allocate memory in the GPU for all matrices, and copy A and B content from the host CPU memory to the GPU memory
// Allocate device buffers for A, B and C, and upload the host copies of A and B
// (C is output-only, so only storage is reserved for it).
void copyMatricesToGPU() {
    const size_t bytesA = A_MD.width * A_MD.height * sizeof(float);
    check_error(cudaMalloc((void **) &A_GPU, bytesA));
    check_error(cudaMemcpy(A_GPU, A, bytesA, cudaMemcpyHostToDevice));
    const size_t bytesB = B_MD.width * B_MD.height * sizeof(float);
    check_error(cudaMalloc((void **) &B_GPU, bytesB));
    check_error(cudaMemcpy(B_GPU, B, bytesB, cudaMemcpyHostToDevice));
    const size_t bytesC = C_MD.width * C_MD.height * sizeof(float);
    check_error(cudaMalloc((void **) &C_GPU, bytesC));
}
// copy results from C_GPU which is in GPU card memory to C_CPU which is in the host CPU for result comparison
// Download the device result matrix into freshly-allocated host storage C_CPU
// so it can be compared against the CPU reference in C.
void copyResultFromGPU() {
    const size_t bytes = C_MD.width * C_MD.height * sizeof(float);
    C_CPU = (float*) malloc(bytes);
    check_error(cudaMemcpy(C_CPU, C_GPU, bytes, cudaMemcpyDeviceToHost));
}
// do a straightforward matrix-matrix multiplication in the CPU
// notice that this implementation can be massively improved in the CPU by doing proper cache blocking but we are
// not providing you the efficient CPU implementation as that reveals too much about the ideal GPU implementation
// Reference CPU matrix-matrix multiply into the global C: for each output cell,
// accumulate A's row against B's column. Deliberately naive (no blocking) —
// it exists only as a correctness baseline for the GPU kernel.
void computeCpuMMM() {
    C = (float*) malloc(C_MD.width * C_MD.height * sizeof(float));
    for (int row = 0; row < A_MD.width; row++) {
        const int aBase = row * A_MD.height;   // start of A's row
        const int cBase = row * C_MD.height;   // start of C's row
        for (int col = 0; col < B_MD.height; col++) {
            float acc = 0;
            for (int k = 0; k < B_MD.width; k++)
                acc += A[aBase + k] * B[k * B_MD.height + col];
            C[cBase + col] = acc;
        }
    }
}
// Tiled matrix multiply: each block computes one BLOCK_SIZE x BLOCK_SIZE tile
// of C, staging tiles of A and B through shared memory. Launch with
// dimBlock = (BLOCK_SIZE, BLOCK_SIZE) and one grid cell per C tile.
// Assumes matrix dimensions are exact multiples of BLOCK_SIZE (no bounds
// checks anywhere).
// NOTE(review): the row strides are inconsistent — C_block and A_block use
// A_gpu_md.height, the shared-memory load of A uses A_gpu_md.width, and the
// final store uses B_gpu_md.width. These only agree when the matrices are
// square; confirm before using non-square inputs.
__global__ void kernel(float * A_GPU, float * B_GPU, float * C_GPU, ArrayMetadata2D A_gpu_md, ArrayMetadata2D B_gpu_md) {
////////////////////////////////////
// Marcus's idea of how it should work
const int blockY = blockIdx.y; // the global block indexes
const int blockX = blockIdx.x;
// Pointer to the top-left corner of this block's output tile in C.
float * C_block = &C_GPU[blockY * BLOCK_SIZE * A_gpu_md.height + blockX * BLOCK_SIZE];
const int threadY = threadIdx.y; // valued from 0:blocksize-1
const int threadX = threadIdx.x; // valued from 0:blocksize-1
// Accumulator for this thread's single C element.
// NOTE(review): `volatile` forces the accumulator through memory on every
// update; it looks unnecessary here — confirm it isn't working around a
// compiler issue before removing.
volatile float my_final_value = 0.0f;
// March the A tile rightward and the B tile downward, one BLOCK_SIZE step
// per iteration, accumulating partial dot products.
for (int i = 0; i < A_gpu_md.height / BLOCK_SIZE; i++) {
// Top-left corners of the current A and B tiles for this iteration.
float * A_block = &A_GPU[blockY * BLOCK_SIZE * A_gpu_md.height + i * BLOCK_SIZE ];
float * B_block = &B_GPU[i * BLOCK_SIZE * B_gpu_md.height + blockX * BLOCK_SIZE];
// Shared staging tiles, cooperatively filled by the whole block.
__shared__ float sharedA[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float sharedB[BLOCK_SIZE][BLOCK_SIZE];
// Each thread loads one element of each tile.
sharedA[threadY][threadX] = A_block[threadY*A_gpu_md.width + threadX];
sharedB[threadY][threadX] = B_block[threadY*B_gpu_md.height + threadX];
// Wait till they've all loaded into shared
__syncthreads();
// Partial dot product: row of the A tile times column of the B tile.
for (int j = 0; j < BLOCK_SIZE; j++) my_final_value += sharedA[threadY][j] * sharedB[j][threadX];
// Barrier before the next iteration overwrites the shared tiles.
__syncthreads();
}
// Write this thread's C element.
C_block[threadY * B_gpu_md.width + threadX] = my_final_value;
//
////////////////////////////////////////////////
// Earlier, abandoned one-thread-per-row-strip attempt, kept for reference:
/*
int srow = 0;
int scol = 0;
int threadId = blockDim.x * blockIdx.x + threadIdx.x;
const int sizeOfWork = 10;
const int sizeOfBlock = 100;
// Where to start in the GPU matrix
int mIndex = threadId * sizeOfWork;
// copy the submatrix into shared memory
__shared__ float blockA[10][10];
__shared__ float blockB[10][10];
int blockIndex = mIndex;
for (int i = 0; i < sizeOfBlock; i++) {
blockA[srow][scol] = A_GPU[blockIndex];
blockB[srow][scol] = B_GPU[blockIndex];
// Jump a row when finished copying column
if (i == sizeOfWork) {
srow++;
blockIndex *= sizeOfWork;
}
scol++;
}
// Compute a partial row of C
int aRow = threadId;
int cIndex = mIndex;
// TODO: Transpose B for better load times
// Will need to switch order to keep coalesced
// Multiply a row of A
for (int aCol = 0; aCol < sizeOfWork; aCol++) {
// with each column of B
for (int bCol = 0; bCol < sizeOfWork; bCol++) {
float cell = 1;
for (int bRow = 0; bRow < sizeOfWork; bRow++) {
cell += (blockA[aRow][aCol] * blockB[bRow][bCol]);
}
// Store the result in C
C_GPU[cIndex] = cell;
cIndex++;
}
}
*/
}
// function to determine if the GPU computation is done correctly by comparing the output from the GPU with that
// from the CPU
// Element-wise comparison of the CPU result (C) against the GPU result (C_CPU)
// with an absolute tolerance of 0.01; prints a verdict with the mismatch count.
void compareHostAndGpuOutput() {
    const int totalElements = C_MD.width * C_MD.height;
    int missmatchCount = 0;
    for (int i = 0; i < totalElements; i++)
        if (fabs(C[i] - C_CPU[i]) > 0.01)
            missmatchCount++;
    if (missmatchCount > 0)
        printf("Computation is incorrect: outputs do not match in %d indexes\n", missmatchCount);
    else
        printf("Computation is correct: CPU and GPU outputs match\n");
}
// Prints the specified error message and then exits
// Print the given message verbatim to stdout and terminate with failure status.
void die(const char *error) {
    fputs(error, stdout);
    exit(1);
}
// If the specified error code refers to a real error, report it and quit the program
// Abort the program with the human-readable error string if a CUDA runtime
// call did not return cudaSuccess.
void check_error(cudaError e) {
    if (e == cudaSuccess)
        return;
    printf("\nCUDA error: %s\n", cudaGetErrorString(e));
    exit(1);
}
#include "includes.h"
#define RED 2
#define GREEN 1
#define BLUE 0
using namespace std;
extern "C"
// Converts a packed 3-channel byte image (channel order per the BLUE/GREEN/RED
// macros) to 8-bit grayscale using Rec.601 luma weights.
// Launch with a 2D grid covering width x height; out-of-range threads exit.
__global__ void img2gray(unsigned char *imageInput, int width, int height, unsigned char *imageOutput){
    int row = blockIdx.y*blockDim.y+threadIdx.y;
    int col = blockIdx.x*blockDim.x+threadIdx.x;
    if((row < height) && (col < width)){
        // Hoist the pixel index (previously recomputed four times) and use
        // float literals: the bare 0.299/0.587/0.114 were doubles, forcing
        // double-precision math on every pixel.
        int pix = row*width + col;
        imageOutput[pix] = (unsigned char)(imageInput[pix*3+RED]*0.299f
                                         + imageInput[pix*3+GREEN]*0.587f
                                         + imageInput[pix*3+BLUE]*0.114f);
    }
}
/*
Implementing Sparse Matrix Vector multiplication (SpMV) in CUDA with one thread per row.
The matrix uses a Compressed Sparse Row (CSR) representation.
*/
#include <stdio.h>
#include <stdlib.h>
#define MATRIX_SIZE 4
#define NON_ZERO_ELEMENTS 6
#define NUM_BLOCKS 2
#define NUM_THREADS 2
// Print the MATRIX_SIZE entries of `array`, space-separated, without a
// trailing newline.
void print_vector(int* array){
    for(int i = 0; i < MATRIX_SIZE; i++)
        printf("%d ", array[i]);
}
// Sparse matrix-vector product y = M * x, one thread per matrix row, with M in
// CSR form (value / index = nonzeros and their column indices, row_ptr = row
// start offsets). row_ptr carries only MATRIX_SIZE entries (no trailing
// sentinel), so the last row is special-cased to end at NON_ZERO_ELEMENTS.
__global__ void spmv(const int* value, const int* index, const int* row_ptr, int* d_in, int* d_out){
    const unsigned int row = threadIdx.x + blockIdx.x * blockDim.x;
    if(row >= MATRIX_SIZE)
        return;
    const int first = row_ptr[row];
    const int last = (row == MATRIX_SIZE - 1) ? NON_ZERO_ELEMENTS : row_ptr[row + 1];
    int acc = 0;
    for(int k = first; k < last; k++)
        acc += value[k] * d_in[index[k]];
    d_out[row] = acc;
}
// Abort with a diagnostic if a CUDA runtime call failed (the original code
// ignored every return status, so failures surfaced as garbage output).
static void checkCuda(cudaError_t e, const char *what){
    if(e != cudaSuccess){
        fprintf(stderr, "%s failed: %s\n", what, cudaGetErrorString(e));
        exit(EXIT_FAILURE);
    }
}
// Builds a small CSR matrix and a dense ones-vector on the host, runs the
// spmv kernel, and prints the resulting vector.
int main(){
    const unsigned int MATRIX_BYTES = MATRIX_SIZE * sizeof(int);
    const unsigned int ELEMENT_BYTES = NON_ZERO_ELEMENTS * sizeof(int);
    // CSR definition. NOTE: row_ptr deliberately has only MATRIX_SIZE entries
    // (no trailing sentinel); the kernel special-cases the last row with
    // NON_ZERO_ELEMENTS, so this layout must stay in sync with spmv().
    int h_value[NON_ZERO_ELEMENTS] = {1, -3, 2, 5, -2, 7};
    int h_index[NON_ZERO_ELEMENTS] = {0, 2, 1, 0, 2, 3};
    int h_row_ptr[MATRIX_SIZE] = {0, 2, 3, 5};
    int* d_value;
    int* d_index;
    int* d_row_ptr;
    checkCuda(cudaMalloc((void **) &d_value, ELEMENT_BYTES), "cudaMalloc d_value");
    checkCuda(cudaMalloc((void **) &d_index, ELEMENT_BYTES), "cudaMalloc d_index");
    checkCuda(cudaMalloc((void **) &d_row_ptr, MATRIX_BYTES), "cudaMalloc d_row_ptr");
    checkCuda(cudaMemcpy(d_value, h_value, ELEMENT_BYTES, cudaMemcpyHostToDevice), "cudaMemcpy d_value");
    checkCuda(cudaMemcpy(d_index, h_index, ELEMENT_BYTES, cudaMemcpyHostToDevice), "cudaMemcpy d_index");
    checkCuda(cudaMemcpy(d_row_ptr, h_row_ptr, MATRIX_BYTES, cudaMemcpyHostToDevice), "cudaMemcpy d_row_ptr");
    // The dense input vector and the result.
    int h_in[MATRIX_SIZE] = {1, 1, 1, 1};
    int h_out[MATRIX_SIZE];
    int* d_in;
    int* d_out;
    checkCuda(cudaMalloc((void **) &d_in, MATRIX_BYTES), "cudaMalloc d_in");
    checkCuda(cudaMalloc((void **) &d_out, MATRIX_BYTES), "cudaMalloc d_out");
    checkCuda(cudaMemcpy(d_in, h_in, MATRIX_BYTES, cudaMemcpyHostToDevice), "cudaMemcpy d_in");
    spmv<<<NUM_BLOCKS, NUM_THREADS>>>(d_value, d_index, d_row_ptr, d_in, d_out);
    // Launch errors only surface through cudaGetLastError().
    checkCuda(cudaGetLastError(), "spmv launch");
    checkCuda(cudaMemcpy(h_out, d_out, MATRIX_BYTES, cudaMemcpyDeviceToHost), "cudaMemcpy h_out");
    print_vector(h_out);
    cudaFree(d_value);
    cudaFree(d_index);
    cudaFree(d_row_ptr);
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}
#include "includes.h"
/*
Detected 1 CUDA Capable device(s)
Device 0: "GeForce GT 320M"
CUDA Driver Version / Runtime Version 5.0 / 5.0
CUDA Capability Major/Minor version number: 1.2
Total amount of global memory: 1024 MBytes (1073741824 bytes)
( 3) Multiprocessors x ( 8) CUDA Cores/MP: 24 CUDA Cores
GPU Clock rate: 1100 MHz (1.10 GHz)
Memory Clock rate: 790 Mhz
Memory Bus Width: 128-bit
Max Texture Dimension Size (x,y,z) 1D=(8192), 2D=(65536,32768), 3D=(2048,2048,2048)
Max Layered Texture Size (dim) x layers 1D=(8192) x 512, 2D=(8192,8192) x 512
Total amount of constant memory: 65536 bytes
Total amount of shared memory per block: 16384 bytes
Total number of registers available per block: 16384
Warp size: 32
Maximum number of threads per multiprocessor: 1024
Maximum number of threads per block: 512
Maximum sizes of each dimension of a block: 512 x 512 x 64
Maximum sizes of each dimension of a grid: 65535 x 65535 x 1
Maximum memory pitch: 2147483647 bytes
Texture alignment: 256 bytes
Concurrent copy and kernel execution: Yes with 1 copy engine(s)
Run time limit on kernels: Yes
Integrated GPU sharing Host Memory: No
Support host page-locked memory mapping: Yes
Alignment requirement for Surfaces: Yes
Device has ECC support: Disabled
CUDA Device Driver Mode (TCC or WDDM): WDDM (Windows Display Driver Model)
Device supports Unified Addressing (UVA): No
Device PCI Bus ID / PCI location ID: 2 / 0
Compute Mode:
< Default (multiple host threads can use ::cudaSetDevice() with device simultaneously) >
deviceQuery, CUDA Driver = CUDART, CUDA Driver Version = 5.0, CUDA Runtime Version = 5.0, NumDevs = 1, Device0 = GeForce GT 320M
*/
// Letter-frequency histogram, "method 2": each block first accumulates counts
// atomically in shared memory, then flushes one atomicAdd per letter into the
// global histogram d_freq (global-memory 32-bit atomics require compute
// capability 1.1+ — per the original author's note).
// Assumes d_dat contains only lowercase 'a'..'z' (pos is not bounds-checked)
// and that each block runs with blockDim.x >= 26 so the shared counters are
// fully cleared and flushed.
__global__ void freqencyMethod2(char *d_dat,int len, int *d_freq)
{
__shared__ int sfreq[26];// one counter per letter, shared by the block
if(threadIdx.x < 26)
sfreq[threadIdx.x] = 0;// clear first: shared memory is uninitialized
__syncthreads();
// Grid-stride loop so any grid size covers all `len` input bytes.
int gridsize = blockDim.x * gridDim.x;
int pos = 0;
for(int i=threadIdx.x + blockIdx.x*blockDim.x; i< len; i += gridsize)
{
pos = d_dat[i]-'a';
atomicAdd(&sfreq[pos],1);
}
__syncthreads();
// Flush the block-local histogram into the global one.
if(threadIdx.x<26)
atomicAdd(&d_freq[threadIdx.x],sfreq[threadIdx.x]);
}
// ----------------------------------------------------------------------------
// CUDA code to compute minimun distance between n points
//
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <limits>
#include <float.h>
#define MAX_POINTS 1048576
// ----------------------------------------------------------------------------
// Kernel Function to compute distance between all pairs of points
// Input:
// X: X[i] = x-coordinate of the ith point
// Y: Y[i] = y-coordinate of the ith point
// n: number of points
// Output:
// D: D[0] = minimum distance
//
#define block_size 1024
// Working minimum_distance function
// Count of blocks that have published their block-local minimum into D[].
__device__ unsigned int finished_blocks = 0;
// Minimum pairwise distance over n points (X[i], Y[i]). Each thread i scans
// all points after i; a shared-memory reduction produces one minimum per block
// in D[blockIdx.x]; the last block to finish folds all block minima into D[0].
// Launch with blockDim.x == block_size.
__global__ void minimum_distance(float * X, float * Y, volatile float * D, int n)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
int i, z;
float dx, dy, temp_distance;
float minDist = FLT_MAX;
bool isLastBlockDone;
__shared__ float block_local_minimums[block_size];
// NOTE(review): threads with idx >= n-1 skip this whole body, so in a
// partially-filled block they never reach the __syncthreads() calls below;
// a barrier not reached by all threads of the block is undefined behavior.
if(idx < n - 1)
{
// Brute-force scan of every point after idx.
for(z = idx + 1; z<n; z++)
{
dx = X[z] - X[idx];
dy = Y[z] - Y[idx];
temp_distance = sqrtf(dx * dx + dy * dy);
if(temp_distance < minDist)
{
minDist = temp_distance;
}
}
block_local_minimums[threadIdx.x] = minDist;
__syncthreads();
// Compute the block local minimum
// largest_index = number of valid entries in this block's shared array
// (block_size for full blocks, n % block_size for the trailing block).
int largest_index = (n % block_size);
if(largest_index == 0)
{
largest_index = block_size;
}
else
{
if(blockIdx.x != n/block_size)
{
largest_index = block_size;
}
}
// Pairwise tree reduction over the block-local minima.
// NOTE(review): the guard `(threadIdx.x + i) < largest_index - 1` looks
// off by one (it excludes the last valid element from the reduction),
// and the __syncthreads() below sits inside divergent control flow —
// both should be verified against the host reference result.
for(i = 1; i<largest_index; i *= 2)
{
if(threadIdx.x % (2 * i) == 0 && (threadIdx.x + i) < largest_index - 1)
{
if(block_local_minimums[threadIdx.x] > block_local_minimums[threadIdx.x + i])
{
block_local_minimums[threadIdx.x] = block_local_minimums[threadIdx.x + i];
}
__syncthreads();
}
}
if(threadIdx.x == 0)
{
// Publish this block's minimum, then count the block as finished.
// NOTE(review): there is no __threadfence() between the D[] store and
// the atomicInc; `volatile` does not by itself order the two stores as
// seen from other blocks.
D[blockIdx.x] = block_local_minimums[0];
int value = atomicInc(&finished_blocks, gridDim.x);
isLastBlockDone = (value == (gridDim.x - 1));
}
// Last thread in the list computes the global minimum and puts it in D[0]
// NOTE(review): isLastBlockDone is assigned only by thread 0, so every
// other thread reads it uninitialized here (masked in practice by the
// threadIdx.x == 0 conjunct, but still an uninitialized read).
if(isLastBlockDone && threadIdx.x == 0)
{
int num_blocks = n / block_size + (n % block_size != 0);
for(i = 1; i<num_blocks; i++)
{
if(D[0] > D[i])
{
D[0] = D[i];
}
}
}
}
}
// ----------------------------------------------------------------------------
// Host function to compute minimum distance between points
// Input:
// X: X[i] = x-coordinate of the ith point
// Y: Y[i] = y-coordinate of the ith point
// n: number of points
// Output:
// D: minimum distance
//
// Brute-force O(n^2) minimum pairwise Euclidean distance on the host; used as
// the reference for the GPU kernel.
// Input:  X, Y — coordinate arrays of n points (n >= 2).
// Output: the smallest distance between any two distinct points.
// (The original version ran an extra inner loop that recomputed the distance
// to point i+1 before the real scan; seeding with FLT_MAX makes it redundant.)
float minimum_distance_host(float * X, float * Y, int n) {
    float min_distance = FLT_MAX;
    for (int i = 0; i < n-1; i++) {
        for (int j = i+1; j < n; j++) {
            float dx = X[j]-X[i];
            float dy = Y[j]-Y[i];
            float Dij = sqrtf(dx*dx+dy*dy);
            if (Dij < min_distance) min_distance = Dij;
        }
    }
    return min_distance;
}
// ----------------------------------------------------------------------------
// Print device properties
// Enumerate every CUDA device and dump a short capability summary to stdout.
void print_device_properties() {
    int deviceCount = 0;
    cudaDeviceProp props;
    cudaGetDeviceCount(&deviceCount);
    printf("------------------------------------------------------------\n");
    printf("Number of GPU devices found = %d\n", deviceCount);
    for (int dev = 0; dev < deviceCount; ++dev) {
        cudaGetDeviceProperties(&props, dev);
        printf("[Device: %1d] Compute Capability %d.%d.\n", dev, props.major, props.minor);
        printf(" ... multiprocessor count = %d\n", props.multiProcessorCount);
        printf(" ... max threads per multiprocessor = %d\n", props.maxThreadsPerMultiProcessor);
        printf(" ... max threads per block = %d\n", props.maxThreadsPerBlock);
        printf(" ... max block dimension = %d, %d, %d (along x, y, z)\n",
        props.maxThreadsDim[0], props.maxThreadsDim[1], props.maxThreadsDim[2]);
        printf(" ... max grid size = %d, %d, %d (along x, y, z)\n",
        props.maxGridSize[0], props.maxGridSize[1], props.maxGridSize[2]);
        printf(" ... warp size = %d\n", props.warpSize);
        printf(" ... clock rate = %d MHz\n", props.clockRate/1000);
    }
    printf("------------------------------------------------------------\n");
}
// ----------------------------------------------------------------------------
// Main program - initializes points and computes minimum distance
// between the points
//
// Host-side driver: generates num_points random 2-D points, computes the
// minimum pairwise distance on the GPU (minimum_distance kernel) and on the
// CPU (minimum_distance_host), and reports both results plus timings.
int main(int argc, char* argv[]) {
    // Host Data
    float * hVx;        // host x-coordinate array
    float * hVy;        // host y-coordinate array
    float * hmin_dist;  // minimum value copied back from device
    // Device Data
    float * dVx;        // device x-coordinate array
    float * dVy;        // device y-coordinate array (comment fixed: was "x")
    float * dmin_dist;  // minimum-distance output buffer on device
    // Device parameters
    int MAX_BLOCK_SIZE = 0;  // initialized so it is never read undefined
    // Timing variables
    cudaEvent_t start, stop;              // GPU timing events
    struct timespec cpu_start, cpu_stop;  // CPU timing
    float time_array[10];
    // Other variables
    int i, size, num_points;
    float min_distance, sqrtn;
    int seed = 0;
    // Print device properties
    print_device_properties();
    // Get device information and set device to use
    int deviceCount;
    cudaDeviceProp deviceProp;
    cudaGetDeviceCount(&deviceCount);
    if (deviceCount > 0) {
        cudaSetDevice(0);
        cudaGetDeviceProperties(&deviceProp, 0);
        MAX_BLOCK_SIZE = deviceProp.maxThreadsPerBlock;
    } else {
        printf("Warning: No GPU device found ... results may be incorrect\n");
    }
    (void)MAX_BLOCK_SIZE;  // informational only; the launch below uses block_size
    // Timing initializations
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Check input -- parse argv once (the old code called atoi twice)
    if (argc != 2) {
        printf("Use: %s <number of points>\n", argv[0]);
        exit(0);
    }
    num_points = atoi(argv[1]);
    if (num_points < 2) {
        printf("Minimum number of points allowed: 2\n");
        exit(0);
    }
    if (num_points > MAX_POINTS) {
        printf("Maximum number of points allowed: %d\n", MAX_POINTS);
        exit(0);
    }
    // Allocate host arrays. hmin_dist keeps the full `size` allocation since
    // the kernel may write per-block partial results into dmin_dist.
    size = num_points * sizeof(float);
    hVx = (float *) malloc(size);
    hVy = (float *) malloc(size);
    hmin_dist = (float *) malloc(size);
    if (hVx == NULL || hVy == NULL || hmin_dist == NULL) {
        printf("Error: host memory allocation failed\n");
        exit(1);
    }
    // Initialize points: uniform in [0, sqrt(n)) x [0, sqrt(n))
    srand48(seed);
    sqrtn = (float) sqrt(num_points);
    for (i = 0; i < num_points; i++) {
        hVx[i] = sqrtn * (float)drand48();
        hVy[i] = sqrtn * (float)drand48();
    }
    // Allocate device coordinate arrays (old code ignored the return codes)
    if (cudaMalloc(&dVx, size) != cudaSuccess ||
        cudaMalloc(&dVy, size) != cudaSuccess ||
        cudaMalloc(&dmin_dist, size) != cudaSuccess) {
        printf("Error: device memory allocation failed\n");
        exit(1);
    }
    // Copy coordinate arrays from host memory to device memory
    cudaEventRecord(start, 0);
    cudaMemcpy(dVx, hVx, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dVy, hVy, size, cudaMemcpyHostToDevice);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&(time_array[0]), start, stop);
    // Invoke kernel
    cudaEventRecord(start, 0);
    // Ceiling division: enough blocks to cover every point
    int num_blocks = num_points / (block_size) + ((num_points % (block_size)) != 0);
    minimum_distance<<<num_blocks, block_size>>>(dVx, dVy, dmin_dist, num_points);
    // Kernel launches do not return an error directly; query it explicitly.
    if (cudaGetLastError() != cudaSuccess) {
        printf("Warning: kernel launch failed ... results may be incorrect\n");
    }
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&(time_array[1]), start, stop);
    // Copy result from device memory to host memory
    cudaEventRecord(start, 0);
    cudaMemcpy(hmin_dist, dmin_dist, sizeof(float), cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&(time_array[2]), start, stop);
    // Compute minimum distance on host to check device computation
    clock_gettime(CLOCK_REALTIME, &cpu_start);
    min_distance = minimum_distance_host(hVx, hVy, num_points);
    clock_gettime(CLOCK_REALTIME, &cpu_stop);
    time_array[3] = 1000*((cpu_stop.tv_sec-cpu_start.tv_sec)
                    +0.000000001*(cpu_stop.tv_nsec-cpu_start.tv_nsec));
    // Print results
    printf("Number of Points = %d\n", num_points);
    printf("GPU Host-to-device = %f ms \n", time_array[0]);
    printf("GPU Device-to-host = %f ms \n", time_array[2]);
    printf("GPU execution time = %f ms \n", time_array[1]);
    printf("CPU execution time = %f ms\n", time_array[3]);
    printf("Min. distance (GPU) = %e\n", hmin_dist[0]);
    printf("Min. distance (CPU) = %e\n", min_distance);
    printf("Relative error = %e\n", fabs(min_distance-hmin_dist[0])/min_distance);
    // Release timing events (old code leaked them)
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // Free device memory
    cudaFree(dVx);
    cudaFree(dVy);
    cudaFree(dmin_dist);
    // Free host memory (old code leaked hmin_dist)
    free(hVx);
    free(hVy);
    free(hmin_dist);
    return 0;
}
|
3,596 | ////////////////////////////////////////////////////////////////////////////////
// Set Device
////////////////////////////////////////////////////////////////////////////////
// Select the CUDA device with the most multiprocessors. With zero or one
// visible device there is nothing to choose, so the runtime default
// (device 0) is left in place -- identical to the original behavior.
void setdevice(void){
    int num_devices = 0;
    cudaGetDeviceCount(&num_devices);
    if (num_devices <= 1)
        return;
    int best_device = 0;
    int best_sm_count = 0;
    for (int dev = 0; dev < num_devices; dev++) {
        cudaDeviceProp props;
        cudaGetDeviceProperties(&props, dev);
        if (props.multiProcessorCount > best_sm_count) {
            best_sm_count = props.multiProcessorCount;
            best_device = dev;
        }
    }
    cudaSetDevice(best_device);
}
|
3,597 | /*
* JCuda - Java bindings for NVIDIA CUDA driver and runtime API
* http://www.jcuda.org
*
*
* This code is based on the NVIDIA 'reduction' CUDA sample,
* Copyright 1993-2010 NVIDIA Corporation.
*/
#include <math.h>
extern "C"
// Writes exp(g_idata[i] - g_idata[maxIndx]) into g_odata[i] for i in [0, n):
// a max-shifted exponential (presumably the numerator of a numerically
// stable softmax -- confirm against the caller).
__global__ void normalize(double *g_idata, double *g_odata, unsigned int n, int maxIndx)
{
    const double peak = g_idata[maxIndx];
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= n)
        return;
    g_odata[tid] = exp(g_idata[tid] - peak);
}
extern "C"
// Scans w for an entry equal to exactly 1.0 and records its position in
// index[0]. If several entries equal 1.0, the winner is unspecified
// (concurrent unsynchronized writes to index[0]).
__global__ void getTargetIndex(int n, int *index, double *w)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= n)
        return;
    if (w[tid] == 1.0)
        index[0] = tid;
}
extern "C"
// Sets mis[0] to 1 if any element of `actual` falls on the opposite side of
// the 0.5 threshold from the corresponding `target` element (a 0/1
// classification mismatch flag).
//
// NOTE(review): `mis[0] = 0` below is executed unconditionally by EVERY
// thread and races with the threads writing 1 -- a thread scheduled later can
// overwrite a mismatch already recorded by another thread/block, so the flag
// can come back 0 even when mismatches exist. The robust fix is to zero the
// flag on the host (e.g. cudaMemset) before launch and delete the in-kernel
// initialization; it cannot be fixed inside this kernel alone without a
// grid-wide barrier, so the code is left unchanged here.
__global__ void mismatch(int n, double* actual, double *target, int *mis)
{
mis[0] = 0;  // racy re-initialization -- see note above
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n)
{
if(target[i] >= 0.5 && actual[i] < 0.5) {mis[0] = 1;}
if(target[i] < 0.5 && actual[i] >= 0.5) {mis[0] = 1;}
}
}
extern "C"
// Subtracts 1.0 in place from out[i] at every position where w[i] == 1.0
// (presumably w is a one-hot target vector -- verify against the caller).
__global__ void setTargetIndex(int n, double *w, double *out)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= n)
        return;
    if (w[tid] == 1.0)
        out[tid] -= 1.0;
}
extern "C"
// Writes out[i]/sum into output[0] for the position(s) where w[i] == 1.0.
// With multiple 1.0 entries the surviving value is unspecified (write race).
__global__ void setTargetIndexNormalize(int n, double sum, double *w, double *out, double* output)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= n)
        return;
    if (w[tid] == 1.0)
        output[0] = out[tid] / sum;
}
extern "C"
// Accumulates the element-wise error (actual[i] - target[i]) into out[i].
__global__ void backwardError(int n, double *actual, double *target, double* out)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < n)
        out[tid] += actual[tid] - target[tid];
}
extern "C"
// Stores the element-wise difference (actual[i] - target[i]) into out[i],
// overwriting any previous contents of out.
__global__ void difference(int n, double *actual, double *target, double* out)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < n)
        out[tid] = actual[tid] - target[tid];
}
|
3,598 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Enumerate every visible CUDA device and print its key capabilities/limits.
// Fixes vs. the original:
//  - "warps per multiprocessor" divided warpSize by maxThreadsPerMultiProcessor
//    (inverted -- always printed 0); now maxThreadsPerMultiProcessor / warpSize.
//  - the "Total amount of global memory" line was printed twice.
//  - regsPerBlock is a count of 32-bit registers, not bytes, so it is now
//    printed as a count instead of "KB".
void query_device() {
    int deviceCount = 0;
    cudaGetDeviceCount(&deviceCount);
    if (deviceCount == 0)
        printf("No CUDA support devices found!\n");
    for (int devNo = 0; devNo < deviceCount; devNo++) {
        cudaDeviceProp iProp;
        cudaGetDeviceProperties(&iProp, devNo);
        printf("Device %d: %s\n", devNo + 1, iProp.name);
        printf("  Number of multiprocessors: %d\n", iProp.multiProcessorCount);
        printf("  Clock rate: %d\n", iProp.clockRate);
        printf("  Compute capability: %d.%d\n", iProp.major, iProp.minor);
        printf("  Total amount of global memory: %4.2f KB\n",
               (double)(iProp.totalGlobalMem / (1 << 10)));
        printf("  Total amount of constant memory: %4.2f KB\n",
               (double)(iProp.totalConstMem / (1 << 10)));
        printf("  Total amount of shared memory per block: %4.2f KB\n",
               (double)(iProp.sharedMemPerBlock / (1 << 10)));
        printf("  Total number of registers available per block: %d\n",
               iProp.regsPerBlock);
        printf("  Warp size: %d\n", iProp.warpSize);
        printf("  Maximum number of threads per block: %d\n", iProp.maxThreadsPerBlock);
        printf("  Maximum number of threads per multiprocessor: %d\n",
               iProp.maxThreadsPerMultiProcessor);
        printf("  Maximum number of warps per multiprocessor: %d\n",
               iProp.maxThreadsPerMultiProcessor / iProp.warpSize);
        printf("  Maximum grid size: (%d, %d, %d)\n", iProp.maxGridSize[0],
               iProp.maxGridSize[1], iProp.maxGridSize[2]);
        printf("  Maximum block size: (%d, %d, %d)\n\n",
               iProp.maxThreadsDim[0], iProp.maxThreadsDim[1], iProp.maxThreadsDim[2]);
    }
}
// Entry point: report the properties of every visible CUDA device and exit.
int main() {
query_device();
return 0;
}
3,599 | /* datatools.c - support functions for the matrix examples
*/
#include <float.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
// Using single indexed arrays
/* Fill the m-by-n row-major matrix A (stored as one contiguous block of
 * m*n doubles) with a constant value. */
void init_matrix(int m, int n, double *A, double value) {
  int row, col;
  for (row = 0; row < m; row++) {
    double *r = A + row * n;
    for (col = 0; col < n; col++)
      r[col] = value;
  }
}
/* Print the m-by-n matrix A, where A is an array of m row pointers each
 * holding n doubles (the malloc_2d layout used elsewhere in this file).
 *
 * BUG FIX: the old code indexed A[i*m+j], which (a) yielded a double* that
 * was passed to printf's %f conversion -- undefined behavior -- and (b) used
 * stride m instead of n. Now indexes A[i][j]. */
void print_matrix(int m, int n, double **A) {
  int i, j;
  printf("----------------------------------\n");
  for (i = 0; i < m; i++) {
    for (j = 0; j < n; j++) {
      printf("%6.2f, ", A[i][j]);
    }
    printf("\n");
  }
  printf("----------------------------------\n");
}
/* Set every one of the m entries of vector V to 1.0. */
void init_vector(int m, double *V) {
  int idx;
  for (idx = 0; idx < m; idx++)
    V[idx] = 1.0;
}
/*
int
check_results(char *comment, int m, int n, double **A) {
double relerr;
double *a = A[0];
double ref = 3.0;
int i, errors = 0;
char *marker;
double TOL = 100.0 * DBL_EPSILON;
double SMALL = 100.0 * DBL_MIN;
if ( (marker=(char *)malloc(m*n*sizeof(char))) == NULL ) {
perror("array marker");
exit(-1);
}
for (i=0; i<m*n; i++)
{
relerr = fabs((a[i]-ref));
if ( relerr <= TOL )
{
marker[i] = ' ';
}
else
{
errors++;
marker[i] = '*';
}
}
if ( errors > 0 )
{
printf("Routine: %s\n",comment);
printf("Found %d differences in results for m=%d n=%d:\n",
errors,m,n);
for (i=0; i<m*n; i++)
printf("\t%c a[%d]=%f ref[%d]=%f\n",marker[i],i,a[i],i,ref);
}
return(errors);
}
*/
/* Routine for allocating two-dimensional array */
/*
double **
malloc_2d(int m, int n)
{
int i;
if (m <= 0 || n <= 0)
return NULL;
double **A = malloc(m * sizeof(double *));
if (A == NULL)
return NULL;
A[0] = malloc(m*n*sizeof(double));
if (A[0] == NULL) {
free(A);
return NULL;
}
for (i = 1; i < m; i++)
A[i] = A[0] + i * n;
return A;
}
*/
/* Release a matrix allocated malloc_2d-style: one contiguous data block
 * hanging off A[0] plus the row-pointer array A itself.
 * BUG FIX: guards against NULL so free_2d(NULL) is safe (the old code
 * dereferenced A[0] unconditionally). */
void free_2d(double **A) {
  if (A == NULL)
    return;
  free(A[0]);
  free(A);
}
|
3,600 | #include <stdio.h>
#define N 578 // size of vectors
#define T 275 //number of threads per block
//Need to show array of a and b
// Element-wise vector addition: C[i] = A[i] + B[i] for every i in [0, N).
// One thread per element; out-of-range threads exit immediately.
__global__ void vecAdd(int *A ,int *B,int *C){
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N)
        return;
    C[idx] = A[idx] + B[idx];
}
// Driver: fills two N-length int vectors with seeded pseudo-random values,
// adds them on the GPU via vecAdd, and prints the result vector.
// Fixes vs. the original: CUDA return codes and the kernel launch status are
// now checked (old code ignored every error), and main returns a value.
int main(int argc, char *argv[]){
    int blocks = (N + T - 1) / T;   // ceil-div so every element is covered
    srand(1234);                    // fixed seed -> reproducible input
    int i;
    int size = N * sizeof ( int);
    int a[N], b[N], c[N], *devA, *devB, *devC;
    cudaError_t err;
    for (i = 0;i < N; i++){
        a[i] = rand() % 100;
        b[i] = rand() % 100;
    }
    // Allocate device buffers; bail out loudly instead of computing garbage.
    if ((err = cudaMalloc((void**)&devA, size)) != cudaSuccess ||
        (err = cudaMalloc((void**)&devB, size)) != cudaSuccess ||
        (err = cudaMalloc((void**)&devC, size)) != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    if ((err = cudaMemcpy(devA, a, size, cudaMemcpyHostToDevice)) != cudaSuccess ||
        (err = cudaMemcpy(devB, b, size, cudaMemcpyHostToDevice)) != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy H2D failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    vecAdd<<<blocks,T>>>(devA,devB,devC);
    // Kernel launches do not return errors directly; query explicitly.
    if ((err = cudaGetLastError()) != cudaSuccess) {
        fprintf(stderr, "vecAdd launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    // Blocking D2H copy also synchronizes with the kernel.
    if ((err = cudaMemcpy(c, devC, size, cudaMemcpyDeviceToHost)) != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy D2H failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    cudaFree(devA);
    cudaFree(devB);
    cudaFree(devC);
    for (i = 0; i < N; i++){
        printf("%d ",c[i]);
    }
    printf("\n");
    return 0;
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.