serial_no
int64 1
24.2k
| cuda_source
stringlengths 11
9.01M
|
|---|---|
2,101
|
#include <stdio.h>
#include <stdlib.h>
// Prints one greeting per launched thread (launched as kernel<<<1,1>>>() in main).
// Fix: removed the unused local `idx`, which was computed and then discarded.
__global__ void kernel() {
    printf("Hello from thread.\n");
}
int main(){
    int host_a, host_b, host_c;
    int *dev_a, *dev_b, *dev_c;
    int size = sizeof (int);
    /* One int per operand on the device. */
    cudaMalloc((void**) &dev_a, size);
    cudaMalloc((void**) &dev_b, size);
    cudaMalloc((void**) &dev_c, size);
    /* BUGFIX: dev_c is never written by the kernel below, so the value
       copied back into host_c was indeterminate. Zero it so the program
       deterministically prints "C = 0". */
    cudaMemset(dev_c, 0, size);
    host_a = 2;
    host_b = 7;
    cudaMemcpy(dev_a, &host_a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, &host_b, size, cudaMemcpyHostToDevice);
    /* NOTE(review): kernel() takes no arguments, so dev_a/dev_b are copied
       but never used — presumably teaching scaffolding; confirm intent. */
    kernel <<< 1, 1 >>> ();
    cudaDeviceSynchronize();
    cudaMemcpy(&host_c, dev_c, size, cudaMemcpyDeviceToHost);
    printf("C = %d \n", host_c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    printf("Hello, CUDA! \n");
}
|
2,102
|
#include "includes.h"
// Element-wise matrix combine over flattened row*col matrices:
// mat3 = mat1 + sign * mat2 (sign = +1 adds, sign = -1 subtracts).
// One thread per element, guarded for the grid tail.
__global__ void matrix_add_matrix(float* mat1, float* mat2, float* mat3, int row, int col, int sign){
    int elem = blockDim.x * blockIdx.x + threadIdx.x;
    int total = row * col;
    if (elem < total) {
        mat3[elem] = mat1[elem] + sign * mat2[elem];
    }
}
|
2,103
|
#include <stdlib.h>
#include <stdio.h>
// Fills a[]: threads 0..2 of each block store their block index,
// all higher-numbered threads store the sentinel value 100.
__global__ void kernel1(int *a)
{
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (threadIdx.x > 2)
        a[gid] = 100;
    else
        a[gid] = blockIdx.x;
}
int main() {
    // 20 elements = 5 blocks of 4 threads; the grid covers the array exactly.
    const int n = 20;
    const int memSize = n * sizeof(int);
    int *a = (int*) malloc(n * sizeof(*a));
    int *d_a;
    cudaMalloc((void**) &d_a, memSize);
    cudaMemcpy(d_a, a, memSize, cudaMemcpyHostToDevice);
    dim3 block(4);
    dim3 grid(n / block.x);
    kernel1<<<grid, block>>>(d_a);
    // Blocking copy: also waits for the kernel to finish.
    cudaMemcpy(a, d_a, memSize, cudaMemcpyDeviceToHost);
    for (int j = 0; j < n; j++)
        printf("%d\n", a[j]);
    cudaFree(d_a);
    free(a);
    return 0;
}
|
2,104
|
#include "includes.h"
__global__ void __minImpurityb(long long *keys, int *counts, int *outv, int *outf, float *outg, int *outc, int *jc, int *fieldlens, int nnodes, int ncats, int nsamps) {}
|
2,105
|
// Device code
// A is assumed to be initialized by an
// initializer port to be uniformly 0.
// the length of the output is determined
// by a metaport.
// Given an input matrix A of length N'>N
// the ptask runtime code for this will
// allocate an output of size N. Output
// should be uniform scalar of size N
// Adds `scalar` to each of the first N elements of A, writing into B.
// One thread per element, with a tail guard for grids that overshoot N.
extern "C" __global__ void op(float* A, float * B, float scalar, int N)
{
    int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid < N) {
        B[gid] = A[gid] + scalar;
    }
}
|
2,106
|
/*
* CUDALEAPINT.CU: program to integrate hamiltonian system using leapfrog
* and CUDA
* Based off of:
* https://courses.physics.ucsd.edu/2020/Winter/physics141/Assignments/leapint.c
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "cudaLeapint.cuh"
int main(int argc, char **argv)
{
    /* Integration state and per-body host staging buffers. */
    int n, mstep, nout, nstep;
    float massBuffer[MAXPNT];
    float rxBuffer[MAXPNT];
    float ryBuffer[MAXPNT];
    float rzBuffer[MAXPNT];
    float vxBuffer[MAXPNT];
    float vyBuffer[MAXPNT];
    float vzBuffer[MAXPNT];
    /* Device-side arrays, allocated with cudaMalloc below. */
    float* mass = NULL;
    float* rx = NULL;
    float* ry = NULL;
    float* rz = NULL;
    float* vx = NULL;
    float* vy = NULL;
    float* vz = NULL;
    float* gm = NULL;
    char * nBodyFile;
    float tnow, dt;
    /* GM Constants in AU^3/Day^2 */
    float GMCONST[MAXPNT];
    for (int i = 0; i < MAXPNT; i++) {
        GMCONST[i] = 0.0;
    }
    /* Number of astronomical bodies */
    if (argc <= 1) {
        printf("Usage: %s [N-Body File]\n", argv[0]);
        /* BUGFIX: exit here — previously execution fell through and used
           the uninitialized nBodyFile pointer. */
        return 0;
    }
    nBodyFile = argv[1];
    /* Parsing through the nBodyFile for celestial bodies; each line is
       "mass rx ry rz vx vy vz", whitespace-separated. */
    printf("Reading values in from %s.\n", nBodyFile);
    FILE* fp = fopen((const char *) nBodyFile, "r");
    if (fp == NULL) {
        return 0;
    }
    int lineNumber = 0;
    char buffer[MAXBUFFER];
    char * delimiters = " \t";
    char * token;
    char * s;
    /* BUGFIX: stop at MAXPNT lines so an oversized input file cannot
       overrun the fixed-size staging buffers. */
    while (lineNumber < MAXPNT && fgets(buffer, MAXBUFFER, fp)) {
        s = buffer;
        token = strtok(s, delimiters);
        if (token != NULL) {
            massBuffer[lineNumber] = atof(token);
        }
        token = strtok(NULL, delimiters);
        if (token != NULL) {
            rxBuffer[lineNumber] = atof(token);
        }
        token = strtok(NULL, delimiters);
        if (token != NULL) {
            ryBuffer[lineNumber] = atof(token);
        }
        token = strtok(NULL, delimiters);
        if (token != NULL) {
            rzBuffer[lineNumber] = atof(token);
        }
        token = strtok(NULL, delimiters);
        if (token != NULL) {
            vxBuffer[lineNumber] = atof(token);
        }
        token = strtok(NULL, delimiters);
        if (token != NULL) {
            vyBuffer[lineNumber] = atof(token);
        }
        token = strtok(NULL, delimiters);
        if (token != NULL) {
            vzBuffer[lineNumber] = atof(token);
        }
        lineNumber += 1;
    }
    n = lineNumber;
    fclose(fp);
    /* Allocating device memory */
    cudaSetDevice(0);
    cudaMalloc(&mass, n*sizeof(float));
    cudaMalloc(&rx, n*sizeof(float));
    cudaMalloc(&ry, n*sizeof(float));
    cudaMalloc(&rz, n*sizeof(float));
    cudaMalloc(&vx, n*sizeof(float));
    cudaMalloc(&vy, n*sizeof(float));
    cudaMalloc(&vz, n*sizeof(float));
    cudaMalloc(&gm, n*sizeof(float));
    /* Copying memory to the device */
    cudaMemcpy(mass, massBuffer, n*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(rx, rxBuffer, n*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(ry, ryBuffer, n*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(rz, rzBuffer, n*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(vx, vxBuffer, n*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(vy, vyBuffer, n*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(vz, vzBuffer, n*sizeof(float), cudaMemcpyHostToDevice);
    /* Setting the gravitational constant for the peripheral bodies */
    for (int i = 0; i < n; i++) {
        GMCONST[i] = massBuffer[i];
    }
    cudaMemcpy(gm, GMCONST, n*sizeof(float), cudaMemcpyHostToDevice);
    /* Setting initial time */
    tnow = 0.0;
    /* next, set integration parameters */
    mstep = 700;  /* number of steps to take */
    nout = 1;     /* steps between outputs */
    dt = 0.02;    /* timestep for integration */
    /* Checking to see if n is a multiple of nThreads*deviceCount
       If not, then round down (As seen in David's code) */
    int nParticles = nThreads * int (float(n) / (nThreads));
    if (nParticles != n) {
        n = nParticles;
    }
    int numBlocks = n / nThreads;
    if (numBlocks == 0) {
        numBlocks = 1;
    }
    printf("numBlocks: %d\nnThreads: %d\n\nParticles: %d\n", numBlocks, nThreads, n);
    /* now, loop performing integration */
    {
        FILE* outFile;
        const char * filename;
        /* Output file name: "<input stem>Sim.txt". Note: strtok mutates
           nBodyFile in place, which is fine — argv storage is writable. */
        char * outBuffer = strtok(nBodyFile, ".");
        filename = strcat(outBuffer, "Sim.txt");
        outFile = fopen(filename, "w");  /* truncate any previous run's file */
        /* Progress Bar Initialization.
           BUGFIX: one extra slot for the NUL terminator — printf("%s")
           previously read past the end of the (unterminated) array. */
        int maxBar = 30;
        char outputBar[maxBar + 1];
        outputBar[0] = '|';
        outputBar[maxBar - 1]= '|';
        outputBar[maxBar] = '\0';
        for (int i = 1; i < maxBar - 1; i++) {
            outputBar[i] = ' ';
        }
        printstate(rxBuffer, ryBuffer, rzBuffer,
                   vxBuffer, vyBuffer, vzBuffer, n, tnow, outFile, filename);
        for (nstep = 0; nstep < mstep; nstep++) {
            /* Progress Bar Handling/Management */
            int v = round((((double) nstep) / ((double) mstep)) * 100.0);
            int barIndex = (int) round(((double) v / 100.0) * maxBar) + 1;
            if (barIndex < maxBar - 1) {
                outputBar[barIndex] = '#';
            }
            printf("\r%s %d%%", outputBar, v);
            fflush(stdout);
            /* Take one leapfrog step on the device. */
            leapstep <<<numBlocks, nThreads>>>(rx, ry, rz,
                                               vx, vy, vz,
                                               n, dt, gm, 0);
            cudaDeviceSynchronize();
            /* Copying memory from device to computer */
            cudaMemcpy(massBuffer, mass, n*sizeof(float), cudaMemcpyDeviceToHost);
            cudaMemcpy(rxBuffer, rx, n*sizeof(float), cudaMemcpyDeviceToHost);
            cudaMemcpy(ryBuffer, ry, n*sizeof(float), cudaMemcpyDeviceToHost);
            cudaMemcpy(rzBuffer, rz, n*sizeof(float), cudaMemcpyDeviceToHost);
            cudaMemcpy(vxBuffer, vx, n*sizeof(float), cudaMemcpyDeviceToHost);
            cudaMemcpy(vyBuffer, vy, n*sizeof(float), cudaMemcpyDeviceToHost);
            cudaMemcpy(vzBuffer, vz, n*sizeof(float), cudaMemcpyDeviceToHost);
            /* and update value of time */
            tnow = tnow + dt;
            /* Printing out current positions and velocities */
            if (nstep % nout == 0) {
                printstate(rxBuffer, ryBuffer, rzBuffer,
                           vxBuffer, vyBuffer, vzBuffer, n, tnow, outFile, filename);
            }
        }
        cudaDeviceSynchronize();
        /* Outputting to file */
        printstate(rxBuffer, ryBuffer, rzBuffer,
                   vxBuffer, vyBuffer, vzBuffer, n, tnow, outFile, filename);
        /* BUGFIX: close the "w" handle — it was leaked before (printstate
           reopens the file by name, so this handle is otherwise unused). */
        if (outFile != NULL) {
            fclose(outFile);
        }
        /* Cleaning up progress bar */
        printf("\r%s %d%%\n", outputBar, 100);
        fflush(stdout);
    }
    /* Freeing memory */
    cudaFree(mass);
    cudaFree(rx);
    cudaFree(ry);
    cudaFree(rz);
    cudaFree(vx);
    cudaFree(vy);
    cudaFree(vz);
    cudaFree(gm);
    cudaDeviceReset();
}
/*
 * LEAPSTEP: take one step using the leap-from integrator, formulated
 * as a mapping from t to t + dt. WARNING: this integrator is not
 * accurate unless the timestep dt is fixed from one call to another.
 *
 * Launch: one thread per body. Host code truncates n down to a multiple
 * of nThreads before launching, so the grid normally covers [0, n) exactly.
 * NOTE(review): there is no `index < n` guard — if the host forced
 * numBlocks up to 1 (the n < nThreads path in main), the extra threads
 * write out of bounds; confirm callers never hit that case.
 */
__global__ void leapstep(float rx[], float ry[], float rz[],
float vx[], float vy[], float vz[],
int n, float dt, float gmConst[], int deviceOffset)
{
int index = deviceOffset + blockIdx.x * blockDim.x + threadIdx.x;
float3 ac3;
/* call acceleration code (first half-kick) */
ac3 = accel(rx, ry, rz, n, gmConst, deviceOffset, index);
__syncthreads();
/* Applying acceleration to velocity: v += (dt/2) * a */
vx[index] = vx[index] + 0.5 * dt * ac3.x;
vy[index] = vy[index] + 0.5 * dt * ac3.y;
vz[index] = vz[index] + 0.5 * dt * ac3.z;
/* Applying velocity to position: r += dt * v (drift) */
rx[index] = rx[index] + dt * vx[index];
ry[index] = ry[index] + dt * vy[index];
rz[index] = rz[index] + dt * vz[index];
/* call acceleration code again at the new positions (second half-kick).
   NOTE(review): other threads may not have finished their position
   updates when accel reads rx/ry/rz here — __syncthreads() only covers
   this block, not the grid; confirm acceptable for this simulation. */
ac3 = accel(rx, ry, rz, n, gmConst, deviceOffset, index);
__syncthreads();
vx[index] = vx[index] + 0.5 * dt * ac3.x;
vy[index] = vy[index] + 0.5 * dt * ac3.y;
vz[index] = vz[index] + 0.5 * dt * ac3.z;
}
/*
 * ACCEL: softened inverse-square gravitational acceleration on body
 * `index` summed over all other bodies. Body 0 is pinned: it always
 * receives zero acceleration (the central body).
 *
 * BUGFIX: the original called __syncthreads() inside the divergent
 * `if (index != 0)` branch, within a data-dependent loop. A barrier in
 * divergent control flow is undefined behavior, and no shared memory is
 * used here, so the barrier served no purpose and has been removed.
 */
__device__
float3 accel(float* rx, float* ry, float* rz,
             int n, float gmConst[], int deviceOffset, int index)
{
    float3 ac3 = {0.0f, 0.0f, 0.0f};
    if (index != 0) {
        for (int j = 0; j < n; j++) {
            if (j != index) {
                /* Softened squared distance to body j */
                float distVal = (rx[index]-rx[j])*(rx[index]-rx[j])
                              + (ry[index]-ry[j])*(ry[index]-ry[j])
                              + (rz[index]-rz[j])*(rz[index]-rz[j])
                              + SOFT_FACTOR;
                /* 1 / r^3: inverse-square law times the unit direction vector */
                distVal = distVal * distVal * distVal;
                distVal = 1.0f / sqrtf(distVal);
                /* Summing up acceleration */
                ac3.x += -(rx[index]-rx[j])*gmConst[j]*distVal;
                ac3.y += -(ry[index]-ry[j])*gmConst[j]*distVal;
                ac3.z += -(rz[index]-rz[j])*gmConst[j]*distVal;
            }
        }
    }
    return ac3;
}
/*
 * PRINTSTATE: output system state variables.
 *
 * Appends one line per particle (time, index, position, velocity) to the
 * file named `filename`. NOTE: the outFile argument is ignored — the file
 * is reopened by name in append mode on every call, matching the original
 * behavior so existing callers are unaffected.
 */
void printstate(float rx[], float ry[], float rz[],
float vx[], float vy[], float vz[],
int n, float tnow, FILE* outFile, const char * filename)
{
    int i;
    outFile = fopen(filename, "a+");
    /* BUGFIX: fail soft if the file cannot be opened instead of passing
       NULL to fprintf (undefined behavior / crash). */
    if (outFile == NULL) {
        return;
    }
    for (i = 0; i < n; i++) {
        /* Printing out time, particle, position, and velocity */
        fprintf(outFile,
            "%8.4f\t%4d\t%20.6f\t%20.6f\t%20.6f\t%20.6f\t%20.6f\t%20.6f\n",
            tnow, i, rx[i], ry[i], rz[i], vx[i], vy[i], vz[i]);
    }
    fclose(outFile);
}
|
2,107
|
#include "includes.h"
//Author: Adriel Kim
//6-27-2020
//Updated 7-5-2020
/*
Desc: Basic 2D matrix operations - element-wise addition, subtraction, multiplication, and division.
To do:
- Use vector instead of array?
- Be able to test for varying sizes of images. (For now we manually define with constant N)
- Add timer to compare CPU and GPU implementations
- Double check if all memory is freed
- Optimize by eliminating redundant calculations
- Test code on department servers
*/
//define imin(a,b) (a<b?a:b)//example of ternary operator in c++
//4176,2048
#define R 4176
#define C 2048
#define N (R*C)//# of elements in matrices
const int threadsPerBlock = 1024;//threads in a block. A chunk that shares the same shared memory.
const int blocksPerGrid = 8352;//imin(32, (N + threadsPerBlock - 1) / threadsPerBlock);//this will be our output array size for sumKernel.
using namespace std;
cudaError_t matrixOperation(double* c, const double* a, const double* b, unsigned int arrSize, int operation, float* kernel_runtime, float* GPU_transfer_time);
void CPUMatrixOperation(double* c, const double* a, const double* b, unsigned int arrSize, int operation);
long long start_timer();
long long stop_timer(long long start_time, const char *name);
//any advantages with mapping directly to strucutre of matrix? We're just representing 2D matrix using 1D array...
//it would be difficult to do the above since we want the operations to occur over abitrarily large matrices
//this can definitely be optimzied by elminating redundant calculations
//---------------------------------------------------------------------------------
// Element-wise sum c = a + b over all N flattened matrix elements.
// Grid-stride loop: each thread covers indices tid, tid+stride, ... so
// correctness does not depend on the launch configuration covering N.
__global__ void matrixAddKernel(double *c, const double *a, const double *b) {
    const int stride = blockDim.x * gridDim.x;
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < N; i += stride) {
        c[i] = a[i] + b[i];
    }
}
|
2,108
|
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <stdint.h>
#include <assert.h>
#include <time.h>
#include <math.h>
/*
Author: Andrew DiPrinzio
Course: EN605.417.FA
*/
static const uint32_t DEFAULT_NUM_THREADS = 1024;
static const uint32_t DEFAULT_BLOCK_SIZE = 16;
// Prints command-line help, including the compiled-in defaults.
static void usage(){
    printf("Usage: ./assignment7 [-t <num_threads>] [-b <block_size>] [-h]\n"
           "\t-t: Specify the number of threads. <num_threads> must be greater than 0. Optional (default %u)\n"
           "\t-b: Specify the size of each block. <block_size> must be greater than 0. Optional (default %u)\n",
           DEFAULT_NUM_THREADS, DEFAULT_BLOCK_SIZE);
}
// Structure that holds program arguments specifying number of threads/blocks
// to use.
typedef struct {
uint32_t num_threads;
uint32_t block_size;
} Arguments;
// Parse the command line arguments using getopt and return an Argument structure
// GetOpt requies the POSIX C Library
// Parse the command line arguments using getopt and return an Argument structure.
// GetOpt requires the POSIX C Library.
// Fix: -t and -b values are now validated to be > 0, which is the contract
// usage() prints; previously "-t 0" or "-t abc" (atoi -> 0) was accepted and
// would later produce a zero-thread launch.
static Arguments parse_arguments(const int argc, char ** argv){
    // Argument format string for getopt
    static const char * _ARG_STR = "ht:b:";
    // Initialize arguments to their default values
    Arguments args;
    args.num_threads = DEFAULT_NUM_THREADS;
    args.block_size = DEFAULT_BLOCK_SIZE;
    // Parse any command line options
    int c;
    int value;
    while ((c = getopt(argc, argv, _ARG_STR)) != -1) {
        switch (c) {
            case 't':
                value = atoi(optarg);
                if (value <= 0) {       // enforce the "> 0" contract
                    usage();
                    exit(-1);
                }
                args.num_threads = value;
                break;
            case 'b':
                // Normal argument
                value = atoi(optarg);
                if (value <= 0) {       // enforce the "> 0" contract
                    usage();
                    exit(-1);
                }
                args.block_size = value;
                break;
            case 'h':
                // 'help': print usage, then exit
                // note the fall through
                usage();
            default:
                exit(-1);
        }
    }
    return args;
}
// Returns a pseudo-random int in [min, max]. Uses the scaled-division
// idiom (rather than % range) so the low-order bits of rand() do not
// dominate the result.
__host__
int random(int min, int max){
    const int bucket = RAND_MAX / (max - min + 1) + 1;
    return min + rand() / bucket;
}
// simple kernel that adds vectors: device_result[i] = device_a[i] + device_b[i]
// NOTE(review): there is no bounds guard — every launched thread writes
// device_result[threadId], so the launch grid must cover the arrays exactly
// (run_streaming launches array_size threads total; confirm for any new caller).
__global__
void arrayAddition(int *device_a, int *device_b, int *device_result)
{
int threadId = threadIdx.x + blockIdx.x * blockDim.x ;
device_result[threadId]= device_a[threadId]+device_b[threadId];
}
// Set up and run one timed experiment using a CUDA stream: async H2D
// copies, the addition kernel, then an async D2H copy, all on one stream,
// timed with CUDA events.
// Fixes: (1) the result copy-back used cudaMemcpyHostToDevice — the wrong
// direction for a device-to-host transfer; (2) the stream and both timing
// events were never destroyed, leaking resources on every run of the
// 5-iteration loop in main.
__host__
void run_streaming(int run_index, Arguments args)
{
    int *host_a, *host_b, *host_result;
    int *device_a, *device_b, *device_result;
    int array_size = args.num_threads;
    const unsigned int array_size_in_bytes = array_size * sizeof(int);
    // create events for timing
    cudaEvent_t start, stop;
    float elapsedTime;
    cudaEventCreate( &start );
    cudaEventCreate( &stop );
    //configure stream
    cudaStream_t stream;
    cudaStreamCreate(&stream);
    //Configure device and host memory (host side pinned, for async copies)
    cudaMalloc( ( void**)& device_a, array_size_in_bytes );
    cudaMalloc( ( void**)& device_b, array_size_in_bytes );
    cudaMalloc( ( void**)& device_result, array_size_in_bytes );
    cudaHostAlloc((void **)&host_a, array_size_in_bytes, cudaHostAllocDefault);
    cudaHostAlloc((void **)&host_b, array_size_in_bytes, cudaHostAllocDefault);
    cudaHostAlloc((void **)&host_result, array_size_in_bytes, cudaHostAllocDefault);
    for(int index = 0; index < array_size; index++)
    {
        host_a[index] = random(0,100);
        host_b[index] = random(0,100);
    }
    // NOTE(review): integer division — if block_size does not divide
    // array_size this grid misses the tail elements; confirm inputs.
    const unsigned int num_blocks = array_size / args.block_size;
    const unsigned int num_threads_per_blk = array_size/num_blocks;
    cudaEventRecord(start, 0);
    //initiate streaming memory operations
    cudaMemcpyAsync(device_a, host_a, array_size_in_bytes, cudaMemcpyHostToDevice, stream);
    cudaMemcpyAsync(device_b, host_b, array_size_in_bytes, cudaMemcpyHostToDevice, stream);
    /* Execute our kernel */
    arrayAddition<<<num_blocks, num_threads_per_blk, 0, stream>>>(device_a, device_b, device_result);
    /* BUGFIX: this is a device-to-host transfer; the original passed
       cudaMemcpyHostToDevice, which is an invalid direction here. */
    cudaMemcpyAsync(host_result, device_result, array_size_in_bytes, cudaMemcpyDeviceToHost, stream);
    cudaStreamSynchronize(stream);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    //Ensure all operations have completed and print summary
    cudaDeviceSynchronize();
    printf("Execution: %d\n", run_index);
    printf("\n Size of array : %d \n", array_size);
    printf("\n Time taken: %3.1f ms \n", elapsedTime);
    //memory and resource cleanup (BUGFIX: stream/events were leaked)
    cudaStreamDestroy(stream);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFreeHost(host_a);
    cudaFreeHost(host_b);
    cudaFreeHost(host_result);
    cudaFree(device_a);
    cudaFree(device_b);
    cudaFree(device_result);
}
int main(int argc, char ** argv)
{
    // Parse -t/-b options, then repeat the timed streaming experiment
    // five times, each run drawing fresh random data.
    const Arguments args = parse_arguments(argc, argv);
    printf("Num Threads: %u, Block Size: %u\n", args.num_threads, args.block_size);
    for (int run = 0; run < 5; run++)
    {
        run_streaming(run, args);
    }
    cudaDeviceReset();
    return EXIT_SUCCESS;
}
|
2,109
|
#define NDISP 48
typedef unsigned char uchar;
using namespace std;
// Semi-global-matching style cost aggregation along one of 8 scan
// directions (dir selects the direction; blockIdx.x selects the path's
// starting pixel). One thread per disparity level: threadIdx.x in [0, D),
// so the launch must use at least D threads per block and D <= NDISP.
// Espace accumulates aggregated costs; Cost is the per-pixel matching
// cost texture; texturePerp/textureParallel are texture-direction maps.
__global__ void path_aggregate(int width, int height, int D, int P1, int P2,
int dir, int* Espace, cudaTextureObject_t Cost,
short* texturePerp, short* textureParallel)
{
// L_p0/L_c0 hold the previous/current pixel's path costs, padded with a
// SHRT_MAX sentinel at both ends so d-1 / d+1 never index out of range.
__shared__ short L_p0[NDISP+2];
__shared__ short L_c0[NDISP+2];
__shared__ short delta0;  // min over disparities of the previous pixel's cost
short Tp = 0, T = 0, Tp2 = 0;
float Kt = 0;
int P1a, P2a; int Td;
int dy, dx;
int tid = threadIdx.x;
int d = tid+1;  // +1 shift to skip the low sentinel slot
//just one thread to prevent bank conflicts
if(tid == 0)
L_p0[0] = L_p0[NDISP+1] = L_c0[0] = L_c0[NDISP+1] = SHRT_MAX;
if(tid < D)
L_p0[d] = SHRT_MAX;
if(tid == 0)
delta0 = SHRT_MAX;
int x = 0;
int y = 0;
//set initial coordinates of a block (start pixel and step direction)
switch(dir)
{
case 0: // left-to-right horizontal paths, one per row
x = 0; y = blockIdx.x; dy = 0; dx = 1;
break;
case 1: // down-right diagonal paths
if(blockIdx.x < height)
{ y = height-1-(blockIdx.x); x = 0; dy=1; dx=1;}
else
{ y = 0; x = (blockIdx.x)-height;}
// NOTE(review): the else-branch above leaves dy/dx uninitialized —
// presumably dy=1; dx=1 was intended; confirm before using dir==1.
break;
case 2: // top-to-bottom vertical paths, one per column
y = 0; x = blockIdx.x; dy = 1; dx = 0;
break;
case 3: // down-left diagonal paths
if(blockIdx.x < width)
{ x = blockIdx.x; y = 0; dy = 1; dx = -1;}
else
{ y = blockIdx.x-width; x = width-1; dy = 1; dx = -1;}
break;
case 4: // right-to-left horizontal paths
x = width-1; y = blockIdx.x; dy = 0; dx= -1;
break;
case 5: // up-left diagonal paths
if(blockIdx.x < height)
{ x = width-1; y = blockIdx.x; dy=-1; dx = -1;}
else
{ y = height-1; x = blockIdx.x-height; dy=-1; dx = -1;}
break;
case 6: // bottom-to-top vertical paths
y = height-1; x = blockIdx.x; dy = -1; dx = 0;
break;
case 7: // up-right diagonal paths
if(blockIdx.x < height)
{ x = 0; y = blockIdx.x; dy = -1; dx = 1;}
else
{ x = blockIdx.x-height; y = height-1; dy = -1; dx = 1;}
break;
}
__syncthreads();
//main loop of a thread: walk the path one pixel per iteration
while(x >= 0 && y >= 0 && x < width && y < height)
{
//read texture and adjust penalties
Kt = 1;//textureParallel[y*width+x]/500;
T = texturePerp[y*width+x];
Td = abs(T-Tp2);
// NOTE(review): Td is computed but never used below — dead code or an
// unfinished penalty adaptation; confirm intent.
short C;
short bestsad; short bestdisp;
P1a = P1;
P2a = P2;
Tp2 = Tp;
Tp = T;
if(tid < D)
{
C = tex2D<short>(Cost, (d-1)*width+x, y);
if(C < SHRT_MAX-5)
{
// Classic SGM recurrence: cost + min(same d, d±1 + P1, global min + P2)
// minus the previous pixel's minimum (delta0) to keep values bounded.
L_c0[d] = C + min((int)L_p0[d], min((int)L_p0[d-1]+P1a, min((int)L_p0[d+1]+P1a, (int)delta0))) - delta0;
Espace[y*width*D + x*D + d-1] += (int)L_c0[d]*Kt;
L_p0[d] = L_c0[d];
}
else
{
// Invalid cost: propagate the sentinel instead of aggregating.
L_c0[d] = SHRT_MAX;
Espace[y*width*D + x*D + d-1] += SHRT_MAX;
L_p0[d] = L_c0[d];
}
__syncthreads();
// Tree reduction over L_c0 leaving the minimum in L_c0[1]; the
// (n*2 != i) fixups fold in the straggler element for odd sizes.
// NOTE(review): the barrier below executes inside `if(tid < D)` — fine
// only when blockDim.x == D so no thread skips it; confirm launches.
for(int i = blockDim.x, n = blockDim.x/2; n>0; n /= 2)
{
if((d-1) < n)
{
L_c0[d] = min((int)L_c0[d], (int)L_c0[d+n]);
}
if(n*2 != i)
{
L_c0[n+1] = L_c0[2*n+1];
n = n+1;
}
i = n;
__syncthreads();
}
// New normalization term for the next pixel: path minimum plus P2.
delta0 = L_c0[1] + P2a;
}
__syncthreads();
x += dx; y += dy;
}
}
/* BM_cost
 *
 * Calculates block matching cost for D disparity levels
 * between image1 and image2
 *
 * Launch: one block per image row (blockIdx.x = y), one thread per
 * column (threadIdx.x = x). Dynamic shared memory must hold one short
 * per column (width * sizeof(short)).
 */
__global__ void BM_cost(cudaTextureObject_t image1, cudaTextureObject_t image2,
short* cost, int width, int height, int D)
{
extern __shared__ short I[];  // one row of image2, cached per block
int x = threadIdx.x;
int y = blockIdx.x;
I[x] = tex2D<uchar>(image2, x, y);
__syncthreads();  // the whole row must be staged before neighbors are read
short i = tex2D<uchar>(image1, x, y);
int r = y*width*D + x;
// Absolute-difference cost for each disparity shift d. Columns left of
// the shift (x < d) have no valid match and get the SHRT_MAX sentinel.
for(int d=0; d<D; d++)
{
if(x >= d)
{
cost[r + d*width] = abs(i - I[x-d]);
}
else
cost[r + d*width] = SHRT_MAX;
}
}
// Winner-takes-all disparity selection with sub-pixel (parabolic)
// refinement. Launch: one block per pixel (blockIdx.x = x, blockIdx.y = y)
// with one thread per disparity level; blockDim.x must equal D and
// D <= NDISP. Writes the chosen disparity, scaled by 10, into `disparity`
// and projects it into the second view's map `disparity2` for later
// left/right validation.
__global__ void energy_minimalize(int *Energy, short* disparity, short* disparity2,
int D, int width, int height)
{
__shared__ int L[NDISP]; //storage for disparity values (reduced in place)
__shared__ int cL[NDISP]; // untouched copy, used to recover the argmin below
int d = threadIdx.x;
int x = blockIdx.x;
int y = blockIdx.y;
float d_refined;
//read energy for each disparity (one operation)
L[d] = Energy[y*width*D + x*D + d];
cL[d] = L[d];
__syncthreads();
//use reduction algorithm to minimalize energy
// Tree reduction leaving the minimum in L[0]; the (n*2 != i) fixups fold
// the straggler element back in when the active size is odd.
for(int i = blockDim.x, n = blockDim.x/2; n>0; n /= 2)
{
if(d < n)
{
L[d] = min(L[d], L[d+n]);
}
if(n*2 != i)
{
L[n] = L[2*n];
n = n+1;
}
i = n;
__syncthreads();
}
// The thread whose original energy equals the minimum claims the pixel.
// NOTE(review): on ties several threads write the same pixel — last
// writer wins nondeterministically; confirm acceptable.
if(cL[d] == L[0])
{
if(0 < d && d < D-1)
{
// Parabolic sub-pixel interpolation around the minimum; the
// denominator is clamped to >= 1 to avoid division by zero.
int denom2 = max((Energy[(y * width * D) + (x * D) + d - 1]
+ Energy[(y * width * D) + (x * D) + d + 1]
- 2 * Energy[(y * width * D) + (x * D) + d]), 1);
d_refined = 10 * ((float) d) + (float)((Energy[(y * width * D)
+ (x * D) + d - 1] - Energy[(y * width * D)
+ (x * D) + d + 1]) + denom2) / (denom2 * 2);
}
else
{
// Border disparities cannot be refined; keep the integer value (x10).
d_refined = 10*(float)d;
}
disparity[y*width+x] = d_refined;
if(x-1-d >= 0)
disparity2[y*width+x-1-d] = d_refined;
}
__syncthreads();
}
// Left/right consistency check: invalidates (sets to -1) disparity pixels
// whose counterpart in disparity2 disagrees by more than disp12maxdiff
// (disparities are stored scaled by 10, hence the 10* on the threshold).
// Launch: one block per row, one thread per column.
// NOTE(review): x2 is offset by the 10x-scaled disparity value — confirm
// that is intended rather than x - d0/10.
__global__ void validate(short* disparity, short* disparity2, int width, int height, int disp12maxdiff)
{
    const int col = threadIdx.x;
    const int row = blockIdx.x;
    const int d0 = disparity[row * width + col];
    if (d0 == -1)
        return;  // already invalid
    const int x2 = col - d0;
    if (x2 >= 0 && abs(disparity2[row * width + x2] - d0) > 10 * disp12maxdiff)
    {
        disparity[row * width + col] = -1;
    }
}
|
2,110
|
#include <stdlib.h>
#include <stdio.h>
// Kernel adding entries of the adjacent array entries (radius of 3) of a 1D array
//
// even better
// * one thread reads needed data into shared memory
// * every thread-block computes blockDim.x partial sums
// * data read from shared memory
// even better
// * every thread reads one entry into shared memory
// * every thread-block computes blockDim.x-6 partial sums
// * data read from shared memory
// Sums each entry with its 6 neighbours (radius 3) of a 1D array via
// shared memory: each thread stages one element (blocks overlap by a
// 3-element halo on each side, hence the blockDim.x-6 effective width),
// then the interior threads each produce one output sum.
// Fixes: (1) a missing __syncthreads() between the shared-memory stores
// and the neighbour reads (data race); (2) a missing i < n guard on the
// output write — the last block extends past the array end because the
// grid size rounds up.
__global__ void kernel4(int n, int *a, int *b)
{
    int i = (blockDim.x-6)*blockIdx.x+threadIdx.x-3;
    int idx = threadIdx.x;
    __shared__ int values[256];
    int sum = 0;
    // Stage one element per thread; out-of-range halo positions read as 0.
    values[idx] = (i>-1 && i<n ) ? a[i] : 0;
    // BUGFIX: all stores must be visible before neighbours are read.
    __syncthreads();
    // Interior (non-halo) threads each write one output.
    // BUGFIX: the i<n guard prevents the last block writing past b[n-1].
    if( idx>2 && idx<256-3 && i<n ){
        for( int j=-3; j<4; j++)
            sum += values[ idx+j ];
        b[i]=sum;
    }
}
int main() {
    int n=2000000;
    int memSize = n*sizeof(int);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    int *a, *d_a;
    a = (int*) malloc (n*sizeof(*a));
    cudaMalloc( (void**) &d_a, memSize);
    int *b, *d_b;
    b = (int*) malloc (n*sizeof(*b));
    cudaMalloc( (void**) &d_b, memSize);
    for(int j=0; j<n; j++){
        a[j] = j;
        b[j] = 0;
    }
    cudaMemcpy( d_a, a, memSize, cudaMemcpyHostToDevice);
    cudaMemcpy( d_b, b, memSize, cudaMemcpyHostToDevice);
    // 256 threads/block, 6 of which are halo; grid rounds up to cover n.
    dim3 block(256);
    dim3 grid((n+block.x-7)/(block.x-6));
    cudaEventRecord(start);
    kernel4<<<grid,block>>>(n,d_a,d_b);
    cudaEventRecord(stop);
    cudaMemcpy( b, d_b, memSize, cudaMemcpyDeviceToHost);
    /* BUGFIX: cudaEventElapsedTime requires the stop event to have
       completed; synchronize on it explicitly rather than relying on the
       blocking memcpy above. */
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("runtime [s]: %f\n", milliseconds/1000.0);
    for(int j=0; j<10; j++)
        printf("%d\n",b[j]);
    /* BUGFIX: destroy the timing events (previously leaked). */
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_a);
    free(a);
    cudaFree(d_b);
    free(b);
    return 0;
}
|
2,111
|
#include<stdio.h>
int main(){
    // NOTE(review): this program computes nothing — series and mSize are
    // declared but never used; presumably an unfinished scaffold.
    int* series;
    int const n = 1<<14;        // 16384 elements
    int mSize = n*sizeof(int);  // would be the allocation size, if used
    (void)series;               // silence unused-variable warnings
    (void)mSize;
    return 0;
}
|
2,112
|
/**
* Yuri Gorokhov
* lab 4 - Rows vs Columns
*/
#include <stdio.h>
#include <cuda.h>
#define ARRAY_SIZE 256
__global__ void kernel_row();
__global__ void kernel_col();
int main (void) {
    // Times kernel_row against kernel_col using CUDA events; each kernel
    // runs once with a single block of ARRAY_SIZE threads.
    cudaEvent_t start, stop;
    float elapsedTime;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // --- row-access kernel ---
    cudaEventRecord(start, 0);
    kernel_row<<<1, ARRAY_SIZE>>>();
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("kernel row time: %f\n", elapsedTime);
    // --- column-access kernel ---
    cudaEventRecord(start, 0);
    kernel_col<<<1, ARRAY_SIZE>>>();
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("kernel col time: %f\n", elapsedTime);
    return 0;
}
__global__ void kernel_row() {
    // Timing probe: each thread walks one ROW of a shared 2D array.
    // NOTE(review): A is 256*256 floats = 256 KB of static shared memory,
    // which exceeds the per-block shared-memory limit of any GPU, so this
    // launch should fail — and main() checks no errors, so the failure is
    // silent and the reported times are suspect. Confirm intent.
    __shared__ float A[ARRAY_SIZE][ARRAY_SIZE];
    // NOTE(review): A is never written (uninitialized shared reads), `sum`
    // is an int accumulating floats and is discarded, and the loop stops
    // one short of the last column — benchmark scaffolding only.
    int sum = 0;
    for(int i = 0; i < ARRAY_SIZE-1; i++) {
        sum += A[threadIdx.x][i];
    }
    __syncthreads();
}
__global__ void kernel_col() {
    // Timing probe: each thread walks one COLUMN of a shared 2D array
    // (the contrasting access pattern to kernel_row).
    // NOTE(review): same caveats as kernel_row — 256 KB static shared
    // memory exceeds hardware limits (silent launch failure with no error
    // checking), the array is read uninitialized, and `sum` is discarded.
    __shared__ float A[ARRAY_SIZE][ARRAY_SIZE];
    int sum = 0;
    for(int i = 0; i < ARRAY_SIZE-1; i++) {
        sum += A[i][threadIdx.x];
    }
    __syncthreads();
}
|
2,113
|
#include <cassert>
#include <cstdlib>
#include <iostream>
// Prints a y-by-x integer matrix, one bracketed row per line, followed
// by a trailing blank line.
void print_matrix(int *m, int y, int x) {
    for (int row = 0; row < y; row++) {
        printf("[");
        for (int col = 0; col < x; col++)
            printf("%d, ", m[row * x + col]);
        printf("]\n");
    }
    printf("\n");
}
// Fills the first l entries of m with pseudo-random values in [0, 100).
void init_matrix(int *m, int l) {
    for (int k = 0; k < l; k++)
        m[k] = rand() % 100;
}
// Computes one element of c = a @ b, where a is (l x m), b is (m x n),
// c is (l x n): the thread at global (x=j, y=i) produces c[i][j].
__global__ void matmul2d(int *a, int *b, int *c, int m, int n) {
    int i = blockIdx.y * blockDim.y + threadIdx.y;  // output row
    int j = blockIdx.x * blockDim.x + threadIdx.x;  // output column
    // NOTE(review): no bounds guard — the launch grid must tile the
    // l x n output exactly (as in main(), where L, M, N are all multiples
    // of the 32-thread tile); confirm for any other caller.
    int temp = 0;
    for (int k = 0; k < m; k++) {
        temp += a[i * m + k] * b[k * n + j];
    }
    c[i * n + j] = temp;
}
// Recomputes a @ b on the CPU and asserts every element of c matches.
// a is (l x m), b is (m x n), c is (l x n); aborts via assert on mismatch.
void verify_result(int *a, int *b, int *c, int l, int m, int n) {
    int temp = 0;
    for (int row = 0; row < l; row++) {
        for (int col = 0; col < n; col++) {
            temp = 0;
            for (int k = 0; k < m; k++)
                temp += a[row * m + k] * b[k * n + col]; //c[i][j] += a[i][k] * b[k][j]
            assert(c[row * n + col] == temp);
        }
    }
    printf("result[%d] = %d == %d = temp \n", l * n - 1, c[l * n - 1], temp);
}
int main() {
    //a of size l * m and b of size m * n
    //a @ b is of size l * n
    const int L = 1 << 9;  //512
    const int M = 1 << 8;  //256
    const int N = 1 << 10; //1024
    const size_t bytes_a = sizeof(int) * L * M;
    const size_t bytes_b = sizeof(int) * M * N;
    const size_t bytes_c = sizeof(int) * L * N;
    // Host matrices, a and b filled with random data.
    int *a = new int[L * M];
    int *b = new int[M * N];
    int *c = new int[L * N];
    init_matrix(a, L * M);
    init_matrix(b, M * N);
    // Device matrices.
    int *d_a, *d_b, *d_c;
    cudaMalloc(&d_a, bytes_a);
    cudaMalloc(&d_b, bytes_b);
    cudaMalloc(&d_c, bytes_c);
    cudaMemcpy(d_a, a, bytes_a, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, bytes_b, cudaMemcpyHostToDevice);
    cudaMemcpy(d_c, c, bytes_c, cudaMemcpyHostToDevice);
    // 32x32 thread tiles covering the (N x L) output exactly.
    const int n_threads = 32;
    dim3 block_dim(n_threads, n_threads);
    dim3 grid_dim(N / n_threads, L / n_threads);
    matmul2d<<<grid_dim, block_dim>>>(d_a, d_b, d_c, M, N);
    // Blocking copy: also waits for the kernel to complete.
    cudaMemcpy(c, d_c, bytes_c, cudaMemcpyDeviceToHost);
    verify_result(a, b, c, L, M, N);
    std::cout << "COMPLETED SUCCESSFULLY! \n";
    delete[] a; delete[] b; delete[] c;
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    return 0;
}
|
2,114
|
#include <cuda_runtime.h>
#include <cufft.h>
// Unpacks packed 12-bit dual-channel ADC samples: every 3 raw bytes hold
// one 12-bit sample for channel A and one for channel B. Outputs are
// scaled to volts for a +/-400 mV full-scale range.
// Launch: >= num_bytes threads; only threads whose idx is a multiple of 3
// (one per 3-byte sample pair) do any work, the rest return immediately.
__global__ void unpack_data_dual_channel(const unsigned char* const raw_data, cufftReal* const chan_a, cufftReal* const chan_b, const size_t num_bytes) {
const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
// Make sure we are at the start of a new sample (every 3 bytes is a sample
// from each channel) and that we are not at the end of our data
if (idx % 3 != 0 || idx + 2 >= num_bytes) {
return;
}
// The data is stored in three bytes on the card
const unsigned char b0 = raw_data[idx + 0];
const unsigned char b1 = raw_data[idx + 1];
const unsigned char b2 = raw_data[idx + 2];
// The data from the card is stored in little endian:
// Data on card: AB CD EF
// Var name: b0 b1 b2
// Channel A: 0D AB
// Channel B: 0E FC
const unsigned int a_data = ((b1 & 0x0F) << 8) | b0;
const unsigned int b_data = ((b2 << 8) | (b1 & 0xF0)) >> 4;
// We also want to convert it to a float (a cufftReal type):
// map the 12-bit code [0, 4096) to [-1, 1) and scale by 0.4 V.
// TODO: Pass the value of 400 mV to this function
chan_a[(idx / 3)] = ((static_cast<float>(a_data) / 4096.f) - 0.5f) * 2.f * 0.400f; // 400 mV
chan_b[(idx / 3)] = ((static_cast<float>(b_data) / 4096.f) - 0.5f) * 2.f * 0.400f; // 400 mV
}
// Unpacks packed 12-bit single-channel ADC data: every 3 raw bytes hold
// TWO consecutive 12-bit samples of the same channel. Outputs are scaled
// to volts for a +/-400 mV full-scale range.
// Launch: >= num_bytes threads; only threads whose idx is a multiple of 3
// do work. `chan` must hold 2 * (num_bytes / 3) cufftReal samples.
// BUGFIX: each 3-byte group g = idx/3 produces two samples, so it must
// write chan[2g] and chan[2g+1]. The original wrote chan[g] and chan[g+1],
// so consecutive groups overwrote each other's output.
__global__ void unpack_data_single_channel(const unsigned char* const raw_data, cufftReal* const chan, const size_t num_bytes) {
    const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
    // Make sure we are at the start of a new sample (every 3 bytes is a sample
    // from each channel) and that we are not at the end of our data
    if (idx % 3 != 0 || idx + 2 >= num_bytes) {
        return;
    }
    // The data is stored in three bytes on the card
    const unsigned char b0 = raw_data[idx + 0];
    const unsigned char b1 = raw_data[idx + 1];
    const unsigned char b2 = raw_data[idx + 2];
    // The data from the card is stored in little endian:
    // Data on card: AB CD EF
    // Var name: b0 b1 b2
    // Sample 0: 0D AB
    // Sample 1: 0E FC
    const unsigned int a_data = ((b1 & 0x0F) << 8) | b0;
    const unsigned int b_data = ((b2 << 8) | (b1 & 0xF0)) >> 4;
    // We also want to convert it to a float (a cufftReal type)
    // TODO: Pass the value of 400 mV to this function
    const size_t out = 2 * (idx / 3);
    chan[out] = ((static_cast<float>(a_data) / 4096.f) - 0.5f) * 2.f * 0.400f; // 400 mV
    chan[out + 1] = ((static_cast<float>(b_data) / 4096.f) - 0.5f) * 2.f * 0.400f; // 400 mV
}
// Host launcher: one thread per raw byte (only every 3rd thread does
// work), grid sized by ceiling division, enqueued on the given stream.
void gpu_unpack_data_dual_channel(const unsigned char* const raw_data, cufftReal* const chan_a, cufftReal* const chan_b, const size_t num_bytes, const cudaStream_t stream) {
    constexpr const unsigned threads_per_block = 1024; // Max size afforded by the GTX Quadro 4000
    const unsigned num_blocks = static_cast<unsigned>((num_bytes + threads_per_block - 1) / threads_per_block);
    unpack_data_dual_channel<<<num_blocks, threads_per_block, 0, stream>>>(raw_data, chan_a, chan_b, num_bytes);
}
// Host launcher for the single-channel unpacker: one thread per raw byte,
// grid sized by ceiling division, enqueued on the given stream.
void gpu_unpack_data_single_channel(const unsigned char* const raw_data, cufftReal* const chan, const size_t num_bytes, const cudaStream_t stream) {
    constexpr const unsigned threads_per_block = 1024; // Max size afforded by the GTX Quadro 4000
    const unsigned num_blocks = static_cast<unsigned>((num_bytes + threads_per_block - 1) / threads_per_block);
    unpack_data_single_channel<<<num_blocks, threads_per_block, 0, stream>>>(raw_data, chan, num_bytes);
}
|
2,115
|
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <cuda.h>
//Code written by Alan Fleming
//CONSTANTS
#define MATRIXSIZE 2048
#define BLOCKSIZE 1024
// Serial reference histogram: counts occurrences of each value in input.
// Values must be valid indices into histogram, which the caller zeroes.
void cpuHistogram(int* input, int* histogram, int size) {
    for (int k = 0; k < size; k++)
        ++histogram[input[k]];
}
// Global-memory histogram: grid-stride loop over the input with one
// atomicAdd per element. Input values must be valid histogram indices.
__global__ void histogram(int* input, int* histogram, int size) {
    const int stride = blockDim.x * gridDim.x;
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < size; i += stride) {
        atomicAdd(&histogram[input[i]], 1);
    }
}
//currently does not work for block sizes smaller than 256
// Shared-memory histogram: each block accumulates a private 256-bin
// histogram in shared memory, then merges it into the global histogram
// with one atomicAdd per bin. Input values must lie in [0, 256).
__global__ void sharedHistogram(int* input, int* histogram, int size) {
//initialize shared memory for the block (shared memory is uninitialized)
__shared__ int privateHistogram[256];
if(threadIdx.x < 256) privateHistogram[threadIdx.x] = 0;
__syncthreads();
//get starting index for thread
int i = threadIdx.x + blockIdx.x * blockDim.x;
//calculate stride
int stride = blockDim.x * gridDim.x;
//preform histogram calculation (grid-stride loop, contention limited to
//this block's shared bins rather than global memory)
while( i < size) {
atomicAdd( &(privateHistogram[input[i]]), 1);
i += stride;
}
//ensure all threads have finished their additions
__syncthreads();
//add private histogram to public histogram (one atomic per bin per block)
if(threadIdx.x < 256) {
atomicAdd( &(histogram[threadIdx.x]), privateHistogram[threadIdx.x]);
}
}
// Benchmarks CPU vs. GPU (global-memory and shared-memory) 256-bin
// histogram implementations and verifies the GPU results against the CPU.
// Fix: the "WITH SHARED MEMORY" run previously launched the global-memory
// `histogram` kernel again, so sharedHistogram was never exercised.
int main() {
    int *input = (int *)malloc(sizeof(int) * MATRIXSIZE); //allocate space for array
    int *cpuResult = (int *)malloc(sizeof(int) * 256); //allocate space for cpu output array
    int *basicGPUResult = (int *)malloc(sizeof(int) * 256); //allocate space for gpu output array using global memory
    int *sharedGPUResult = (int *)malloc(sizeof(int) * 256); //allocate space for gpu output array using shared memory
    //initialize the input array with a deterministic Lehmer-style sequence
    int init = 1325;
    for(int i=0; i < MATRIXSIZE; i++){
        init = 3125 * init % 65537;
        input[i] = init % 256;
    }
    //clear the output arrays to ensure proper adding
    for(int i = 0; i < 256; i++) {
        cpuResult[i] = 0;
        basicGPUResult[i] = 0;
        sharedGPUResult[i] = 0;
    }
    //Test CPU: time the reference histogram
    clock_t t1 = clock();
    cpuHistogram(input, cpuResult, MATRIXSIZE);
    clock_t t2 = clock();
    float cpuTime = (float(t2-t1)/CLOCKS_PER_SEC*1000);
    //Allocate device memory
    int *dev_input, *dev_basicGPUResult, *dev_sharedGPUResult;
    cudaMalloc((void **)(&dev_input), MATRIXSIZE *sizeof(int));
    cudaMalloc((void **)(&dev_basicGPUResult), 256 *sizeof(int));
    cudaMalloc((void **)(&dev_sharedGPUResult), 256 *sizeof(int));
    //copy input and zeroed result arrays to the device
    cudaMemcpy(dev_input, input, MATRIXSIZE * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_basicGPUResult, basicGPUResult, 256 * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_sharedGPUResult, sharedGPUResult, 256 * sizeof(int), cudaMemcpyHostToDevice);
    //launch configuration: 1D grid covering MATRIXSIZE elements
    dim3 dimBlock(BLOCKSIZE);
    dim3 dimGrid(ceil(double(MATRIXSIZE)/dimBlock.x));
    //~~WITHOUT SHARED MEMORY~~
    cudaEvent_t start,stop;
    float basicGPUTime;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    //calculate histogram without shared memory
    histogram<<<dimGrid, dimBlock>>>(dev_input, dev_basicGPUResult, MATRIXSIZE);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&basicGPUTime,start,stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    //copy result from gpu
    cudaMemcpy(basicGPUResult, dev_basicGPUResult, 256 * sizeof(int), cudaMemcpyDeviceToHost);
    printf("--WITHOUT SHARED MEMORY--\nCPU Runtime: %f\nGpu Runtime: %f\nSpeedup: %f\n", (double)cpuTime, (double)basicGPUTime, double(cpuTime / basicGPUTime));
    //verify against the CPU reference
    bool valid = true;
    for(int i = 0; i < 256; i++) {
        if(cpuResult[i] != basicGPUResult[i]) {
            valid = false;
            break;
        }
    }
    if(valid) {
        printf("TEST PASSED\n");
    } else {
        printf("TEST FAILED\n");
    }
    //~~WITH SHARED MEMORY~~
    float sharedGPUTime;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    //BUG FIX: must launch sharedHistogram here (the original re-launched
    //the global-memory `histogram` kernel by mistake)
    sharedHistogram<<<dimGrid, dimBlock>>>(dev_input, dev_sharedGPUResult, MATRIXSIZE);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&sharedGPUTime,start,stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    //copy result from gpu
    cudaMemcpy(sharedGPUResult, dev_sharedGPUResult, 256 * sizeof(int), cudaMemcpyDeviceToHost);
    printf("--WITH SHARED MEMORY--\nCPU Runtime: %f\nGpu Runtime: %f\nSpeedup: %f\n", (double)cpuTime, (double)sharedGPUTime, double(cpuTime / sharedGPUTime));
    //verify against the CPU reference
    valid = true;
    for(int i = 0; i < 256; i++) {
        if(cpuResult[i] != sharedGPUResult[i]) {
            valid = false;
            break;
        }
    }
    if(valid) {
        printf("TEST PASSED\n");
    } else {
        printf("TEST FAILED\n");
    }
    //free up memory before returning
    free(input);
    free(cpuResult);
    free(basicGPUResult);
    free(sharedGPUResult);
    cudaFree(dev_input);
    cudaFree(dev_basicGPUResult);
    cudaFree(dev_sharedGPUResult);
    return 0;
}
|
2,116
|
#include <fstream>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
// Unrolls the C-channel image matA (H rows, channel-interleaved) into the
// column matrix matAc for convolution-as-GEMM. Grid-stride loop over
// n = L*M*C work items; each item copies one channel's KxK neighborhood
// around output position (m, l) into the matching column of matAc.
// NOTE(review): assumes K is odd (radiusF = (K-1)/2); in-bounds reads rely
// on the caller's L = H-(K-1), M = W-(K-1) sizing -- confirm boundary math.
__global__ void im2colOnDevice(unsigned int n, float* matAc, float* matA, int radiusF, int countF, int L, int M, int K, int C, int H)
{
    // grid-stride loop so any launch configuration covers all n items
    for (int idx = blockIdx.x*blockDim.x+threadIdx.x; idx < n; idx += blockDim.x*gridDim.x)
    {
        int m = (idx/C)/L;  // output row index
        int l = (idx/C)%L;  // output column index
        int r = idx%C;      // channel index
        if (m < M)
        {
            int w = m+radiusF;  // center column in the source image
            if (l < L)
            {
                int h = l+radiusF;  // center row in the source image
                // sweep the KxK window; (op, oq) are offsets in [-radiusF, radiusF]
                for (int q = 0, oq = -1*radiusF; oq <= radiusF; q++, oq++)
                {
                    for (int p = 0, op = -1*radiusF; op <= radiusF; p++, op++)
                    {
                        if (r < C)
                        {
                            matAc[(r+C*(p+K*q))+countF*(l+L*m)] = matA[r+C*((h+op)+H*(w+oq))];
                        }
                    }
                }
            }
        }
    }
}
// Naive dense GEMM: c = a * b with a (m x n), b (n x k), c (m x k), all
// row-major. One thread computes one element of c; threads outside the
// output bounds do nothing.
__global__ void gemm_gpu(double* a, double* b, double* c, int m, int n, int k)
{
    const int row = blockIdx.y*blockDim.y+threadIdx.y;
    const int col = blockIdx.x*blockDim.x+threadIdx.x;
    if (row >= m || col >= k)
        return;
    double acc = 0;
    // dot product of row of a with column of b
    for (int t = 0; t < n; ++t)
        acc += a[row*n+t] * b[t*k+col];
    c[row*k+col] = acc;
}
// Benchmarks the im2col kernel on a 1024x1024x4 random image: allocates
// host/device buffers, runs im2colOnDevice once, and reports the elapsed
// time measured with CUDA events.
int main(int argc, char const* argv[])
{
    int W = 1024;   // image width
    int H = 1024;   // image height
    int C = 4;      // channels
    int K = C;      // filter size (K x K); here tied to channel count
    int blockSize = 256;
    int gridSize = 0;   // 0 means "derive from the work-item count below"
    float time_gpu;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    int L = H-(K-1);            // output height (valid convolution)
    int M = W-(K-1);            // output width
    int KERNELS_NUM = L*M*C;    // total work items for im2col
    int countA = H*W*C;
    const size_t sizeA = countA*sizeof(float);
    int radiusF = (K-1)/2;      // filter radius (assumes odd K -- TODO confirm)
    int countF = K*K*C;         // elements per unrolled column
    int countLR = L*M;
    int countAc = countF*countLR;
    const size_t sizeAc = countAc*sizeof(float);
    float* matA = (float*)malloc(sizeA);
    // seed with wall-clock time; matrix entries are random digits 0..9
    srand((unsigned)time(0));
    for (int i = 0; i < countA; i++)
    {
        matA[i] = rand()%10;
    }
    float* devA, *devAc, *retAc;
    cudaMalloc((void**)&devA, sizeA);
    cudaMalloc((void**)&devAc, sizeAc);
    retAc = (float*)malloc(sizeAc);
    cudaMemcpy(devA, matA, sizeA, cudaMemcpyHostToDevice);
    // one thread per work item, rounded up to whole blocks
    if (gridSize == 0) gridSize = (KERNELS_NUM+blockSize-1)/blockSize;
    cudaEventRecord(start, 0);
    im2colOnDevice <<<gridSize, blockSize >>> (KERNELS_NUM, devAc, devA, radiusF, countF, L, M, K, C, H);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time_gpu, start, stop);
    cudaMemcpy(retAc, devAc, sizeAc, cudaMemcpyDeviceToHost);
    printf("共用时间%f ms", time_gpu);
    cudaFree(devA);
    cudaFree(devAc);
    free(matA);
    free(retAc);
    return 0;
}
|
2,117
|
#include "includes.h"
__global__ void ap_multiplication(float * values ,int * indeces,float* r ,float * p_sum ,int size)
{
int index = blockDim.x * blockIdx.x + threadIdx.x ;
p_sum[index] = 0;
__syncthreads() ;
if (index < size)
{
for (int i = 0 ; i<3 ; i++)
{
p_sum[index] += values[3*index + i] * r[indeces[3*index + i]] ;
}
__syncthreads() ;
}
}
|
2,118
|
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/* Auto-generated floating-point stress kernel ("do not modify" per the file
 * header): chains comparisons, arithmetic on extreme/subnormal literals and
 * transcendental calls on the inputs, then prints the final value of comp.
 * Divisions by zero and overflow are intentional -- the generated program
 * exists to probe float semantics, not to compute anything meaningful. */
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,int var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17) {
if (comp < var_1 * -0.0f) {
  if (comp == var_2 * var_3 + -0.0f + (var_4 / (-1.6899E-43f * -1.1486E-42f))) {
    float tmp_1 = +1.5608E-43f;
    comp += tmp_1 + (var_6 * +0.0f - +1.0744E-21f - +1.2833E-37f / (-1.3369E-35f * -1.4943E-43f));
    comp = var_7 * var_8 * var_9 / (+1.3722E-37f / +0.0f);
    for (int i=0; i < var_5; ++i) {
      comp = asinf(-1.9620E36f);
    }
    if (comp < -1.2650E35f + (var_10 - -1.3543E36f / var_11 + -1.7585E-43f)) {
      float tmp_2 = +0.0f - var_12 - -1.4642E-36f;
      float tmp_3 = +1.3939E-42f;
      comp += tmp_3 / tmp_2 - var_13 + sqrtf((-1.1327E-42f * atan2f(var_14 * coshf(-1.2476E-42f + -1.7998E35f / (var_15 - (-1.9949E-7f / (-1.2379E-36f / +1.6939E34f)))), -1.9986E-36f - var_16 * var_17 - +1.2399E34f + (-1.0323E13f - +1.9440E35f))));
    }
  }
}
printf("%.17g\n", comp);
}
// Allocates a 10-element float array with every slot set to v.
// The caller owns the returned buffer and must free() it.
float* initPointer(float v) {
  const int count = 10;
  float *buffer = (float*) malloc(sizeof(float)*count);
  int i = 0;
  while (i < count) {
    buffer[i] = v;
    ++i;
  }
  return buffer;
}
// Auto-generated driver: parses 18 positional command-line arguments
// (17 floats and one int at position 6) and launches the single-thread
// `compute` kernel with them.
// NOTE(review): argc is never validated -- fewer than 18 arguments will
// dereference missing argv entries (generated code, left as-is).
int main(int argc, char** argv) {
/* Program variables */
  float tmp_1 = atof(argv[1]);
  float tmp_2 = atof(argv[2]);
  float tmp_3 = atof(argv[3]);
  float tmp_4 = atof(argv[4]);
  float tmp_5 = atof(argv[5]);
  int tmp_6 = atoi(argv[6]);
  float tmp_7 = atof(argv[7]);
  float tmp_8 = atof(argv[8]);
  float tmp_9 = atof(argv[9]);
  float tmp_10 = atof(argv[10]);
  float tmp_11 = atof(argv[11]);
  float tmp_12 = atof(argv[12]);
  float tmp_13 = atof(argv[13]);
  float tmp_14 = atof(argv[14]);
  float tmp_15 = atof(argv[15]);
  float tmp_16 = atof(argv[16]);
  float tmp_17 = atof(argv[17]);
  float tmp_18 = atof(argv[18]);
  compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18);
  // wait for the kernel (and its printf) to finish before exiting
  cudaDeviceSynchronize();
  return 0;
}
|
2,119
|
#include "includes.h"
/*
* G2S
* Copyright (C) 2018, Mathieu Gravey (gravey.mathieu@gmail.com) and UNIL (University of Lausanne)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
//To use only for debugging purpose
#define PARTIAL_FFT
#ifndef FFTW_PLAN_OPTION
//FFTW_PATIENT
#define FFTW_PLAN_OPTION FFTW_ESTIMATE
#endif
// #if __cilk
// #define fillVectorized(name, begin, amount, value) name[begin:amount]=value;
// #else
#define fillVectorized(name, begin, amount, value) std::fill(name+begin,name+begin+amount,value);
// #endif
// Scatters conditioning data into the simulation grid: for each of the
// `size` conditioning points, writes variable `var` of point j (from the
// interleaved listValueAtIndex table, nbVar values per point) into
// realSpaceArray at that point's linear index. One thread per point.
__global__ void setConditionement(unsigned size, unsigned* listIndex, float* listValueAtIndex, float* realSpaceArray, unsigned nbVar, unsigned var){
	int j = blockIdx.x * blockDim.x + threadIdx.x;
	if (j >= size)
		return;
	realSpaceArray[listIndex[j]] = listValueAtIndex[j*nbVar + var];
}
|
2,120
|
#include "includes.h"
// Looks up the contiguous [start, end) range of bucket `key` in a table
// stored as cumulative offsets: bucket_start[b] is the END offset of
// bucket b, so bucket b occupies [bucket_start[b-1], bucket_start[b])
// and bucket 0 starts at 0. num_bucket is accepted but unused here.
// NOTE(review): key is used directly as the bucket id -- confirm callers
// guarantee 0 <= key < num_bucket.
__device__ Query query_table(const int num_bucket, const int *bucket_start, const int key){
const unsigned int bucket_id = key;
const unsigned int list_start = (bucket_id > 0 ? bucket_start[bucket_id - 1] : 0);
const unsigned int next_list_start = bucket_start[bucket_id];
Query query(list_start, next_list_start);
return query;
}
// Kernel wrapper around query_table; the result is discarded, so this
// presumably exists as a compilation/smoke test for the device lookup --
// confirm intended use at the launch site.
__global__ void queryDevice(const int num_bucket, const int *bucket_start, const int key){
Query queryresult = query_table(num_bucket, bucket_start, key);
}
|
2,121
|
#include "includes.h"
// Square matrix multiply: d_P = d_M * d_N for width x width row-major
// matrices. Each thread produces exactly one element of d_P; threads that
// fall outside the matrix do nothing.
__global__ void matrixMulKernel(float* d_M, float* d_N, float* d_P, int width){
    const int row = blockDim.y * blockIdx.y + threadIdx.y;
    const int col = blockDim.x * blockIdx.x + threadIdx.x;
    // guard: grids may overhang the matrix edges
    if (row >= width || col >= width)
        return;
    // dot product of row of d_M with column of d_N
    float acc = 0.0f;
    for (int k = 0; k < width; ++k)
        acc += d_M[row * width + k] * d_N[k * width + col];
    d_P[row * width + col] = acc;
}
|
2,122
|
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <string.h>
#include <limits.h>
#include <stdbool.h>
#define MAX_EDGE 100000000
// One BFS frontier-expansion step over a CSR graph (off = row offsets,
// edge = adjacency targets). Each thread takes one node from the current
// frontier, stamps every unvisited neighbor (dist < 0) with dist[node]+1,
// and appends it to the next frontier c_arr via the atomic cursor c_size.
__global__ void BFS(int* off,int* edge,int* current,int* size,int N,int E,int* c_arr,int* c_size,int* dist){
    const int tid = blockIdx.x*blockDim.x+threadIdx.x;
    if (tid >= *size)
        return;
    const int node = current[tid];
    const int first = off[node];
    // the last row's adjacency list runs to the end of the edge array
    const int last = (node == N-1) ? E : off[node+1];
    for (int e = first; e < last; ++e) {
        const int child = edge[e];
        if (dist[child] < 0) {   // -1 marks "not yet visited"
            dist[child] = dist[node] + 1;
            const int slot = atomicAdd(c_size, 1);
            c_arr[slot] = child;
        }
    }
}
// Level-synchronous GPU BFS driver: reads a CSR graph from graph.txt,
// ping-pongs two frontier buffers between kernel launches until the
// frontier is empty, then reports the level of every node (Hx.txt),
// timing, unreachable count and the farthest node.
int main(){
// set start node same as destination
int startNode;
scanf("%d",&startNode);
FILE* fgraph = fopen("graph.txt","r");
int N,edge_size;
fscanf(fgraph,"%d %d\n",&N,&edge_size);
int* H_offset = (int*)malloc(sizeof(int)*N);
int* H_edges = (int*)malloc(sizeof(int)*edge_size);
// CSR adjacency targets first, then the N row offsets
for(int i=0;i<edge_size;i++){
fscanf(fgraph,"%d",&H_edges[i]);
}
for(int i=0;i<N;i++){
fscanf(fgraph,"%d",&H_offset[i]);
}
fclose(fgraph);
printf("completed input\n");
// frontier buffer sized for the worst case (every edge enqueues a node)
int* H_current_node = (int*)malloc(sizeof(int)*edge_size);
H_current_node[0]=startNode;
int* H_c_size = (int*)malloc(sizeof(int));
*H_c_size = 1;
// H_visited doubles as the distance array; -1 means unvisited
int* H_visited = (int*)malloc(sizeof(int)*N);
memset(H_visited,-1,sizeof(int)*N);
H_visited[startNode]=0;
// host-side constants 0 and 1 used to reset/seed the device counters
int* a0 = (int*)malloc(sizeof(int));
*a0=0;
int* a1 = (int*)malloc(sizeof(int));
*a1=1;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
int* D_offset;
int* D_edges;
int* D_visited;
int* D_current_node1;
int* D_c_size1;
int* D_current_node2;
int* D_c_size2;
cudaMalloc(&D_offset,sizeof(int)*N);
cudaMalloc(&D_visited,sizeof(int)*N);
cudaMalloc(&D_edges,sizeof(int)*edge_size);
cudaMalloc(&D_current_node1,sizeof(int)*edge_size);
cudaMalloc(&D_c_size1,sizeof(int));
cudaMalloc(&D_current_node2,sizeof(int)*edge_size);
cudaMalloc(&D_c_size2,sizeof(int));
cudaMemcpy(D_offset,H_offset,sizeof(int)*N,cudaMemcpyHostToDevice);
cudaMemcpy(D_edges,H_edges,sizeof(int)*edge_size,cudaMemcpyHostToDevice);
cudaMemcpy(D_current_node1,H_current_node,sizeof(int)*edge_size,cudaMemcpyHostToDevice);
cudaMemcpy(D_visited,H_visited,sizeof(int)*N,cudaMemcpyHostToDevice);
cudaMemcpy(D_c_size1,a1,sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(D_c_size2,a0,sizeof(int),cudaMemcpyHostToDevice);
int i=1;
cudaEventRecord(start);
// one iteration per BFS level; alternate which buffer is the frontier
while(*H_c_size>0){
int numThreads = 512;
int numBlocks = (*H_c_size+numThreads-1)/numThreads;
if(i%2==1){
//use array 1 as the frontier, array 2 collects the next level
BFS<<<numBlocks,numThreads>>>(D_offset,D_edges,D_current_node1,D_c_size1,N,edge_size,D_current_node2,D_c_size2,D_visited);
cudaMemcpy(H_c_size,D_c_size2, sizeof(int),cudaMemcpyDeviceToHost);
// reset the index
cudaMemcpy(D_c_size1,a0,sizeof(int),cudaMemcpyHostToDevice);
}
else{
//use array 2 as the frontier, array 1 collects the next level
BFS<<<numBlocks,numThreads>>>(D_offset,D_edges,D_current_node2,D_c_size2,N,edge_size,D_current_node1,D_c_size1,D_visited);
cudaMemcpy(H_c_size,D_c_size1, sizeof(int),cudaMemcpyDeviceToHost);
//reset index
cudaMemcpy(D_c_size2,a0,sizeof(int),cudaMemcpyHostToDevice);
}
i++;
}
cudaEventRecord(stop);
cudaMemcpy(H_visited,D_visited, sizeof(int)*N,cudaMemcpyDeviceToHost);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
// deepest BFS level reached
int max_level = 0;
for(int i=0;i<N;i++){
if(H_visited[i]>max_level){
max_level = H_visited[i];
}
// printf("%d, %d\n",i,H_visited[i]);
}
printf("max-level: %d\n",max_level);
printf("time: %f\n",milliseconds);
// dump per-node levels and summary statistics
FILE* fptr = fopen("Hx.txt","w");
int count_unreachable = 0;
int farthest_val = 0;
int farthest_node = 0;
for(int i=0;i<N;i++){
if(H_visited[i]==-1)
count_unreachable++;
if(H_visited[i]>=farthest_val){
farthest_val = H_visited[i];
farthest_node = i;
}
fprintf(fptr,"%d ",H_visited[i]);
}
fclose(fptr);
printf("unreachable %d , %f\n",count_unreachable,count_unreachable/(N*1.0));
printf("farthest node: %d val: %d\n",farthest_node,farthest_val);
return 0;
}
|
2,123
|
// Scott Gordon and Steven Kundert
// CMPS 5433 - Colmenares
// Two Loops Project - Collatz Conjecture Verification
// Sequential implementation
#include <stdio.h> //standard IO
#include <stdint.h> //limits file
FILE *f = fopen("VerifyConjecture.txt", "w");//file for writing output
static const int NUM = 1024;
// Fills a[i] (for 2 <= i < size) with the Collatz total stopping time of i,
// where the fused odd step n -> (3n+1)/2 counts as TWO steps (one multiply-
// add plus the guaranteed halving). a[0] and a[1] are left untouched.
void hailStoneArray(long * a, long size)
{
    for (long value = 2; value < size; value++)
    {
        unsigned long long current = value;  // intermediate values can overflow long
        long steps = 0;
        while (current != 1)
        {
            if (current % 2 == 0)
            {
                current /= 2;                     // even: halve, one step
                steps += 1;
            }
            else
            {
                current = (3 * current + 1) / 2;  // odd: 3n+1 is even, so halve immediately
                steps += 2;                       // counts as two Collatz steps
            }
        }
        a[value] = steps;
    }
}
int main() //our main
{
const long Asize = (sizeof(long) *NUM); //size of array (also largest N)
long * a_h; //pointer for array
long * a_d;
a_h = (long *)malloc(Asize);//allocate array with zeros, make it correct size
hailStoneArray(a_h, NUM); //call our function
for (int x = 2; x < NUM; x++) //loop for output
{
fprintf(f,"It takes %d iterations for %d to reach 1 using the Collatz Conjecture\n", a_h[x], x);
} //print output
free(a_h); //free resources
return 0; //return
}
|
2,124
|
//===- kernels.cu ---------------------------------------------*--- C++ -*-===//
//
// Copyright 2022 ByteDance Ltd. and/or its affiliates. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//===----------------------------------------------------------------------===//
namespace brt {
namespace cuda {
namespace external_kernels {
// Elementwise vector addition: output[i] = input_1[i] + input_2[i] for
// every i in [0, n). One thread per element; surplus threads fall through.
template <typename T>
__global__ void add_kernel(const T *input_1, const T *input_2, T *output,
                           int n) {
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n)
    return;
  output[i] = input_1[i] + input_2[i];
}
// explicit instantiations exported by this translation unit
template __global__ void add_kernel<float>(const float *, const float *,
                                           float *, int);
template __global__ void add_kernel<int>(const int *, const int *, int *, int);
} // namespace external_kernels
} // namespace cuda
} // namespace brt
|
2,125
|
#include <cuda.h>
#include <cuda_runtime.h>
// Device helper: stores the sum of the two pointed-to scalars into *output.
__device__ void add(const float* a, const float* b, float* output) {
    const float sum = a[0] + b[0];
    output[0] = sum;
}
|
2,126
|
#include <cuda_runtime.h>
#include <stdio.h>
#include <iostream>
using namespace std;
// Deliberately faulting kernel for the error-handling demo below: exactly
// one thread (global index 999) stores through ptr, which the caller
// passes as null, so the fault surfaces asynchronously via the CUDA
// error APIs rather than at launch time.
__global__ void func(float* ptr){
    const int globalIndex = blockIdx.x * blockDim.x + threadIdx.x;
    if(globalIndex == 999){
        ptr[999] = 5;
    }
}
// Demonstrates how CUDA reports asynchronous kernel faults: launch a
// kernel that writes through a null pointer, then observe the error at
// each stage (peek, synchronize, subsequent API call).
int main(){
    float* ptr = nullptr;
    // Kernel launches are asynchronous, so the fault is not detected
    // immediately at launch time
    func<<<100, 10>>>(ptr);
    //func<<<100, 1050>>>(ptr);
    auto code1 = cudaPeekAtLastError();
    cout << cudaGetErrorString(code1) << endl;
    // Synchronize with the device and wait for the kernel to finish;
    // this is where the asynchronous execution error becomes visible
    auto code2 = cudaDeviceSynchronize();
    cout << cudaGetErrorString(code2) << endl;
    // The error is sticky: every subsequent CUDA call will keep failing
    float* new_ptr = nullptr;
    auto code3 = cudaMalloc(&new_ptr, 100);
    cout << cudaGetErrorString(code3) << endl;
    return 0;
}
|
2,127
|
#include "includes.h"
__global__ void set_kernel(REAL* dst, REAL const value, std::size_t const count)
{
std::size_t const index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= count)
return;
dst[index] = value;
}
|
2,128
|
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/resource.h>
//134217728
// Wall-clock time in seconds (microsecond resolution) via gettimeofday.
double dwalltime(){
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec + (double)now.tv_usec / 1000000.0;
}
// Benchmark kernel: accumulates d_matB into d_matA SIXTEEN times per
// element (d_matA[i] += 16 * d_matB[i] overall). The repetition is
// manually unrolled -- presumably to amplify arithmetic/memory work for
// the timing harness in main; confirm before "simplifying" to a loop.
__global__ void sumM_kernel_cuda(double *d_matA,double *d_matB, unsigned long n){
unsigned long int global_id = blockIdx.x * blockDim.x + threadIdx.x;
if (global_id < n)
d_matA[global_id] = d_matA[global_id] + d_matB[global_id];
if (global_id < n)
d_matA[global_id] = d_matA[global_id] + d_matB[global_id];
if (global_id < n)
d_matA[global_id] = d_matA[global_id] + d_matB[global_id];
if (global_id < n)
d_matA[global_id] = d_matA[global_id] + d_matB[global_id];
if (global_id < n)
d_matA[global_id] = d_matA[global_id] + d_matB[global_id];
if (global_id < n)
d_matA[global_id] = d_matA[global_id] + d_matB[global_id];
if (global_id < n)
d_matA[global_id] = d_matA[global_id] + d_matB[global_id];
if (global_id < n)
d_matA[global_id] = d_matA[global_id] + d_matB[global_id];
if (global_id < n)
d_matA[global_id] = d_matA[global_id] + d_matB[global_id];
if (global_id < n)
d_matA[global_id] = d_matA[global_id] + d_matB[global_id];
if (global_id < n)
d_matA[global_id] = d_matA[global_id] + d_matB[global_id];
if (global_id < n)
d_matA[global_id] = d_matA[global_id] + d_matB[global_id];
if (global_id < n)
d_matA[global_id] = d_matA[global_id] + d_matB[global_id];
if (global_id < n)
d_matA[global_id] = d_matA[global_id] + d_matB[global_id];
if (global_id < n)
d_matA[global_id] = d_matA[global_id] + d_matB[global_id];
if (global_id < n)
d_matA[global_id] = d_matA[global_id] + d_matB[global_id];
}
void checkparams(unsigned long *n, unsigned int *cb);
// Entry point: allocates two N*N matrices, copies them to the device and
// times one launch of the (16x unrolled) element-wise addition kernel.
// Usage: ./prog N
// Fix vs. original: cudaThreadSynchronize() is deprecated; use
// cudaDeviceSynchronize() to wait for the kernel before reading the timer.
int main(int argc, char *argv[]){
    if (argc != 2){
        printf("Falta argumento: N\n");
        return 0;
    }
    unsigned long N = atoi (argv[1]),tam_tot = N*N;
    unsigned int CUDA_BLK = 16;                     // threads per block (may be clamped)
    unsigned long numBytes = sizeof(double)*tam_tot;
    checkparams(&tam_tot,&CUDA_BLK);                // clamp sizes to device limits
    double *matA,*matB,*d_matA,*d_matB,timetick;
    unsigned int i;
    matA = (double *)malloc(numBytes);
    matB = (double *)malloc(numBytes);
    for (i = 0; i < tam_tot; i++){
        matA[i] = i;
        matB[i] = i;
    }
    cudaMalloc((void **) &d_matA, numBytes);
    cudaMalloc((void **) &d_matB, numBytes);
    cudaMemcpy(d_matA, matA, numBytes, cudaMemcpyHostToDevice); // CPU -> GPU
    cudaMemcpy(d_matB, matB, numBytes, cudaMemcpyHostToDevice); // CPU -> GPU
    // One-dimensional block of CUDA_BLK threads
    dim3 dimBlock(CUDA_BLK);
    // One-dimensional grid of ceil(n / block) blocks
    dim3 dimGrid((tam_tot + dimBlock.x - 1) / dimBlock.x);
    timetick = dwalltime();
    sumM_kernel_cuda<<<dimGrid, dimBlock>>>(d_matA, d_matB, tam_tot);
    cudaDeviceSynchronize();    // wait for the kernel so the timing is valid
    printf("Tiempo para sumar las matrices: %f\n",dwalltime() - timetick);
    cudaMemcpy(matA, d_matA, numBytes, cudaMemcpyDeviceToHost); // GPU -> CPU
    cudaFree(d_matA);
    cudaFree(d_matB);
    free(matA);
    free(matB);
    return 0;
}
// Clamps the requested total thread count (*n) and block size (*cb) to
// the limits reported by device 0, printing a notice whenever a value is
// adjusted. (User-visible messages are kept in Spanish as in the rest of
// this program's output.)
void checkparams(unsigned long *n, unsigned int *cb){
struct cudaDeviceProp capabilities;
// If there are fewer total threads than the block size, shrink the block
if (*cb > *n)
*cb = *n;
cudaGetDeviceProperties (&capabilities, 0);
// clamp block size to the device's per-block thread limit
if (*cb > capabilities.maxThreadsDim[0]) {
*cb = capabilities.maxThreadsDim[0];
printf("->Núm. hilos/bloq cambiado a %d (máx por bloque para dev)\n\n",
*cb);
}
// if the resulting grid is too large, grow the block (then re-clamp)
if (((*n + *cb - 1) / *cb) > capabilities.maxGridSize[0]) {
*cb = 2 * (*n - 1) / (capabilities.maxGridSize[0] - 1);
if (*cb > capabilities.maxThreadsDim[0]) {
*cb = capabilities.maxThreadsDim[0];
printf("->Núm. hilos/bloq cambiado a %d (máx por bloque para dev)\n",
*cb);
// as a last resort shrink the total thread count to fit the grid
if (*n > (capabilities.maxGridSize[0] * *cb)) {
*n = capabilities.maxGridSize[0] * *cb;
printf("->Núm. total de hilos cambiado a %lu (máx por grid para \
dev)\n\n", *n);
} else {
printf("\n");
}
} else {
printf("->Núm. hilos/bloq cambiado a %d (%d máx. bloq/grid para \
dev)\n\n",
*cb, capabilities.maxGridSize[0]);
}
}
}
|
2,129
|
#include "cuda_runtime.h"
#include <iostream>
#include <fstream>
#include <array>
#include <chrono>
#include <random>
#include <vector>
#include <string>
#include <functional>
// Histograms `size` floats into `binsNumber` integer bins (the value
// truncated to int is the bin index) using a block-private shared-memory
// copy to reduce global atomic traffic.
// NOTE(review): the shared buffer is fixed at 2048 entries, so binsNumber
// must be <= 2048 and every input must truncate into [0, binsNumber) --
// confirm at the call sites (main uses exactly 2048 bins / values < 2048).
__global__ void main_histogram(const float* input, const long size, int* histogram, const int binsNumber) {
int indx = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
__shared__ int shared_bins[2048];
// cooperatively zero the private bins
for(int i = threadIdx.x; i < binsNumber; i += blockDim.x)
shared_bins[i] = 0;
__syncthreads();
// grid-stride accumulation into shared memory
for(int i = indx; i < size; i += stride)
atomicAdd(&shared_bins[ static_cast<int>(input[i]) ], 1);
__syncthreads();
// merge the block-private bins into the global histogram
for(int i = threadIdx.x; i < binsNumber; i += blockDim.x)
atomicAdd(&histogram[i], shared_bins[i]);
}
// Clamps each histogram bin to at most max_value (one thread per bin).
__global__ void saturation(int* histogram, const int binsNumber, const int max_value) {
    const int bin = blockDim.x * blockIdx.x + threadIdx.x;
    // write only when a clamp is actually needed, as in the original
    if (bin < binsNumber && histogram[bin] > max_value)
        histogram[bin] = max_value;
}
// Builds a histogram of `data` on the CPU (bin = value truncated to int),
// writes it to "CPU_histogram_<name>.txt" as "bin<TAB>count" lines, and
// returns the duration of the counting loop in nanoseconds.
long CPU_histogram(const float* data, const long size, const int binsNumber, std::string name) {
    std::vector<int> bins(binsNumber, 0);
    const auto t0 = std::chrono::high_resolution_clock::now();
    for(long i = 0; i < size; ++i)
        ++bins[static_cast<int>(data[i])];
    const auto t1 = std::chrono::high_resolution_clock::now();
    std::ofstream out("CPU_histogram_" + name + ".txt");
    for(int b = 0; b < binsNumber; ++b)
        out << b << "\t" << bins[b] << std::endl;
    out.close();
    return std::chrono::duration_cast<std::chrono::nanoseconds>(t1 - t0).count();
}
// GPU histogram of managed-memory `data`: prefetches input and a zeroed
// managed bin array to device `ID`, times one launch of main_histogram
// (32 blocks per SM, 256 threads each), dumps the bins to
// "GPU_histogram_<name>_.txt" and returns the kernel time in nanoseconds.
long GPU_histogram(const int SM, const int ID, const float* data, const long size, const int binsNumber, std::string name) {
// move the input pages to the device ahead of the launch
cudaMemPrefetchAsync(data, size * sizeof(float), ID);
int* device_histogram;
cudaMallocManaged(&device_histogram, binsNumber * sizeof(int));
// zero on the host BEFORE prefetching the bins to the device
for(int i = 0; i < binsNumber; i++)
device_histogram[i] = 0;
cudaMemPrefetchAsync(device_histogram, binsNumber * sizeof(int), ID);
auto start = std::chrono::high_resolution_clock::now();
main_histogram<<<SM * 32, 256>>>(data, size, device_histogram, binsNumber);
cudaDeviceSynchronize();  // kernel must finish before stopping the clock
auto stop = std::chrono::high_resolution_clock::now();
std::ofstream save;
save.open("GPU_histogram_" + name + "_.txt");
for(int i = 0; i < binsNumber; i++)
save << i << "\t" << device_histogram[i] << std::endl;
save.close();
cudaFree(device_histogram);
return std::chrono::duration_cast<std::chrono::nanoseconds>(stop-start).count();
}
//////////////////////////////////////////////////////////////////////////////////////////////
// Generates 1e9 uniform floats in [0, 2048) in managed memory, histograms
// them on CPU and GPU, and writes both timings to time.txt. (The normal-
// distribution variant is left commented out.)
int main() {
std::random_device rd;
std::mt19937 generator{ rd() };
std::uniform_real_distribution<float> uniform_distribution{ 0.0, 2048.0 };
float* data;
const long size = 10e8;        // 1e9 samples (~4 GB of managed memory)
const int binsNumber = 2048;
cudaMallocManaged(&data, size * sizeof(float));
int ID;
int SM;
cudaGetDevice(&ID);
// number of multiprocessors drives the GPU launch size
cudaDeviceGetAttribute(&SM, cudaDevAttrMultiProcessorCount, ID);
for(long i = 0; i < size; i++)
data[i] = uniform_distribution(generator);
long CPU_time = CPU_histogram(data, size, binsNumber, "uniform");
long GPU_time = GPU_histogram(SM, ID, data, size, binsNumber, "uniform");
std::ofstream save;
save.open("time.txt");
save << CPU_time << std::endl << GPU_time;
/*
std::normal_distribution<float> normal_distribution{ 1024.0, 256.0 };
for(long i = 0; i < size; i++)
data[i] = normal_distribution(generator);
//CPU_time = CPU_histogram(data, size, binsNumber, "normal");
GPU_time = GPU_histogram(SM, ID, data, size, binsNumber, "normal");
*/
// NOTE(review): with the block above commented out, this re-writes the
// "uniform" timings under the "normal" heading.
save << std::endl << std::endl << "normal" << std::endl << CPU_time << std::endl << GPU_time;
save.close();
cudaFree(data);
return 0;
}
|
2,130
|
/*
* http://code.google.com/p/thrust/wiki/QuickStartGuide#Introduction
* install thrust library by just unziping it's content to
* /usr/local/cuda/include/
* compile using : nvcc version.cu -o version
* WARNING: requires cuda 3.2 or greater
* nvcc version.cu -o version -I /path/to/thrust
*/
#include <thrust/version.h>
#include <iostream>
// Prints the installed Thrust library version as "Thrust vMAJOR . MINOR".
int main(void) {
  const int major = THRUST_MAJOR_VERSION;
  const int minor = THRUST_MINOR_VERSION;
  std::cout << "Thrust v" << major <<" . "<< minor << std::endl;
  return 0;
}
|
2,131
|
#include <stdio.h>
#include <signal.h>
// There are ways to get this data but I'm too lazy
#define CUDA_CORES 384
//#define N 7
#define N 606
#define ITERATIONS 864000
// http://www.wolframalpha.com/input/?i=gravitational+constant+in+km%5E3%2F%28Yg+*+s%5E2%29
#define GRAVITATIONAL_CONSTANT 66.7 // km^3 / (Yg * s^2)
//#define GRAVITATIONAL_CONSTANT 240300.0 // km^3 / (Yg * min^2)
#define TIME_STEP 0.1
#define SAVE_STEP 50
volatile sig_atomic_t kill_flag = 0; // if the program gets killed, flag for the main loop
// SIGINT handler: records the kill request so the main loop can exit
// cleanly and save state. Only writes a sig_atomic_t, which is safe to
// touch from a signal handler.
void set_kill_flag(int sig){ // can be called asynchronously
kill_flag = 1; // set flag
}
// Fills a[0..num-1] with pseudo-random ints from rand(); seed with srand()
// beforehand for reproducible sequences.
void random_ints(int* a, int num) {
  for (int i = 0; i < num; ++i)
    a[i] = rand();
}
// Fills a[0..num-1] with uniform doubles in [0, multiplier], driven by
// rand(); seed with srand() beforehand for reproducibility.
void random_doubles(double* a, int num, double multiplier) {
  for (int i = 0; i < num; ++i) {
    double unit = (double)rand() / (double)RAND_MAX;  // uniform in [0, 1]
    a[i] = unit * multiplier;
  }
}
// Fills a[0..num-1] with random double4s: x/y/z uniform in
// [-m/2, +m/2] scaled by m0/m1/m2 respectively, and w (mass) uniform in
// [0, m3]. Seed with srand() beforehand for reproducibility.
void random_double4s(double4* a, int num, double m0, double m1, double m2, double m3) {
  for (int i = 0; i < num; ++i) {
    const double u0 = (double)rand() / (double)RAND_MAX;
    const double u1 = (double)rand() / (double)RAND_MAX;
    const double u2 = (double)rand() / (double)RAND_MAX;
    const double u3 = (double)rand() / (double)RAND_MAX;
    a[i].x = (u0 - 0.5) * m0;
    a[i].y = (u1 - 0.5) * m1;
    a[i].z = (u2 - 0.5) * m2;
    a[i].w = u3 * m3;
  }
}
// Reads num_particles comma-separated records (mass, x, y, z, vx, vy, vz)
// from "input.csv" into position (w carries the mass) and velocity arrays,
// echoing each record to stdout.
// NOTE(review): fscanf results are unchecked and the stream is still used
// after the NULL warning -- a missing input.csv will crash here.
void load_initial_data(double4 *in_pos, double4 *in_vel, int num_particles) {
FILE *ifp;
char *mode = "r";
ifp = fopen("input.csv", mode);
double w, x, y, z, xv, yv, zv;
if(ifp == NULL) fprintf(stderr, "OH NO! No file!\n");
for(int i = 0; i < num_particles; i++) {
fscanf(ifp, "%lf, %lf, %lf, %lf, %lf, %lf, %lf", &w, &x, &y, &z, &xv, &yv, &zv);
in_pos[i].w = w;   // mass rides in the position's w component
in_pos[i].x = x;
in_pos[i].y = y;
in_pos[i].z = z;
in_vel[i].w = 0.0; // velocity w is unused
in_vel[i].x = xv;
in_vel[i].y = yv;
in_vel[i].z = zv;
printf("%g, %g, %g, %g, %g, %g, %g\n", w, x, y,z, xv, yv, zv);
}
fclose(ifp);
}
// Writes all N bodies to `filename` as "mass,x,y,z,vx,vy,vz" lines in the
// same format load_initial_data reads, so a run can be resumed later.
void save_continue_csv(const char *filename, double4 *poss, double4 *vels) {
FILE *next_input = fopen(filename, "w");
for(int j = 0; j < N; j++)
fprintf(next_input, "%g,%g,%g,%g,%g,%g,%g\n", poss[j].w, poss[j].x, poss[j].y, poss[j].z, vels[j].x, vels[j].y, vels[j].z);
fclose(next_input);
printf("Saved.");
}
// Accumulates into `accel` the gravitational acceleration exerted on
// body_a by body_b (mass in body_b.w): a += G * m_b * r / |r|^3, with a
// constant softening term (+4e6) folded into the squared distance so
// close encounters stay finite.
__device__ double3 interaction(double4 body_a, double4 body_b, double3 accel) {
double3 r;
// displacement vector from a to b
r.x = body_b.x - body_a.x;
r.y = body_b.y - body_a.y;
r.z = body_b.z - body_a.z;
double dist_sq = r.x * r.x + r.y * r.y + r.z * r.z + 4e6;
//dist_sq += 4e6; // softening factor
double inv_dist = rsqrt(dist_sq);
double inv_dist_cube = inv_dist * inv_dist * inv_dist;
// scalar factor G * m_b / d^3 (units per GRAVITATIONAL_CONSTANT's comment)
double accel_total = GRAVITATIONAL_CONSTANT * body_b.w * inv_dist_cube;
accel.x += r.x * accel_total;
accel.y += r.y * accel_total;
accel.z += r.z * accel_total;
return accel;
}
// Accumulates interactions of body_a against every body the block has
// staged in dynamic shared memory (one tile of blockDim.x positions).
// NOTE(review): relies on the caller having filled shared_positions and
// synchronized first; appears superseded by the inlined loop in
// calculate_accel -- confirm it is still referenced.
__device__ double3 tile_calculation(double4 body_a, double3 accel) {
int i;
extern __shared__ double4 shared_positions[];
//__shared__ double4 shared_positions[N];
//double4 *shared_positions = SharedMemory();
#pragma unroll 128
for(i = 0; i < blockDim.x; i++) {
accel = interaction(body_a, shared_positions[i], accel);
}
return accel;
}
// Computes the total acceleration on this thread's body by streaming all
// bodies through shared memory in tiles: each iteration stages blockDim.x
// positions, then every thread accumulates pairwise interactions against
// the tile. Returns the acceleration as a double4 (w = 0).
// NOTE(review): neither gtid nor idx is clamped to num_particles; assumes
// the launch exactly covers the body count (main uses one block of
// num_particles threads) -- confirm before changing the launch config.
__device__ double4 calculate_accel(double4 *positions, int num_tiles, int num_particles) {
extern __shared__ double4 shared_positions[];
double4 cur_body; // current block's body
int tile;
double3 accel = {0.0, 0.0, 0.0};
int gtid = blockIdx.x * blockDim.x + threadIdx.x;
cur_body = positions[gtid];
for(tile = 0; tile < num_tiles; tile++) {
int idx = tile * blockDim.x + threadIdx.x;
shared_positions[threadIdx.x] = positions[idx];
__syncthreads(); // tile fully staged before anyone reads it
#pragma unroll 128
for(int counter = 0; counter < blockDim.x; counter++) {
accel = interaction(cur_body, shared_positions[counter], accel);
}
__syncthreads(); // everyone done reading before the next tile overwrites
}
double4 accel4 = {accel.x, accel.y, accel.z, 0.0};
return accel4;
}
// One explicit-Euler step: compute each body's acceleration over all
// tiles, then advance velocity and position by TIME_STEP.
// NOTE(review): threads that return early here would skip the
// __syncthreads() inside calculate_accel; safe only when num_particles is
// a multiple of the block size (main launches block_size == num_particles)
// -- confirm before changing the launch.
__global__ void integrate(double4 *positions, double4 *vels, int num_tiles, int num_particles) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if(index >= num_particles) {
return;
}
double4 position = positions[index];
double4 accel = calculate_accel(positions, num_tiles, num_particles);
double4 velocity = vels[index];
// v += a*dt, then x += v*dt (semi-implicit update order)
velocity.x += accel.x * TIME_STEP;
velocity.y += accel.y * TIME_STEP;
velocity.z += accel.z * TIME_STEP;
position.x += velocity.x * TIME_STEP;
position.y += velocity.y * TIME_STEP;
position.z += velocity.z * TIME_STEP;
// ensure all reads of positions[] are done before writing back
__syncthreads();
positions[index] = position;
vels[index] = velocity;
}
// N-body driver: loads bodies from input.csv, runs ITERATIONS Euler steps
// on the GPU, samples positions to output.csv every SAVE_STEP steps, and
// writes a resumable snapshot on exit (or on Ctrl-C via the SIGINT flag).
int main(int argc, char *argv[]) {
signal(SIGINT, set_kill_flag);
int num_particles = N;
// one block holding all bodies; tiles therefore collapse to a single pass
int block_size = num_particles;
int num_blocks = (num_particles + block_size-1) / block_size;
int num_tiles = (num_particles + block_size - 1) / block_size;
int shared_mem_size = block_size * 4 * sizeof(double); // 4 floats for pos
double4 *positions, *vels;
double4 *dev_positions, *dev_vels;
int size = N * sizeof(double4);
cudaMalloc((void**)&dev_positions, size);
cudaMalloc((void**)&dev_vels, size);
positions = (double4*)malloc(size);
vels = (double4*)malloc(size);
//int seed = time(NULL);
//srand(seed);
//random_double4s(positions, N, 6e8, 6e8, 6e3, 11.6 * 2.0);
//random_double4s(vels, N, 0.5e2, 0.5e2, 0.1, 0.0);
load_initial_data(positions, vels, N);
cudaMemcpy(dev_positions, positions, size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_vels, vels, size, cudaMemcpyHostToDevice);
FILE *fp = fopen("output.csv", "w");
for(int i = 0; i < ITERATIONS; i++) {
integrate<<<num_blocks, block_size, shared_mem_size>>>(dev_positions, dev_vels, num_tiles, num_particles);
// blocking copies also serve as the per-step synchronization
cudaMemcpy(positions, dev_positions, size, cudaMemcpyDeviceToHost);
cudaMemcpy(vels, dev_vels, size, cudaMemcpyDeviceToHost);
if(i % SAVE_STEP == 0) {
printf("%.2f\n", (double)i * 100.0 / (double)ITERATIONS);
for(int j = 0; j < N; j++)
fprintf(fp, "%g,%g,%g,%g,%g,%g\n", positions[j].x, positions[j].y, positions[j].z, vels[j].x, vels[j].y, vels[j].z);
}
if(kill_flag) {
break;  // SIGINT received: stop and save a recovery snapshot below
}
}
fclose(fp);
if(kill_flag) {
save_continue_csv("recovered-input.csv", positions, vels);
} else {
save_continue_csv("next-input.csv", positions, vels);
}
cudaFree(dev_positions);
cudaFree(dev_vels);
free(positions); free(vels);
return 0;
}
|
2,132
|
#include "includes.h"
/**
* Project TACO: Parallel ACO algorithm for TSP
* 15-418 Parallel Algorithms - Final Project
* Ivan Wang, Carl Lin
*/
#define MAX_THREADS 128
// Copies ant i's tour out of the per-ant results table into the dedicated
// best-path buffer; rows in pathResults are strided by MAX_ANTS ints.
// NOTE(review): every launched thread performs the full memcpy, so this is
// presumably meant for a <<<1,1>>> launch -- confirm at the call site.
__global__ void copyBestPath(int i, int *bestPathResult, int *pathResults) {
memcpy(bestPathResult, &pathResults[i * MAX_ANTS], MAX_CITIES * sizeof(int));
}
|
2,133
|
#include "includes.h"
/******************************************************************************
* This program takes an initial estimate of m and c and finds the associated
* rms error. It is then as a base to generate and evaluate 8 new estimates,
* which are steps in different directions in m-c space. The best estimate is
* then used as the base for another iteration of "generate and evaluate". This
* continues until none of the new estimates are better than the base. This is
* a gradient search for a minimum in mc-space.
*
* To compile:
 * nvcc -o cuda_linear_regression CUDA_linear_regression.cu -lm
*
* To run:
* ./cuda_linear_regression
*
* Dr Kevan Buckley, University of Wolverhampton, 2018
*****************************************************************************/
// A single 2-D observation (x, y) used for the linear-regression fit.
typedef struct point_t {
  double x;
  double y;
} point_t;
// Number of points in the data[] table below (host copy).
int n_data = 1000;
__device__ int d_n_data = 1000; // device-side copy of n_data
// data set: 1000 (x, y) observations used for the regression fit
point_t data[] = {
{82.73,128.67},{79.53,133.54},{66.86,124.65},{69.21,135.74},
{82.20,122.07},{84.32,120.46},{71.12,93.14},{85.64,121.42},
{69.22,116.28},{83.12,137.30},{84.31,113.18},{75.60,121.42},
{69.04,91.83},{85.41,131.06},{17.44,58.69},{68.92,119.86},
{69.95,110.05},{ 0.15, 5.39},{73.96,118.70},{27.70,64.64},
{97.97,158.15},{56.21,100.99},{30.27,48.32},{37.47,89.65},
{98.98,144.03},{92.61,133.89},{ 4.72,32.88},{19.51,57.43},
{94.74,145.50},{31.66,71.27},{94.76,134.53},{32.73,59.95},
{32.64,54.53},{38.78,69.06},{91.47,150.49},{77.99,119.35},
{33.38,65.87},{79.28,123.62},{39.69,72.53},{95.47,140.97},
{82.64,137.69},{25.53,51.33},{68.58,85.98},{92.25,132.34},
{74.79,101.30},{ 1.32,18.87},{53.85,95.13},{78.75,128.26},
{ 2.91,21.77},{90.68,128.55},{11.44,35.27},{30.72,56.54},
{49.06,74.08},{49.09,83.45},{62.54,104.58},{38.83,72.26},
{78.43,130.83},{69.49,122.49},{27.27,56.35},{80.06,131.95},
{ 5.73,39.00},{80.21,140.42},{ 8.47,36.12},{86.98,152.43},
{64.26,108.56},{95.74,133.36},{15.06,48.67},{31.96,72.31},
{95.27,141.34},{61.10,89.26},{27.51,68.47},{26.48,60.30},
{92.61,128.38},{ 8.25,47.51},{90.69,118.91},{45.40,79.96},
{23.59,53.12},{46.71,68.27},{21.15,50.29},{27.99,76.29},
{ 7.75,43.57},{13.70,43.56},{74.85,97.83},{50.93,103.11},
{33.80,64.85},{80.99,125.37},{92.41,126.27},{92.61,134.36},
{34.70,55.32},{35.07,55.04},{86.87,157.26},{41.99,90.46},
{16.27,44.43},{36.31,83.84},{22.35,73.11},{89.11,127.19},
{56.11,77.28},{51.90,75.07},{35.74,94.18},{10.66,29.60},
{61.27,114.15},{77.55,117.04},{61.17,99.68},{15.54,55.33},
{91.99,143.18},{12.91,21.82},{48.52,89.94},{54.88,90.86},
{73.59,131.33},{ 5.49,13.95},{92.31,147.29},{48.50,89.49},
{40.02,58.26},{48.22,81.96},{17.08,52.59},{34.27,66.17},
{59.06,94.26},{92.71,134.53},{37.70,65.30},{77.11,111.38},
{43.27,74.12},{79.71,123.45},{ 0.86,38.69},{ 3.00,17.76},
{56.03,80.33},{17.66,43.27},{18.39,47.08},{31.08,83.84},
{32.64,77.85},{51.68,84.57},{78.46,134.18},{ 9.57,40.28},
{68.38,98.26},{30.29,67.59},{86.15,131.86},{16.82,64.91},
{ 3.35,20.88},{65.78,98.73},{50.70,90.92},{38.26,71.11},
{85.52,132.23},{44.06,83.02},{44.09,86.42},{81.86,114.30},
{33.98,69.09},{93.80,147.73},{59.58,103.07},{98.75,154.73},
{88.98,120.59},{78.08,109.00},{82.77,133.94},{76.49,106.31},
{55.38,85.71},{46.56,79.57},{83.92,141.58},{81.38,133.52},
{ 4.88,35.01},{ 4.57,17.99},{57.96,90.07},{33.42,63.80},
{ 9.95,34.53},{47.14,92.75},{63.17,105.19},{95.01,163.93},
{30.36,57.81},{ 2.46,23.97},{69.75,115.88},{64.85,111.01},
{25.18,56.58},{69.84,104.78},{40.43,51.98},{75.61,107.05},
{36.75,69.37},{50.08,100.02},{64.97,103.68},{41.72,86.64},
{ 1.70,47.26},{99.93,141.75},{24.57,64.51},{75.23,116.35},
{ 1.95,18.53},{78.84,102.70},{67.38,97.71},{55.35,82.37},
{58.10,100.09},{53.10,96.07},{41.24,83.81},{68.86,111.98},
{87.36,86.88},{54.06,98.42},{64.12,90.56},{11.77,49.66},
{99.43,134.33},{55.24,99.18},{56.44,74.73},{39.47,62.99},
{ 8.94,48.15},{92.91,130.45},{87.68,138.76},{80.37,116.69},
{56.72,108.65},{ 0.76,24.26},{26.98,75.13},{ 0.39,42.16},
{81.99,138.50},{88.32,117.16},{51.01,87.42},{21.38,55.45},
{72.66,122.82},{18.04,53.56},{11.22,49.73},{36.75,60.26},
{64.81,90.19},{72.72,121.14},{24.03,74.08},{41.38,81.38},
{62.79,98.75},{63.66,109.17},{91.12,143.91},{ 7.41,34.06},
{94.05,131.99},{53.12,90.28},{68.31,114.79},{25.33,67.23},
{42.34,86.91},{94.61,131.38},{43.78,73.28},{50.18,78.10},
{81.64,135.88},{11.27,44.45},{41.03,76.34},{21.25,57.54},
{29.23,57.27},{35.74,75.16},{ 0.91,14.33},{30.08,59.05},
{23.99,56.25},{90.79,120.98},{99.22,152.22},{94.21,143.09},
{19.35,30.03},{82.04,113.25},{79.22,113.69},{83.40,144.06},
{55.82,80.85},{42.49,48.94},{17.60,55.62},{35.65,81.91},
{82.50,135.41},{81.15,114.46},{53.47,78.67},{44.30,73.73},
{32.88,80.28},{99.26,147.55},{76.32,110.24},{78.97,110.27},
{18.08,47.48},{87.01,140.40},{56.25,83.61},{42.62,55.40},
{15.95,16.25},{47.85,106.69},{ 6.61,35.83},{66.38,116.30},
{94.97,122.56},{42.29,73.37},{31.48,67.15},{69.67,105.40},
{30.41,65.31},{ 2.98,19.40},{ 8.12,48.34},{80.41,127.03},
{63.68,112.61},{24.60,78.23},{77.61,123.49},{39.87,38.20},
{77.80,109.59},{58.53,107.63},{23.97,62.36},{ 7.77,27.38},
{ 0.80,41.55},{ 6.45,32.91},{45.32,82.24},{35.56,59.56},
{65.05,97.68},{62.21,96.14},{86.61,121.99},{87.91,125.40},
{48.08,88.87},{ 2.41,40.02},{69.55,119.31},{22.07,61.86},
{61.87,121.40},{82.50,119.46},{26.97,38.40},{31.53,86.30},
{ 1.81,38.57},{72.57,108.34},{88.88,139.23},{63.90,95.79},
{93.29,135.35},{86.26,143.55},{63.62,94.76},{20.24,38.84},
{16.23,48.64},{72.87,108.22},{16.26,51.25},{37.86,66.06},
{57.53,81.37},{61.66,97.20},{49.48,84.98},{95.20,142.45},
{12.10,45.25},{47.79,84.80},{17.29,48.98},{47.11,87.23},
{85.74,119.95},{89.94,142.94},{97.68,155.27},{78.73,123.81},
{51.65,85.91},{52.82,96.05},{50.95,93.50},{16.14,37.21},
{16.73,41.57},{57.25,95.50},{78.47,136.77},{42.35,75.64},
{93.24,135.04},{12.56,38.20},{21.40,62.92},{70.60,136.98},
{44.04,83.57},{ 6.43,36.61},{12.01,50.32},{79.61,119.78},
{43.05,69.07},{14.42,53.01},{51.68,83.82},{25.59,55.77},
{ 9.14,31.58},{37.24,80.94},{15.73,69.21},{71.54,123.11},
{ 1.26,25.72},{ 4.25,38.46},{21.42,39.99},{44.12,79.01},
{31.12,64.63},{85.27,143.62},{43.25,79.30},{77.27,104.30},
{47.34,83.76},{90.57,125.82},{17.35,36.40},{82.01,130.41},
{81.58,124.10},{68.62,117.62},{47.48,79.29},{ 4.30,26.77},
{ 6.94,32.22},{11.71,55.76},{22.62,54.74},{58.43,89.61},
{69.10,111.51},{56.77,101.10},{67.10,102.75},{93.20,144.51},
{83.61,128.56},{71.97,116.09},{75.19,122.16},{48.03,79.67},
{97.95,143.80},{92.27,123.08},{23.88,63.39},{79.15,115.57},
{24.42,51.27},{12.58,34.65},{46.58,78.16},{ 1.29,37.96},
{17.09,45.61},{12.45,40.77},{82.75,107.46},{52.15,75.34},
{39.51,68.51},{31.71,64.23},{39.36,72.00},{12.16,37.99},
{83.13,127.76},{42.25,73.17},{45.32,77.14},{20.52,36.60},
{ 7.99,11.50},{23.34,55.47},{25.87,54.36},{78.73,112.49},
{55.60,94.90},{31.98,73.40},{85.93,137.12},{58.56,97.64},
{88.16,120.43},{78.65,136.60},{25.93,43.32},{84.83,136.32},
{68.09,102.12},{68.36,111.80},{39.80,69.69},{ 0.38,27.89},
{ 4.49,27.85},{32.53,66.32},{54.23,97.63},{19.98,67.32},
{90.62,143.43},{18.31,67.91},{95.66,146.41},{95.41,149.68},
{71.64,111.15},{23.02,44.96},{97.06,154.54},{41.58,75.95},
{79.80,130.01},{74.55,119.44},{72.19,113.27},{70.01,106.48},
{75.24,94.18},{19.82,60.09},{96.31,137.91},{ 2.21,27.44},
{40.52,70.36},{ 2.40,29.12},{35.24,57.25},{26.38,71.34},
{26.02,59.48},{34.73,66.07},{45.15,78.23},{ 9.35,32.58},
{19.37,57.18},{ 9.51,31.70},{15.03,49.81},{85.08,140.35},
{ 3.23,13.46},{58.26,108.47},{ 4.84,31.78},{49.49,83.50},
{35.55,70.67},{26.51,55.44},{20.12,53.39},{72.73,119.37},
{31.04,72.96},{30.66,58.35},{ 2.96,33.18},{18.68,31.50},
{91.41,138.24},{44.67,81.81},{81.57,135.26},{ 0.17,26.66},
{49.03,100.11},{54.47,102.27},{61.78,113.45},{22.67,59.51},
{89.80,143.05},{33.05,78.20},{67.76,108.19},{ 7.64,41.18},
{36.91,87.28},{95.44,147.27},{52.76,94.34},{ 3.52,29.51},
{87.39,118.48},{41.48,64.71},{ 1.44,14.21},{95.04,136.99},
{71.77,115.75},{23.39,47.58},{62.66,115.03},{15.98,34.38},
{29.06,62.77},{ 2.94,28.25},{71.50,119.18},{65.24,119.14},
{30.65,82.39},{16.36,38.82},{ 0.98,48.82},{33.19,56.41},
{27.49,64.34},{53.69,102.47},{28.15,52.58},{40.21,66.07},
{50.56,86.39},{74.71,97.44},{24.72,46.29},{48.05,80.63},
{34.99,52.13},{66.75,115.96},{17.62,49.17},{98.99,157.80},
{37.96,72.18},{56.88,105.06},{48.27,97.04},{71.18,138.90},
{46.35,82.02},{10.43,44.65},{24.14,42.85},{82.21,144.13},
{96.85,148.15},{93.68,126.32},{33.02,61.55},{66.73,108.51},
{83.89,136.35},{80.85,91.16},{79.21,128.88},{84.37,119.84},
{38.41,71.48},{47.49,85.53},{ 1.54,24.44},{68.32,106.44},
{22.82,54.16},{ 2.65,16.35},{19.91,53.53},{12.99,34.98},
{30.87,57.17},{44.10,83.88},{15.84,31.99},{36.46,59.74},
{26.25,79.73},{79.12,132.06},{86.26,132.45},{ 0.61,23.61},
{33.94,59.37},{99.92,145.88},{26.20,53.99},{69.77,115.40},
{69.07,107.00},{ 1.89,17.20},{38.25,81.40},{27.08,62.96},
{23.09,53.98},{55.56,86.93},{ 6.68,50.41},{22.86,49.26},
{17.25,50.25},{19.01,50.16},{35.07,85.09},{59.08,89.15},
{87.02,128.83},{ 1.57,27.68},{97.76,148.25},{70.78,108.00},
{38.01,65.83},{96.41,139.67},{ 2.86,22.44},{27.05,53.00},
{90.99,134.97},{86.60,145.27},{54.66,99.42},{67.61,107.07},
{85.16,137.50},{87.64,144.60},{14.69,36.65},{16.08,49.31},
{14.45,44.07},{65.91,98.39},{50.74,90.72},{ 6.98,31.11},
{52.76,83.96},{ 8.03,43.93},{17.58,52.58},{33.63,59.04},
{87.65,137.34},{77.97,142.54},{30.56,69.47},{59.61,114.61},
{14.05,53.07},{87.65,116.66},{33.19,75.96},{31.87,66.95},
{25.89,57.59},{48.60,75.67},{80.25,109.89},{ 6.61,24.27},
{ 4.56,44.00},{40.17,62.33},{92.32,117.73},{75.07,112.71},
{17.10,35.12},{39.06,66.60},{ 4.26,34.01},{52.95,102.49},
{45.73,76.57},{ 4.72,29.94},{ 2.01,17.54},{39.08,88.44},
{82.94,141.75},{44.51,90.97},{27.27,63.14},{60.16,95.38},
{41.26,72.59},{66.50,104.49},{58.37,110.13},{62.11,96.01},
{70.30,90.15},{18.47,47.61},{24.80,51.82},{79.02,133.40},
{96.61,147.92},{18.14,33.27},{ 0.83,51.20},{99.67,143.65},
{34.07,67.38},{57.28,110.02},{35.92,59.90},{66.15,124.45},
{81.82,135.08},{ 2.97,28.49},{95.97,135.79},{51.17,80.95},
{91.47,142.00},{94.09,121.08},{57.70,82.98},{67.96,100.92},
{81.91,132.34},{11.55,39.74},{86.59,126.05},{ 5.36,41.72},
{90.86,144.15},{81.02,137.56},{35.87,81.76},{63.73,105.92},
{78.29,129.54},{96.72,150.04},{14.97,61.93},{45.76,77.17},
{82.69,123.95},{85.82,132.89},{85.95,127.24},{15.04,36.92},
{89.91,112.87},{30.86,58.13},{ 5.77,42.22},{75.24,108.41},
{ 8.43,32.09},{90.70,147.99},{80.16,112.57},{42.81,73.54},
{82.47,123.41},{48.23,98.48},{77.48,143.96},{ 0.48,14.50},
{29.75,63.12},{88.76,137.72},{33.59,70.61},{22.74,43.51},
{82.15,116.11},{89.10,120.65},{26.56,68.17},{40.72,74.98},
{68.46,99.23},{34.82,66.71},{36.56,67.33},{72.32,114.23},
{29.65,65.99},{44.39,64.83},{82.08,116.35},{99.73,139.12},
{79.04,118.48},{20.78,42.05},{72.39,96.47},{90.62,147.11},
{35.99,59.11},{50.65,83.23},{59.04,100.47},{87.01,145.78},
{43.71,76.56},{95.61,151.81},{50.25,88.96},{69.64,122.07},
{40.07,79.38},{82.61,133.63},{20.84,39.75},{10.28,42.50},
{47.43,70.82},{30.47,67.19},{69.16,100.10},{46.06,74.00},
{93.78,152.76},{19.93,67.46},{79.61,130.88},{81.11,120.11},
{76.16,123.94},{75.84,111.70},{50.97,85.30},{47.35,90.59},
{93.21,115.44},{19.22,39.30},{11.67,29.58},{52.48,95.64},
{38.76,59.62},{ 2.74,-2.03},{18.99,63.67},{82.38,128.08},
{15.68,32.34},{39.19,83.38},{31.06,65.92},{28.91,73.05},
{19.01,59.69},{76.62,117.74},{36.82,91.33},{86.28,121.19},
{39.26,50.72},{41.45,70.26},{65.81,111.41},{77.09,117.88},
{78.96,128.48},{16.41,56.61},{39.54,64.11},{72.45,110.54},
{48.83,77.35},{27.61,51.82},{26.53,47.44},{83.06,111.09},
{97.06,127.57},{89.01,146.82},{89.44,141.17},{69.18,100.25},
{ 1.11,11.60},{71.63,123.66},{92.93,151.73},{99.46,165.34},
{36.49,71.56},{95.48,153.13},{65.33,102.37},{15.28,35.93},
{ 5.52,36.67},{ 0.78,42.47},{10.09,36.68},{ 5.75,37.39},
{52.34,89.11},{14.55,47.37},{67.92,113.35},{36.66,77.34},
{99.76,143.75},{26.67,58.72},{ 3.21,39.37},{87.70,124.12},
{90.03,131.24},{51.54,91.39},{62.86,98.04},{52.75,90.87},
{34.17,84.31},{62.00,89.08},{82.47,111.89},{61.38,123.48},
{47.17,84.64},{20.91,53.51},{96.96,131.54},{46.06,85.14},
{26.85,71.44},{91.67,138.51},{54.07,85.26},{51.63,89.63},
{94.04,140.80},{67.75,107.07},{29.24,76.71},{38.29,75.78},
{28.49,72.87},{60.51,102.28},{77.22,107.79},{99.25,145.86},
{33.11,52.32},{72.47,125.80},{21.97,59.23},{14.25,61.11},
{23.79,63.11},{77.78,109.13},{23.51,81.38},{66.92,110.89},
{79.81,109.80},{56.72,94.63},{59.60,110.57},{57.68,104.54},
{27.83,42.43},{47.80,87.68},{58.79,76.51},{78.33,126.71},
{85.14,128.99},{71.61,116.42},{58.09,96.85},{44.89,71.34},
{33.12,80.19},{98.79,130.09},{44.57,82.03},{88.63,142.61},
{61.96,98.55},{58.54,106.80},{19.17,61.00},{13.51,26.68},
{76.68,124.52},{82.62,138.53},{78.13,122.09},{37.10,60.33},
{ 8.82,48.63},{71.64,105.27},{68.44,115.07},{ 7.66,61.91},
{64.37,96.58},{54.90,88.28},{78.35,133.29},{79.84,129.58},
{ 3.09,28.37},{48.62,76.00},{38.26,63.99},{42.05,102.17},
{48.89,73.66},{54.38,100.05},{11.16,55.43},{63.24,110.69},
{68.17,114.15},{68.68,109.15},{53.43,90.23},{67.45,106.67},
{10.60,34.41},{56.81,90.86},{11.42,27.08},{36.93,93.13},
{41.64,89.77},{69.74,103.98},{23.07,55.12},{44.98,83.65},
{35.75,72.65},{14.80,56.15},{72.19,115.53},{51.10,80.69},
{96.54,140.10},{15.04,62.30},{21.17,56.15},{46.42,79.63},
{22.35,52.01},{35.47,54.95},{ 4.27,21.33},{84.37,139.55},
{43.95,93.24},{86.56,132.82},{44.35,83.36},{76.81,114.79},
{ 1.05,31.66},{32.76,76.15},{83.66,120.90},{12.14,42.52},
{25.85,55.83},{82.12,140.05},{75.33,126.93},{32.92,75.90},
{ 7.52,24.51},{25.42,41.55},{42.57,67.15},{87.36,150.38},
{ 0.51,17.68},{45.70,84.75},{58.74,88.68},{28.62,74.38},
{73.22,113.45},{78.64,114.25},{42.40,92.03},{84.22,132.25},
{54.24,73.34},{ 2.71,30.27},{54.11,84.97},{66.74,112.66},
{28.80,57.88},{87.02,146.20},{32.02,63.03},{59.57,94.41},
{40.46,79.73},{23.74,49.78},{87.58,140.94},{84.15,113.32},
{32.48,63.48},{ 4.59,25.85},{98.00,128.35},{12.23,37.43},
{66.17,102.97},{50.73,93.82},{74.68,137.79},{43.72,92.85},
{53.95,91.99},{54.47,105.25},{56.70,104.89},{16.59,46.52},
{71.56,115.18},{80.62,99.79},{71.29,101.42},{16.81,56.15},
{48.88,84.93},{ 8.41,40.02},{93.98,147.39},{39.20,86.04},
{61.75,90.80},{ 1.06,32.69},{21.40,33.33},{ 8.60,28.69},
{38.80,61.88},{14.41,38.37},{40.14,70.01},{69.45,105.44},
{14.41,43.93},{51.20,93.11},{39.10,57.10},{21.04,39.51},
{10.12,30.43},{70.13,93.88},{ 1.74,20.56},{12.23,34.33},
{98.81,151.87},{50.48,92.07},{ 6.98, 9.52},{24.08,69.94},
{15.72,40.89},{83.99,127.44},{47.21,90.46},{88.31,138.70},
{91.05,132.13},{45.22,62.24},{87.76,128.67},{99.37,168.24},
{94.38,140.24},{31.30,67.65},{40.85,84.03},{40.91,79.56},
{77.14,135.74},{50.92,80.52},{17.81,49.14},{90.30,135.15},
{28.44,64.60},{49.23,85.12},{81.63,141.58},{83.04,111.19},
{28.39,63.30},{ 8.61,44.11},{25.36,50.79},{51.35,93.32},
{64.49,80.42},{96.17,134.31},{96.10,144.32},{47.58,83.36},
{94.38,131.03},{41.97,69.05},{37.86,62.21},{26.97,65.30},
{37.57,88.95},{65.08,108.58},{17.68,39.80},{63.75,103.14},
{91.86,132.07},{76.35,121.19},{22.98,34.87},{96.46,140.54},
{ 9.38,31.40},{42.97,82.09},{20.56,49.02},{13.73,41.31},
{37.35,63.18},{69.54,105.57},{38.17,83.30},{47.04,80.34},
{48.79,98.00},{39.34,61.59},{82.57,125.55},{40.77,82.18},
{13.62,53.38},{35.33,95.17},{95.36,148.79},{20.25,62.00},
{47.48,86.54},{30.22,61.07},{83.90,120.30},{85.81,123.25},
{84.29,130.44},{52.84,95.43},{96.72,140.32},{ 3.29,45.68},
{71.77,98.66},{ 8.52,42.40},{22.55,54.27},{15.08,47.10},
{91.29,130.23},{16.48,40.04},{44.84,72.14},{34.44,73.42},
{36.26,78.30},{58.45,115.51},{96.59,150.22},{63.80,98.30},
{85.92,120.14},{93.68,129.88},{74.09,119.30},{99.44,136.93},
{88.39,131.55},{64.40,117.89},{13.87,47.30},{81.17,106.77}
};
// Squared vertical distance between the point (x, y) and the line y = m*x + c.
__device__ double d_residual_error(double x, double y, double m, double c) {
  double diff = (m * x) + c - y;
  return diff * diff;
}
// One thread per data point: compute that point's squared residual for the
// candidate line (*m, *c) and store it in error_sum_arr[idx] so the host can
// reduce the array into an RMS error. Caller must launch exactly one thread
// per element of d_data.
__global__ void d_rms_error(double *m, double *c, double *error_sum_arr, point_t *d_data) {
  // Flat global index: thread id within block + block offset.
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  error_sum_arr[idx] = d_residual_error(d_data[idx].x, d_data[idx].y, *m, *c);
}
|
2,134
|
//
// Created by igor on 03.06.2021.
//
#include <cstdio>
#include <fstream>
#include <iostream>
#include <sstream>
#include "Texture.cuh"
// Loads a binary PPM ("P6") image from `path` into `img` as float RGB
// triplets; `x`, `y` and `max` receive the header dimensions / max value.
// On any failure an error is printed and `img` is left null (fixed: it was
// previously left uninitialized on early return).
Texture::Texture(const char *path) {
    img = nullptr;
    std::ifstream inp(path, std::ios::in | std::ios::binary);
    if (!inp.is_open()) {
        std::cout << "Error. Unable to open " << path << std::endl;
        return;
    }
    std::string line;
    std::getline(inp, line);
    if (line != "P6") {
        std::cout << "Error. Unrecognized file format." << std::endl;
        return;
    }
    // Skip '#' comment lines between the magic number and the dimensions.
    std::getline(inp, line);
    while (line[0] == '#') {
        std::getline(inp, line);
    }
    // Fixed: operator>> does not throw by default, so the original
    // try/catch could never fire; check the stream state explicitly.
    std::stringstream dimensions(line);
    if (!(dimensions >> x >> y)) {
        std::cout << "Header file format error. " << std::endl;
        return;
    }
    std::getline(inp, line);
    std::stringstream max_val(line);
    if (!(max_val >> max)) {
        std::cout << "Header file format error. " << std::endl;
        return;
    }
    uint size = x * y;
    img = (ColorF*)malloc(sizeof(ColorF) * size);
    // Pixel data: 3 raw bytes (R, G, B) per pixel, widened to float.
    char aux;
    for (unsigned int i = 0; i < size; ++i) {
        inp.read(&aux, 1);
        img[i].r = (float)(unsigned char) aux;
        inp.read(&aux, 1);
        img[i].g = (float)(unsigned char) aux;
        inp.read(&aux, 1);
        img[i].b = (float)(unsigned char) aux;
    }
    inp.close();
}
Texture::~Texture() {
    // NOTE(review): the pixel buffer is never released — the free() below
    // was deliberately commented out (possibly to avoid double-free when
    // Textures are shallow-copied), so every Texture leaks `img`. Confirm
    // ownership semantics before re-enabling the free.
    // free(img);
}
|
2,135
|
#include "includes.h"
// Intentionally empty kernel: performs no computation. Presumably launched
// only for its stream-ordering side effect (a device-side no-op marker) —
// cannot be confirmed from this file alone.
__global__ void sync_ndconv_groups() { }
|
2,136
|
#include <stdio.h>
#include <iostream>
#include <string.h>
#include <sstream>
#include <fstream>
//#include <bits/stdc++.h>
//#include <stdlib.h>
//#include <time.h>
using namespace std;
/***DEFINING THE DEFINES FOR THE ARRAY INDICES****************************/
//#define N 1
#define C 384
#define H 15
#define W 15
#define R 3
#define S 3
#define M 384
#define E 13
#define F 13
#define U 1
// GPU convolution with fused ReLU.
// Launch layout: one block per (image, filter) pair — blockIdx.x = image,
// blockIdx.y = filter — and one thread per output pixel (threadIdx.y = row,
// threadIdx.x = col). The output index arithmetic assumes blockDim matches
// (width, height).
// Fixed: the original only stored results when prod >= 0, leaving d_o
// uninitialized for negative activations; the output element is now always
// written (clamped at zero).
__global__
void ew_gpu_mmul(float* d_o, float* d_i, float* d_w, int width, int height, int stride, int ip_height, int wt_width, int num_wt,int num_img, int num_ch)
{
  int row = threadIdx.y;
  int col = threadIdx.x;
  if ((row < height) && (col < width))
  {
    float prod = 0;
    // Accumulate over the wt_width x wt_width window and all input channels.
    for (int i = 0; i < wt_width; i++) {
      for (int j = 0; j < wt_width; j++) {
        for (int k = 0; k < num_ch; k++) {
          float ip = d_i[blockIdx.x*num_ch*ip_height*ip_height+k*ip_height*ip_height+(stride*(row)+i)*ip_height+(stride*(col)+j)];
          float wt = d_w[(blockIdx.y)*num_ch*wt_width*wt_width+k*wt_width*wt_width+(i*wt_width+j)];
          prod += ip*wt;
        }
      }
    }
    // ReLU: clamp negative activations to zero instead of skipping the store.
    d_o[blockIdx.x*num_wt*blockDim.x*blockDim.y+(blockIdx.y)*blockDim.x*blockDim.y+row*blockDim.x+col] = (prod >= 0) ? prod : 0.0f;
  }
}
/*
 * CPU reference convolution. For each image n, filter m and output pixel
 * (x, y), accumulates input*weight products over the R x S window across all
 * C channels into `output`. Note: only non-negative partial products are
 * accumulated (a per-product clamp) — preserved exactly as in the original
 * reference, even though it differs from a conventional post-sum ReLU.
 */
void element_wise_mmul(float* output, float* input, float* weight, int batch_size)
{
  for (int n = 0; n < batch_size; n++) {
    for (int m = 0; m < M; m++) {
      for (int x = 0; x < F; x++) {
        for (int y = 0; y < E; y++) {
          for (int i = 0; i < R; i++) {
            for (int j = 0; j < S; j++) {
              for (int k = 0; k < C; k++) {
                float in_val = input[n*C*H*W + k*H*W + (U*x + i)*H + (U*y + j)];
                float wt_val = weight[m*C*R*S + k*R*S + i*S + j];
                float partial = in_val * wt_val;
                if (partial >= 0)
                  output[n*E*F*M + m*E*F + x*E + y] += partial;
              }
            }
          }
        }
      }
    }
  }
}
/*
 * Driver: builds random weights and zero-padded random inputs, runs the GPU
 * convolution and reports the max absolute error against reference values
 * read from "layer_4_<batch_size>".
 */
int main(int argc, char* argv[])
{
  // Fixed: argv[1] was previously dereferenced without checking argc.
  if (argc < 2) {
    fprintf(stderr, "usage: %s <batch_size>\n", argv[0]);
    return 1;
  }
  int batch_size = atoi(argv[1]);

  /* Host buffers: input, CPU output (unused reference), GPU output, weights. */
  float *IP  = (float*) malloc(batch_size*C*H*W*sizeof(float));
  float *OP  = (float*) malloc(batch_size*M*F*E*sizeof(float));
  float *OPG = (float*) malloc(batch_size*M*F*E*sizeof(float));
  float *WT  = (float*) malloc(M*C*R*S*sizeof(float));

  float* d_o;
  float* d_i;
  float* d_w;
  int c,d,m,n,k;

  /* Random weights in [0, 1). */
  for (m=0; m<M; m++){
    for(k=0;k<C;k++){
      for (c=0; c<R; c++){
        for(d=0; d<S; d++){
          WT[m*C*R*S+k*R*S+c*S+d] = (float)rand()/(RAND_MAX+1.0);
        }
      }
    }
  }

  /* Zero the CPU reference output. */
  for (n=0; n<batch_size;n++){
    for (m=0; m<M; m++){
      for (c=0; c<F; c++){
        for(d=0; d<E; d++){
          OP[n*M*F*E+m*F*E+c*E+d] = 0;
        }
      }
    }
  }

  /* Random input with a one-pixel zero border (implicit padding). */
  for (n=0; n<batch_size; n++){
    for(k=0;k<C;k++){
      for (c=0; c<H; c++){
        for(d=0; d<W; d++){
          if ((c==0) || (d==0) || (c==14) || (d==14))
            IP[n*C*H*W+k*H*W+c*W+d] = 0;
          else
            IP[n*C*H*W+k*H*W+c*W+d] = (float)rand()/(RAND_MAX+1.0);
        }
      }
    }
  }

  cudaMalloc((void**) &d_i,batch_size*C*H*W*sizeof(float));
  cudaMemcpy(d_i, IP, batch_size*C*H*W*sizeof(float), cudaMemcpyHostToDevice);
  cudaMalloc((void**) &d_w, M*C*R*S*sizeof(float));
  cudaMemcpy(d_w, WT, M*C*R*S*sizeof(float), cudaMemcpyHostToDevice);
  cudaMalloc((void**) &d_o, batch_size*M*E*F*sizeof(float));

  /* One block per (image, filter) pair, one thread per output pixel. */
  dim3 dimGrid(batch_size,384,1);
  dim3 dimBlock(13,13,1);
  ew_gpu_mmul<<<dimGrid, dimBlock>>>(d_o,d_i,d_w,13,13,1,15,3,384,batch_size,384);
  cudaMemcpy(OPG,d_o, batch_size*M*E*F*sizeof(float), cudaMemcpyDeviceToHost);

  /* Compare the GPU result against reference values read from file. */
  float max_error = 0;
  string filename = "layer_4_"+to_string(batch_size);
  ifstream fin(filename.c_str());
  if (!fin.is_open()) {
    // Fixed: a missing reference file previously compared silently
    // against zeros; at least report the problem.
    fprintf(stderr, "warning: could not open %s\n", filename.c_str());
  }
  string line ;
  int g,h,s,u;
  for (u=0;u<batch_size;u++){
    for (s=0;s<M;s++){
      for (g=0; g<F; g++){
        for(h=0; h<E; h++){
          getline(fin,line);
          float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-atof(line.c_str()));
          if(error > max_error)
            max_error = error;
        }
      }
    }
  }
  fin.close();
  printf("max error is %f\n", max_error);

  cudaFree(d_o);
  cudaFree(d_i);
  cudaFree(d_w);
  free(OPG);
  free(IP);
  free(WT);
  free(OP);
  return 0;
}
|
2,137
|
#include "device_launch_parameters.h"
#include <cuda_runtime.h>
#include<stdlib.h>
#include<iostream>
using namespace std;
#define SIZE 1024
//call from host func (like order to gpu)
// Element-wise vector add: c[i] = a[i] + b[i] for i in [0, n).
// Fixed: the original ignored the thread index and had *every* thread loop
// over all n elements, doing the whole job redundantly. Each thread now
// handles exactly one element, with a bounds guard for launches where the
// thread count does not divide n evenly. Results are identical.
__global__ void vectoradd(int* a, int* b, int* c, int n) {
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < n)
		c[i] = a[i] + b[i];
}
// Host driver: fills two SIZE-element vectors with random values, adds them
// on the GPU with a single block of SIZE threads, and prints the result.
int main() {
	int *a, *b, *c;        // host buffers
	int *d_a, *d_b, *d_c;  // device buffers

	a = (int*)malloc(SIZE * sizeof(int));
	b = (int*)malloc(SIZE * sizeof(int));
	c = (int*)malloc(SIZE * sizeof(int));

	cudaMalloc(&d_a, SIZE * sizeof(int));
	cudaMalloc(&d_b, SIZE * sizeof(int));
	cudaMalloc(&d_c, SIZE * sizeof(int));

	// Random operands in [0, 1000).
	for (int i = 0; i < SIZE; i++) {
		a[i] = rand() % 1000;
		b[i] = rand() % 1000;
	}

	cudaMemcpy(d_a, a, SIZE * sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(d_b, b, SIZE * sizeof(int), cudaMemcpyHostToDevice);

	// One block, SIZE threads — one thread per element.
	vectoradd <<< 1, SIZE >>> (d_a, d_b, d_c, SIZE);

	// Fixed: only the result vector needs to come back; the original also
	// copied a and b back from the device, which was redundant work since
	// the kernel never modifies them. The blocking memcpy also synchronizes
	// with the kernel launch.
	cudaMemcpy(c, d_c, SIZE * sizeof(int), cudaMemcpyDeviceToHost);

	for (int i = 0; i < SIZE; i++)
		cout << "c[" << i << "]=" << c[i] << "\n";

	free(a);
	free(b);
	free(c);
	cudaFree(d_a);
	cudaFree(d_b);
	cudaFree(d_c);
	return 0;
}
|
2,138
|
// Applies the nekbone/spectral-element local stiffness operator to one
// element per block: w = D^T (G . (D u)) on an N x N x N tensor-product grid.
// Launch layout: one block per element (blockIdx.x = element index); threads
// grid-stride over the N^3 points of that element.
// Preconditions (from the shared-array sizes): N*N*N <= 1024 and the D
// matrix has at most 128 entries (N*N <= 128), i.e. roughly N <= 10.
// g holds 6 symmetric geometric factors per point (G11,G12,G13,G22,G23,G33).
__global__
void nekbone(double *w, double *u, double *g, double *d, const int N) {
  const int e_size = N * N * N;
  const int e_offset = e_size * blockIdx.x;
  // Per-element scratch: derivative components, the local copy of u, and
  // the (cached) derivative matrix D.
  __shared__ double ur[1024];
  __shared__ double us[1024];
  __shared__ double ut[1024];
  __shared__ double ul[1024];
  __shared__ double d_s[128];
  // Stage this element's u values into shared memory.
  for (int it = threadIdx.x; it < e_size; it += blockDim.x) {
    ul[it] = u[e_offset + it];
  }
  // First 128 threads cache the derivative matrix.
  if (threadIdx.x < 128) {
    d_s[threadIdx.x] = d[threadIdx.x];
  }
  __syncthreads();
  int i, j, k;
  // Pass 1: wr/ws/wt = derivatives of u in the r/s/t directions, then
  // multiply by the symmetric geometric-factor tensor G at each point.
  for (int it = threadIdx.x; it < e_size; it += blockDim.x) {
    // Decompose the flat index it into (i, j, k) with i fastest.
    j = it / N;
    i = it - j * N;
    k = j / N;
    j -= k * N;
    double wr = 0.0;
    double ws = 0.0;
    double wt = 0.0;
    for (int n = 0; n < N; ++n) {
      wr += d_s[n * N + i] * ul[N * (j + k * N) + n];
      ws += d_s[n * N + j] * ul[N * (n + k * N) + i];
      wt += d_s[n * N + k] * ul[N * (j + n * N) + i];
    }
    int g_offset = 6 * (e_offset + it);
    ur[it] = g[g_offset + 0] * wr + g[g_offset + 1] * ws + g[g_offset + 2] * wt;
    us[it] = g[g_offset + 1] * wr + g[g_offset + 3] * ws + g[g_offset + 4] * wt;
    ut[it] = g[g_offset + 2] * wr + g[g_offset + 4] * ws + g[g_offset + 5] * wt;
  }
  // All of ur/us/ut must be written before pass 2 reads across points.
  __syncthreads();
  // Pass 2: apply the transposed derivative operator and accumulate into w.
  for (int it = threadIdx.x; it < e_size; it += blockDim.x) {
    j = it / N;
    i = it - j * N;
    k = j / N;
    j -= k * N;
    double s = 0.0;
    for (int n = 0; n < N; ++n) {
      s += d_s[i * N + n] * ur[N * (j + N * k) + n] +
           d_s[j * N + n] * us[N * (n + N * k) + i] +
           d_s[k * N + n] * ut[N * (j + N * n) + i];
    }
    w[e_offset + it] = s;
  }
}
|
2,139
|
#include "includes.h"
// Adds n[blockID] to every element of the blockID-th segment of `output`,
// where segments are `length` elements long and the launch is expected to
// use one thread per segment element (blockDim.x == length).
// Fixed: added a bounds guard so a launch with blockDim.x > length cannot
// write past the segment; results are unchanged for conforming launches.
__global__ void add(int *output, int length, int *n) {
	int blockID = blockIdx.x;
	int threadID = threadIdx.x;
	if (threadID < length) {
		output[blockID * length + threadID] += n[blockID];
	}
}
|
2,140
|
#include <stdio.h>
#include <iostream>
#include <fstream>
#define BUFLEN 500*1024*1024
using namespace std;
__constant__ int thread_counts;
// Rotate a 32-bit word left by `steps` bits (steps in 1..31).
__device__ unsigned int rol(const unsigned int value,
const unsigned int steps)
{
	unsigned int high = value << steps;
	unsigned int low = value >> (32 - steps);
	return high | low;
}
// Zero the first 16 words of the SHA-1 message-schedule buffer.
__device__ void clearWBuffert(unsigned int* buffert)
{
	for (int pos = 0; pos < 16; ++pos)
	{
		buffert[pos] = 0;
	}
}
// One SHA-1 compression-function invocation: processes the 16-word block in
// w[0..15] (expanding it in place to 80 words) and folds it into the 5-word
// running state in `result`. The round constants/functions follow the four
// standard 20-round stages of SHA-1.
__device__ void innerHash(unsigned int* result, unsigned int* w)
{
// Working variables a..e, loaded from the chaining state.
unsigned int a = result[0];
unsigned int b = result[1];
unsigned int c = result[2];
unsigned int d = result[3];
unsigned int e = result[4];
int round = 0;
// One SHA-1 round: mix in the round function, constant and schedule word,
// then rotate the working variables.
#define sha1macro(func,val) \
{ \
const unsigned int t = rol(a, 5) + (func) + e + val + w[round]; \
e = d; \
d = c; \
c = rol(b, 30); \
b = a; \
a = t; \
}
// Rounds 0-15 use the message words directly.
while (round < 16)
{
sha1macro((b & c) | (~b & d), 0x5a827999)
++round;
}
// Rounds 16-19: same round function, schedule now expanded on the fly.
while (round < 20)
{
w[round] = rol((w[round - 3] ^ w[round - 8] ^ w[round - 14] ^ w[round - 16]), 1);
sha1macro((b & c) | (~b & d), 0x5a827999)
++round;
}
while (round < 40)
{
w[round] = rol((w[round - 3] ^ w[round - 8] ^ w[round - 14] ^ w[round - 16]), 1);
sha1macro(b ^ c ^ d, 0x6ed9eba1)
++round;
}
while (round < 60)
{
w[round] = rol((w[round - 3] ^ w[round - 8] ^ w[round - 14] ^ w[round - 16]), 1);
sha1macro((b & c) | (b & d) | (c & d), 0x8f1bbcdc)
++round;
}
while (round < 80)
{
w[round] = rol((w[round - 3] ^ w[round - 8] ^ w[round - 14] ^ w[round - 16]), 1);
sha1macro(b ^ c ^ d, 0xca62c1d6)
++round;
}
#undef sha1macro
// Add the working variables back into the chaining state.
result[0] += a;
result[1] += b;
result[2] += c;
result[3] += d;
result[4] += e;
}
// Computes the SHA-1 digest of `bytelength` bytes at `src`, writing the
// 20-byte result to `hash`. Handles full 64-byte blocks, the standard
// 0x80 / length padding of the final partial block, and endianness so the
// output byte order is correct on both little- and big-endian targets.
// NOTE: the length is folded in as bytelength << 3 into a single 32-bit
// word, so inputs must be < 2^29 bytes.
__device__ void calc(const void* src, const int bytelength, unsigned char* hash)
{
// Init the result array.
unsigned int result[5] = { 0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476, 0xc3d2e1f0 };
// Cast the void src pointer to be the byte array we can work with.
const unsigned char* sarray = (const unsigned char*) src;
// The reusable round buffer
unsigned int w[80];
// Loop through all complete 64byte blocks.
const int endOfFullBlocks = bytelength - 64;
int endCurrentBlock;
int currentBlock = 0;
while (currentBlock <= endOfFullBlocks)
{
endCurrentBlock = currentBlock + 64;
// Init the round buffer with the 64 byte block data.
for (int roundPos = 0; currentBlock < endCurrentBlock; currentBlock += 4)
{
// This line will swap endian on big endian and keep endian on little endian.
w[roundPos++] = (unsigned int) sarray[currentBlock + 3]
| (((unsigned int) sarray[currentBlock + 2]) << 8)
| (((unsigned int) sarray[currentBlock + 1]) << 16)
| (((unsigned int) sarray[currentBlock]) << 24);
}
innerHash(result, w);
}
// Handle the last and not full 64 byte block if existing.
endCurrentBlock = bytelength - currentBlock;
clearWBuffert(w);
int lastBlockBytes = 0;
for (;lastBlockBytes < endCurrentBlock; ++lastBlockBytes)
{
w[lastBlockBytes >> 2] |= (unsigned int) sarray[lastBlockBytes + currentBlock] << ((3 - (lastBlockBytes & 3)) << 3);
}
// Append the mandatory 0x80 padding byte right after the data.
w[lastBlockBytes >> 2] |= 0x80 << ((3 - (lastBlockBytes & 3)) << 3);
// If the padding byte landed in the length field's block, flush one extra
// compression round before encoding the length.
if (endCurrentBlock >= 56)
{
innerHash(result, w);
clearWBuffert(w);
}
// Message length in *bits* goes in the last schedule word.
w[15] = bytelength << 3;
innerHash(result, w);
// Store hash in result pointer, and make sure we get in in the correct order on both endian models.
for (int hashByte = 20; --hashByte >= 0;)
{
hash[hashByte] = (result[hashByte >> 2] >> (((3 - hashByte) & 0x3) << 3)) & 0xff;
}
}
// One thread per 1 MiB chunk: thread i hashes gdata[i*1MiB, (i+1)*1MiB) into
// hash[i*20, i*20+20). thread_counts (__constant__) is charRead / 1MiB, and
// the `<=` keeps the final, possibly partial chunk — which is still hashed
// with a fixed 1 MiB length.
// NOTE(review): for a partial tail chunk this reads past the valid data in
// gdata — confirm the host always allocates at least a full 1 MiB there.
// Fixed: removed the unused local variable `j`.
__global__
void calculateHash(unsigned char *gdata, unsigned char *hash)
{
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i <= thread_counts) {
		calc(&gdata[i*1024*1024], 1024*1024, &hash[i*20]);
	}
}
/*
 * Reads up to BUFLEN bytes of `filename`, starting at `offset`, into a newly
 * allocated buffer (ownership passes to the caller, who must delete[] it).
 * charRead receives the number of bytes actually read.
 * Returns 0 on success, 1 when offset is at/past EOF, -1 if the file
 * cannot be opened.
 */
int readFile(char* filename, unsigned char * &buffer,int &charRead,int offset=0){
	ifstream fileObject;
	fileObject.open(filename, ios::in|ios::binary);
	if (!fileObject.is_open())
		return -1;

	// Determine the total file length.
	fileObject.seekg(0, fileObject.end);
	int length = fileObject.tellg();
	fileObject.seekg(0, fileObject.beg);

	if (offset >= length)
		return 1;

	// Read the remainder of the file, capped at BUFLEN bytes per call.
	// Fixed: removed the leftover debug marker writes (buffer[0]='S',
	// buffer[l-1]='B') — they were dead stores when the read completed, but
	// would have leaked into the data on a short read.
	int l = (length - offset >= BUFLEN) ? BUFLEN : (length - offset);
	buffer = new unsigned char[l];

	fileObject.seekg(offset);
	fileObject.read((char*)buffer, l);
	charRead = fileObject.gcount();
	fileObject.close();
	return 0;
}
// Host driver: streams the input file in BUFLEN-sized slices, hashes each
// 1 MiB chunk of a slice on the GPU (one thread per chunk), and prints the
// hex SHA-1 of every chunk.
int main()
{
	unsigned char *data1;
	int charRead,offset=0,retStatus;
	while(1){
		retStatus = readFile("/home/ajay/test2.txt",data1,charRead,offset);
		offset+= charRead;
		// -1: file could not be opened; 1: offset reached EOF.
		if (retStatus == -1 || retStatus == 1)
			break;
		unsigned char *gdata;
		unsigned char *hash;
		const int gdsize = charRead*sizeof(char);
		cudaMalloc((void**)&gdata,gdsize);
		cudaError_t cuda_result_code = cudaGetLastError();
		if (cuda_result_code!=cudaSuccess) {
			printf("gdata message: %s\n",cudaGetErrorString(cuda_result_code));
		}
		// Number of full 1 MiB chunks in this slice; the kernel's <= bound
		// also covers the partial tail chunk.
		int threads = charRead/(1024*1024);
		int blocks = 1 + threads/32;
		const int hash_size = 20 * blocks * 32 * sizeof(unsigned char);
		cudaMalloc((void**)&hash, hash_size);
		cuda_result_code = cudaGetLastError();
		if (cuda_result_code!=cudaSuccess) {
			printf("hash message: %s\n",cudaGetErrorString(cuda_result_code));
		}
		cudaMemcpy( gdata, data1, gdsize, cudaMemcpyHostToDevice );
		// Fixed: the original tested a stale cuda_result_code here (and
		// after the kernel below) instead of re-reading the error state.
		cuda_result_code = cudaGetLastError();
		if (cuda_result_code!=cudaSuccess) {
			printf(" memcpy message: %s\n",cudaGetErrorString(cuda_result_code));
		}
		cudaMemcpyToSymbol(thread_counts, &threads, sizeof(int));
		calculateHash<<<blocks, 32>>>(gdata,hash);
		cudaDeviceSynchronize();
		cuda_result_code = cudaGetLastError();
		if (cuda_result_code!=cudaSuccess) {
			printf(" hashcalucation message: %s\n",cudaGetErrorString(cuda_result_code));
		}
		unsigned char * hash_host = new unsigned char[hash_size];
		cudaMemcpy( hash_host, hash, hash_size, cudaMemcpyDeviceToHost);
		cudaFree( gdata );
		cudaFree( hash );
		// Print one 40-hex-digit digest per chunk (threads full chunks plus
		// the tail chunk, hence <=).
		for(int i=0;i<=threads;i++)
		{
			for(int j=0;j<20;j++)
				printf("%02x",hash_host[i*20+j]);
			cout<<endl;
		}
		delete[] data1;
		delete[] hash_host;
		// Fixed: removed the unreachable `if(retStatus==1) break;` — a
		// retStatus of 1 already breaks at the top of the loop.
	}
	printf("Completed");
	return 1;
}
|
2,141
|
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include <stdlib.h>
#include <arpa/inet.h>
#include <math.h>
#include "cs_cuda.h"
#include "cs_dbg.h"
#include "cs_helper.h"
#include "cs_edge_detect.h"
#include "cs_copy_box.h"
// #define CUDA_DBG
// #define CUDA_DBG1
// For each interior point of every cx-by-cy plane in fdp, writes to tdp the
// rounded mean of its (2*ex+1) x (2*ey+1) neighborhood (excluding the point
// itself; exy_size is that neighbor count) minus the point's own value —
// a box-average edge score. Border points (within ex/ey of a plane edge)
// get 0. Threads stride by CUDA_MAX_THREADS over all tbl_size entries.
__global__ void d_do_edge_detection ( int *fdp, int *tdp, int tbl_size,
int cx, int cy, int ex, int ey, int xy_size, int exy_size )
{
int t_idx = blockIdx.x * blockDim.x + threadIdx.x;
int *ofdp, mea, *fp, x, y, block, sum, i, j ;
double d ;
ofdp = fdp ;
while ( t_idx < tbl_size )
{
fdp = ofdp ;
// Position within the current plane: i = flat index, (x, y) = coords.
i = t_idx % xy_size ;
y = i / cx ;
x = i % cx ;
// Only process points whose full edge box lies inside the plane.
if (( y >= ey ) && ( x >= ex ) && (( cy - y ) > ey ) && (( cx -x ) > ex ))
{
mea = fdp[ t_idx ] ;
// Advance fdp to the start of this point's plane.
block = t_idx / xy_size ;
fdp += block * xy_size ;
// Sum the whole (2*ex+1) x (2*ey+1) box around (x, y).
sum = 0 ;
for ( j = -ey ; j <= ey ; j++ )
{
fp = fdp + ( y + j ) * cx + ( x - ex ) ;
for ( i = -ex ; i <= ex ; i++ )
{
sum += *fp++ ;
}
}
// Remove the center element so the mean is over neighbors only.
sum -= mea ;
// exy_size take out the one in the center already ...
d = ((( double ) sum ) / (( double ) exy_size )) ;
// round up
tdp [ t_idx ] = (( int )( d + 0.5 )) - mea ;
} else
tdp [ t_idx ] = 0 ;
t_idx += CUDA_MAX_THREADS ;
}
}
// edge_x/y are from the center of the edge box on each side
// fromp will have the final data ... since we do the copy box
//
// Host wrapper: validates the geometry, launches d_do_edge_detection into
// `top`, then copies the result back over `fromp` via h_do_copy_box.
// Preconditions: tbl_size is a whole number of cube_x*cube_y planes, and
// the (2*edge_x+1) x (2*edge_y+1) window fits inside one plane.
// Returns 1 on success, 0 on a size/geometry error.
int
h_do_edge_detection ( int *fromp, int *top, int tbl_size, int cube_x,
int cube_y, int edge_x, int edge_y )
{
int nThreadsPerBlock = 512;
int nBlocks ; // = ( tbl_size + ( nThreadsPerBlock - 1 ))/nThreadsPerBlock ;
int cube_xy = cube_y * cube_x ;
int edge_xy = ( edge_x * 2 + 1 ) * ( edge_y * 2 + 1 ) - 1 ;	// window size minus centre
#ifdef CUDA_DBG1
fprintf(stderr, "%s: f %p t %p tblsize %d cube %d %d edge %d %d\n",
__func__, fromp, top, tbl_size, cube_x, cube_y, edge_x, edge_y ) ;
#endif
// table must hold an integral number of planes
if ( tbl_size % cube_xy )
{
fprintf(stderr, "%s: error size %d cube %d \n", __func__,
tbl_size, cube_xy ) ;
return ( 0 ) ;
}
// the edge window must fit inside one plane
if ((( cube_x - ( edge_x * 2 + 1 )) < 0 ) ||
(( cube_y - ( edge_y * 2 + 1 )) < 0 ))
{
fprintf(stderr, "%s: error cube %d %d edge %d %d\n",
__func__, cube_x, cube_y, edge_x, edge_y ) ;
return ( 0 ) ;
}
// project helper: picks a grid size for this table / block size
h_block_adj ( tbl_size, nThreadsPerBlock, &nBlocks ) ;
d_do_edge_detection <<< nBlocks, nThreadsPerBlock >>> (
fromp, top, tbl_size, cube_x, cube_y, edge_x, edge_y,
cube_xy, edge_xy ) ;
cudaThreadSynchronize() ;	// NOTE(review): deprecated; cudaDeviceSynchronize is the modern equivalent
#ifdef CUDA_DBG
dbg_p_d_data_i("edge_detect", top, tbl_size ) ;
#endif
// copy the valid interior back over the input buffer
if ( !h_do_copy_box ( top, fromp, tbl_size, cube_x,
cube_y, edge_x, edge_y ))
{
return ( 0 ) ;
}
return ( 1 ) ;
}
|
2,142
|
#include "cufft.h"
#include "cuda_runtime_api.h"
typedef float2 Complex;
void
PerformCUDAFFT(float *inputData, float *outputData, unsigned int numSamples) {
    // Pack the real-valued input into interleaved complex form, run a
    // single forward C2C FFT on the GPU, and return the real component
    // of the spectrum in outputData.
    float *packed = (float*) malloc(sizeof(float) * numSamples * 2);
    float *rawOut = (float*) malloc(sizeof(float) * numSamples * 2);
    unsigned int s;
    for (s = 0; s < numSamples; s++) {
        packed[2*s]     = inputData[s];  // real part
        packed[2*s + 1] = 0.0f;          // imaginary part
    }
    cufftComplex *devIn, *devOut;
    cudaMalloc((void**)&devIn,  sizeof(cufftComplex) * numSamples);
    cudaMalloc((void**)&devOut, sizeof(cufftComplex) * numSamples);
    cudaMemcpy(devIn, packed, sizeof(cufftComplex) * numSamples, cudaMemcpyHostToDevice);
    cufftHandle plan;
    cufftPlan1d(&plan, numSamples, CUFFT_C2C, 1);
    cufftExecC2C(plan, devIn, devOut, CUFFT_FORWARD);
    cufftDestroy(plan);
    // blocking copy also synchronizes with the FFT execution
    cudaMemcpy(rawOut, devOut, sizeof(cufftComplex) * numSamples, cudaMemcpyDeviceToHost);
    cudaFree(devIn);
    cudaFree(devOut);
    for (s = 0; s < numSamples; s++) {
        outputData[s] = rawOut[2*s];     // keep only the real component
    }
    free(rawOut);
    free(packed);
}
|
2,143
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
using namespace std;
extern float *Ex, *dev_Ex, coe_Ex, dt, dz;
extern int size_space, size_Ex;
const float epsilon = 8.85e-12;
void Ex_init_allocate(int size_Ex)
{
	// Reserve matching host and device buffers for the Ex field.
	const size_t bytes = size_Ex * sizeof(float);
	Ex = (float *)malloc(bytes);
	cudaMalloc(&dev_Ex, bytes);
}
void Ex_init_assignValue(int size_Ex)
{
	// Zero the host-side field and precompute the update coefficient.
	int n = 0;
	while (n < size_Ex) {
		Ex[n++] = 0.f;
	}
	coe_Ex = dt / (epsilon * dz);
}
// Upload the host Ex buffer to its device mirror dev_Ex.
void Ex_transfer_host_device(int size_Ex)
{
cudaMemcpy(dev_Ex, Ex, size_Ex * sizeof(float), cudaMemcpyHostToDevice);
}
// Download the device Ex buffer back into the host array.
void Ex_transfer_device_host(int size_Ex)
{
cudaMemcpy(Ex, dev_Ex, size_Ex * sizeof(float), cudaMemcpyDeviceToHost);
}
// Update the electric field: Ex[i] -= coe_Ex * (Hy[i] - Hy[i-1]) for
// i in [1, step_space).  NOTE(review): the full loop runs in *every*
// thread, so this kernel appears intended for a <<<1,1>>> launch —
// confirm with the call site.
__global__ void Ex_cmp_kernel(float* dev_Hy, float * dev_Ex, float coe_Ex, int step_space)
{
for (int i = 1; i < step_space; i++){
dev_Ex[i] = dev_Ex[i] - coe_Ex * (dev_Hy[i] - dev_Hy[i - 1]);
//test
//dev_Ex[i] = i / 10.0;
}
}
// Dump the first `size` host-side Ex samples (and coe_Ex) to stdout.
void Ex_checkout(int size)
{
	cout << "Ex: size = " << size << endl;
	cout << "coe_Ex = " << coe_Ex;
	cout << "Ex: ";
	int n = 0;
	while (n < size)
	{
		cout << Ex[n] << "\t";
		++n;
	}
	cout << endl;
}
// Allocate and zero the Ex field for a grid of size_space cells; the
// field array holds size_space + 1 samples.
void Ex_init(int size_space)
{
size_Ex = size_space + 1;
Ex_init_allocate(size_Ex);
Ex_init_assignValue(size_Ex);
}
// PEC boundary condition: force the last Ex sample (index size_space) to zero.
__global__ void Ex_boundary_PEC_kernel(float* dev_Ex, int size_space)
{
dev_Ex[size_space] = 0.f;
}
|
2,144
|
/* Cuda GPU Based Program that use GPU processor for finding cosine of numbers */
/* --------------------------- header secton ----------------------------*/
#include<stdio.h>
#include<cuda.h>
#define COS_THREAD_CNT 2
#define N 10
/* --------------------------- target code ------------------------------*/
/* Argument bundle passed by value to the cos_main kernel. */
struct cosParams {
float *arg;   /* device pointer: input angles (radians)  */
float *res;   /* device pointer: output cosine values    */
int n;        /* number of elements in arg/res           */
//int *threadIdx;
};
__global__ void cos_main(struct cosParams parms)
{
    // Strided loop: thread t handles elements t, t+COS_THREAD_CNT, ...
    // using the fast (reduced-precision) hardware cosine.
    for (int k = threadIdx.x; k < parms.n; k += COS_THREAD_CNT)
    {
        parms.res[k] = __cosf(parms.arg[k]);
    }
}
/* --------------------------- host code ------------------------------*/
/*
 * Fill an array with 0..N-1, compute cosines on the GPU and print the
 * results.  Fixes: the original printf passed 2 arguments for 3 format
 * conversions (and printed a float with %d), and leaked every buffer.
 */
int main (int argc, char *argv[])
{
    int i = 0;
    cudaError_t cudaStat;          /* collected but (as before) not checked per call */
    float* cosRes = 0;             /* device output buffer */
    float* cosArg = 0;             /* device input buffer  */
    float* arg = (float *) malloc(N*sizeof(arg[0]));
    float* res = (float *) malloc(N*sizeof(res[0]));
    struct cosParams funcParams;
    /* ... fill arguments array "arg" .... */
    for(i=0; i < N; i++ ){
        arg[i] = (float)i;
    }
    cudaStat = cudaMalloc ((void **)&cosArg, N * sizeof(cosArg[0]));
    cudaStat = cudaMalloc ((void **)&cosRes, N * sizeof(cosRes[0]));
    cudaStat = cudaMemcpy ( cosArg,
                arg,
                N * sizeof(arg[0]),
                cudaMemcpyHostToDevice);
    funcParams.res = cosRes;
    funcParams.arg = cosArg;
    funcParams.n = N;
    cos_main<<<1,COS_THREAD_CNT>>>(funcParams);
    cudaStat = cudaMemcpy( res,
               cosRes,
               N * sizeof(cosRes[0]),
               cudaMemcpyDeviceToHost);
    (void) cudaStat;
    for(i=0; i < N; i++ )
    {
        /* BUG FIX: print index, argument and result (was 2 args for 3
           conversions, with %d applied to a float). */
        printf("%d: cosf(%f) = %f\n", i, arg[i], res[i]);
    }
    /* release device and host buffers (previously leaked) */
    cudaFree(cosArg);
    cudaFree(cosRes);
    free(arg);
    free(res);
    return 0;
}
/* nvcc cosine.cu -use_fast_math */
|
2,145
|
/* /\* */
/* * testconv_extras.cu */
/* * */
/* * Created on: Nov 10, 2009 */
/* * Author: Alex Krizhevsky (akrizhevsky@gmail.com) */
/* *\/ */
/* #include <assert.h> */
/* #include "testconv_extras.cuh" */
/* #include "convCPU.h" */
/* void test_conv_bw_fit_dyn_2per(int boardNum) { */
/* cudaSetDevice(boardNum > -1 ? boardNum : cutGetMaxGflopsDeviceId()); */
/* cublasInit(); */
/* NVMatrix::initDeviceProps(); */
/* NVMatrix::initRandom(7); */
/* uint timer = 0; */
/* cutilCheckError( cutCreateTimer( &timer)); */
/* int imgSize = 32, filterSize = 9; */
/* int numFilters = 64, numCases = 128; */
/* int filterPixels = filterSize * filterSize; */
/* int imgPixels = imgSize * imgSize; */
/* int numOutputsX = imgSize - filterSize + 1; */
/* int numOutputs = numOutputsX * numOutputsX; */
/* assert(numFilters % 16 == 0); */
/* printf("Images: %d, filters: %d\n", numCases, numFilters); */
/* printf("Image size: %dx%d, filter size: %dx%d\n", imgSize, imgSize, filterSize, filterSize); */
/* printf("Color: no\n"); */
/* Matrix filters(numFilters, filterPixels); */
/* Matrix images(numCases, imgPixels); */
/* Matrix targets(numCases, numFilters * numOutputs); */
/* filters.randomizeUniform(); */
/* images.randomizeUniform(); */
/* targets.apply(Matrix::ZERO); */
/* images.addScalar(1); */
/* NVMatrix nvFilters(filters, true); */
/* NVMatrix nvImages(images, true); */
/* NVMatrix nvTargets(targets, true); // eh why not */
/* cutilCheckError( cutResetTimer( timer)); */
/* cutilCheckError( cutStartTimer( timer)); */
/* //:IS:convCPU(images.getData(), filters.getData(), targets.getData(), imgSize, filterSize, numCases, numFilters); */
/* cutilCheckError( cutStopTimer( timer)); */
/* printf("CPU (partial) result:\n"); */
/* targets.print(0, 3, 0, 6); */
/* printf("CPU time: %.6f msec\n", cutGetTimerValue(timer)); */
/* filters.print(3,3); */
/* dim3 threads(8,8,8); */
/* dim3 blocks(numCases, numFilters / 16); */
/* cutilCheckError( cutResetTimer( timer)); */
/* cutilCheckError( cutStartTimer( timer)); */
/* int shmem = 4*((filterSize + threads.x - 1)*(filterSize + threads.y - 1) + 2*threads.z * filterSize * filterSize); */
/* conv_bw_fit_dyn_2per<9, 1><<<blocks, threads, shmem>>>(nvImages.getDevData(), nvFilters.getDevData(), nvTargets.getDevData(), imgSize); */
/* cudaThreadSynchronize(); */
/* cutilCheckError( cutStopTimer( timer)); */
/* printf("GPU (partial) result:\n"); */
/* nvTargets.print(0, 3, 0, 6); */
/* printf("GPU time: %.6f msec\n", cutGetTimerValue(timer)); */
/* // Compare results */
/* Matrix cpuNVTargets(targets); */
/* nvTargets.copyToHost(cpuNVTargets); */
/* cpuNVTargets.subtract(targets); */
/* cpuNVTargets.apply(Matrix::ABS); */
/* printf("Max diff between CPU/GPU: %.6f\n", cpuNVTargets.max()); */
/* } */
/* void test_conv_bw_nofit_dyn_1per(int imgSize, int filterSize, int threadsY, int threadsX, int boardNum) { */
/* cudaSetDevice(boardNum > -1 ? boardNum : cutGetMaxGflopsDeviceId()); */
/* cublasInit(); */
/* NVMatrix::initDeviceProps(); */
/* NVMatrix::initRandom(7); */
/* uint timer = 0; */
/* cutilCheckError( cutCreateTimer( &timer)); */
/* // int imgSize = 32, filterSize = 9; */
/* int numFilters = 64, numCases = 128; */
/* int filterPixels = filterSize * filterSize; */
/* int imgPixels = imgSize * imgSize; */
/* int numOutputsX = imgSize - filterSize + 1; */
/* int numOutputs = numOutputsX * numOutputsX; */
/* assert(numFilters % 16 == 0); */
/* assert(numOutputsX % threadsX == 0); */
/* printf("Images: %d, filters: %d\n", numCases, numFilters); */
/* printf("Image size: %dx%d, filter size: %dx%d\n", imgSize, imgSize, filterSize, filterSize); */
/* printf("Color: no\n"); */
/* Matrix filters(numFilters, filterPixels); */
/* Matrix images(numCases, imgPixels); */
/* Matrix targets(numCases, numFilters * numOutputs); */
/* filters.randomizeUniform(); */
/* images.randomizeUniform(); */
/* targets.apply(Matrix::ZERO); */
/* NVMatrix nvFilters(filters, true); */
/* NVMatrix nvImages(images, true); */
/* NVMatrix nvTargets(targets, true); // eh why not */
/* cutilCheckError( cutResetTimer( timer)); */
/* cutilCheckError( cutStartTimer( timer)); */
/* //:IS:convCPU(images.getData(), filters.getData(), targets.getData(), imgSize, filterSize, numCases, numFilters); */
/* cutilCheckError( cutStopTimer( timer)); */
/* printf("CPU (partial) result:\n"); */
/* targets.print(0, 3, 0, 6); */
/* printf("CPU time: %.6f msec\n", cutGetTimerValue(timer)); */
/* filters.print(3,3); */
/* int threadsZ = int(512.0 / (threadsX * threadsY)); */
/* int blocksY = int(ceil(float(numFilters) / (threadsZ))); */
/* bool checkFilterBounds = numOutputsX % filterSize != 0; */
/* assert(threadsZ > 0); */
/* dim3 threads(threadsX,threadsY,threadsZ); */
/* dim3 blocks(numCases, blocksY); */
/* cutilCheckError( cutResetTimer( timer)); */
/* cutilCheckError( cutStartTimer( timer)); */
/* int shmem = 4*((2*threads.x - 1)*(2*threads.y - 1) + threads.z * threads.x * threads.y); */
/* printf("Running %dx%d grid with %dx%dx%d blocks and %d bytes of shmem.\n", blocks.x, blocks.y, threads.x, threads.y, threads.z, shmem); */
/* if(checkFilterBounds) { */
/* conv_bw_nofit_dyn_1per<true,9,9, 1><<<blocks, threads, shmem>>>(nvImages.getDevData(), nvFilters.getDevData(), nvTargets.getDevData(), */
/* imgSize, filterSize, numFilters); */
/* } else { */
/* conv_bw_nofit_dyn_1per<false,9,9, 1><<<blocks, threads, shmem>>>(nvImages.getDevData(), nvFilters.getDevData(), nvTargets.getDevData(), */
/* imgSize, filterSize, numFilters); */
/* } */
/* cudaThreadSynchronize(); */
/* cutilCheckError( cutStopTimer( timer)); */
/* printf("GPU (partial) result:\n"); */
/* nvTargets.print(0, 3, 0, 6); */
/* printf("GPU time: %.6f msec\n", cutGetTimerValue(timer)); */
/* // Compare results */
/* Matrix cpuNVTargets(targets); */
/* nvTargets.copyToHost(cpuNVTargets); */
/* cpuNVTargets.subtract(targets); */
/* cpuNVTargets.apply(Matrix::ABS); */
/* printf("Max diff between CPU/GPU: %.6f\n", cpuNVTargets.max()); */
/* } */
/* void test_conv_bw_nofit_dyn_2per(int imgSize, int filterSize, int threadsY, int threadsX, int boardNum) { */
/* cudaSetDevice(boardNum > -1 ? boardNum : cutGetMaxGflopsDeviceId()); */
/* cublasInit(); */
/* NVMatrix::initDeviceProps(); */
/* NVMatrix::initRandom(7); */
/* uint timer = 0; */
/* cutilCheckError( cutCreateTimer( &timer)); */
/* // int imgSize = 32, filterSize = 9; */
/* int numFilters = 64, numCases = 128; */
/* int filterPixels = filterSize * filterSize; */
/* int imgPixels = imgSize * imgSize; */
/* int numOutputsX = imgSize - filterSize + 1; */
/* int numOutputs = numOutputsX * numOutputsX; */
/* assert(numFilters % 16 == 0); */
/* assert(numOutputsX % threadsX == 0); */
/* printf("Images: %d, filters: %d\n", numCases, numFilters); */
/* printf("Image size: %dx%d, filter size: %dx%d\n", imgSize, imgSize, filterSize, filterSize); */
/* printf("Color: no\n"); */
/* Matrix filters(numFilters, filterPixels); */
/* Matrix images(numCases, imgPixels); */
/* Matrix targets(numCases, numFilters * numOutputs); */
/* filters.randomizeUniform(); */
/* images.randomizeUniform(); */
/* targets.apply(Matrix::ZERO); */
/* NVMatrix nvFilters(filters, true); */
/* NVMatrix nvImages(images, true); */
/* NVMatrix nvTargets(targets, true); // eh why not */
/* cutilCheckError( cutResetTimer( timer)); */
/* cutilCheckError( cutStartTimer( timer)); */
/* //:IS:convCPU(images.getData(), filters.getData(), targets.getData(), imgSize, filterSize, numCases, numFilters); */
/* cutilCheckError( cutStopTimer( timer)); */
/* printf("CPU (partial) result:\n"); */
/* targets.print(0, 3, 0, 6); */
/* printf("CPU time: %.6f msec\n", cutGetTimerValue(timer)); */
/* filters.print(3,3); */
/* int threadsZ = int(512.0 / (threadsX * threadsY)); */
/* int blocksY = int(ceil(float(numFilters) / (2*threadsZ))); */
/* assert((numFilters % (threadsZ*2)) % 2 == 0); */
/* bool checkFilterBounds = numOutputsX % filterSize != 0; */
/* assert(threadsZ > 0); */
/* dim3 threads(threadsX,threadsY,threadsZ); */
/* dim3 blocks(numCases, blocksY); */
/* cutilCheckError( cutResetTimer( timer)); */
/* cutilCheckError( cutStartTimer( timer)); */
/* int shmem = 4*((2*threads.x - 1)*(2*threads.y - 1) + 2*threads.z * threads.x * threads.y); */
/* printf("Running %dx%d grid with %dx%dx%d blocks and %d bytes of shmem.\n", blocks.x, blocks.y, threads.x, threads.y, threads.z, shmem); */
/* assert(threadsX == 9 && threadsY == 9); */
/* if(checkFilterBounds) { */
/* conv_bw_nofit_dyn_2per<true,9,9, 1><<<blocks, threads, shmem>>>(nvImages.getDevData(), nvFilters.getDevData(), nvTargets.getDevData(), */
/* imgSize, filterSize, numFilters); */
/* } else { */
/* conv_bw_nofit_dyn_2per<false,9,9, 1><<<blocks, threads, shmem>>>(nvImages.getDevData(), nvFilters.getDevData(), nvTargets.getDevData(), */
/* imgSize, filterSize, numFilters); */
/* } */
/* cudaThreadSynchronize(); */
/* cutilCheckError( cutStopTimer( timer)); */
/* printf("GPU (partial) result:\n"); */
/* nvTargets.print(0, 3, 0, 6); */
/* printf("GPU time: %.6f msec\n", cutGetTimerValue(timer)); */
/* // Compare results */
/* Matrix cpuNVTargets(targets); */
/* nvTargets.copyToHost(cpuNVTargets); */
/* cpuNVTargets.subtract(targets); */
/* cpuNVTargets.apply(Matrix::ABS); */
/* printf("Max diff between CPU/GPU: %.6f\n", cpuNVTargets.max()); */
/* } */
/* void test_conv_bw_nofit_4x16_dynfilter_2per(int imgSize, int filterSize, int filterCacheY, int filterCacheX, int boardNum) { */
/* cudaSetDevice(boardNum > -1 ? boardNum : cutGetMaxGflopsDeviceId()); */
/* cublasInit(); */
/* NVMatrix::initDeviceProps(); */
/* NVMatrix::initRandom(7); */
/* uint timer = 0; */
/* cutilCheckError( cutCreateTimer( &timer)); */
/* // int imgSize = 32, filterSize = 9; */
/* int numFilters = 64, numCases = 128; */
/* int filterPixels = filterSize * filterSize; */
/* int imgPixels = imgSize * imgSize; */
/* int numOutputsX = imgSize - filterSize + 1; */
/* int numOutputs = numOutputsX * numOutputsX; */
/* assert(numFilters % 16 == 0); */
/* assert(filterSize % filterCacheX == 0); */
/* assert(filterSize % filterCacheY == 0); */
/* printf("Images: %d, filters: %d\n", numCases, numFilters); */
/* printf("Image size: %dx%d, filter size: %dx%d\n", imgSize, imgSize, filterSize, filterSize); */
/* printf("Color: no\n"); */
/* Matrix filters(numFilters, filterPixels); */
/* Matrix images(numCases, imgPixels); */
/* Matrix targets(numCases, numFilters * numOutputs); */
/* filters.randomizeUniform(); */
/* images.randomizeUniform(); */
/* targets.apply(Matrix::ZERO); */
/* NVMatrix nvFilters(filters, true); */
/* NVMatrix nvImages(images, true); */
/* NVMatrix nvTargets(targets, true); // eh why not */
/* cutilCheckError( cutResetTimer( timer)); */
/* cutilCheckError( cutStartTimer( timer)); */
/* //:IS:convCPU(images.getData(), filters.getData(), targets.getData(), imgSize, filterSize, numCases, numFilters); */
/* cutilCheckError( cutStopTimer( timer)); */
/* printf("CPU (partial) result:\n"); */
/* targets.print(0, 3, numOutputsX-10, 6); */
/* printf("CPU time: %.6f msec\n", cutGetTimerValue(timer)); */
/* bool checkOutputBounds = numOutputsX % 16 != 0; */
/* int blocksY = numFilters / 16; */
/* dim3 threads(16,4,8); */
/* dim3 blocks(numCases, blocksY); */
/* assert(filterCacheY == 8); */
/* assert(filterCacheX == 16); */
/* int shmem = (16 + filterCacheX - 1)*(4 + filterCacheY - 1) + 16*filterCacheX*filterCacheY; */
/* assert(shmem < 4096); */
/* printf("Filter cache size: %dx%d\n", filterCacheX, filterCacheY); */
/* printf("Using %d bytes of shared memory\n", shmem*4); */
/* cutilCheckError( cutResetTimer( timer)); */
/* cutilCheckError( cutStartTimer( timer)); */
/* if(checkOutputBounds) { */
/* conv_bw_nofit_4x16_dynfilter_2per<true,8,16, 1><<<blocks, threads>>>(nvImages.getDevData(), nvFilters.getDevData(), nvTargets.getDevData(), */
/* imgSize, filterSize); */
/* } else { */
/* conv_bw_nofit_4x16_dynfilter_2per<false,8,16, 1><<<blocks, threads>>>(nvImages.getDevData(), nvFilters.getDevData(), nvTargets.getDevData(), */
/* imgSize, filterSize); */
/* } */
/* cudaThreadSynchronize(); */
/* cutilCheckError( cutStopTimer( timer)); */
/* printf("GPU (partial) result:\n"); */
/* nvTargets.print(0, 3, numOutputsX-10, 6); */
/* printf("GPU time: %.6f msec\n", cutGetTimerValue(timer)); */
/* // Compare results */
/* Matrix cpuNVTargets(targets); */
/* nvTargets.copyToHost(cpuNVTargets); */
/* cpuNVTargets.subtract(targets); */
/* cpuNVTargets.apply(Matrix::ABS); */
/* printf("Max diff between CPU/GPU: %.6f\n", cpuNVTargets.max()); */
/* } */
|
2,146
|
#include <iostream>
#include <sstream>
#include <stdio.h>
#include <cuda.h>
/**
* \brief writes CUDA array to a text file
* \details copies the cuda array to a local buffer, writes to buffer to a new file, then frees the local memory
* @param[in] array_in - device pointer to array to write
* @param[in] N - number of elements to write
* @param[in] filename - name for the file
*/
/**
 * \brief writes CUDA array to a text file
 * \details copies the cuda array to a local buffer, writes the buffer to a
 * new file (one value per line), then frees the local memory
 * @param[in] array_in - device pointer to array to write
 * @param[in] N - number of elements to write
 * @param[in] filename - name for the file
 */
void write_to_file(unsigned* array_in , unsigned N , std::string filename){
    FILE* f = fopen(filename.c_str(),"w");
    if (f == NULL) {                  // BUG FIX: avoid writing through a NULL stream
        return;
    }
    unsigned * hostdata = new unsigned [N];
    cudaMemcpy(hostdata,array_in,N*sizeof(unsigned),cudaMemcpyDeviceToHost);
    for(unsigned k = 0; k<N ;k++){
        fprintf(f,"%u\n",hostdata[k]);
    }
    delete[] hostdata;                // BUG FIX: was `delete` — new[] requires delete[]
    fclose(f);
}
|
2,147
|
#include <iostream>
#include <cstdlib>
#include <iomanip>
#include <cstring>
#include <cuda_runtime.h>
#include <cstdio>
#define CUDA_WARN(XXX) \
do { if (XXX != cudaSuccess) std::cerr << "CUDA Error: " << \
cudaGetErrorString(XXX) << ", at line " << __LINE__ \
<< std::endl; cudaDeviceSynchronize(); } while (0)
int main() {
    // Round-trip five doubles host -> device -> host with error reporting,
    // then release every allocation (the original leaked h1, h2 and d).
    double *h1 = new double[5];
    double *h2 = new double[5];
    double *d;
    h1[0] = 7;
    h1[1] = 4;
    h1[2] = 1;
    h1[3] = 8;
    h1[4] = 9;
    std::cout << "1) Mallocing Space and copying from host to device" << std::endl;
    CUDA_WARN(cudaMalloc(&d, 5*sizeof(double)));
    CUDA_WARN(cudaMemcpy(d, h1, 5*sizeof(double), cudaMemcpyHostToDevice));
    std::cout << "\n2) Copying back from device to host" << std::endl;
    CUDA_WARN(cudaMemcpy(h2, d, 5*sizeof(double), cudaMemcpyDeviceToHost));
    std::cout << "\n3) Copied data (back on host): " << std::endl;
    for(int i = 0; i < 5; i++) {
        std::cout << h2[i] << std::endl;
    }
    CUDA_WARN(cudaFree(d));   // free device buffer (previously leaked)
    delete[] h1;
    delete[] h2;
    return 0;
}
|
2,148
|
#include <cuda.h>
#include <cufft.h>
#include <cuda_profiler_api.h>
#include <stdio.h>
// Read-only-cache load helper: on SM 3.5+ route the load through __ldg
// (read-only data path); on older architectures fall back to a plain
// dereference.  Caller must guarantee the data is not written during the
// kernel's lifetime.
template<typename T>
__device__ __forceinline__ T ldg(const T* ptr) {
#if __CUDA_ARCH__ >= 350
return __ldg(ptr);
#else
return *ptr;
#endif
}
// Pointwise complex (Hadamard) product over an nx*ny*nz volume:
// output = input * kernel.  One thread per (ky, kz) pair walks its
// nx-long row.  NOTE(review): the imaginary-part line re-reads
// input_fk[k+i].x after output_fk[k+i].x was written, so output_fk must
// not alias input_fk — confirm with callers.
extern "C"
__global__
void Hadamard(
int nx
, int ny
, int nz
, cufftComplex * input_fk // input
, cufftComplex * kernel_fk // kernel
, cufftComplex * output_fk // output
)
{
int kz = blockIdx.x*blockDim.x + threadIdx.x;
int ky = blockIdx.y*blockDim.y + threadIdx.y;
if (kz < nz && ky < ny)
{
int k = nx*ny*kz + nx*ky;	// start of this thread's row
for (int i = 0; i < nx; i++)
{
// (a+bi)(c+di) = (ac - bd) + (ad + bc)i
output_fk[k+i].x = ldg(&input_fk[k+i].x)*ldg(&kernel_fk[k+i].x) - ldg(&input_fk[k+i].y)*ldg(&kernel_fk[k+i].y);
output_fk[k+i].y = ldg(&input_fk[k+i].x)*ldg(&kernel_fk[k+i].y) + ldg(&input_fk[k+i].y)*ldg(&kernel_fk[k+i].x);
}
}
}
// Same pointwise complex product, but the kernel array is a single 2-D
// slice (nx*ny) that is reused for every kz plane of the input volume.
// NOTE(review): as in Hadamard, output_fk must not alias input_fk.
extern "C"
__global__
void Hadamard_slice_kernel(
int nx
, int ny
, int nz
, cufftComplex * input_fk // input
, cufftComplex * kernel_fk // kernel
, cufftComplex * output_fk // output
)
{
int kz = blockIdx.x*blockDim.x + threadIdx.x;
int ky = blockIdx.y*blockDim.y + threadIdx.y;
if (kz < nz && ky < ny)
{
int k = nx*ny*kz + nx*ky;	// row start within the 3-D input
int k_slice = nx*ky;	// corresponding row in the 2-D kernel slice
for (int i = 0; i < nx; i++)
{
output_fk[k + i].x = ldg(&input_fk[k + i].x)*ldg(&kernel_fk[k_slice + i].x) - ldg(&input_fk[k + i].y)*ldg(&kernel_fk[k_slice + i].y);
output_fk[k + i].y = ldg(&input_fk[k + i].x)*ldg(&kernel_fk[k_slice + i].y) + ldg(&input_fk[k + i].y)*ldg(&kernel_fk[k_slice + i].x);
}
}
}
|
2,149
|
#include <stdio.h>
#define N 1
#define TPB 256
// Each thread prints its index within the block.
__global__ void mainKernel()
{
printf("Hello world! My threadId is %d\n", threadIdx.x);
}
int main()
{
// Launch N block(s) of TPB threads; each thread prints a greeting.
mainKernel<<<N, TPB>>>();
// Block until the device finishes so the kernel printf output is flushed
// before the process exits.
cudaDeviceSynchronize();
return 0;
}
|
2,150
|
#include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#include<time.h>
//************variables globales***************
int msk=3, dimx=1040, dimy=1388, tam_imag=1388*1040;
//*******************kernel********************
// For each pixel of the 1040x1388 image, compute the variance of its 3x3
// neighbourhood read from the zero-padded image Gext_d ((dimx+2)x(dimy+2))
// and store it in var_d (one float per original pixel).
// NOTE(review): the thread grid is mapped linearly over pixels with row
// width blockDim.x*gridDim.x, and the `2*idy` adjustment presumably
// accounts for the two padding columns per row — verify against the
// 347x20 grid / 13x16 block launch geometry used by the host.
__global__ void kernel (int *Gext_d,float *var_d){
int i, dimy_ext, id_p, M_d[9], dimy=1388,tam_imag=1388*1040,msk=3;
int idx = threadIdx.x + blockIdx.x*blockDim.x;
int idy = threadIdx.y + blockIdx.y*blockDim.y;
int offset=idx + idy*blockDim.x*gridDim.x;	// linear index in the un-padded image
int id=offset;
float X=0.f,Xprom=0.f,Y=0.f;
var_d[id]=0;	// NOTE(review): written before the bounds check below — relies on the launch exactly covering tam_imag
dimy_ext=dimy+2;	// padded row length
if(offset<tam_imag){
offset+=2*idy;	// shift into padded-image coordinates
id_p=offset+(dimy+msk);	// centre of the 3x3 window in the padded image
// gather the 3x3 neighbourhood: top, middle and bottom rows
M_d[0]=Gext_d[offset];
M_d[1]=Gext_d[offset+1];
M_d[2]=Gext_d[offset+2];
M_d[3]=Gext_d[id_p-1];
M_d[4]=Gext_d[id_p];
M_d[5]=Gext_d[id_p+1];
M_d[6]=Gext_d[(id_p-1)+dimy_ext];
M_d[7]=Gext_d[id_p+dimy_ext];
M_d[8]=Gext_d[(id_p+1)+dimy_ext];
// mean of the 9 samples
for(i=0;i<msk*msk;i++)
X+=M_d[i];
Xprom=((float)X)/(msk*msk);
// sum of squared deviations, then population variance
for(i=0;i<msk*msk;i++)
Y+=(Xprom-M_d[i])*(Xprom-M_d[i]);
var_d[id]=Y/(msk*msk);
}
}
//*****************Funcion Main**********************
//***************** main: per-image variance computation **********************
// For each image index d in [argv[1], argv[2]], read the zero-padded green
// channel from "RGB/<d>/G", compute per-pixel 3x3 variances on the GPU and
// write them to "VARIANZAS/<d>".
// Fixes: rutaG was a 9-byte buffer overflowed by 3-digit indices, and rutaV
// was built with sprintf(rutaV,"%s%d",rutaV,d) — overlapping source and
// destination, which is undefined behaviour.  Also adds argc/fopen checks.
int main(int argc,char* argv[]){
	int i,j,d,m,cont,tam_ext,init,fin;
	int *Gext_d, *Gext_h;
	float t, *var_d,*var_h;
	FILE *archV, *matrizG;
	char rutaG[64];	/* BUG FIX: was char rutaG[]="MiTesis/" (9 bytes)          */
	char rutaV[64];	/* BUG FIX: was sprintf'd into itself (UB) and undersized  */
	if (argc < 3) {
		fprintf(stderr, "usage: %s <first-image> <last-image>\n", argv[0]);
		return 1;
	}
	init=atoi(argv[1]);
	fin=atoi(argv[2]);
	clock_t tinicio, t_GPU;
	tinicio=clock();
	tam_ext=(dimx+2)*(dimy+2);
	/* loop over all requested images */
	for(d=init;d<=fin;d++){
		printf("d=%d \n", d);
		Gext_h=(int *)malloc(sizeof(int)*tam_ext);
		cudaMalloc((void**)&Gext_d, tam_ext*sizeof(int));
		var_h=(float *)malloc(sizeof(float)*tam_imag);
		cudaMalloc((void**)&var_d,tam_imag*sizeof(float));
		/* read matrix G, surrounding it with a ring of zeros */
		snprintf(rutaG, sizeof rutaG, "RGB/%d/G", d);
		matrizG=fopen(rutaG,"r+");
		if (matrizG == NULL) {
			fprintf(stderr, "cannot open %s\n", rutaG);
			free(Gext_h); free(var_h);
			cudaFree(var_d); cudaFree(Gext_d);
			return 1;
		}
		cont=0;
		for(i=0;i<dimx+2;i++){
			for(j=0;j<dimy+2;j++){
				if (i==0 || j==0 || i==dimx+1 || j==dimy+1){
					Gext_h[cont]=0;	/* zero-padding border */
				} else{
					fscanf(matrizG, "%d", &Gext_h[cont]);
				}
				cont++;
			}
		}
		fclose(matrizG);
		/* kernel launch */
		dim3 Grid(347,20);
		dim3 Block(13,16);
		cudaMemcpy(Gext_d,Gext_h,sizeof(int)*tam_ext,cudaMemcpyHostToDevice);
		kernel<<<Grid,Block>>>(Gext_d,var_d);
		cudaMemcpy(var_h,var_d,sizeof(float)*tam_imag,cudaMemcpyDeviceToHost);
		/* store the variance matrix, dimy values per line */
		snprintf(rutaV, sizeof rutaV, "VARIANZAS/%d", d);
		archV=fopen(rutaV,"w+");
		if (archV == NULL) {
			fprintf(stderr, "cannot open %s\n", rutaV);
			free(Gext_h); free(var_h);
			cudaFree(var_d); cudaFree(Gext_d);
			return 1;
		}
		for(m=0;m<tam_imag;m++){
			if(m%dimy==0 && m!=0){
				fprintf(archV,"\n");
			}
			fprintf(archV,"%f ",var_h[m]);
		}
		fclose(archV);
		free(Gext_h);
		free(var_h);
		cudaFree(var_d);
		cudaFree(Gext_d);
	}
	t_GPU=clock();
	t = ((float)t_GPU-(float)tinicio)/CLOCKS_PER_SEC;
	printf("\ntiempo de procesamiento de varianzas: %6.3fs\n",t);
	return 0;
}//end of main()
|
2,151
|
// 提前退出与DR发生在同一个一级对象中,并且它们的父辈对象有循环
__global__ void test(float *A){
int i = threadIdx.x;
int x = 5;
while(x > 0){
A[i] = A[i+1];
x--;
if(i + x <= 3)
break; // or return
}
}
|
2,152
|
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <string.h>
#define MAXBLOCKSIZE 512
int Size;
float *a, *b, *finalVec;
float *m;
FILE *fp;
void InitProblemOnce(char *filename);
void InitPerRun();
void ForwardSub();
void BackSub();
/*
Calculation of Multiplier matrix to introduce zero's in each of the columns.
*/
// Elimination step t: one thread per row below the pivot computes the
// multiplier m[row][t] = a[row][t] / a[t][t], with row = idx + t + 1
// (row-major, Size x Size).
__global__ void multiplier(float *m_cuda, float *a_cuda, int Size, int t)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if( idx>= Size-1-t) return;	// only Size-1-t rows remain below the pivot
//printf(" multiplier: \nIndex %d a1: %f a2: %f \n",idx,*(a_cuda+Size*(idx+t+1)+t),*(a_cuda+Size*t+t));
*(m_cuda+Size*(idx+t+1)+t) = *(a_cuda+Size*(idx+t+1)+t) / *(a_cuda+Size*t+t);
}
// Conversion of matrix to Upper Triangular Matrix.
// 2-D launch for elimination step t: xidx indexes rows below the pivot
// (row = xidx+1+t), yidx indexes columns from the pivot on (col = yidx+t):
//   a[row][col] -= m[row][t] * a[t][col]
// Threads with yidx == 0 also update the RHS vector b; note yidx+t == t
// there, so the multiplier index reduces to m[row][t].
__global__ void upper(float *m_cuda, float *a_cuda, float *b_cuda,int Size, int j1, int t)
{
int xidx = blockIdx.x * blockDim.x + threadIdx.x;
int yidx = blockIdx.y * blockDim.y + threadIdx.y;
if(xidx >= Size-1-t) return;
if(yidx >= Size-t) return;
a_cuda[Size*(xidx+1+t)+(yidx+t)] -= m_cuda[Size*(xidx+1+t)+t] * a_cuda[Size*t+(yidx+t)];
if(yidx == 0){
b_cuda[xidx+1+t] -= m_cuda[Size*(xidx+1+t)+(yidx+t)] * b_cuda[t];
}
}
void InitMat(float *ary, int nrow, int ncol);
void InitAry(float *ary, int ary_size);
void PrintMat(float *ary, int nrow, int ncolumn);
void PrintAry(float *ary, int ary_size);
void checkCUDAError(const char *msg);
unsigned int totalKernelTime = 0;
/*
 * Gaussian elimination driver: read the system from the input file, run
 * the CUDA forward substitution, back-substitute on the host and report
 * results and timings.  Fixes: finalVec and the input FILE* were leaked,
 * and main fell off the end without a return value.
 */
int main(int argc, char *argv[])
{
	if(argc<2){
		printf("Enter filename \n");
		return 1;
	}
	InitProblemOnce(argv[1]);	/* read Size, matrix a and vector b */
	InitPerRun();			/* zero the multiplier matrix m     */
	/* begin timing */
	struct timeval time_start;
	gettimeofday(&time_start, NULL);
	/* run kernels */
	ForwardSub();
	/* end timing */
	struct timeval time_end;
	gettimeofday(&time_end, NULL);
	unsigned int time_total = (time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec);
	printf("Matrix m is: \n");
	PrintMat(m, Size, Size);
	printf("Matrix a is: \n");
	PrintMat(a, Size, Size);
	printf("Array b is: \n");
	PrintAry(b, Size);
	BackSub();
	printf("The final solution is: \n");
	PrintAry(finalVec,Size);
	printf("\nTime total (including memory transfers)\t%f sec\n", time_total * 1e-6);
	printf("Time for CUDA kernels:\t%f sec\n",totalKernelTime * 1e-6);
	/* release resources (finalVec and fp were previously leaked) */
	fclose(fp);
	free(m);
	free(a);
	free(b);
	free(finalVec);
	return 0;
}
/*
 * Initialize all matrices and arrays: read the problem size, matrix a and
 * vector b from `filename`, and allocate the multiplier matrix m.
 * Fix: exits with a message when the file cannot be opened — the original
 * passed a NULL FILE* straight into fscanf.
 */
void InitProblemOnce(char *filename)
{
	fp = fopen(filename, "r");
	if (fp == NULL) {
		fprintf(stderr, "Cannot open input file: %s\n", filename);
		exit(EXIT_FAILURE);
	}
	fscanf(fp, "%d", &Size);
	a = (float *) malloc(Size * Size * sizeof(float));
	InitMat(a, Size, Size);
	b = (float *) malloc(Size * sizeof(float));
	InitAry(b, Size);
	m = (float *) malloc(Size * Size * sizeof(float));
}
/*------------------------------------------------------
** InitPerRun() -- Initialize the contents of the
** multipier matrix **m
**------------------------------------------------------
*/
/* Reset the multiplier matrix m (Size x Size) to all zeros. */
void InitPerRun()
{
	int k;
	for (k = 0; k < Size * Size; k++)
		m[k] = 0.0;
}
/*------------------------------------------------------
** ForwardSub() -- Forward substitution of Gaussian
** elimination.
**------------------------------------------------------
** Copies m, a and b to the GPU, runs Size-1 elimination
** steps (multiplier + upper kernels), then copies the
** reduced system back and frees the device buffers.
*/
void ForwardSub()
{
int t;
float *m_cuda,*a_cuda,*b_cuda;
// allocate memory on GPU
cudaMalloc((void **) &m_cuda, Size * Size * sizeof(float));
cudaMalloc((void **) &a_cuda, Size * Size * sizeof(float));
cudaMalloc((void **) &b_cuda, Size * sizeof(float));
// copy memory to GPU
cudaMemcpy(m_cuda, m, Size * Size * sizeof(float),cudaMemcpyHostToDevice );
cudaMemcpy(a_cuda, a, Size * Size * sizeof(float),cudaMemcpyHostToDevice );
cudaMemcpy(b_cuda, b, Size * sizeof(float),cudaMemcpyHostToDevice );
int block_size,grid_size;
block_size = MAXBLOCKSIZE;
// ceil(Size / block_size) — the parenthesisation is unusual but correct
grid_size = (Size/block_size) + (!(Size%block_size)? 0:1);
dim3 dimBlock(block_size);
dim3 dimGrid(grid_size);
int blockSize2d, gridSize2d;
blockSize2d = 4;
// likewise evaluates to ceil(Size / blockSize2d)
gridSize2d = (Size/blockSize2d) + (!(Size%blockSize2d?0:1));
dim3 dimBlockXY(blockSize2d,blockSize2d);
dim3 dimGridXY(gridSize2d,gridSize2d);
// begin timing kernels
struct timeval time_start;
gettimeofday(&time_start, NULL);
// one multiplier + elimination pass per pivot column
for (t=0; t<(Size-1); t++) {
multiplier<<<dimGrid,dimBlock>>>(m_cuda,a_cuda,Size,t);
cudaThreadSynchronize();	// NOTE(review): deprecated; cudaDeviceSynchronize is the modern equivalent
upper<<<dimGridXY,dimBlockXY>>>(m_cuda,a_cuda,b_cuda,Size,Size-t,t);
cudaThreadSynchronize();
checkCUDAError("Upper");
}
// end timing kernels
struct timeval time_end;
gettimeofday(&time_end, NULL);
totalKernelTime = (time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec);
// copy memory back to CPU
cudaMemcpy(m, m_cuda, Size * Size * sizeof(float),cudaMemcpyDeviceToHost );
cudaMemcpy(a, a_cuda, Size * Size * sizeof(float),cudaMemcpyDeviceToHost );
cudaMemcpy(b, b_cuda, Size * sizeof(float),cudaMemcpyDeviceToHost );
cudaFree(m_cuda);
cudaFree(a_cuda);
cudaFree(b_cuda);
}
/*------------------------------------------------------
** BackSub() -- Backward substitution
**------------------------------------------------------
*/
/* Backward substitution: solve the upper-triangular system "bottom up"
 * into the freshly allocated global vector finalVec. */
void BackSub()
{
	finalVec = (float *) malloc(Size * sizeof(float));
	int row, k;
	for (row = 0; row < Size; row++) {
		int r = Size - row - 1;               /* actual row being solved */
		finalVec[r] = b[r];
		for (k = 0; k < row; k++) {
			finalVec[r] -= a[Size * r + (Size - k - 1)] * finalVec[Size - k - 1];
		}
		finalVec[r] = finalVec[r] / a[Size * r + r];
	}
}
/* Read an nrow x ncol matrix from the global input file `fp` into `ary`
 * (row stride = global Size), echoing the values as they are read. */
void InitMat(float *ary, int nrow, int ncol)
{
	int r, c;
	printf("Initial Matrix A \n");
	for (r = 0; r < nrow; r++) {
		for (c = 0; c < ncol; c++) {
			fscanf(fp, "%f", &ary[Size * r + c]);
			printf("%f ", ary[Size * r + c]);
		}
		printf("\n");
	}
}
/*------------------------------------------------------
** PrintMat() -- Print the contents of the matrix
**------------------------------------------------------
*/
/*------------------------------------------------------
** PrintMat() -- Print the contents of the matrix
** (nrow x ncolumn view of `ary`, row stride = global Size)
**------------------------------------------------------
*/
void PrintMat(float *ary, int nrow, int ncolumn)
{
	int i, j;
	for (i=0; i<nrow; i++) {
		for (j=0; j<ncolumn; j++) {	/* BUG FIX: body referred to undeclared `ncol` */
			printf("%f ", *(ary+Size*i+j));
		}
		printf("\n");
	}
	printf("\n");
}
/*------------------------------------------------------
** InitAry() -- Initialize the array (vector) by reading
** data from the data file
**------------------------------------------------------
*/
/* Read `ary_size` floats from the global input file `fp` into the vector,
 * echoing them as they are read. */
void InitAry(float *ary, int ary_size)
{
	int k;
	printf("\n Vector B is \n");
	for (k = 0; k < ary_size; k++) {
		fscanf(fp, "%f", ary + k);
		printf("%f ", ary[k]);
	}
}
/*------------------------------------------------------
** PrintAry() -- Print the contents of the array (vector)
**------------------------------------------------------
*/
/* Print the vector with two decimals per element, then a blank line. */
void PrintAry(float *ary, int ary_size)
{
	int k;
	for (k = 0; k < ary_size; k++) {
		printf("%.2f ", ary[k]);
	}
	printf("\n\n");
}
/* Report and abort on any pending CUDA error, tagging it with `msg`. */
void checkCUDAError(const char *msg)
{
	cudaError_t err = cudaGetLastError();
	if (err != cudaSuccess) {
		fprintf(stderr, "Cuda error: %s: %s.\n", msg,
				cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}
}
|
2,153
|
#include <stdio.h>
/* Plain host function: runs on the CPU. */
void helloCPU()
{
printf("Hello from the CPU.\n");
}
/*
 * The addition of `__global__` signifies that this function
 * should be launched on the GPU.
 */
__global__ void helloGPU()
{
printf("Hello from the GPU.\n");
}
int main()
{
helloCPU();
/*
 * Adding an execution configuration with the <<<...>>> syntax
 * launches this function as a kernel on the GPU.
 */
helloGPU<<<1, 1>>>();
/*
 * `cudaDeviceSynchronize` blocks the CPU until all GPU kernels have
 * completed — without it the process could exit before the kernel's
 * printf output is flushed.
 */
cudaDeviceSynchronize();
}
|
2,154
|
/*
* misc.cpp
* GSPAN
*
* Created by Jinseung KIM on 09. 07. 19.
* Copyright 2009 KyungHee. All rights reserved.
*
*/
#include "gspan.cuh"
using namespace std;
// Build the right-most path (RMPath) of this DFS code: the indices of the
// forward edges forming the path from the right-most vertex back to the root.
// The result is cached in `rmpath` and returned by reference.
const RMPath& DFSCode::buildRMPath() //buildRMPath is a method of DFSCode
{
rmpath.clear();
int old_from = -1;
for(int i = size() -1;i>=0;--i) //walk every edge of the DFS code from the last edge back to the first (reverse traversal)
{
if ((*this)[i].from < (*this)[i].to && (rmpath.empty() || old_from == (*this)[i].to)) //forward edge (from < to), and either rmpath is still empty (last edge) or it leads into the previously selected edge's source
{
rmpath.push_back(i);
old_from = (*this)[i].from;
}
}
return rmpath;
}
// Rebuild the history for the embedding chain `e` inside graph `g`: collect
// every edge on the chain (oldest first after the final reverse) and mark
// which edges/vertices of `g` participate in the embedding.
void History::build(Graph& g,PDFS* e)
{
clear(); //History inherits from vector<Edge*>; this empties its edge list
edge.clear();
edge.resize(g.edge_size());
vertex.clear();
vertex.resize(g.size());
if(e){ //e points to the current projected-DFS (embedding) node
push_back(e->edge); //record the embedding's own edge
edge[e->edge->id] = vertex[e->edge->from] = vertex[e->edge->to] = 1; //mark that edge and both endpoints as used
for(PDFS* p = e->prev; p; p=p->prev){
push_back(p->edge);
edge[p->edge->id] = vertex[p->edge->from] = vertex[p->edge->to] = 1; //mark every earlier edge linked to this embedding
}
std::reverse(begin(),end()); //chain was walked newest-first; restore chronological order
}
}
// Enumerate forward extensions from the remaining vertices on the right-most
// path (all except the final vertex of the DFS code); candidates go to result.
bool get_forward_rmpath (Graph &graph, Edge *e, int minlabel, History& history, EdgeList &result) //grow the right-most path: which edges can be attached at e's source vertex? Matches are stored in result
{
result.clear ();
int tolabel = graph[e->to].label;
for (Vertex::edge_iterator it = graph[e->from].edge.begin() ; //iterate over the edges incident to e's source vertex
it != graph[e->from].edge.end() ; ++it)
{
int tolabel2 = graph[it->to].label;
if (e->to == it->to || minlabel > tolabel2 || history.hasVertex (it->to))//invalid edge: its target duplicates the rmpath edge's target (e->to),
continue; //or its target label is below minlabel, or the vertex is already part of the DFS code
if (e->elabel < it->elabel || (e->elabel == it->elabel && tolabel <= tolabel2)) //keep the candidate only if it is not smaller than e in (edge label, target vertex label) canonical order
result.push_back (&(*it));
}
return (! result.empty());
}
// Enumerate pure forward extensions from the right-most vertex (e->to);
// candidate edges are stored in result.
bool get_forward_pure (Graph &graph, Edge *e, int minlabel, History& history, EdgeList &result)
{
result.clear (); //collect the forward edges into result
for (Vertex::edge_iterator it = graph[e->to].edge.begin() ; //extend from vertex e->to, which must lie on the right-most path
it != graph[e->to].edge.end() ; ++it) //iterate over all edges incident to that vertex
{
if (minlabel > graph[it->to].label || history.hasVertex (it->to)) //skip if the target vertex already belongs to the DFS code
continue; //or if its label is below the DFS code's minimum label
result.push_back (&(*it));
}
return (! result.empty());
}
// Enumerate forward edges usable as the very first edge of a DFS code,
// starting from vertex v; candidates go to result.
bool get_forward_root (Graph &g, Vertex &v, EdgeList &result)
{
result.clear (); //empty result before collecting
for (Vertex::edge_iterator it = v.edge.begin(); it != v.edge.end(); ++it) { //iterate over every edge incident to v
if (v.label <= g[it->to].label) //keep the edge only if v's label is not larger than the target vertex's label (canonical order)
result.push_back (&(*it)); //it is an edge_iterator (effectively a pointer) over v's incident edges
}
return (! result.empty()); //non-empty result means a forward extension exists
}
// Find a backward edge from e2->to that closes a cycle onto e1->from,
// subject to canonical ordering; returns the edge, or 0 (NULL) if none.
Edge *get_backward (Graph &graph, Edge* e1, Edge* e2, History& history)
{
if (e1 == e2)
return 0;
for (Vertex::edge_iterator it = graph[e2->to].edge.begin() ;
it != graph[e2->to].edge.end() ; ++it)
{
if (history.hasEdge (it->id)) //skip edges the embedding already uses
continue;
// NOTE: && binds tighter than ||, so the condition below reads
// (e1->elabel < it->elabel) || ((e1->elabel == it->elabel) && (label order holds))
if ( (it->to == e1->from) &&
( (e1->elabel < it->elabel) ||
(e1->elabel == it->elabel) &&
(graph[e1->to].label <= graph[e2->to].label)
) )
{
return &(*it);
}
}
return 0;
}
|
2,155
|
// Demo kernel: for threads with i < 8, load x = A[i+1] (only when i < N),
// then write it to A[i] only for odd i below 4. All other threads are no-ops.
__global__ void test(float *A, const int N){
    int i = threadIdx.x;
    float x = 0;
    if (i < 8) {
        if (i < N)
            x = A[i + 1];
        // the original nested conditionals collapse into a single predicate
        if ((i % 2 == 1) && (i < 4))
            A[i] = x;
    }
}
|
2,156
|
//
// Threshold-based Contrasting GPU implementation
//
#include <unistd.h>
#include <stdlib.h>
#define THREADS_PER_BLOCK 128
// CUDA kernel declaration
__global__ void cuda_flow_bitmap_kernel(unsigned char *u8data, unsigned char *u8res, unsigned int N, unsigned int thresh);
// C/C++ Wrapper
// Runs the threshold kernel over the N-byte buffer u8data on the GPU and
// returns a freshly malloc'd host buffer holding the binary map. The CALLER
// owns (and must free) the returned pointer.
// NOTE(review): the u8res parameter is never used -- the result is returned
// in a new allocation instead. Confirm whether callers expect u8res to be
// filled in place.
// NOTE(review): no CUDA or malloc error checking; allocation/copy/launch
// failures are silently ignored.
unsigned char *gpu_flow_recogu8_bitmap(unsigned char *u8data, unsigned char *u8res, unsigned int N, unsigned int thresh)
{
// Host memory
unsigned char *host_u8res;
// Device memory
unsigned char *dev_u8data;
unsigned char *dev_u8res;
size_t size = N * sizeof(unsigned char);
host_u8res = (unsigned char *)malloc(size);
// Allocated device memory
cudaMalloc((void **)&dev_u8data, size);
cudaMalloc((void **)&dev_u8res, size);
// Upload data to device memory
cudaMemcpy(dev_u8data, u8data, size, cudaMemcpyHostToDevice);
// Ceil-divide N by the block size so the tail is covered; the kernel guards idx < N.
cuda_flow_bitmap_kernel<<<((N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), THREADS_PER_BLOCK>>>(dev_u8data, dev_u8res, N, thresh);
// Blocking copy also synchronizes with the kernel on the default stream.
cudaMemcpy(host_u8res, dev_u8res, size, cudaMemcpyDeviceToHost);
cudaFree(dev_u8data);
cudaFree(dev_u8res);
return host_u8res;
}
// u8res contains a binary map of optical flow regions with motion intensity over the threshold
// One thread per byte; the guard handles the partial tail block.
__global__ void cuda_flow_bitmap_kernel(unsigned char *u8data, unsigned char *u8res, unsigned int N, unsigned int thresh)
{
    unsigned int gid = threadIdx.x + blockDim.x * blockIdx.x;
    if (gid < N)
        u8res[gid] = (u8data[gid] >= thresh) ? 1 : 0;
}
|
2,157
|
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <cuda.h>
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <ctype.h>
#define MAX_SAVES 1000
#define QTD_NUMBERS 3
// Convenience wrapper: checks the cudaError_t returned by `ans`, reporting
// the call site via __FILE__/__LINE__ on failure.
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Print a CUDA error together with its source location; terminates the
// process with the error code unless abort is false.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// Each thread derives the mixed-radix digit sequence of its global thread id
// (using the precomputed divisors in `divs`) and appends its QTD_NUMBERS
// digits to `arr` at a slot reserved atomically through `index`.
//   divs    - QTD_NUMBERS+1 divisors (divs[0] = total number of combinations)
//   arr     - output buffer of arrSize elements (a multiple of QTD_NUMBERS)
//   arrSize - capacity of arr; all writes are bounds-checked against it
//   index   - global write cursor, advanced atomically (atomicAdd returns
//             the value *before* the addition)
//   MAXgTid - total work size (= #combinations * QTD_NUMBERS)
__global__
void GenSeq(uint32_t *divs, uint8_t*arr, uint32_t arrSize, uint32_t* index, uint32_t MAXgTid){
	uint32_t ThrPerBlk = blockDim.x;
	uint32_t MYbid = blockIdx.x;
	uint32_t MYtid = threadIdx.x;
	uint32_t MYgtid = ThrPerBlk * MYbid + MYtid;
	uint8_t i;
	uint32_t offset = MYgtid * QTD_NUMBERS;
	// Threads beyond the last combination have nothing to do.
	if(offset >= MAXgTid)
		return;
	// Decompose MYgtid into its QTD_NUMBERS mixed-radix digits.
	uint8_t tempArr[QTD_NUMBERS];
	for(i = 0; i < QTD_NUMBERS; i++){
		tempArr[i] = (MYgtid % divs[i]) / divs[i + 1];
	}
	// Reserve QTD_NUMBERS consecutive slots in the output array.
	uint32_t currIndex = atomicAdd(index, QTD_NUMBERS);
	// BUGFIX: bound the reservation against the buffer capacity. The old
	// check (currIndex > MAX_SAVES) still accepted currIndex == MAX_SAVES,
	// whose QTD_NUMBERS writes could run past the end of arr; it also
	// ignored the actual capacity arrSize.
	if(currIndex + QTD_NUMBERS > arrSize)
		return;
	printf("ThrPerBlk: %u, Mybid: %u, Mytid: %d, Mygtid: %u, offset: %u, currIndex: %u\n", ThrPerBlk, MYbid, MYtid, MYgtid, offset, currIndex);
	for(i = 0; i < QTD_NUMBERS; i++){
		arr[currIndex] = tempArr[i];
		currIndex++;
	}
}
int main(){
	cudaError_t cudaStatus;
	cudaEvent_t time1, time2, time3;
	float totalTime, kernelExecutionTime, tfrGPUtoCPU;
	uint32_t ThrPerBlk = 32, NumBlocks = 0;

	// Require at least one CUDA-capable device.
	int NumGPUs = 0; cudaGetDeviceCount(&NumGPUs);
	if (NumGPUs == 0){
		std::cout << "\nNo CUDA Device is available\n";
		exit(EXIT_FAILURE);
	}

	// Total number of digit combinations = product of the radices.
	uint32_t i = 0;
	uint32_t possibilities = 1;
	uint8_t numbers[] = {3,3,3};
	for(i = 0; i < QTD_NUMBERS; i++){
		possibilities *= numbers[i];
	}
	// Mixed-radix divisors: divs[0] = possibilities, divs[i+1] = divs[i]/radix_i.
	uint32_t divs[QTD_NUMBERS + 1];
	divs[0] = possibilities;
	for(i = 0; i < QTD_NUMBERS; i++){
		divs[i + 1] = divs[i] / numbers[i];
	}
	for(i = 0; i < QTD_NUMBERS + 1; i++){ //DEBUG
		std::cout << divs[i] << ", ";
	}
	std::cout << std::endl;

	// Output capacity: MAX_SAVES rounded up to a multiple of QTD_NUMBERS.
	uint32_t MAXgTid = possibilities * QTD_NUMBERS * sizeof(uint8_t);
	uint32_t arrSize = ((MAX_SAVES + QTD_NUMBERS -1) / QTD_NUMBERS) * QTD_NUMBERS;
	std::cout << "arrSize: " << arrSize << std::endl; //DEBUG
	uint8_t* sequencies = (uint8_t*)malloc(arrSize);
	uint32_t* GPUcont = nullptr;
	uint32_t* GPUdivs = nullptr;
	uint8_t* GPUsequencies = nullptr;
	NumBlocks = (possibilities + ThrPerBlk - 1) / ThrPerBlk;  // ceil-div
	std::cout << "NumBlocks: " << NumBlocks << ", ThrPerBlk: " << ThrPerBlk << std::endl; // debug

	cudaEventCreate(&time1);
	cudaEventCreate(&time2);
	cudaEventCreate(&time3);
	cudaEventRecord(time1, 0);

	gpuErrchk(cudaMalloc((void **)&GPUcont, sizeof(uint32_t)));
	gpuErrchk(cudaMemset(GPUcont, 0, sizeof(uint32_t)));
	gpuErrchk(cudaMalloc((void **)&GPUsequencies, arrSize));
	// BUGFIX: zero the output buffer so slots the kernel never fills read
	// back as 0 instead of uninitialized device memory.
	gpuErrchk(cudaMemset(GPUsequencies, 0, arrSize));
	gpuErrchk(cudaMalloc((void **)&GPUdivs, (QTD_NUMBERS + 1) * sizeof(uint32_t)));
	gpuErrchk(cudaMemcpy(GPUdivs, divs, (QTD_NUMBERS + 1) * sizeof(uint32_t), cudaMemcpyHostToDevice));

	GenSeq <<< NumBlocks, ThrPerBlk >>> (GPUdivs, GPUsequencies, arrSize, GPUcont, MAXgTid);
	// Catch launch-configuration errors; execution errors surface at the sync.
	gpuErrchk(cudaGetLastError());
	gpuErrchk(cudaDeviceSynchronize());
	cudaEventRecord(time2, 0);
	gpuErrchk(cudaMemcpy(sequencies, GPUsequencies, arrSize, cudaMemcpyDeviceToHost));
	cudaEventRecord(time3, 0);
	cudaEventSynchronize(time1); cudaEventSynchronize(time2);
	cudaEventSynchronize(time3);
	cudaEventElapsedTime(&totalTime, time1, time3);
	cudaEventElapsedTime(&kernelExecutionTime, time1, time2);
	cudaEventElapsedTime(&tfrGPUtoCPU, time2, time3);
	gpuErrchk(cudaDeviceSynchronize());

	for(i = 0; i < arrSize; i++){
		if(i % QTD_NUMBERS == 0)
			std::cout << std::endl;
		else
			std::cout << ", ";
		std::cout << int(sequencies[i]);
	}
	printf("\n\n-------------------- ... ----------------------------\n");
	printf("Kernel Execution = %5.2f ms\n", kernelExecutionTime);
	printf("GPU->CPU Transfer = %5.2f ms ... %4d MB ... %6.2f GB/s\n", tfrGPUtoCPU, arrSize / 1024 / 1024,
	(float)arrSize / (tfrGPUtoCPU * 1024.0 * 1024.0));
	printf("Total time elapsed = %5.2f ms\n", totalTime);
	printf("-------------------- ... ----------------------------\n");

	// BUGFIX: GPUcont was previously leaked -- free every device allocation.
	cudaFree(GPUcont);
	cudaFree(GPUdivs);
	cudaFree(GPUsequencies);
	cudaEventDestroy(time1);
	cudaEventDestroy(time2);
	cudaEventDestroy(time3);
	cudaStatus = cudaDeviceReset();
	if (cudaStatus != cudaSuccess){
		std::cout << "cudaDeviceReset failed!";
		free(sequencies); exit(EXIT_FAILURE);
	}
	free(sequencies);
	return(EXIT_SUCCESS);
}
|
2,158
|
#include <stdio.h>
#include <iostream>
#include <ctime>
#include <unistd.h>
#include <sys/time.h>
using namespace std;
#define N 1000000
#define BLOCK_SIZE 16
//#define TIME_CHECK clock()/float(CLOCKS_PER_SEC)
typedef unsigned long long timestamp;
// Return the current wall-clock time in microseconds since the Unix epoch.
timestamp get_timestamp()
{
	struct timeval tv;
	gettimeofday(&tv, NULL);
	return (timestamp)tv.tv_sec * 1000000 + tv.tv_usec;
}
#define TIME_CHECK get_timestamp()
float hArray[N];
float *dArray;
int blocks;
// Fill the file-global hArray with 1..N on the host, then allocate the
// file-global device buffer dArray and copy hArray into it.
void prologue(void) {
memset(hArray, 0, sizeof(hArray));
for(int i = 0; i < N; i++) {
hArray[i] = i + 1;
}
cudaMalloc((void**)&dArray, sizeof(hArray));   // NOTE(review): return codes unchecked
cudaMemcpy(dArray, hArray, sizeof(hArray), cudaMemcpyHostToDevice);
}
// Copy the results back from the device buffer dArray into hArray and
// release the device allocation.
void epilogue(void) {
cudaMemcpy(hArray, dArray, sizeof(hArray), cudaMemcpyDeviceToHost);
cudaFree(dArray);
}
// Kernel: cube each of the N elements of A in place, one thread per element.
__global__ void pow3(float *A) {
	int gid = blockDim.x * blockIdx.x + threadIdx.x;
	if (gid >= N)
		return;            // tail guard: grid may overshoot N
	float v = A[gid];
	A[gid] = v * v * v;
}
int main(int argc, char** argv)
{
	// Bail out early when no CUDA-capable device is present.
	int devCnt;
	cudaGetDeviceCount(&devCnt);
	if(devCnt == 0) {
		perror("No CUDA devices available -- exiting.");
		return 1;
	}

	timestamp gpu_start_time = 0;
	timestamp gpu_post_prologue_time = 0;
	timestamp gpu_post_computing_time = 0;
	timestamp gpu_end_time = 0;
	timestamp cpu_init_time = 0;
	timestamp cpu_start_time = 0;
	timestamp cpu_end_time = 0;

	// --- GPU path: init + H2D copy, kernel, D2H copy, each phase timed. ---
	gpu_start_time = TIME_CHECK;
	prologue();
	blocks = N / BLOCK_SIZE;
	if(N % BLOCK_SIZE)
		blocks++;                       // round up so the tail is covered
	gpu_post_prologue_time = TIME_CHECK;
	pow3<<<blocks, BLOCK_SIZE>>>(dArray);
	// BUGFIX: cudaThreadSynchronize() is deprecated and removed in CUDA 12;
	// cudaDeviceSynchronize() is the drop-in replacement.
	cudaDeviceSynchronize();
	gpu_post_computing_time = TIME_CHECK;
	epilogue();
	gpu_end_time = TIME_CHECK;

	// --- CPU reference: same computation on the host, timed separately. ---
	cpu_init_time = TIME_CHECK;
	for(int i = 0; i < N; i++) {
		hArray[i] = i + 1;
	}
	cpu_start_time = TIME_CHECK;
	for(long long i=0;i<N;i++)
		hArray[i] = hArray[i] * hArray[i] * hArray[i];
	cpu_end_time = TIME_CHECK;

	cout << "prologue\t" << gpu_post_prologue_time - gpu_start_time << endl;
	cout << "counting\t" << gpu_post_computing_time - gpu_post_prologue_time << endl;
	cout << "epilogue\t" << gpu_end_time - gpu_post_computing_time << endl;
	cout << "cpu init\t" << cpu_start_time - cpu_init_time << endl;
	cout << "cpu\t" << cpu_end_time - cpu_start_time << endl;
	return 0;
}
|
2,159
|
#define BLOCK_SIZE 512
#include <stdio.h>
#include <cuda.h>
#include <math.h>
__constant__ float sigma;
__constant__ float rcut;
__constant__ float vrcut;
__constant__ float dvrc12;
__constant__ float dvrcut;
__constant__ int mx;
__constant__ int my;
__constant__ int mz;
__constant__ int natoms;
__constant__ int step;
__constant__ float sfx;
__constant__ float sfy;
__constant__ float sfz;
// ---------------------------------------------------------------------------
// Host-side setters for the __constant__ simulation parameters declared
// above. Each copies one host value into the corresponding device symbol.
//
// BUGFIX: cudaMemcpyToSymbol must be given the symbol itself, not its name
// as a string. The character-string-name variant was removed from the CUDA
// runtime API (CUDA 5.0); passing "sigma" etc. fails with
// cudaErrorInvalidSymbol on modern toolkits, silently leaving the constants
// at their default value.
// ---------------------------------------------------------------------------
void copySigma(float *sig)
{
	cudaMemcpyToSymbol(sigma, sig, sizeof(float));
}
void copyRcut(float *rcu)
{
	cudaMemcpyToSymbol(rcut, rcu, sizeof(float));
}
void copyVrcut(float *vrc)
{
	cudaMemcpyToSymbol(vrcut, vrc, sizeof(float));
}
void copyDvrc12(float *dvr)
{
	cudaMemcpyToSymbol(dvrc12, dvr, sizeof(float));
}
void copyDvrcut(float *dvrc)
{
	cudaMemcpyToSymbol(dvrcut, dvrc, sizeof(float));
}
void copyMx(int * m)
{
	cudaMemcpyToSymbol(mx, m, sizeof(int));
}
void copyMy(int *mm)
{
	cudaMemcpyToSymbol(my, mm, sizeof(int));
}
void copyMz(int *mmm)
{
	cudaMemcpyToSymbol(mz, mmm, sizeof(int));
}
void copyNatoms(int *nat)
{
	cudaMemcpyToSymbol(natoms, nat, sizeof(int));
}
void copyStep(int *ste)
{
	cudaMemcpyToSymbol(step, ste, sizeof(int));
}
void copySfx(float *sf)
{
	cudaMemcpyToSymbol(sfx, sf, sizeof(float));
}
void copySfy(float *sff)
{
	cudaMemcpyToSymbol(sfy, sff, sizeof(float));
}
void copySfz(float *sfff)
{
	cudaMemcpyToSymbol(sfz, sfff, sizeof(float));
}
__global__
void force (float *virialArray, float *potentialArray, float *rx, float *ry, float *rz, float *fx, float *fy, float *fz, float sigma, float rcut, float vrcut, float dvrc12, float dvrcut, int *head, int *list, int mx, int my, int mz, int natoms, float sfx, float sfy, float sfz)
{
int element = blockIdx.x * blockDim.x + threadIdx.x;
/**
if (element == (natoms - 1))
{
printf("\n------------------------------------START------------------------------\n");
printf("SIGMA: %f\n", sigma);
}
*/
float sigsq, rcutsq;
float rxi, ryi, rzi, fxi, fyi, fzi;
float rxij, ryij, rzij, rijsq;
float rij, sr2, sr6, vij, wij, fij, fxij, fyij, fzij;
int j, jcell;
float potential, virial;
int xi, yi, zi, ix, jx, kx, xcell, ycell, zcell;
__shared__ float vArray[BLOCK_SIZE];
__shared__ float pArray[BLOCK_SIZE];
sigsq = __fmul_rn(sigma, sigma);
rcutsq = __fmul_rn(rcut,rcut);
potential = (float)0.0;
virial = (float)0.0;
vArray[threadIdx.x] = (float)0.0;
pArray[threadIdx.x] = (float)0.0;
if (element < natoms)
{
rxi = rx[element];
ryi = ry[element];
rzi = rz[element];
/**
if (element == (natoms - 1))
{
printf("%d: rxi=%f\n",element, rxi);
printf("%d: ryi=%f\n",element, ryi);
printf("%d: rzi=%f\n",element, rzi);
}
*/
fxi = (float)0.0;
fyi = (float)0.0;
fzi = (float)0.0;
//(int)((rxi+0.5)/sfx) + 1;
//
xi = (int)( (rxi + (float)0.5) / sfx);
xi += 1;
yi = (int)( (ryi + (float)0.5) / sfy);
yi += 1;
zi = (int)( (rzi + (float)0.5) / sfz);
zi += 1;
/**
if (element == (natoms - 1))
{
printf("%d: xi=%d\n", element, xi);
printf("%d: yi=%d\n", element, yi);
printf("%d: zi=%d\n", element, zi);
}
*/
if(xi > mx)
{
xi = mx;
}
if(yi > my)
{
yi = my;
}
if(zi > mz)
{
zi = mz;
}
/**
if (element == (natoms - 1))
{
printf("%d: xi=%d\n", element, xi);
printf("%d: yi=%d\n", element, yi);
printf("%d: zi=%d\n", element, zi);
}
*/
//CHANGED THIS
//xi + (mx+2)*(yi+zi*(my+2))
//icell = __float2int_rn(__fadd_rn(xi, __fmul_rn(__fadd_rn(mx,2), __fadd_rn(yi, __fmul_rn(zi, __fadd_rn(my,2))))));
//TO THIS
//icell = xi;
/**
if (element == (natoms - 1))
{
printf("%d: icell=%d\n",element, icell);
}
*/
//icell += (mx+2)*(yi+zi*(my+2));
/**
if (element == (natoms - 1))
{
printf("%d: icell=%d\n",element, icell);
}
*/
// if (xi<0 || xi> mx) printf("\nxi = %d\n",xi);
// if (yi<0 || yi> my) printf("\nyi = %d\n",yi);
// if (zi<0 || zi> mz) printf("\nzi = %d\n",zi);
// if(icell<0||icell>(mx+2)*(my+2)*(mz+2)-1) printf("\nicell = %d\n",icell);
// if(step==92&&i==4680){ printf("Particle %5d, (xi,yi,zi) = %d,%d,%d, icell = %5d\n",i,xi,yi,zi,icell);
// printf("rx = %f, ry = %f, rz = %f\n",rxi,ryi,rzi);
// fflush(stdout);
// }
for (ix=-1;ix<=1;ix++)
for (jx=-1;jx<=1;jx++)
for (kx=-1;kx<=1;kx++){
xcell = ix+xi;
ycell = jx+yi;
zcell = kx+zi;
/**
if (element == (natoms - 1))
{
printf("%d: xcell=%d\n",element, xcell);
printf("%d: ycell=%d\n",element, ycell);
printf("%d: zcell=%d\n",element, zcell);
}
*/
//CHANGED THIS
//jcell = xcell + (mx+2)*(ycell+(my+2)*zcell);
//jcell = __float2int_rn(__fadd_rn(xcell, __fmul_rn(__fadd_rn(mx,2),__fadd_rn(ycell,__fmul_rn(__fadd_rn(my,2),zcell)))));
jcell = xcell + (mx+2)*(ycell+(my+2)*zcell);
//TO THIS
//jcell = xcell;
/**
if (element == (natoms - 1))
{
printf("%d: jcell=%d\n",element, jcell);
}
// jcell += (mx+2)*(ycell+(my+2)*zcell);
if (element == (natoms - 1))
{
printf("%d: jcell=%d\n",element, jcell);
}
*/
// printf("%d (%d,%d,%d); ",jcell,xcell,ycell,zcell);
j = head[jcell];
/**
if (element == (natoms - 1))
{
printf("%d: j=%d\n",element, j);
}
*/
// if(jcell<0||jcell>(mx+2)*(my+2)*(mz+2)-1) printf("\njcell = %d\n",jcell);
while (j>=0)
{
// if(j<0 || j>ntot-1) printf("\nj = %d\n",j);
if (j!=element)
{
rxij = __fadd_rn(rxi, -rx[j]);
ryij = __fadd_rn(ryi, -ry[j]);
rzij = __fadd_rn(rzi, -rz[j]);
/**
if (element == (natoms - 1))
{
printf("%d: rxij=%f\n",element,rxij);
printf("%d: ryij=%f\n",element,ryij);
printf("%d: rzij=%f\n",element,rzij);
}
*/
//rijsq = (rxij*rxij) + (ryij*ryij) + (rzij*rzij);
rijsq = __fadd_rn(__fadd_rn(__fmul_rn(rxij,rxij), __fmul_rn(ryij,ryij)), __fmul_rn(rzij,rzij));
//TO THIS
//rijsq = rxij*rxij;
//rijsq += ryij*ryij;
//rijsq += rzij*rzij;
/**
if (element == (natoms - 1))
{
printf("%d: rijsq=%f\n",element, rijsq);
printf("%d: rijsq=%f\n",element, rijsq);
printf("%d: rijsq=%f\n",element, rijsq);
}
*/
if (rijsq < rcutsq)
{
//START FORCE_IJ
rij = __fsqrt_rn(rijsq);
sr2 = __fdiv_rn(sigsq,rijsq);
sr6 = __fmul_rn(__fmul_rn(sr2,sr2),sr2);
//CHANGED THIS
//*vij = sr6*(sr6-1.0) - vrcut - dvrc12*(rij-rcut);
vij = __fadd_rn(__fadd_rn(__fmul_rn(sr6,__fadd_rn(sr6,(float)-1.0)), -vrcut), __fmul_rn(-dvrc12, __fadd_rn(rij,-rcut)));
//TO THIS
//vij = sr6*(sr6-1.0);
//vij -= vrcut;
//vij -= dvrc12*(rij - rcut);
//*wij = sr6*(sr6-0.5) + dvrcut*rij;
wij = __fadd_rn(__fmul_rn(sr6,__fadd_rn(sr6,(float)-0.5)), __fmul_rn(dvrcut,rij));
//TO THIS
//wij = sr6*(sr6-0.5);
//wij += dvrcut*rij;
fij = __fdiv_rn(wij, rijsq);
fxij = __fmul_rn(fij, rxij);
fyij = __fmul_rn(fij, ryij);
fzij = __fmul_rn(fij, rzij);
//END FORCE_IJ
wij = __fmul_rn(wij, (float)0.5);
vij = __fmul_rn(vij, (float)0.5);
potential = __fadd_rn(potential, vij);
virial = __fadd_rn(virial, wij);
fxi += fxij;
fyi += fyij;
fzi += fzij;
/**
if (element == (natoms - 1))
{
printf("%d: rij=%f\n",element, rij);
printf("%d: sr2=%f\n",element, sr2);
printf("%d: sr6=%f\n",element, sr6);
printf("%d: vij=%f\n",element, vij);
printf("%d: wij=%f\n",element, wij);
printf("%d: fij=%f\n",element, fij);
printf("%d: fxij=%f\n",element, fxij);
printf("%d: fyij=%f\n",element, fyij);
printf("%d: fzij=%f\n",element, fzij);
printf("%d: potential=%f\n",element, potential);
printf("%d: virial=%f\n",element, virial);
printf("%d: fxi=%f\n",element, fxi);
printf("%d: fyi=%f\n",element, fyi);
printf("%d: fzi=%f\n",element, fzi);
}
*/
}
}
j = list[j];
/**
if (element == (natoms - 1))
{
printf("%d j=%d\n",element, j);
}
*/
}
}
*(fx+element) = __fmul_rn((float)48.0, fxi);
*(fy+element) = __fmul_rn((float)48.0, fyi);
*(fz+element) = __fmul_rn((float)48.0, fzi);
/**
if (element == (natoms - 1))
{
printf("%d: fx+element= %f\n",element, *(fx+element));
printf("%d: fy+element= %f\n",element, *(fy+element));
printf("%d: fz+element= %f\n",element, *(fz+element));
}
*/
vArray[threadIdx.x] = virial;
//pArray[threadIdx.x] = potential;
pArray[threadIdx.x] = potential;
unsigned int t = threadIdx.x;
unsigned int stride;
for(stride = blockDim.x / 2; stride >0; stride >>= 1)
{
__syncthreads();
if (t<stride)
{
vArray[t]+= vArray[t+stride];
pArray[t]+= pArray[t+stride];
//vArray[t]+= vArray[t+stride];
}
}
/**
__syncthreads();
if (t < 32)
{
vArray[t] += vArray[t + 32];
vArray[t] += vArray[t + 16];
vArray[t] += vArray[t + 8];
vArray[t] += vArray[t + 4];
vArray[t] += vArray[t + 2];
vArray[t] += vArray[t + 1];
pArray[t] += pArray[t + 32];
pArray[t] += pArray[t + 16];
pArray[t] += pArray[t + 8];
pArray[t] += pArray[t + 4];
pArray[t] += pArray[t + 2];
pArray[t] += pArray[t + 1];
}
*/
__syncthreads();
if (threadIdx.x == 0)
{
virialArray[blockIdx.x] = vArray[0];
potentialArray[blockIdx.x] = pArray[0];
}
/**
if ((element == 0) || (element == 512))
{
printf("%d: vArray[0]= %f\n", element, vArray[0]);
printf("%d: pArray[0]= %f\n", element, pArray[0]);
}
*/
/**if (element == (natoms - 1))
{
printf("------------------------------------END------------------------------\n");
}
*/
}
//return;
}
|
2,160
|
#include <iostream>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <ctime>
#include <time.h>
#include <sstream>
#include <string>
#include <fstream>
#include <cuda.h>
#include <cuda_runtime.h>
using namespace std;
// Block-level sum reduction in shared memory.
// Each block loads blockDim.x ints from g_idata (zero-padded beyond `size`)
// into dynamically sized shared memory, reduces them with interleaved
// addressing, and thread 0 writes the block's partial sum to
// g_odata[blockIdx.x]. Launch with blockDim.x * sizeof(int) shared bytes.
// NOTE(review): the index = 2*s*tid stride pattern is conflict-prone in
// shared memory; sequential addressing would be faster.
__global__ void reduce0(int *g_idata, int *g_odata, int size){
extern __shared__ int sdata[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = 0;          // zero-pad so out-of-range threads contribute nothing
if(i<size)
sdata[tid] = g_idata[i];
__syncthreads();         // all loads complete before reducing
for(unsigned int s=1; s < blockDim.x; s *= 2) {
int index = 2 * s * tid;
if (index < blockDim.x) {
sdata[index] += sdata[index + s];
}
__syncthreads();         // barrier outside the divergent if: all threads reach it
}
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
int main(void){
int size = 939289;
thrust::host_vector<int> data_h_i(size, 1);
//initialize the data, all values will be 1
//so the final sum will be equal to size
int threadsPerBlock = 1024;
int totalBlocks = (size+(threadsPerBlock-1))/threadsPerBlock;
thrust::device_vector<int> data_v_i = data_h_i;
thrust::device_vector<int> data_v_o(totalBlocks);
int* output = thrust::raw_pointer_cast(data_v_o.data());
int* input = thrust::raw_pointer_cast(data_v_i.data());
bool turn = true;
cudaError_t error;
cudaEvent_t start;
error = cudaEventCreate(&start);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
cudaEvent_t stop;
error = cudaEventCreate(&stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
error = cudaEventRecord(start, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
while(true) {
if(turn) {
//Odpal kernel (tablica wejściowa jako input, wyjściowa jako output
reduce0<<<totalBlocks, threadsPerBlock, threadsPerBlock*sizeof(int)>>>(input, output, size);
turn = false;
} else {
//Odpal kernel (tablica wyjściowa jako input, wejściowa jako output
reduce0<<<totalBlocks, threadsPerBlock, threadsPerBlock*sizeof(int)>>>(output, input, size);
turn = true;
}
//Jeżeli został jeden blok, to obliczenia zostały zakończone
if(totalBlocks == 1) break;
//Korzystaj tylko z zakresu tablicy odpowiadającemu liczbie bloków z poprzedniej iteracji
size = totalBlocks;
//Oblicz nową liczbę bloków
totalBlocks = ceil((double)totalBlocks/threadsPerBlock);
}
error = cudaEventRecord(stop, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = cudaEventSynchronize(stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
float msecTotal = 0.0f;
error = cudaEventElapsedTime(&msecTotal, start, stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
//Wektor wyjściowy hosta
thrust::host_vector<int> data_h_o;
//Pobierz wynik
if(turn)
//Wynik w tablicy wejściowej device
data_h_o = data_v_i;
else
//Wynik w tablicy wyjściowej device
data_h_o = data_v_o;
//Wyczyść wektory
data_v_i.clear();
data_v_i.shrink_to_fit();
data_v_o.clear();
data_v_o.shrink_to_fit();
//Wypisz wynik
cout<< "Wynik: " << data_h_o[0] << endl << "W czasie:" << msecTotal << endl;
return 0;
}
|
2,161
|
#include <stdio.h>
#include <stdlib.h>
#define MIN(a, b) (a < b) ? a : b
// Fill `in` with `samples` evenly spaced values covering [first, last]
// (both endpoints included); one thread per sample.
__global__ void inputs_gen(float *in, int samples, float first, float last) {
	int gid = blockIdx.x * blockDim.x + threadIdx.x;
	float step = (last - first) / (samples - 1);
	if (gid < samples)
		in[gid] = gid * step + first;
}
// Compute out[i] = sin(in[i]) for each of `samples` elements (single
// precision); one thread per element with a tail guard.
__global__ void sin_compute(float *in, float *out, int samples) {
	int gid = blockIdx.x * blockDim.x + threadIdx.x;
	if (gid >= samples)
		return;
	out[gid] = sinf(in[gid]);
}
int main(int argc, char **argv) {
float *in, *out;
float *d_in, *d_out;
int samples = 12501;
char option;
cudaError_t err;
if (argc == 2) {
option = argv[1][0];
switch (option) {
case 'a':
samples = 12500001;
break;
case 'b':
samples = 1250001;
break;
case 'c':
samples = 125001;
break;
case 'd':
samples = 12501;
break;
case 'e':
default:
samples = 1251;
}
}
int size = sizeof(float)*samples;
in = (float*)malloc(size);
out = (float*)malloc(size);
if (!in || !out) {
fprintf(stderr, "Erro alocando vetores\n");
exit(EXIT_FAILURE);
}
err = cudaMalloc(&d_in, size);
if (err != cudaSuccess) {
fprintf(stderr, "Erro alocando entradas (%s)\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMalloc(&d_out, size);
if (err != cudaSuccess) {
fprintf(stderr, "Erro alocando saidas (%s)\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
int tpb = MIN(samples, 512);
int bpg = (samples-1)/tpb+1;
inputs_gen<<<bpg, tpb>>>(d_in, samples, -2.0 * M_PI, 2 * M_PI);
if (cudaGetLastError() != cudaSuccess) {
fprintf(stderr, "Erro ao gerar entradas\n");
exit(EXIT_FAILURE);
}
cudaThreadSynchronize();
sin_compute<<<bpg, tpb>>>(d_in, d_out, samples);
if (cudaGetLastError() != cudaSuccess) {
fprintf(stderr, "Erro ao computar seno\n");
exit(EXIT_FAILURE);
}
cudaThreadSynchronize();
err = cudaMemcpy(in, d_in, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
fprintf(stderr, "Erro ao transferir valores de entrada (%s)\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(out, d_out, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
fprintf(stderr, "Erro ao transferir valores de saida (%s)\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cudaFree(d_in);
cudaFree(d_out);
for (int i = 0; i < samples; ++i) {
printf("%f,%f\n", in[i], out[i]);
}
free(in);
free(out);
return EXIT_SUCCESS;
}
|
2,162
|
#include <random>
#include <math.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>
#include <chrono>
void ldpcEncoder (unsigned int *infoWord, unsigned int* W_ROW_ROM,
unsigned int numMsgBits, unsigned int numRowsinRom, unsigned int numParBits,
unsigned int shiftRegLength,
unsigned int *codeWord);
int main (int argc, char **argv) {
unsigned int numChecks, numBits, maxBitsForCheck, maxChecksForBit;
unsigned int numRowsW, numColsW, shiftRegLength;
unsigned int *W_ROW_ROM;
char alistFile[256];
char wROM_File[256];
FILE *src;
int errnum;
unsigned int infoLeng, rnum, rdenom;
using clock = std::chrono::steady_clock;
clock::time_point startTime;
clock::time_point endTime;
clock::duration encoderTime;
unsigned int seed = 163331;
/* or use this to get a fresh sequence each time the program is run.
std::random_device rd; //Will be used to obtain a seed for the random number engine
std::mt19937 generator(rd()); //Standard mersenne_twister_engine seeded with rd()
*/
std::mt19937 generator(seed); //Standard mersenne_twister_engine
std::uniform_real_distribution<> rDist(0, 1);
if (argc < 4) {
printf("usage: TestEncoder <infoLength> <r-numerator> <r-denominator>\n" );
exit(-1);
}
infoLeng = atoi(argv[1]);
rnum = atoi(argv[2]);
rdenom = atoi(argv[3]);
sprintf(alistFile, "./G_and_H_Matrices/H_%d%d_%d.alist", rnum, rdenom, infoLeng);
sprintf(wROM_File, "./G_and_H_Matrices/W_ROW_ROM_%d%d_%d.binary", rnum, rdenom, infoLeng);
src = fopen(alistFile, "r");
if (src == NULL) {
errnum = errno;
printf("Value of errno: %d\n", errnum);
perror("Error printed by perror");
printf("Error opening file %s\n", alistFile);
return(EXIT_FAILURE);
}
fscanf(src,"%d", &numBits);
fscanf(src ,"%d", &numChecks);
fscanf(src,"%d", &maxChecksForBit);
fscanf(src,"%d", &maxBitsForCheck);
fclose(src);
src = fopen(wROM_File, "r");
if (src == NULL) {
errnum = errno;
printf("Value of errno: %d\n", errnum);
perror("Error printed by perror");
printf("Error opening file %s\n", wROM_File);
return(EXIT_FAILURE);
}
fread(& numRowsW, sizeof(unsigned int), 1, src);
fread(& numColsW, sizeof(unsigned int), 1, src);
fread(& shiftRegLength, sizeof(unsigned int), 1, src);
W_ROW_ROM = (unsigned int*) malloc(numRowsW * numColsW * sizeof( unsigned int));
fread(W_ROW_ROM, sizeof(unsigned int), numRowsW * numColsW, src);
fclose(src);
printf("parameters have been read.\n");
printf("numBits = %i, numChecks = %i\n", numBits, numChecks);
printf("Max checks for bit: %i Max bits for check %i\n", maxChecksForBit, maxBitsForCheck);
// ///////////////////////////////////////////
unsigned int* infoWord;
unsigned int* codeWord;
infoWord = (unsigned int *)malloc(infoLeng * sizeof(unsigned int));
codeWord = (unsigned int *)malloc(numBits * sizeof(unsigned int));
for (unsigned int j=0; j < infoLeng; j++) {
// infoWord[j] = (0.5 >= rDist(generator))? 1:0;
infoWord[j] = j % 2;
}
startTime = clock::now();
ldpcEncoder(infoWord, W_ROW_ROM, infoLeng, numRowsW, numColsW, shiftRegLength, codeWord);
endTime = clock::now();
encoderTime = endTime - startTime;
printf("Time for encoder: %i microsec\n",
std::chrono::duration_cast<std::chrono::microseconds>(encoderTime).count());
char encodedFile[256];
sprintf(encodedFile, "./evenodd%d.encoded", numBits);
src = fopen(encodedFile, "w");
if (src == NULL) {
errnum = errno;
printf("Value of errno: %d\n", errnum);
perror("Error printed by perror");
printf("Error opening file %s\n", encodedFile);
return(EXIT_FAILURE);
}
for(unsigned int j=0; j<numBits; j++) fprintf(src,"%d\n", codeWord[j]);
fclose(src);
// Debug.
// unsigned int numParityBits = numColsW;
// for (unsigned int j=0; j< numParityBits; j++) {
// printf(" %i", codeWord[infoLeng+j]);
// if ( (j % 40) == 39) { printf("\n"); }
// }
// printf("\n");
}
|
2,163
|
#include <stdio.h>
#include <stdlib.h>
#include <fstream>
#include <iostream>
using namespace std;
//matrix structure
typedef struct {
int n;
int *el;
} Matrix;
//KERNEL
// D = AxB
// D = A*B + C for square n x n matrices stored flat in row-major order.
// One thread computes one output element D[row][col].
__global__ void calcD(int n, Matrix D, Matrix A, Matrix B, Matrix C)
{
	int row = blockIdx.y * blockDim.y + threadIdx.y;
	int col = blockIdx.x * blockDim.x + threadIdx.x;
	// BUGFIX/robustness: guard threads outside the matrix so the kernel is
	// safe for any launch configuration. The original assumed an exact
	// n x n thread layout and read/wrote out of bounds otherwise.
	if (row >= n || col >= n)
		return;
	int acc = 0;
	for (int e = 0; e < n; ++e)
		acc += A.el[row * n + e] * B.el[e * n + col]; // dot(row of A, col of B)
	D.el[row * n + col] = acc + C.el[row * n + col];  // add C element-wise
}
//HOST CODE
// Reads n, then three n x n int matrices (A, B, C) from the file named in
// argv[1], computes D = A*B + C on the GPU and writes D to output.txt.
// Passing any second argument additionally echoes N and D to stdout.
int main(int argc, char* argv[])
{
if (argc<2) {
cout << "Input file not specified. Please, specify it as a first argument." << endl;
cout << "example: " << argv[0] << " matr.txt" << endl;
return -1;
}
ifstream file(argv[1]);
ofstream output("output.txt");
if (!file)
{
cout << "Error opening file" << endl;
return -1;
}
int n;
file >> n; //size N
if (argc>2) cout << "N=" << n << endl;
Matrix A, B, C, D; // host matrices; D receives the device result
A.el = new int[n*n];
B.el = new int[n*n];
C.el = new int[n*n];
D.el = new int[n*n];
// Read A, B and C (n*n ints each, in that order) from the input file.
for (int i=0; i<n*n; i++)
file >> A.el[i];
for (int i=0; i<n*n; i++)
file >> B.el[i];
for (int i=0; i<n*n; i++)
file >> C.el[i];
// Device copies of the inputs.
Matrix d_A;
d_A.n=n;
size_t size = n*n*sizeof(int);
cudaMalloc(&d_A.el, size);
cudaMemcpy(d_A.el, A.el, size, cudaMemcpyHostToDevice);
Matrix d_B;
d_B.n=n;
cudaMalloc(&d_B.el, size);
cudaMemcpy(d_B.el, B.el, size, cudaMemcpyHostToDevice);
Matrix d_C;
d_C.n=n;
cudaMalloc(&d_C.el, size);
cudaMemcpy(d_C.el, C.el, size, cudaMemcpyHostToDevice);
Matrix d_D; // result matrix; device memory only until copied back
d_D.n=n;
cudaMalloc(&d_D.el, size);
// One block of n x n threads.
// NOTE(review): a block is limited to 1024 threads, so this launch fails
// for n > 32 — confirm expected input sizes or switch to a multi-block grid.
dim3 dimBlock(n,n);
dim3 dimGrid(1,1);
calcD<<<dimGrid, dimBlock>>>(n, d_D, d_A, d_B, d_C);
// Report launch-configuration failures instead of silently copying garbage.
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
cout << "Kernel launch failed: " << cudaGetErrorString(err) << endl;
return -1;
}
// Copy D back (a blocking D2H memcpy synchronizes with the kernel).
cudaMemcpy(D.el, d_D.el, size, cudaMemcpyDeviceToHost);
//write output to file
output << "Matrix D:" << endl;
for (int i=0; i<n; i++) {
for (int j=0; j<n; j++)
output << D.el[(i*n+j)] << " ";
output << endl;
}
//print out resulting matrix D if second argument was present
if (argc>2) {
cout << endl << "Matrix D:" << endl;
for (int i=0; i<n; i++) {
for (int j=0; j<n; j++)
cout << D.el[(i*n+j)] << " ";
cout << endl;
}
}
//free the memory on device
cudaFree(d_A.el);
cudaFree(d_B.el);
cudaFree(d_C.el);
cudaFree(d_D.el);
//free the memory on host
delete[] A.el;
delete[] B.el;
delete[] C.el;
delete[] D.el;
file.close();
output.close();
cout << endl << "Done. " << endl;
return 0;
}
|
2,164
|
#include<iostream>
#include<math.h>
#include<stdlib.h>
#include<time.h>
#define N 2048
using namespace std;
// Fill `vector` with `size` pseudo-random ints in the range [0, 10).
void random_ints(int *vector, int size){
int i = 0;
while (i < size) {
vector[i] = rand() % 10;
++i;
}
}
// Element-wise conversion copy: dest[k] = (float)src[k] for k in [0, size).
void copy_int_to_float(float *dest, int *src, int size){
for (int k = 0; k != size; ++k) {
dest[k] = (float)src[k];
}
}
// Parallel min-reduction over 2*blockDim.x ints; the result lands in vector[0].
// Single-block kernel: launch as min<<<1, N/2>>>. The input array is destroyed.
__global__ void min(int *vector){
int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads > 0){
if(tid < number_of_threads){
int first_index = tid * step_size * 2; // each live thread folds a pair
int second_index = first_index + step_size;
vector[first_index] = vector[first_index] > vector[second_index] ? vector[second_index] : vector[first_index];
}
// All writes of this round must be visible before the next round reads them;
// without this barrier the tree reduction races across warps.
__syncthreads();
step_size <<= 1;
number_of_threads >>= 1;
}
}
// Parallel max-reduction over 2*blockDim.x ints; the result lands in vector[0].
// Single-block kernel: launch as max<<<1, N/2>>>. The input array is destroyed.
__global__ void max(int *vector){
int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads > 0){
if(tid < number_of_threads){
int first_index = tid * step_size * 2; // each live thread folds a pair
int second_index = first_index + step_size;
vector[first_index] = vector[first_index] < vector[second_index] ? vector[second_index] : vector[first_index];
}
// Barrier between reduction rounds — otherwise threads read values that
// other warps have not yet written (data race).
__syncthreads();
step_size <<= 1;
number_of_threads >>= 1;
}
}
// Parallel sum-reduction over 2*blockDim.x ints; the total lands in vector[0].
// Single-block kernel: launch as sum<<<1, N/2>>>. The input array is destroyed.
__global__ void sum(int *vector){
int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads > 0){
if(tid < number_of_threads){ //If thread is alive
int first_index = tid * step_size * 2; //As each thread operates on 2 elements.
int second_index = first_index + step_size;
vector[first_index] += vector[second_index];
}
// Synchronize before the next round: partial sums written by other warps
// must be visible, or the reduction produces garbage.
__syncthreads();
step_size <<= 1;
number_of_threads >>= 1;
}
}
// Float variant of the tree sum-reduction; total lands in vector[0].
// Single-block kernel: launch as sum_floats<<<1, N/2>>>. Input is destroyed.
__global__ void sum_floats(float *vector){
int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads > 0){
if(tid < number_of_threads){ //If thread is alive
int first_index = tid * step_size * 2; //As each thread operates on 2 elements.
int second_index = first_index + step_size;
vector[first_index] += vector[second_index];
}
// Barrier between rounds — required for cross-warp visibility of partials.
__syncthreads();
step_size <<= 1;
number_of_threads >>= 1;
}
}
// In place, one element per thread: vector[t] <- (vector[t] - mean)^2.
__global__ void mean_diff_sq(float *vector, float mean){
int t = threadIdx.x;
float d = vector[t] - mean;
vector[t] = d * d;
}
// Computes sum, min, max, mean, variance and std-dev of N random ints on the
// GPU. The reduction kernels destroy their input, so the vector is re-uploaded
// before each one.
int main(void){
int size = N * sizeof(int);
int *vec;    // host copy
int *d_vec;  // device copy
int result;
srand(time(0));
vec = (int *)malloc(size);
random_ints(vec, N);
cudaMalloc((void **)&d_vec, size);
//SUM
cudaMemcpy(d_vec, vec, size, cudaMemcpyHostToDevice);
sum<<<1, N/2>>>(d_vec);
// The reduction leaves its result in element 0.
cudaMemcpy(&result, d_vec, sizeof(int), cudaMemcpyDeviceToHost);
printf("Sum is: %d", result);
//MIN
cudaMemcpy(d_vec, vec, size, cudaMemcpyHostToDevice);
min<<<1, N/2>>>(d_vec);
cudaMemcpy(&result, d_vec, sizeof(int), cudaMemcpyDeviceToHost);
printf("\nMin is: %d", result);
//MAX
cudaMemcpy(d_vec, vec, size, cudaMemcpyHostToDevice);
max<<<1, N/2>>>(d_vec);
cudaMemcpy(&result, d_vec, sizeof(int), cudaMemcpyDeviceToHost);
printf("\nMax is: %d", result);
//MEAN
cudaMemcpy(d_vec, vec, size, cudaMemcpyHostToDevice);
sum<<<1, N/2>>>(d_vec);
cudaMemcpy(&result, d_vec, sizeof(int), cudaMemcpyDeviceToHost);
float mean = float(result)/N;
printf("\nMean is: %f", mean);
//STD. DEV
float *float_vec;
float *d_float_vec;
float_vec = (float *)malloc(N*sizeof(float));
cudaMalloc((void **)&d_float_vec, N*sizeof(float));
copy_int_to_float(float_vec, vec, N);
cudaMemcpy(d_float_vec, float_vec, N*sizeof(float), cudaMemcpyHostToDevice);
mean_diff_sq<<<1, N>>>(d_float_vec, mean);   // (x - mean)^2, one thread per element
sum_floats<<<1, N/2>>>(d_float_vec);         // then sum the squared differences
float res;
cudaMemcpy(&res, d_float_vec, sizeof(res), cudaMemcpyDeviceToHost);
res /= N; // population variance
printf("\nVariance: %f", res);
res = sqrt(res);
printf("\nStd. Dev: %f", res);
//Free allocated memory (device and host)
cudaFree(d_vec);
cudaFree(d_float_vec);
free(vec);
free(float_vec);
printf("\n");
return 0;
}
|
2,165
|
/*
By : Johan S. Suarez L. or @jadry92 in twitter
This kernel make inverse of the matrix A in the matrix B
*/
// Macro for mastrix index
#define Ind(a,i,j) (a)[(j)+(i)*N]
#include <stdio.h> //manipulacion de ficheros, lectura-escritura ficheros, scandf-printf
#include <stdlib.h> //Conversion de tipos de datos, memoria dinamica, abs
#include <string.h> //Uso de memcpy principalmente
#include <math.h> //funciones matemáticas
#include <time.h>
/* Funtion check malloc of variables */
/* Report on stdout whether malloc succeeded for the variable called `name`. */
void checkMalloc(void * var,const char *name){
if (var != NULL) {
printf(" Memory has already been allocated %s.\n",name);
} else {
printf("It's not possible to allocate %s. \n",name);
}
}
/* Builds a random N x N matrix A, sets B to the identity, then applies a
Gaussian-elimination style sweep intended to leave inv(A) in B.
Fix: the matrices are N x N, so each needs N*N floats — the previous
N*sizeof(float) allocations overflowed the heap on every access. */
int main(int argc, char **argv){
int i,j; // i == rows and j == colms
int N =3;
float* A;
float* B;
/* Reservation of memory (N*N elements per matrix) */
A = (float*)malloc(N*N*sizeof(float));
checkMalloc(A,"A");
B = (float*)malloc(N*N*sizeof(float));
checkMalloc(B,"B");
/* Initialitation Matix A */
printf("A =\n");
for(i = 0; i < N; i++){
for(j = 0; j < N ; j++){
Ind(A,i,j) = rand()/(RAND_MAX/10.0);
printf(" %f ",Ind(A,i,j));
}
printf("\n");
}
/* Gaussian Elimination */
// B starts as the identity matrix. All-zero bytes represent 0.0f for
// IEEE-754 floats, so a byte-wise memset of the full N*N buffer is valid.
memset(B,0,N*N*sizeof(float));
for(i = 0; i < N; i++){
Ind(B,i,i) = 1.0;
}
printf("B =\n");
for(i = 0; i < N; i++){
for(j = 0; j < N ; j++){
printf(" %f ",Ind(B,i,j));
}
printf("\n");
}
int y;
// NOTE(review): the elimination below does not look like a standard
// Gauss-Jordan sweep (the i-N-1 row offsets can go negative, and j+y can
// exceed the row length) — only the allocation sizes were fixed here;
// the algorithm itself still needs verification against a reference.
for(i = 0; i < N; i++){ // for to rows
for(j = 0; j <N ; j++){
y=0;
if(i==j){
}else{
while(y<N){
if(i+1<N ){
Ind(A,i,j+y) = Ind(A,i,j+y) - Ind(A,i+1,j+y)*(Ind(A,i,j)/Ind(A,i+1,j));
Ind(B,i,j+y) = Ind(B,i,j+y) - Ind(B,i+1,j+y)*(Ind(A,i,j)/Ind(A,i+1,j));
}else{
Ind(A,i,j+y) = Ind(A,i,j+y) - Ind(A,i-N-1+y,j+y)*(Ind(A,i,j)/Ind(A,i-N-1,j));
Ind(B,i,j+y) = Ind(B,i,j+y) - Ind(B,i-N-1+y,j+y)*(Ind(A,i,j)/Ind(A,i-N-1,j));
}
y++;
}
}
}
}
printf("A =\n");
for(i = 0; i < N; i++){
for(j = 0; j < N ; j++){
printf(" %f ",Ind(A,i,j));
}
printf("\n");
}
printf("B =\n");
for(i = 0; i < N; i++){
for(j = 0; j < N ; j++){
printf(" %f ",Ind(B,i,j));
}
printf("\n");
}
/* Descompotition LU*/
free(A);
free(B);
return 0;
}
|
2,166
|
#include<stdio.h>
#include<sys/time.h>
/*
#define BLOCKSIZEX 64
#define BLOCKSIZEY 16
#define BLOCKSIZE BLOCKSIZEX * BLOCKSIZEY
#define GRIDSIZEX 8
#define GRIDSIZEY 16
#define GRIDSIZE GRIDSIZEX * GRIDSIZEY
#define THREAD_NUM BLOCKSIZE * GRIDSIZE
#define MIMAX 256
#define MJMAX GRIDSIZEY * (BLOCKSIZEY - 2) + 2
#define MKMAX GRIDSIZEX * (BLOCKSIZEX - 2) + 2
#define NN 750
*/
#define BLOCKSIZEX 6
#define BLOCKSIZEY 4
#define BLOCKSIZE BLOCKSIZEX * BLOCKSIZEY
#define GRIDSIZEX 3
#define GRIDSIZEY 6
#define GRIDSIZE GRIDSIZEX * GRIDSIZEY
#define THREAD_NUM BLOCKSIZE * GRIDSIZE
#define MIMAX 6
#define MJMAX GRIDSIZEY * (BLOCKSIZEY - 2) + 2
#define MKMAX GRIDSIZEX * (BLOCKSIZEX - 2) + 2
#define NN 3
/*static float p[MIMAX][MJMAX][MKMAX];
static float a[MIMAX][MJMAX][MKMAX][4];
static float b[MIMAX][MJMAX][MKMAX][3];
static float c[MIMAX][MJMAX][MKMAX][3];
static float bnd[MIMAX][MJMAX][MKMAX];
static float work1[MIMAX][MJMAX][MKMAX];
static float work2[MIMAX][MJMAX][MKMAX];*/
static int imax, jmax, kmax, mimax, mjmax, mkmax;
static float omega;
// Wall-clock seconds elapsed since the first call; the first call itself
// establishes the time base and returns 0.0.
double second(){
static int base_sec = 0, base_usec = 0;
struct timeval now;
gettimeofday(&now, NULL);
if(base_sec == 0 && base_usec == 0){
base_sec = now.tv_sec;
base_usec = now.tv_usec;
return 0.0;
}
return (double)(now.tv_sec - base_sec)
+ ((double)(now.tv_usec - base_usec)) / 1.0e6;
}
// Himeno-benchmark style 19-point Jacobi stencil, pipelined along the i
// (outermost) dimension. Each (x,y) thread owns one (j,k) column; the block
// edges act as halo threads that load but do not store. Launched with grid
// (GRIDSIZEX, GRIDSIZEY), block (BLOCKSIZEX, BLOCKSIZEY) and dynamic shared
// memory of 6 * blockDim.x * blockDim.y floats (see the launch in main).
// Per-thread squared residual accumulates in `temp` and is written to
// gosa[tid] at the end.
__global__ void jacobi(float *a0, float *a1, float *a2, float *a3, float *b0, float *b1, float *b2, float *c0, float *c1, float *c2, float *p, float *wrk1, float *wrk2, float *bnd, int nn, int imax, int jmax, int kmax, float omega, float *gosa){
int i, j, k, i2, j2, k2, n, xy, c, csb;
i = 1;
float s0, ss, temp;
//const int size = (imax-1)/(imax-1);
// Interior (j,k) coordinates: blocks overlap by 2 so edge threads are halo.
k = threadIdx.x + (blockDim.x-2) * blockIdx.x + 1;
j = threadIdx.y + (blockDim.y-2) * blockIdx.y + 1;
i2 = i-1;
k2 = threadIdx.x + blockDim.x * blockIdx.x;
j2 = threadIdx.y + blockDim.y * blockIdx.y;
const int tid = (k-1) + (j-1) * (kmax-2);
xy = kmax * jmax; // stride of one i-plane
//__shared__ float sb[BLOCKSIZE];
//__shared__ float sb2[BLOCKSIZE];
// NOTE(review): sb and sb2 are BOTH `extern __shared__`, so they alias the
// SAME dynamic shared buffer — sb2_t overlaps sb_t, etc. The launch reserves
// 6 planes, which suggests sb2 was meant to start at sb + 3 planes; confirm.
extern __shared__ float sb[];
float *sb_t = sb;
float *sb_m = sb + blockDim.x * blockDim.y;
float *sb_b = sb + 2 * blockDim.x * blockDim.y;
extern __shared__ float sb2[];
float *sb2_t = sb2;
float *sb2_m = sb2 + blockDim.x * blockDim.y;
float *sb2_b = sb2 + 2 * blockDim.x * blockDim.y;
csb = threadIdx.x + threadIdx.y * blockDim.x; // this thread's slot in a shared plane
for(n=0;n<nn;++n){
temp=0.0;
// Prologue: stencil at i == 1 (reads p, writes wrk2).
s0 = a0[i*jmax*kmax+j*kmax+k] * p[(i+1)*jmax*kmax+j*kmax+k]
+ a1[i*jmax*kmax+j*kmax+k] * p[i*jmax*kmax+(j+1)*kmax+k]
+ a2[i*jmax*kmax+j*kmax+k] * p[i*jmax*kmax+j*kmax+(k+1)]
+ b0[i*jmax*kmax+j*kmax+k]
*(p[(i+1)*jmax*kmax+(j+1)*kmax+k]
- p[(i+1)*jmax*kmax+(j-1)*kmax+k]
- p[(i-1)*jmax*kmax+(j+1)*kmax+k]
+ p[(i-1)*jmax*kmax+(j-1)*kmax+k] )
+ b1[i*jmax*kmax+j*kmax+k]
*(p[i*jmax*kmax+(j+1)*kmax+(k+1)]
- p[i*jmax*kmax+(j-1)*kmax+(k+1)]
- p[i*jmax*kmax+(j-1)*kmax+(k-1)]
+ p[i*jmax*kmax+(j+1)*kmax+(k-1)])
+ b2[i*jmax*kmax+j*kmax+k]
*(p[(i+1)*jmax*kmax+j*kmax+(k+1)]
- p[(i-1)*jmax*kmax+j*kmax+(k+1)]
- p[(i+1)*jmax*kmax+j*kmax+(k-1)]
+ p[(i-1)*jmax*kmax+j*kmax+(k-1)] )
+ c0[i*jmax*kmax+j*kmax+k] * p[(i-1)*jmax*kmax+j*kmax+k]
+ c1[i*jmax*kmax+j*kmax+k] * p[i*jmax*kmax+(j-1)*kmax+k]
+ c2[i*jmax*kmax+j*kmax+k] * p[i*jmax*kmax+j*kmax+(k-1)]
+ wrk1[i*jmax*kmax+j*kmax+k];
ss = (s0 * a3[i*jmax*kmax+j*kmax+k] - p[i*jmax*kmax+j*kmax+k]) * bnd[i*jmax*kmax+j*kmax+k];
temp = temp + ss*ss;
wrk2[i*jmax*kmax+j*kmax+k] = p[i*jmax*kmax+j*kmax+k] + omega * ss;
// Pipeline: compute plane i into wrk2 while committing plane i-1 into p.
for(i=2 ; i<imax-1 ; ++i){
i2 = i-1;
// NOTE(review): debug print — on the first pass sb_m[csb] is read before
// any write, i.e. uninitialized shared memory; confirm this is leftover.
printf("%f\n", sb_m[csb]);
sb_m[csb] = p[i*jmax*kmax+j*kmax+k];
if(0 < threadIdx.x && k < kmax-1 && 0 < j && j < jmax-1){
s0 = a0[i*jmax*kmax+j*kmax+k] * p[(i+1)*jmax*kmax+j*kmax+k]
+ a1[i*jmax*kmax+j*kmax+k] * p[i*jmax*kmax+(j+1)*kmax+k]
+ a2[i*jmax*kmax+j*kmax+k] * p[i*jmax*kmax+j*kmax+(k+1)]
+ b0[i*jmax*kmax+j*kmax+k]
*(p[(i+1)*jmax*kmax+(j+1)*kmax+k]
- p[(i+1)*jmax*kmax+(j-1)*kmax+k]
- p[(i-1)*jmax*kmax+(j+1)*kmax+k]
+ p[(i-1)*jmax*kmax+(j-1)*kmax+k] )
+ b1[i*jmax*kmax+j*kmax+k]
*(p[i*jmax*kmax+(j+1)*kmax+(k+1)]
- p[i*jmax*kmax+(j-1)*kmax+(k+1)]
- p[i*jmax*kmax+(j-1)*kmax+(k-1)]
+ p[i*jmax*kmax+(j+1)*kmax+(k-1)])
+ b2[i*jmax*kmax+j*kmax+k]
*(p[(i+1)*jmax*kmax+j*kmax+(k+1)]
- p[(i-1)*jmax*kmax+j*kmax+(k+1)]
- p[(i+1)*jmax*kmax+j*kmax+(k-1)]
+ p[(i-1)*jmax*kmax+j*kmax+(k-1)] )
+ c0[i*jmax*kmax+j*kmax+k] * p[(i-1)*jmax*kmax+j*kmax+k]
+ c1[i*jmax*kmax+j*kmax+k] * p[i*jmax*kmax+(j-1)*kmax+k]
+ c2[i*jmax*kmax+j*kmax+k] * p[i*jmax*kmax+j*kmax+(k-1)]
+ wrk1[i*jmax*kmax+j*kmax+k];
ss = (s0 * a3[i*jmax*kmax+j*kmax+k] - p[i*jmax*kmax+j*kmax+k]) * bnd[i*jmax*kmax+j*kmax+k];
temp = temp + ss*ss;
wrk2[i*jmax*kmax+j*kmax+k] = p[i*jmax*kmax+j*kmax+k] + omega * ss;
}
sb2_m[csb] = wrk2[i*jmax*kmax+j*kmax+k];
printf("%f\n", sb2_m[csb]);
// Uniform across the block (loop bounds do not depend on thread id),
// so this barrier is legal here.
__syncthreads();
// Commit the previous plane (i2 = i-1) from wrk2 back into p.
if(0 < threadIdx.x && threadIdx.x < blockDim.x-1 && 0 < threadIdx.y && threadIdx.y < blockDim.y-1){
s0 = a0[i2*jmax*kmax+j*kmax+k] * wrk2[(i2+1)*jmax*kmax+j*kmax+k]
+ a1[i2*jmax*kmax+j*kmax+k] * wrk2[i2*jmax*kmax+(j+1)*kmax+k]
+ a2[i2*jmax*kmax+j*kmax+k] * wrk2[i2*jmax*kmax+j*kmax+(k+1)]
+ b0[i2*jmax*kmax+j*kmax+k]
*(wrk2[(i2+1)*jmax*kmax+(j+1)*kmax+k]
- wrk2[(i2+1)*jmax*kmax+(j-1)*kmax+k]
- wrk2[(i2-1)*jmax*kmax+(j+1)*kmax+k]
+ wrk2[(i2-1)*jmax*kmax+(j-1)*kmax+k] )
+ b1[i2*jmax*kmax+j*kmax+k]
*(wrk2[i2*jmax*kmax+(j+1)*kmax+(k+1)]
- wrk2[i2*jmax*kmax+(j-1)*kmax+(k+1)]
- wrk2[i2*jmax*kmax+(j-1)*kmax+(k-1)]
+ wrk2[i2*jmax*kmax+(j+1)*kmax+(k-1)])
+ b2[i2*jmax*kmax+j*kmax+k]
*(wrk2[(i2+1)*jmax*kmax+j*kmax+(k+1)]
- wrk2[(i2-1)*jmax*kmax+j*kmax+(k+1)]
- wrk2[(i2+1)*jmax*kmax+j*kmax+(k-1)]
+ wrk2[(i2-1)*jmax*kmax+j*kmax+(k-1)] )
+ c0[i2*jmax*kmax+j*kmax+k] * wrk2[(i2-1)*jmax*kmax+j*kmax+k]
+ c1[i2*jmax*kmax+j*kmax+k] * wrk2[i2*jmax*kmax+(j-1)*kmax+k]
+ c2[i2*jmax*kmax+j*kmax+k] * wrk2[i2*jmax*kmax+j*kmax+(k-1)]
+ wrk1[i2*jmax*kmax+j*kmax+k];
ss = ( s0 * a3[i2*jmax*kmax+j*kmax+k] - wrk2[i2*jmax*kmax+j*kmax+k] ) * bnd[i2*jmax*kmax+j*kmax+k];
temp = temp + ss*ss;
p[i2*jmax*kmax+j*kmax+k] = wrk2[i2*jmax*kmax+j*kmax+k] + omega * ss;
// NOTE(review): `c` is incremented here but never initialized or read —
// looks like dead/leftover code; confirm before removing.
c += xy;
}
}
// Epilogue: commit the last plane (i2 = imax-1).
i2 = imax-1;
if(0 < threadIdx.x && threadIdx.x < blockDim.x-1 && 0 < threadIdx.y && threadIdx.y < blockDim.y-1){
s0 = a0[i2*jmax*kmax+j*kmax+k] * wrk2[(i2+1)*jmax*kmax+j*kmax+k]
+ a1[i2*jmax*kmax+j*kmax+k] * wrk2[i2*jmax*kmax+(j+1)*kmax+k]
+ a2[i2*jmax*kmax+j*kmax+k] * wrk2[i2*jmax*kmax+j*kmax+(k+1)]
+ b0[i2*jmax*kmax+j*kmax+k]
*(wrk2[(i2+1)*jmax*kmax+(j+1)*kmax+k]
- wrk2[(i2+1)*jmax*kmax+(j-1)*kmax+k]
- wrk2[(i2-1)*jmax*kmax+(j+1)*kmax+k]
+ wrk2[(i2-1)*jmax*kmax+(j-1)*kmax+k] )
+ b1[i2*jmax*kmax+j*kmax+k]
*(wrk2[i2*jmax*kmax+(j+1)*kmax+(k+1)]
- wrk2[i2*jmax*kmax+(j-1)*kmax+(k+1)]
- wrk2[i2*jmax*kmax+(j-1)*kmax+(k-1)]
+ wrk2[i2*jmax*kmax+(j+1)*kmax+(k-1)])
+ b2[i2*jmax*kmax+j*kmax+k]
*(wrk2[(i2+1)*jmax*kmax+j*kmax+(k+1)]
- wrk2[(i2-1)*jmax*kmax+j*kmax+(k+1)]
- wrk2[(i2+1)*jmax*kmax+j*kmax+(k-1)]
+ wrk2[(i2-1)*jmax*kmax+j*kmax+(k-1)] )
+ c0[i2*jmax*kmax+j*kmax+k] * wrk2[(i2-1)*jmax*kmax+j*kmax+k]
+ c1[i2*jmax*kmax+j*kmax+k] * wrk2[i2*jmax*kmax+(j-1)*kmax+k]
+ c2[i2*jmax*kmax+j*kmax+k] * wrk2[i2*jmax*kmax+j*kmax+(k-1)]
+ wrk1[i2*jmax*kmax+j*kmax+k];
ss = ( s0 * a3[i2*jmax*kmax+j*kmax+k] - wrk2[i2*jmax*kmax+j*kmax+k] ) * bnd[i2*jmax*kmax+j*kmax+k];
temp = temp + ss*ss;
p[i2*jmax*kmax+j*kmax+k] = wrk2[i2*jmax*kmax+j*kmax+k] + omega * ss;
}
} /* end n loop */
__syncthreads();
// Publish this thread's accumulated squared residual.
gosa[tid] = temp;
}
// Host driver for the Himeno-style Jacobi benchmark: allocates and initializes
// the coefficient arrays, runs the jacobi kernel NN times over the grid,
// gathers the per-thread residuals (gosa) and reports timing.
int main(){
int i, j, k;
float final_gosa = 0.0f; // accumulator — must start at zero (was uninitialized)
double cpu0, cpu1, nflop, xmflops2 = 0.0, score;
float gosa[THREAD_NUM];
/************************************/
float *p;
float *a0, *a1, *a2, *a3;
float *b0, *b1, *b2;
float *c0, *c1, *c2;
float *bnd;
float *wrk1, *wrk2;
/************************************/
mimax = MIMAX;
mjmax = MJMAX;
mkmax = MKMAX;
imax = MIMAX-1;
jmax = MJMAX-1;
kmax = MKMAX-1;
int N_IJK = mimax*mjmax*mkmax;
int WORKSIZE = THREAD_NUM*mimax;
/************************************/
float *dev_p;
float *dev_a0, *dev_a1, *dev_a2, *dev_a3;
float *dev_b0, *dev_b1, *dev_b2;
float *dev_c0, *dev_c1, *dev_c2;
float *dev_bnd;
float *dev_wrk1, *dev_wrk2;
float *dev_gosa;
/************************************/
omega = 0.8;
/******allocate mem on CPU***********/
a0 = (float*)malloc(sizeof(float)*N_IJK);
a1 = (float*)malloc(sizeof(float)*N_IJK);
a2 = (float*)malloc(sizeof(float)*N_IJK);
a3 = (float*)malloc(sizeof(float)*N_IJK);
b0 = (float*)malloc(sizeof(float)*N_IJK);
b1 = (float*)malloc(sizeof(float)*N_IJK);
b2 = (float*)malloc(sizeof(float)*N_IJK);
c0 = (float*)malloc(sizeof(float)*N_IJK);
c1 = (float*)malloc(sizeof(float)*N_IJK);
c2 = (float*)malloc(sizeof(float)*N_IJK);
p = (float*)malloc(sizeof(float)*N_IJK);
wrk1 = (float*)malloc(sizeof(float)*N_IJK);
wrk2 = (float*)malloc(sizeof(float)*WORKSIZE);
bnd = (float*)malloc(sizeof(float)*N_IJK);
/******allocate mem on GPU***********/
cudaMalloc((void**)&dev_a0, N_IJK*sizeof(float));
cudaMalloc((void**)&dev_a1, N_IJK*sizeof(float));
cudaMalloc((void**)&dev_a2, N_IJK*sizeof(float));
cudaMalloc((void**)&dev_a3, N_IJK*sizeof(float));
cudaMalloc((void**)&dev_b0, N_IJK*sizeof(float));
cudaMalloc((void**)&dev_b1, N_IJK*sizeof(float));
cudaMalloc((void**)&dev_b2, N_IJK*sizeof(float));
cudaMalloc((void**)&dev_c0, N_IJK*sizeof(float));
cudaMalloc((void**)&dev_c1, N_IJK*sizeof(float));
cudaMalloc((void**)&dev_c2, N_IJK*sizeof(float));
cudaMalloc((void**)&dev_p, N_IJK*sizeof(float));
cudaMalloc((void**)&dev_bnd, N_IJK*sizeof(float));
cudaMalloc((void**)&dev_wrk1, N_IJK*sizeof(float));
cudaMalloc((void**)&dev_wrk2, WORKSIZE*sizeof(float));
cudaMalloc((void**)&dev_gosa, sizeof(float)*THREAD_NUM);
/*****Initialize: standard Himeno initial conditions*********************/
for(i=0 ; i<mimax ; ++i){
for(j=0 ; j<mjmax ; ++j){
for(k=0 ; k<mkmax ; ++k){
a0[i*mjmax*mkmax+j*mkmax+k]=1.0;
a1[i*mjmax*mkmax+j*mkmax+k]=1.0;
a2[i*mjmax*mkmax+j*mkmax+k]=1.0;
a3[i*mjmax*mkmax+j*mkmax+k]=1.0/6.0;
b0[i*mjmax*mkmax+j*mkmax+k]=0.0;
b1[i*mjmax*mkmax+j*mkmax+k]=0.0;
b2[i*mjmax*mkmax+j*mkmax+k]=0.0;
c0[i*mjmax*mkmax+j*mkmax+k]=1.0;
c1[i*mjmax*mkmax+j*mkmax+k]=1.0;
c2[i*mjmax*mkmax+j*mkmax+k]=1.0;
p[i*mjmax*mkmax+j*mkmax+k]=(float)(i*i)/(float)(imax*imax);
wrk1[i*mjmax*mkmax+j*mkmax+k]=0.0;
bnd[i*mjmax*mkmax+j*mkmax+k]=1.0;
}
}
}
/*****copy arrays to device mem*******/
cudaMemcpy(dev_a0, a0, N_IJK*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_a1, a1, N_IJK*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_a2, a2, N_IJK*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_a3, a3, N_IJK*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b0, b0, N_IJK*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b1, b1, N_IJK*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b2, b2, N_IJK*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_c0, c0, N_IJK*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_c1, c1, N_IJK*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_c2, c2, N_IJK*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_wrk1, wrk1, N_IJK*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_wrk2, wrk2, WORKSIZE*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_bnd, bnd, N_IJK*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_p, p, N_IJK*sizeof(float), cudaMemcpyHostToDevice);
/************************************/
printf("mimax = %d mjmax = %d mkmax = %d\n", MIMAX, MJMAX, MKMAX);
printf("imax = %d jmax = %d kmax = %d\n", imax, jmax, kmax);
cpu0 = second(); /**measuring**/
dim3 block(BLOCKSIZEX, BLOCKSIZEY, 1);
dim3 grid(GRIDSIZEX, GRIDSIZEY, 1);
// 6 shared planes per block (see the sb/sb2 layout in the kernel).
jacobi<<<grid, block, sizeof(float) * BLOCKSIZE * 6>>>(dev_a0, dev_a1, dev_a2, dev_a3, dev_b0, dev_b1, dev_b2, dev_c0, dev_c1, dev_c2, dev_p, dev_wrk1, dev_wrk2, dev_bnd, NN, mimax, mjmax, mkmax, omega, dev_gosa);
cudaDeviceSynchronize();
cpu1 = second();
cudaMemcpy(gosa, dev_gosa, sizeof(float)*THREAD_NUM, cudaMemcpyDeviceToHost);
/******Free mem on the GPU**********/
cudaFree(dev_a0);
cudaFree(dev_a1);
cudaFree(dev_a2);
cudaFree(dev_a3);
cudaFree(dev_b0);
cudaFree(dev_b1);
cudaFree(dev_b2);
cudaFree(dev_c0);
cudaFree(dev_c1);
cudaFree(dev_c2);
cudaFree(dev_p);
cudaFree(dev_wrk1);
cudaFree(dev_wrk2);
cudaFree(dev_bnd);
cudaFree(dev_gosa);
/********Final sum of gosa***********/
for(int gosa_index=0; gosa_index<THREAD_NUM; gosa_index++){
final_gosa += gosa[gosa_index];
}
/******Free mem on the host**********/
free(a0); free(a1); free(a2); free(a3);
free(b0); free(b1); free(b2);
free(c0); free(c1); free(c2);
free(p); free(wrk1); free(wrk2); free(bnd);
/************************************/
nflop = (kmax-2)*(jmax-2)*(imax-2)*34;
if(cpu1 != 0.0){
xmflops2 = nflop/cpu1*1.0e-6*(float)NN;
}
score = xmflops2/32.27;
printf("gpu: %f sec.\n", cpu1);
printf("Loop executed for %d times\n", NN);
printf("Gosa: %e \n", final_gosa);
//printf("MFLOPS measured: %f\n", xmflops2);
//printf("Score: %f\n", score);
return(0);
}
|
2,167
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include<iostream>
#include<limits.h>
#define NUM_NODES 5
using namespace std;
// SSSP stage 1: every frontier vertex (Ma[id] set) relaxes its outgoing CSR
// edges into the tentative-cost array Ua.
// Va: row offsets (NUM_NODES+1), Ea: edge targets, Wa: per-EDGE weights,
// Ma: frontier mask, Ca: settled costs, Ua: tentative costs.
__global__ void CUDA_SSSP_KERNEL1(int *Va, int *Ea, int *Wa, bool *Ma, int *Ca, int *Ua, bool *done)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
// Stray threads must not touch the arrays (the old `id > NUM_NODES` test
// was off by one and fell through into out-of-bounds accesses).
if (id >= NUM_NODES) return;
if (Ma[id])
{
Ma[id] = false;
// (The previous __syncthreads() here sat inside divergent control flow,
// which is undefined behavior; it was also unnecessary, so it is gone.)
int start = Va[id];
int end = Va[id+1];
for (int i = start; i < end; i++)
{
int nid = Ea[i]; // neighbour reached through edge i
// Candidate cost goes through THIS vertex with the weight of edge i;
// the old code mixed in Ca[nid] and indexed Wa by vertex.
int cand = Ca[id] + Wa[i];
// NOTE(review): concurrent frontier vertices can still race on Ua[nid];
// atomicMin(&Ua[nid], cand) would make this fully safe — confirm.
if (Ua[nid] > cand)
{
Ua[nid] = cand;
}
}
}
}
// SSSP stage 2: commit improved tentative costs, rebuild the frontier mask,
// and clear *done when anything changed so the host runs another iteration.
__global__ void CUDA_SSSP_KERNEL2(int *Va, int *Ea, int *Wa, bool *Ma, int *Ca, int *Ua, bool *done)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id >= NUM_NODES) return; // guard stray threads (old test was off by one)
if (Ca[id] > Ua[id])
{
Ca[id] = Ua[id];
Ma[id] = true; // vertex re-enters the frontier
*done = false;
}
Ua[id] = Ca[id]; // resynchronize tentative with settled cost
}
// Builds a random undirected weighted graph, converts it to CSR (v/e/w) and
// runs Bellman-Ford-style SSSP from vertex 0 on the GPU until no cost changes.
int main(int argc, char** argv)
{
int** graph = new int* [NUM_NODES];
int edges = 0;
for(int i=0;i<NUM_NODES;i++)
{
// Value-initialize so absent edges (and the diagonal) are 0, not garbage.
graph[i] = new int[NUM_NODES]();
}
for(int i=0;i<NUM_NODES;i++)
{
for(int j=i+1;j<NUM_NODES;j++)
{
int x = rand()%100;
if(x!=0)
{
edges+=1;
graph[i][j] = x;
graph[j][i] = x;
}
}
}
// CSR arrays: v = row offsets, e = edge targets, w = edge weights.
int* v = new int[NUM_NODES+1];
int* e = new int[2*edges];
int* w = new int[2*edges];
int x = 0;
for(int i=0;i<NUM_NODES;i++)
{
v[i] = x;
for(int j=0;j<NUM_NODES;j++)
{
if(graph[i][j]!=0)
{
e[x] = j;
w[x] = graph[i][j];
x+=1;
}
}
}
v[NUM_NODES] = x;
// An aggregate initializer like {INT_MAX} only sets element 0 and zeroes the
// rest, which silently broke the relaxation — fill the arrays explicitly.
bool mask[NUM_NODES];
int cost[NUM_NODES];
int updated[NUM_NODES];
for(int i=0;i<NUM_NODES;i++)
{
mask[i] = false;
cost[i] = INT_MAX;
updated[i] = INT_MAX;
}
int source = 0;
mask[source] = true;
updated[source] = 0;
cost[source] = 0;
int* Va;
cudaMalloc(&Va, sizeof(int)*(NUM_NODES+1));
cudaMemcpy(Va, v, sizeof(int)*(NUM_NODES+1), cudaMemcpyHostToDevice);
int* Ea;
cudaMalloc(&Ea, sizeof(int)*(2*edges));
cudaMemcpy(Ea, e, sizeof(int)*(2*edges), cudaMemcpyHostToDevice);
int* Wa;
cudaMalloc(&Wa, sizeof(int)*(2*edges));
cudaMemcpy(Wa, w, sizeof(int)*(2*edges), cudaMemcpyHostToDevice);
bool* Ma;
cudaMalloc(&Ma, sizeof(bool)*NUM_NODES);
cudaMemcpy(Ma, mask, sizeof(bool)*NUM_NODES, cudaMemcpyHostToDevice);
int* Ua;
cudaMalloc(&Ua, sizeof(int)*NUM_NODES);
cudaMemcpy(Ua, updated, sizeof(int)*NUM_NODES, cudaMemcpyHostToDevice);
int* Ca;
cudaMalloc(&Ca, sizeof(int)*NUM_NODES);
cudaMemcpy(Ca, cost, sizeof(int)*NUM_NODES, cudaMemcpyHostToDevice);
int num_blks = 1;
int threads = 5;
bool done;
bool* d_done;
cudaMalloc((void**)&d_done, sizeof(bool));
// Iterate relax/commit until a full pass changes nothing.
do {
done = true;
cudaMemcpy(d_done, &done, sizeof(bool), cudaMemcpyHostToDevice);
CUDA_SSSP_KERNEL1<<<num_blks, threads>>>(Va, Ea, Wa, Ma, Ca, Ua, d_done);
CUDA_SSSP_KERNEL2<<<num_blks, threads>>>(Va, Ea, Wa, Ma, Ca, Ua, d_done);
cudaMemcpy(&done, d_done , sizeof(bool), cudaMemcpyDeviceToHost);
} while (!done);
cudaMemcpy(cost, Ca, sizeof(int)*NUM_NODES, cudaMemcpyDeviceToHost);
cout<<"Cost: "<<endl;
for(int i=0;i<NUM_NODES;i++)
{
cout<<cost[i]<<" ";
}
cout<<endl;
// Release device and host memory.
cudaFree(Va);
cudaFree(Ea);
cudaFree(Wa);
cudaFree(Ma);
cudaFree(Ua);
cudaFree(Ca);
cudaFree(d_done);
for(int i=0;i<NUM_NODES;i++) delete[] graph[i];
delete[] graph;
delete[] v;
delete[] e;
delete[] w;
return 0;
}
|
2,168
|
//compile
/** nvcc -arch=sm_11 dijkstra_cuda.cu -o dijkstra_cuda **/
//reference: github.com/AlexDWong/dijkstra-CUDA/
#include <iostream>
#include <stdio.h>
#include <cstdlib>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <limits.h>
#define VERTICES 16384 //number of vertices
#define CPU_IMP 1 //number of Dijkstra implementations (non-GPU)
#define GPU_IMP 1 //number of Dijkstra implementations (GPU)
#define BLOCKS 32
#define THREADS_PER_BLOCK 512
typedef int data_t;
// Driver: builds a dense random graph, runs serial Dijkstra and the two-kernel
// CUDA Dijkstra from vertex 0, times both and cross-checks the distances.
int main() {
clock_t cpu_startTime, cpu_endTime;
double cpu_ElapseTime = 0;
int* d_min;
int* d_minIndex;
int* d_temp;
int* d_tempIndex;
cudaMalloc((void**)& d_min, sizeof(int));
cudaMalloc((void**)& d_minIndex, sizeof(int));
cudaMalloc((void**)& d_temp, BLOCKS * sizeof(int));
cudaMalloc((void**)& d_tempIndex, BLOCKS * sizeof(int));
// cudaMemset is byte-wise, so memset(d_min, INT_MAX, ...) would store
// 0xFFFFFFFF (-1), not INT_MAX — copy the real value instead.
int host_int_max = INT_MAX;
cudaMemcpy(d_min, &host_int_max, sizeof(int), cudaMemcpyHostToDevice);
cudaMemset(d_minIndex, 0, sizeof(int));
//functions
void setIntArrayValue(int* in_array, int array_size, int value);
void setDataArrayValue(data_t * in_array, int array_size, data_t init_value);
void initializeGraphZero(data_t * graph, int num_vertices);
void constructGraphEdge(data_t * graph, int num_vertices);
//Dijkstra's implementations
void dijkstraCPUSerial(data_t * graph, data_t * node_dist, int* visited_node, int num_vertices, int v_start); //serial Dijkstra
__global__ void closestNodeCUDA(int* d_min, int* d_minIndex, int* d_temp, int* d_tempIndex, data_t * node_dist, int* visited_node, int* global_closest, int num_vertices); //Dijkstra CUDA Pt. 1
__global__ void cudaRelax(data_t * graph, data_t * node_dist, int* visited_node, int* source); //Dijkstra CUDA Pt. 2
/*************SETUP GRAPH*************/
int graph_size = VERTICES * VERTICES * sizeof(data_t);
int int_array = VERTICES * sizeof(int);
int data_array = VERTICES * sizeof(data_t);
data_t* graph = (data_t*)malloc(graph_size);
data_t* node_dist = (data_t*)malloc(data_array);
int* visited_node = (int*)malloc(int_array);
data_t* dist_matrix = (data_t*)malloc((CPU_IMP + GPU_IMP) * data_array);
printf("Variables created, allocated\n");
data_t* gpu_graph; //CUDA mallocs
data_t* gpu_node_dist;
int* gpu_visited_node;
cudaMalloc((void**)& gpu_graph, graph_size);
cudaMalloc((void**)& gpu_node_dist, data_array);
cudaMalloc((void**)& gpu_visited_node, int_array);
int* closest_vertex = (int*)malloc(sizeof(int)); //for closest vertex
int* gpu_closest_vertex;
closest_vertex[0] = -1;
cudaMalloc((void**)& gpu_closest_vertex, (sizeof(int)));
cudaMemcpy(gpu_closest_vertex, closest_vertex, sizeof(int), cudaMemcpyHostToDevice);
setDataArrayValue(node_dist, VERTICES, INT_MAX);
setIntArrayValue(visited_node, VERTICES, 0);
initializeGraphZero(graph, VERTICES);
constructGraphEdge(graph, VERTICES);
printf("Variables initialized.\n");
/************RUN DIJKSTRA'S************/
int i;
int origin = 0;
printf("Origin vertex: %d\n", origin);
/* SERIAL DIJKSTRA */
int version = 0;
printf("Running serial...");
cpu_startTime = clock();
dijkstraCPUSerial(graph, node_dist, visited_node, VERTICES, origin);
cpu_endTime = clock();
cpu_ElapseTime = ((cpu_endTime - cpu_startTime) / (double)CLOCKS_PER_SEC);
for (i = 0; i < VERTICES; i++) {
dist_matrix[version * VERTICES + i] = node_dist[i];
}
printf("Done!\n");
/* CUDA DIJKSTRA */
version++;
cudaEvent_t gpu_start, gpu_stop;
float gpu_elapsed_exec;
cudaEventCreate(&gpu_start);
cudaEventCreate(&gpu_stop);
setDataArrayValue(node_dist, VERTICES, INT_MAX); //reset previous data
setIntArrayValue(visited_node, VERTICES, 0);
node_dist[origin] = 0;
cudaMemcpy(gpu_graph, graph, graph_size, cudaMemcpyHostToDevice);
cudaMemcpy(gpu_node_dist, node_dist, data_array, cudaMemcpyHostToDevice);
cudaMemcpy(gpu_visited_node, visited_node, int_array, cudaMemcpyHostToDevice);
dim3 gridMin(BLOCKS, 1, 1);
dim3 blockMin(THREADS_PER_BLOCK, 1, 1);
dim3 gridRelax(BLOCKS, 1, 1); /** number of blocks **/
dim3 blockRelax(THREADS_PER_BLOCK, 1, 1); /** number of threads per block **/
cudaEventRecord(gpu_start, 0);
// One settle-and-relax round per vertex.
for (int i = 0; i < VERTICES; i++) {
closestNodeCUDA <<<gridMin, blockMin >>> (d_min, d_minIndex, d_temp, d_tempIndex, gpu_node_dist, gpu_visited_node, gpu_closest_vertex, VERTICES); //find min
cudaRelax <<<gridRelax, blockRelax >>> (gpu_graph, gpu_node_dist, gpu_visited_node, gpu_closest_vertex); //relax
}
cudaEventRecord(gpu_stop, 0);
cudaEventSynchronize(gpu_stop);
cudaEventElapsedTime(&gpu_elapsed_exec, gpu_start, gpu_stop); //elapsed execution time
cudaEventDestroy(gpu_start);
cudaEventDestroy(gpu_stop);
cudaMemcpy(node_dist, gpu_node_dist, data_array, cudaMemcpyDeviceToHost);
cudaMemcpy(visited_node, gpu_visited_node, int_array, cudaMemcpyDeviceToHost);
for (i = 0; i < VERTICES; i++) {
dist_matrix[version * VERTICES + i] = node_dist[i];
}
//free device memory
cudaFree(d_min);
cudaFree(d_minIndex);
cudaFree(d_temp);
cudaFree(d_tempIndex);
cudaFree(gpu_graph);
cudaFree(gpu_node_dist);
cudaFree(gpu_visited_node);
cudaFree(gpu_closest_vertex);
printf("\nVertices: %d", VERTICES);
printf("\n\nSerial Time (s): %.6f\n", cpu_ElapseTime);
printf("\n\nCUDA Time (ms): %.3f\n", gpu_elapsed_exec);
/***************ERROR CHECKING***************/
printf("\n\nError checking:\n");
printf("----Serial vs CUDA:\n");
int d_errors = 0;
for (i = 0; i < VERTICES; i++) {
if (dist_matrix[i] != dist_matrix[VERTICES + i]) {
d_errors++;
printf("d_Error: Serial has %d %d, CUDA has %d %d\n", dist_matrix[i], i, dist_matrix[VERTICES + i], VERTICES + i);
}
}
printf("--------%d dist errors found.\n", d_errors);
//free host memory
free(graph);
free(node_dist);
free(visited_node);
free(dist_matrix);
free(closest_vertex);
return 0;
}
/****************DIJKSTRA'S ALGORITHM IMPLEMENTATIONS****************/
/* Serial Implementation */
/* Linear scan for the unvisited vertex with the smallest tentative distance.
Returns -1 when no unvisited vertex has a distance below INT_MAX. */
int closestNode(data_t* node_dist, int* visited_node, int num_vertices) {
int best = -1;
int bestDist = INT_MAX;
for (int v = 0; v < num_vertices; v++) {
if (visited_node[v] == 0 && node_dist[v] < bestDist) {
bestDist = node_dist[v];
best = v;
}
}
return best;
}
/* Serial Dijkstra from v_start over a dense adjacency matrix; fills node_dist
with shortest-path distances. A zero edge weight means "no edge". */
void dijkstraCPUSerial(data_t* graph, data_t* node_dist, int* visited_node, int num_vertices, int v_start) {
//functions
void setIntArrayValue(int* in_array, int array_size, int init_value);
void setDataArrayValue(data_t * in_array, int array_size, data_t init_value);
int closestNode(data_t * node_dist, int* visited_node, int num_vertices);
// Reset state left over from any previous run.
setDataArrayValue(node_dist, VERTICES, INT_MAX);
setIntArrayValue(visited_node, VERTICES, 0);
node_dist[v_start] = 0;
for (int iter = 0; iter < num_vertices; iter++) {
int curr = closestNode(node_dist, visited_node, num_vertices);
visited_node[curr] = 1;
// Relax every edge leaving the freshly settled vertex.
for (int nbr = 0; nbr < num_vertices; nbr++) {
int cand = node_dist[curr] + graph[curr * num_vertices + nbr];
if ((visited_node[nbr] != 1)
&& (graph[curr * num_vertices + nbr] != (data_t)(0))
&& (cand < node_dist[nbr])) {
node_dist[nbr] = cand;
}
}
}
}
/* CUDA implementation */
/* Stage 1 of CUDA Dijkstra: each block tree-reduces its slice of node_dist
(unvisited vertices only) into temp/tempIndex; thread (0,0) then combines
the per-block minima, publishes the winner and marks it visited.
NOTE(review): the cross-block combine assumes all blocks have already
written temp[], but kernels give no inter-block ordering guarantee — this
is a latent race; a cooperative launch with grid sync (or a second kernel)
would make it sound. Left structurally as-is pending confirmation. */
__global__ void closestNodeCUDA(int* min_value, int* minIndex, int* temp, int* tempIndex, data_t* node_dist, int* visited_node, int* global_closest, int num_vertices) {
unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ int cache[THREADS_PER_BLOCK];
__shared__ int cacheIndex[THREADS_PER_BLOCK];
// Every thread seeds its slot: shared memory is uninitialized, and the old
// code left slots of out-of-range threads as garbage that could win the min.
if (index < num_vertices && node_dist[index] < INT_MAX && visited_node[index] == 0) {
cache[threadIdx.x] = node_dist[index];
cacheIndex[threadIdx.x] = index;
}
else {
cache[threadIdx.x] = INT_MAX;
cacheIndex[threadIdx.x] = -1;
}
__syncthreads();
// Standard shared-memory tree reduction within the block.
unsigned int i = blockDim.x / 2;
while (i != 0) {
if (threadIdx.x < i) {
if (cache[threadIdx.x + i] < cache[threadIdx.x]) {
cache[threadIdx.x] = cache[threadIdx.x + i];
cacheIndex[threadIdx.x] = cacheIndex[threadIdx.x + i];
}
}
__syncthreads();
i /= 2;
}
if (threadIdx.x == 0) {
temp[blockIdx.x] = cache[0];
tempIndex[blockIdx.x] = cacheIndex[0];
}
// Single-thread combine of the per-block minima. No __syncthreads() in this
// branch: a barrier inside divergent control flow is undefined behavior,
// and a block barrier cannot order other blocks anyway.
if (threadIdx.x == 0 && blockIdx.x == 0) {
for (unsigned int k = BLOCKS / 2; k != 0; k /= 2) {
for (int j = 0; j < k; ++j) {
if (temp[j + k] < temp[j]) {
temp[j] = temp[j + k];
tempIndex[j] = tempIndex[j + k];
}
}
}
*min_value = temp[0];
*minIndex = tempIndex[0];
global_closest[0] = *minIndex;
// Guard: when every reachable vertex is visited the index is -1, and
// writing visited_node[-1] would corrupt memory.
if (*minIndex >= 0) {
visited_node[*minIndex] = 1;
}
}
}
/* Stage 2 of CUDA Dijkstra: relax all edges out of the most recently settled
vertex; one thread per destination vertex. Zero edge weight = no edge. */
__global__ void cudaRelax(data_t* graph, data_t* node_dist, int* visited_node, int* global_closest) {
int next = blockIdx.x * blockDim.x + threadIdx.x;
if (next >= VERTICES) return; // grid may over-cover VERTICES
int source = global_closest[0];
if (source < 0) return; // stage 1 found no settleable vertex
data_t edge = graph[source * VERTICES + next];
data_t new_dist = node_dist[source] + edge;
if ((edge != 0) &&
(visited_node[next] != 1) &&
(new_dist < node_dist[next])) {
node_dist[next] = new_dist;
}
}
/********FUNCTIONS*********/
/* Initialize elements of a 1D int array with an initial value */
/* Fill the first array_size entries of in_array with init_value. */
void setIntArrayValue(int* in_array, int array_size, int init_value) {
    for (int idx = 0; idx < array_size; ++idx)
        in_array[idx] = init_value;
}
/* Initialize elements of a 1D data_t array with an initial value */
/* Fill the first array_size entries of in_array with init_value. */
void setDataArrayValue(data_t* in_array, int array_size, data_t init_value) {
    for (int idx = array_size; idx-- > 0; )
        in_array[idx] = init_value;
}
/* Construct graph with no edges or weights */
/* Zero-weight every edge of a num_vertices x num_vertices adjacency matrix
 * (weight 0 means "no edge" in this representation). */
void initializeGraphZero(data_t* graph, int num_vertices) {
    const int total = num_vertices * num_vertices;
    for (int k = 0; k < total; ++k)
        graph[k] = (data_t)0;
}
/* Construct a fully connected, undirected graph with non-negative edges and a minimum degree for vertices. */
/* Build a symmetric (undirected) graph with random edge weights in [0, 999].
 * Weight 0 still means "no edge", so a few pairs may end up unconnected. */
void constructGraphEdge(data_t* graph, int num_vertices) {
    printf("Initializing a connected graph...");
    for (int row = 0; row < num_vertices; row++) {
        for (int col = row; col < num_vertices; col++) {
            data_t w = rand() % 1000;         // one draw per unordered pair
            graph[row * num_vertices + col] = w;
            graph[col * num_vertices + row] = w;  // mirror for symmetry
        }
    }
    printf("done!\n");
}
|
2,169
|
#include "includes.h"
/* Transpose an nx x ny matrix, each thread copying 8 elements spaced
 * blockDim.x apart along the row direction (8-way unrolled). */
__global__ void transposeUnroll8Row(float *out, float *in, const int nx, const int ny)
{
    unsigned int ix = blockDim.x * blockIdx.x * 8 + threadIdx.x;
    unsigned int iy = blockDim.y * blockIdx.y + threadIdx.y;
    unsigned int ti = iy * nx + ix; // row-major read position
    unsigned int to = ix * ny + iy; // column-major write position
    // Guard covers all 8 strided elements at once.
    if (ix + 7 * blockDim.x < nx && iy < ny)
    {
        #pragma unroll
        for (int k = 0; k < 8; ++k)
            out[to + ny * k * blockDim.x] = in[ti + k * blockDim.x];
    }
}
|
2,170
|
#include<stdio.h>
#include<stdlib.h>
#include<cuda_runtime.h>
#include<iostream>
#include <iostream>
#include <math.h>
#include <stdlib.h>
#include <time.h>
#include <iomanip>
#define n 1000
// Matrix-vector product: each thread accumulates one row of C += A * g.
// A is N x N row-major, g and C are length-N vectors; N is fixed at 1000 to
// match the host-side #define n.
__global__ void Matrix_Product (double *A, double *g, double *C)
{
    double Cvalue = 0.00;
    // FIX: the host launches a 1-D grid in x (griddim(100,1), blockdim(10,1)),
    // but the original indexed with .y, so every thread computed row 0 only.
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    const int N = 1000;   // must match the host-side matrix dimension
    // FIX: was `row > N`, which let row == N read one row past the matrix.
    if (row >= N) return;
    for (int e = 0; e < N; e++)
    {
        Cvalue += A[N*row + e] * g[e];
    }
    C[row] += Cvalue;
}
using namespace std;
int main(){
    // Power iteration for the dominant eigenvalue of an n x n matrix.
    // FIX: n*n doubles is ~8 MB; as automatic variables these arrays overflow
    // the default stack, so they are given static storage duration instead.
    static double a[n*n], x[n], c[n];
    double temp = 0, d = 2;
    srand(time(NULL));
    // Deterministic (despite srand) synthetic matrix entries.
    for (long int i = 0; i < n*n; i++)
    {
        a[i] = 2*i*314.9568298 + 100;
    }
    // Initial eigenvector guess.
    for (int i = 0; i < n; i++)
    {
        x[i] = 0.5;
    }
    x[n-1] = 1;
    cudaEvent_t start, stop;
    float elapsedTime;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    double *dev_a, *dev_x, *dev_c;
    dim3 griddim(100, 1);   // 100 blocks x 10 threads = n rows
    dim3 blockdim(10, 1);
    cudaMalloc( (void**)&dev_a, n *n* sizeof(double) );
    cudaMalloc( (void**)&dev_c, n * sizeof(double) );
    cudaMalloc( (void**)&dev_x, n * sizeof(double) );
    // The matrix is constant across iterations: copy it once.
    cudaMemcpy( dev_a, a, n * n * sizeof(double), cudaMemcpyHostToDevice );
    // Iterate x = A*x, normalize by the dominant entry, until the eigenvalue
    // estimate d stops changing.
    while (fabs(d - temp) > 0.0000000000001)
    {
        for (int i = 0; i < n; i++)
        {
            c[i] = 0;
        }
        cudaMemcpy( dev_x, x, n * sizeof(double), cudaMemcpyHostToDevice );
        cudaMemcpy( dev_c, c, n * sizeof(double), cudaMemcpyHostToDevice );
        Matrix_Product<<<griddim, blockdim>>>( dev_a, dev_x, dev_c );
        cudaMemcpy( c, dev_c, n * sizeof(double), cudaMemcpyDeviceToHost );
        for (int i = 0; i < n; i++)
        {
            x[i] = c[i];
        }
        temp = d;
        d = 0;
        // d <- entry of x with the largest magnitude (keeps its sign).
        for (int i = 0; i < n; i++)
        {
            if (fabs(x[i]) > fabs(d))
                d = x[i];
        }
        for (int i = 0; i < n; i++){
            x[i] /= d;
        }
    }
    cudaFree( dev_a );
    cudaFree( dev_x );
    cudaFree( dev_c );
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    cout<<"\n\nElapsed Time = "<<elapsedTime<<" ms";
    return 0;
}
|
2,171
|
/*
* Solve-1 by SnipGhost 22.03.2017
*/
#include <stdio.h>
#include <stdlib.h>
#define INCREMENT 5
#define BLOCK_SIZE 8
#define CUDA_CHECK_RETURN(value) { \
cudaError_t _m_cudaStat = value; \
if (_m_cudaStat != cudaSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
/* One explicit finite-difference tick of the 1-D heat equation.
 * data: state at the previous tick; buff: accumulator for the new state.
 * Left boundary is clamped to zero; right boundary is driven up by INCREMENT.
 * Launch must cover exactly `size` threads. */
__global__ void kernel_simulate(float *data, float *buff, const int size, const float Hx, const float Ht)
{
    const unsigned idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx == 0) {
        buff[idx] = 0;
    } else if (idx == size - 1) {
        buff[idx] += INCREMENT;
    } else {
        // Discrete Laplacian scaled by the time/space steps.
        buff[idx] += ((data[idx+1] - 2*data[idx] + data[idx-1]) * Ht) / (Hx * Hx);
    }
}
/* Fill the first `size` entries of d with `init` (defaults to zero). */
void initialize(float *d, int size, float init = 0)
{
    for (int k = size; k-- > 0; )
        d[k] = init;
}
/* Append one "<index> <tick> <value>" line per node to f; warn on NULL f. */
void save_data(FILE *f, float *d, int size, int z)
{
    if (!f) {
        printf("File output error\n");
        return;
    }
    for (int i = 0; i < size; ++i)
        fprintf(f, "%4d %4d %20.4f\n", i, z, d[i]);
}
/* Dump node values to stdout in "<index> <tick> <value>" layout. */
void print_data(float *d, int size, int z)
{
    int i = 0;
    while (i < size) {
        printf("%4d %4d %10.4f\n", i, z, d[i]);
        ++i;
    }
}
int main(void)
{
    // Simulation parameters: spatial extent, duration, and step sizes.
    const int S_LENGTH = 32;
    const int T_LENGTH = 16;
    const float Hx = 0.5;
    const float Ht = 0.05;
    const int NODES = S_LENGTH / Hx;   // grid points
    const int TICKS = T_LENGTH / Ht;   // time steps
    const int numBytes = sizeof(float) * NODES;
    FILE *f = fopen("out","w");
    dim3 threads(BLOCK_SIZE);
    dim3 blocks(NODES/BLOCK_SIZE);     // assumes BLOCK_SIZE divides NODES
    float *data_dev, *buff_dev, *data_host;
    data_host = (float*)malloc(numBytes);
    initialize(data_host, NODES);
    CUDA_CHECK_RETURN(cudaMalloc((void**)&data_dev, numBytes));
    CUDA_CHECK_RETURN(cudaMalloc((void**)&buff_dev, numBytes));
    CUDA_CHECK_RETURN(cudaMemcpy(data_dev, data_host, numBytes, cudaMemcpyHostToDevice));
    // FIX: the kernel accumulates into buff (+=), so it must start zeroed;
    // the original read uninitialized device memory on the first tick.
    CUDA_CHECK_RETURN(cudaMemset(buff_dev, 0, numBytes));
    for (int t = 0; t < TICKS; ++t)
    {
        kernel_simulate <<< blocks, threads >>> (data_dev, buff_dev, NODES, Hx, Ht);
        CUDA_CHECK_RETURN(cudaGetLastError());
        // buff becomes the new state; keep a device copy as next tick's input.
        CUDA_CHECK_RETURN(cudaMemcpy(data_dev, buff_dev, numBytes, cudaMemcpyDeviceToDevice));
        CUDA_CHECK_RETURN(cudaMemcpy(data_host, buff_dev, numBytes, cudaMemcpyDeviceToHost));
        save_data(f, data_host, NODES, t);
    }
    CUDA_CHECK_RETURN(cudaFree((void*)data_dev));
    CUDA_CHECK_RETURN(cudaFree((void*)buff_dev));
    CUDA_CHECK_RETURN(cudaDeviceReset());
    free(data_host);
    fclose(f);
    return 0;
}
|
2,172
|
//============================================================
// File: im_cuda.cu
// Author: John Gauch
// Date: Summer 2010
//============================================================
#include <stdio.h>
#include <math.h>
#include <cuda.h>
#define PIXEL(x,y) ( (((y)+ydim)%ydim)*xdim + ((x)+xdim)%xdim )
// Global variables for timers
#define CUDA_TIMER
#ifdef CUDA_TIMER
cudaEvent_t cuda_start;
cudaEvent_t cuda_stop;
float cuda_time;
#endif
//============================================================
// Enumerate the visible CUDA devices and print their key properties.
void query_cuda()
{
    int devCount;
    cudaGetDeviceCount(&devCount);
    printf("There are %d CUDA devices.\n", devCount);
    for (int dev = 0; dev < devCount; ++dev)
    {
        printf("\nCUDA Device #%d\n", dev);
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, dev);
        // Size fields are cast to int to match the %d format specifiers.
        printf("Major revision number: %d\n", (int)prop.major);
        printf("Minor revision number: %d\n", (int)prop.minor);
        printf("Name: %s\n", prop.name);
        printf("Total global memory: %d\n", (int)prop.totalGlobalMem);
        printf("Total shared memory per block: %d\n", (int)prop.sharedMemPerBlock);
        printf("Total registers per block: %d\n", (int)prop.regsPerBlock);
        printf("Warp size: %d\n", (int)prop.warpSize);
        printf("Maximum memory pitch: %d\n", (int)prop.memPitch);
        printf("Maximum threads per block: %d\n", (int)prop.maxThreadsPerBlock);
        for (int i = 0; i < 3; ++i)
            printf("Maximum dimension %d of block: %d\n", i, (int)prop.maxThreadsDim[i]);
        for (int i = 0; i < 3; ++i)
            printf("Maximum dimension %d of grid: %d\n", i, (int)prop.maxGridSize[i]);
        printf("Clock rate: %d\n", (int)prop.clockRate);
        printf("Total constant memory: %d\n", (int)prop.totalConstMem);
        printf("Texture alignment: %d\n", (int)prop.textureAlignment);
        printf("Concurrent copy and execution: %s\n", (prop.deviceOverlap ? "Yes" : "No"));
        printf("Number of multiprocessors: %d\n", (int)prop.multiProcessorCount);
        printf("Kernel execution timeout: %s\n", (prop.kernelExecTimeoutEnabled ? "Yes" : "No"));
    }
}
//============================================================
// Allocate the global start/stop events used by start_timer()/end_timer().
// No-op unless CUDA_TIMER is defined.
void create_timer()
{
#ifdef CUDA_TIMER
    cudaEventCreate(&cuda_start);
    cudaEventCreate(&cuda_stop);
#endif
}
//============================================================
// Release the events allocated by create_timer(). No-op unless CUDA_TIMER
// is defined.
void destroy_timer()
{
#ifdef CUDA_TIMER
    cudaEventDestroy(cuda_start);
    cudaEventDestroy(cuda_stop);
#endif
}
//============================================================
// Record the start event on the default stream; paired with end_timer().
// No-op unless CUDA_TIMER is defined.
void start_timer()
{
#ifdef CUDA_TIMER
    cudaEventRecord(cuda_start,0);
#endif
}
//============================================================
// Stop the timer and print the elapsed seconds prefixed with `str`.
// FIX: parameter is now const char* — every call site passes a string
// literal, and literal-to-char* conversion is deprecated/ill-formed in
// modern C++. char* arguments still convert, so callers are unaffected.
void end_timer(const char *str)
{
#ifdef CUDA_TIMER
    cudaEventRecord(cuda_stop,0);
    cudaEventSynchronize(cuda_stop);   // wait for all prior work to finish
    cudaEventElapsedTime(&cuda_time, cuda_start, cuda_stop);
    printf("%s: %f\n", str, cuda_time/1000);   // ms -> s
#endif
}
//============================================================
// Pick a launch configuration covering `length` threads: a quarter of the
// device's max block size, with enough blocks to cover the remainder.
// Both outputs are left at 0 if device properties cannot be queried.
void cuda_config(int length, int &thread_cnt, int &block_cnt)
{
    thread_cnt = 0;
    block_cnt = 0;
    cudaDeviceProp prop;
    if (cudaGetDeviceProperties(&prop, 0) != cudaSuccess) return;
    thread_cnt = prop.maxThreadsPerBlock / 4;
    block_cnt = (length + thread_cnt - 1) / thread_cnt;   // ceil-divide
}
//============================================================
//============================================================
// 3x3 binomial smoothing (float): out(x,y) = weighted average of the 8
// neighbors and center, with toroidal wrap-around via the PIXEL macro.
__global__ void binomial_kernel(float *cu_in, float *cu_out, int xdim, int ydim)
{
   // get (x,y) coordinates
   int pixel = blockIdx.x*blockDim.x + threadIdx.x;
   // FIX: was `pixel > xdim * ydim`, letting pixel == xdim*ydim run one
   // element past the image.
   if (pixel >= xdim * ydim) return;
   int x = pixel % xdim;
   int y = pixel / xdim;
   cu_out[PIXEL(x,y)]
      = (cu_in[PIXEL(x+1,y+1)]
      +  cu_in[PIXEL(x+1,y)] * 2
      +  cu_in[PIXEL(x+1,y-1)]
      +  cu_in[PIXEL(x,y+1)] * 2
      +  cu_in[PIXEL(x,y)] * 4
      +  cu_in[PIXEL(x,y-1)] * 2
      +  cu_in[PIXEL(x-1,y+1)]
      +  cu_in[PIXEL(x-1,y)] * 2
      +  cu_in[PIXEL(x-1,y-1)]) / (float)16.0;
}
//============================================================
//============================================================
// Apply `count` passes of the 3x3 binomial filter to a float image of
// xdim*ydim pixels, ping-ponging between two device buffers so no extra
// copies are needed. Returns `data` (filtered in place on the host side).
float* binomial_cuda(float *data, int count, int xdim, int ydim)
{
   // declare variables
   float *cu_in;
   float *cu_out;
   int length = xdim*ydim;
   size_t size_in = length*sizeof(float);
   size_t size_out = length*sizeof(float);
   int thread_cnt, block_cnt;
   cuda_config(length, thread_cnt, block_cnt);
   dim3 blocks(block_cnt, 1, 1);
   dim3 threads(thread_cnt, 1, 1);
   // allocate array on device
   create_timer();
   cudaMalloc((void **) &cu_in, size_in);
   cudaMalloc((void **) &cu_out, size_out);
   // copy data from host to device
   start_timer();
   cudaMemcpy(cu_in, data, size_in, cudaMemcpyHostToDevice);
   end_timer("Copy memory to device");
   // run kernel: pairs of passes land back in cu_in, so after the loop the
   // current image is in cu_in and count is 0 or 1.
   start_timer();
   while (count >= 2)
   {
      binomial_kernel <<< blocks, threads >>> (cu_in, cu_out, xdim, ydim);
      binomial_kernel <<< blocks, threads >>> (cu_out, cu_in, xdim, ydim);
      count -= 2;
   }
   if (count == 1)
      binomial_kernel <<< blocks, threads >>> (cu_in, cu_out, xdim, ydim);
   end_timer("Run kernel");
   // copy data from device to host — the final image is in cu_out after an
   // odd pass count, otherwise in cu_in.
   start_timer();
   if (count == 1)
      cudaMemcpy(data, cu_out, size_out, cudaMemcpyDeviceToHost);
   else
      cudaMemcpy(data, cu_in, size_in, cudaMemcpyDeviceToHost);
   end_timer("Copy memory from device");
   // cleanup
   cudaFree(cu_in);
   cudaFree(cu_out);
   destroy_timer();
   return data;
}
//============================================================
//============================================================
// 3x3 binomial smoothing (short): integer version with +8 for rounding
// before the divide-by-16; toroidal wrap-around via the PIXEL macro.
__global__ void binomial_kernel(short *cu_in, short *cu_out, int xdim, int ydim)
{
   // get (x,y) coordinates
   int pixel = blockIdx.x*blockDim.x + threadIdx.x;
   // FIX: was `pixel > xdim * ydim`, letting pixel == xdim*ydim run one
   // element past the image.
   if (pixel >= xdim * ydim) return;
   int x = pixel % xdim;
   int y = pixel / xdim;
   cu_out[PIXEL(x,y)]
      = (cu_in[PIXEL(x+1,y+1)]
      +  cu_in[PIXEL(x+1,y)] * 2
      +  cu_in[PIXEL(x+1,y-1)]
      +  cu_in[PIXEL(x,y+1)] * 2
      +  cu_in[PIXEL(x,y)] * 4
      +  cu_in[PIXEL(x,y-1)] * 2
      +  cu_in[PIXEL(x-1,y+1)]
      +  cu_in[PIXEL(x-1,y)] * 2
      +  cu_in[PIXEL(x-1,y-1)] + 8) / 16;
}
//============================================================
//============================================================
// Apply `count` passes of the 3x3 binomial filter to a short image of
// xdim*ydim pixels, ping-ponging between two device buffers.
// Returns `data` (filtered in place on the host side).
short* binomial_cuda(short *data, int count, int xdim, int ydim)
{
   // declare variables
   short *cu_in;
   short *cu_out;
   int length = xdim*ydim;
   size_t size_in = length*sizeof(short);
   size_t size_out = length*sizeof(short);
   int thread_cnt, block_cnt;
   cuda_config(length, thread_cnt, block_cnt);
   dim3 blocks(block_cnt, 1, 1);
   dim3 threads(thread_cnt, 1, 1);
   // allocate array on device
   create_timer();
   cudaMalloc((void **) &cu_in, size_in);
   cudaMalloc((void **) &cu_out, size_out);
   // copy data from host to device
   start_timer();
   cudaMemcpy(cu_in, data, size_in, cudaMemcpyHostToDevice);
   end_timer("Copy memory to device");
   // run kernel: pairs of passes land back in cu_in, so after the loop the
   // current image is in cu_in and count is 0 or 1.
   start_timer();
   while (count >= 2)
   {
      binomial_kernel <<< blocks, threads >>> (cu_in, cu_out, xdim, ydim);
      binomial_kernel <<< blocks, threads >>> (cu_out, cu_in, xdim, ydim);
      count -= 2;
   }
   if (count == 1)
      binomial_kernel <<< blocks, threads >>> (cu_in, cu_out, xdim, ydim);
   end_timer("Run kernel");
   // copy data from device to host — final image is in cu_out after an odd
   // pass count, otherwise in cu_in.
   start_timer();
   if (count == 1)
      cudaMemcpy(data, cu_out, size_out, cudaMemcpyDeviceToHost);
   else
      cudaMemcpy(data, cu_in, size_in, cudaMemcpyDeviceToHost);
   end_timer("Copy memory from device");
   // cleanup
   cudaFree(cu_in);
   cudaFree(cu_out);
   destroy_timer();
   return data;
}
//============================================================
//============================================================
// Sobel-style gradient magnitude (float): |dx| + |dy| over a 3x3
// neighborhood, scaled by 1/8; toroidal wrap via the PIXEL macro.
__global__ void gradient_kernel(float *cu_in, float *cu_out, int xdim, int ydim)
{
   // get (x,y) coordinates
   int pixel = blockIdx.x*blockDim.x + threadIdx.x;
   // FIX: was `pixel > xdim * ydim`, letting pixel == xdim*ydim run one
   // element past the image.
   if (pixel >= xdim * ydim) return;
   int x = pixel % xdim;
   int y = pixel / xdim;
   float dx
      = cu_in[PIXEL(x+1,y+1)]
      + cu_in[PIXEL(x+1,y)] * 2
      + cu_in[PIXEL(x+1,y-1)]
      - cu_in[PIXEL(x-1,y+1)]
      - cu_in[PIXEL(x-1,y)] * 2
      - cu_in[PIXEL(x-1,y-1)];
   float dy
      = cu_in[PIXEL(x+1,y+1)]
      + cu_in[PIXEL(x,y+1)] * 2
      + cu_in[PIXEL(x-1,y+1)]
      - cu_in[PIXEL(x+1,y-1)]
      - cu_in[PIXEL(x,y-1)] * 2
      - cu_in[PIXEL(x-1,y-1)];
   if (dx < 0) dx = -dx;
   if (dy < 0) dy = -dy;
   cu_out[PIXEL(x,y)] = (dx+dy) / (float)8.0;
}
//============================================================
//============================================================
// Run one gradient-magnitude pass over a float image on the GPU.
// Returns `data`, overwritten with the result.
float* gradient_cuda(float *data, int xdim, int ydim)
{
   // declare variables
   float *cu_in;
   float *cu_out;
   int length = xdim*ydim;
   size_t size_in = length*sizeof(float);
   size_t size_out = length*sizeof(float);
   int thread_cnt, block_cnt;
   cuda_config(length, thread_cnt, block_cnt);
   dim3 blocks(block_cnt, 1, 1);
   dim3 threads(thread_cnt, 1, 1);
   // allocate array on device
   create_timer();
   cudaMalloc((void **) &cu_in, size_in);
   cudaMalloc((void **) &cu_out, size_out);
   // copy data from host to device
   start_timer();
   cudaMemcpy(cu_in, data, size_in, cudaMemcpyHostToDevice);
   end_timer("Copy memory to device");
   // run kernel
   start_timer();
   gradient_kernel <<< blocks, threads >>> (cu_in, cu_out, xdim, ydim);
   end_timer("Run kernel");
   // copy data from device to host
   start_timer();
   cudaMemcpy(data, cu_out, size_out, cudaMemcpyDeviceToHost);
   end_timer("Copy memory from device");
   // cleanup
   cudaFree(cu_in);
   cudaFree(cu_out);
   destroy_timer();   // FIX: events created by create_timer() were leaked here
   return data;
}
//============================================================
//============================================================
// Sobel-style gradient magnitude (short): integer |dx| + |dy| over a 3x3
// neighborhood, divided by 8; toroidal wrap via the PIXEL macro.
__global__ void gradient_kernel(short *cu_in, short *cu_out, int xdim, int ydim)
{
   // get (x,y) coordinates
   int pixel = blockIdx.x*blockDim.x + threadIdx.x;
   // FIX: was `pixel > xdim * ydim`, letting pixel == xdim*ydim run one
   // element past the image.
   if (pixel >= xdim * ydim) return;
   int x = pixel % xdim;
   int y = pixel / xdim;
   int dx
      = cu_in[PIXEL(x+1,y+1)]
      + cu_in[PIXEL(x+1,y)] * 2
      + cu_in[PIXEL(x+1,y-1)]
      - cu_in[PIXEL(x-1,y+1)]
      - cu_in[PIXEL(x-1,y)] * 2
      - cu_in[PIXEL(x-1,y-1)];
   int dy
      = cu_in[PIXEL(x+1,y+1)]
      + cu_in[PIXEL(x,y+1)] * 2
      + cu_in[PIXEL(x-1,y+1)]
      - cu_in[PIXEL(x+1,y-1)]
      - cu_in[PIXEL(x,y-1)] * 2
      - cu_in[PIXEL(x-1,y-1)];
   if (dx < 0) dx = -dx;
   if (dy < 0) dy = -dy;
   cu_out[PIXEL(x,y)] = (dx+dy) / 8;
}
//============================================================
//============================================================
// Run one gradient-magnitude pass over a short image on the GPU.
// Returns `data`, overwritten with the result.
short* gradient_cuda(short *data, int xdim, int ydim)
{
   // declare variables
   short *cu_in;
   short *cu_out;
   int length = xdim*ydim;
   size_t size_in = length*sizeof(short);
   size_t size_out = length*sizeof(short);
   int thread_cnt, block_cnt;
   cuda_config(length, thread_cnt, block_cnt);
   dim3 blocks(block_cnt, 1, 1);
   dim3 threads(thread_cnt, 1, 1);
   // allocate array on device
   create_timer();
   cudaMalloc((void **) &cu_in, size_in);
   cudaMalloc((void **) &cu_out, size_out);
   // copy data from host to device
   start_timer();
   cudaMemcpy(cu_in, data, size_in, cudaMemcpyHostToDevice);
   end_timer("Copy memory to device");
   // run kernel
   start_timer();
   gradient_kernel <<< blocks, threads >>> (cu_in, cu_out, xdim, ydim);
   end_timer("Run kernel");
   // copy data from device to host
   // FIX: end_timer was called before the copy it is supposed to time
   // (order now matches the float version above).
   start_timer();
   cudaMemcpy(data, cu_out, size_out, cudaMemcpyDeviceToHost);
   end_timer("Copy memory from device");
   // cleanup
   cudaFree(cu_in);
   cudaFree(cu_out);
   destroy_timer();   // FIX: events created by create_timer() were leaked here
   return data;
}
|
2,173
|
#include "../include/obtainDeviceInfo.cuh"
#include <iostream>
void getInfo(){
    /*
    purpose : This function prints out information about the available CUDA devices on
              the system
    */
    int count;
    cudaDeviceProp prop;
    cudaGetDeviceCount(&count);
    std::cout << "This program has identified " << count << " Nvidia GPUs" << std::endl;
    // Only device 0 is queried; with zero devices prop.name is uninitialized.
    cudaGetDeviceProperties(&prop, 0);
    std::cout << "The first device is called " << prop.name << std::endl;
}
void obtainDeviceInfo(){
    /*
    purpose : Acts as a wrapper for getInfo so that this function can be called
              from a regular C++ file (getInfo lives in this .cu translation unit)
    */
    getInfo();
}
|
2,174
|
#include <iostream>
#include "cuda_runtime.h"
#include <cufft.h>
#include <stdio.h>
const double PI = 3.141592653589793238460;
// Forward-FFT a single sine period with cuFFT and print input/output pairs.
int main() {
    int N = 10;
    size_t memorySize = N * sizeof(cufftComplex);
    cufftComplex* A = (cufftComplex*) malloc(memorySize);   // input signal
    cufftComplex* B = (cufftComplex*) malloc(memorySize);   // transform result
    // One period of sin in the real part; imaginary part zero.
    for (int i = 0; i < N; i++) {
        A[i].x = (float) sin(2 * PI * i / N);
        A[i].y = 0.0;
    }
    cufftComplex* d_A;
    cufftComplex* d_B;
    cudaMalloc(&d_A, memorySize);
    cudaMalloc(&d_B, memorySize);
    cudaMemcpy(d_A, A, memorySize, cudaMemcpyHostToDevice);
    cufftHandle plan;
    cufftPlan1d(&plan, N, CUFFT_C2C, 1);
    cufftExecC2C(plan, d_A, d_B, CUFFT_FORWARD);
    // Blocking copy: implicitly synchronizes with the FFT before reading B.
    cudaMemcpy(B, d_B, memorySize, cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; i++) {
        printf("%f %f %f %f\n", A[i].x, A[i].y, B[i].x, B[i].y);
    }
    // FIX: release the plan and all buffers — the original leaked every one.
    cufftDestroy(plan);
    cudaFree(d_A);
    cudaFree(d_B);
    free(A);
    free(B);
    return 0;
}
|
2,175
|
#include "includes.h"
/* Horspool substring search: each thread scans one chunk of `text` (plus
 * pat_len-1 overlap) for `pattern`, then the block sums its per-thread
 * counts in dynamic shared memory and writes one total per block.
 * Launch with blockDim.x == NUM_THREADS_PER_BLOCK and shared-memory size
 * NUM_THREADS_PER_BLOCK * sizeof(int). */
__global__ void horspool_match (char* text, char* pattern, int* shift_table, unsigned int* num_matches, int chunk_size, int num_chunks, int text_size, int pat_len , unsigned int* d_output) {
    extern __shared__ int s[];
    int myId = threadIdx.x + blockDim.x * blockIdx.x;
    int count = 0;
    // FIX: invalid threads must still publish a zero count and reach the
    // barrier. The original returned early, leaving s[threadIdx.x]
    // uninitialized (thread 0 then summed garbage) and skipping a
    // __syncthreads() that the rest of the block waits on — undefined behavior.
    if (myId <= num_chunks) {
        int text_length = (chunk_size * myId) + chunk_size + pat_len - 1;
        // don't need to check first pattern_length - 1 characters
        int i = (myId*chunk_size) + pat_len - 1;
        int k = 0;
        while (i < text_length) {
            // reset matched character count
            k = 0;
            if (i >= text_size) {
                // break out if i tries to step past text length
                break;
            }
            while (k <= pat_len - 1 && pattern[pat_len - 1 - k] == text[i - k]) {
                // increment matched character count
                k++;
            }
            if (k == pat_len) {
                // increment pattern count, text index
                ++count;
                ++i;
            } else {
                i = i + shift_table[text[i]];
            }
        }
    }
    s[threadIdx.x] = count;
    __syncthreads();
    // Thread 0 folds the block's counts into a single output slot.
    if (threadIdx.x == 0) {
        int sum = 0;
        for (int idx = 0; idx < NUM_THREADS_PER_BLOCK; idx++) {
            sum += s[idx];
        }
        d_output[blockIdx.x] = sum;
    }
}
|
2,176
|
#include<stdio.h>
#include<cuda.h>
#include<stdlib.h>
/* Write a per-thread product into d_in and log it.
 * NOTE(review): the index is threadIdx.x only, so every block writes the
 * same slots — presumably intentional for this demo; confirm. */
__global__ void hello(int* d_in){
    const int t = threadIdx.x;
    d_in[t] = (blockIdx.x + t) * (blockIdx.x * t);
    printf("%d : %d\n", t, d_in[t]);
}
// Round-trip a large buffer through the device while the kernel prints.
int main(){
    const size_t total = 1200 * 1024;           // elements
    const size_t bytes = total * sizeof(int);
    // NOTE(review): the host buffer is copied to the device uninitialized;
    // the kernel overwrites every slot it touches, so this appears benign.
    int* host_buf = (int*)malloc(bytes);
    int* dev_buf;
    cudaMalloc((void**)&dev_buf, bytes);
    cudaMemcpy(dev_buf, host_buf, bytes, cudaMemcpyHostToDevice);
    hello<<<1200, 1024>>>(dev_buf);
    cudaMemcpy(host_buf, dev_buf, bytes, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    free(host_buf);
    cudaFree(dev_buf);
    return 0;
}
|
2,177
|
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/scan.h>
// You can use any other block size you wish.
#define BLK_SIZE 256
#define DEFAULT_NUM_ELEMENTS 16777216
#define MAX_RAND 2
// reduce phase, reduction
// Up-sweep ("reduce") phase of a work-efficient exclusive scan: each thread
// pair-sums two strided elements on load, the block tree-reduces them in
// shared memory, and partial sums are written back at stride `offset`.
// last_tid identifies the thread holding the overall last element, which is
// cleared to 0 to seed the down-sweep phase.
template <typename T>
__global__ void reduce(T *data, int num_elements, int last_tid, int offset)
{
    // volatile: shared values are re-read between unsynchronized warp steps
    volatile __shared__ T temp[BLK_SIZE];
    // load data into shared memory (2-D grid flattened into a linear uid)
    int uid = blockIdx.y*blockDim.x*gridDim.x + blockIdx.x * blockDim.x + threadIdx.x;
    int tid = threadIdx.x;
    int i1 = offset*(2*uid+1)-1;   // left element of this thread's pair
    int i2 = i1 + offset;          // right element (and write-back slot)
    if (uid < num_elements) { // prevent out of range in last iteration
        temp[tid] = data[i1] + data[i2]; // one reduction on loading
    }
    __syncthreads();
    // In-place tree reduction over the block's BLK_SIZE partial sums.
    int n, d;
    for (d = 1, n = BLK_SIZE>>1; d < BLK_SIZE; d <<= 1, n >>= 1) {
        if (tid < n) {
            temp[d*(2*tid+2)-1] += temp[d*(2*tid+1)-1];
        }
        __syncthreads();
    }
    // write updated entry back to global memory
    if (uid < num_elements) {
        data[i2] = temp[tid];
    }
    // clear last element — seeds the exclusive-scan down-sweep.
    // NOTE(review): this check is not guarded by uid < num_elements; it
    // relies on the host passing last_tid = n-1 for the current level.
    if (tid == last_tid) {
        data[i2] = 0;
    }
}
// down-speed phase
// Down-sweep phase of the work-efficient exclusive scan: each thread loads a
// pair of elements at stride `offset`, the block performs the swap-and-add
// traversal in shared memory, and both elements are written back.
// global_d caps the starting stride for the first (cross-block) level.
template <typename T>
__global__ void down_sweep(T *data, int global_d, int num_elements, int offset)
{
    // volatile: shared values are re-read between unsynchronized warp steps
    volatile __shared__ T temp[2*BLK_SIZE];
    // load data into shared memory (2-D grid flattened into a linear uid)
    int uid = blockIdx.y*blockDim.x*gridDim.x + blockIdx.x * blockDim.x + threadIdx.x;
    int tid = threadIdx.x;
    int i1 = offset*(2*uid+1)-1;   // left element of this thread's pair
    int i2 = i1 + offset;          // right element
    if (i2 < num_elements) {
        temp[2*tid] = data[i1];
        temp[2*tid+1] = data[i2];
    }
    __syncthreads();
    // Start from the smaller of BLK_SIZE and the level's global stride.
    int d, n;
    if (BLK_SIZE < global_d)
        d = BLK_SIZE;
    else
        d = global_d;
    // Classic down-sweep: swap left child into parent, add old parent to right.
    for (n = BLK_SIZE/d; d > 0; d >>= 1, n <<= 1) {
        if (tid < n) {
            int a1 = d*(2*tid+1)-1;
            int a2 = a1 + d;
            T t = temp[a1];
            temp[a1] = temp[a2];
            temp[a2] += t;
        }
        __syncthreads();
    }
    // write updated entry back to global memory
    if (i2 < num_elements) {
        data[i1] = temp[2*tid];
        data[i2] = temp[2*tid+1];
    }
}
// kernel launch
// Host driver for the exclusive scan: runs the up-sweep (reduce) kernels at
// growing stride, then the down-sweep kernels at shrinking stride.
// numElements must be a power of two (the caller pads to guarantee this).
template <typename T>
void prescanArray(T *Array, int numElements)
{
    int num_blocks;
    int offset;
    int n;
    // reduce phase: each launch covers `n` pairs at stride `offset`;
    // both advance by a factor of 2*BLK_SIZE per level.
    for (offset = 1, n = numElements/2; offset < numElements; offset*=(2*BLK_SIZE), n/=(2*BLK_SIZE)) {
        num_blocks = (n + BLK_SIZE - 1) / (BLK_SIZE);
        // Split into a 2-D grid when the x-dimension limit would be exceeded.
        int num_x = num_blocks, num_y = 1;
        if (num_blocks > 32768) {
            num_x = 32768;
            num_y = num_blocks / 32768;
        }
        dim3 grid_size(num_x, num_y);
        reduce<<<grid_size, BLK_SIZE>>>(Array, n, n-1, offset);
    }
    // down-sweep phase: mirror traversal from coarse stride back to 1.
    num_blocks = 1;
    n = numElements/2;
    for (offset = numElements/2; ; offset /= BLK_SIZE*2) {
        int finish_offset = offset / BLK_SIZE;
        if (finish_offset == 0) finish_offset = 1; // last iteration
        if (num_blocks * BLK_SIZE > numElements)
            num_blocks = (numElements+BLK_SIZE-1)/BLK_SIZE;
        // Same 2-D grid split as in the reduce phase.
        int num_x = num_blocks, num_y = 1;
        if (num_blocks > 32768) {
            num_x = 32768;
            num_y = num_blocks / 32768;
        }
        dim3 grid_size(num_x, num_y);
        down_sweep<<<grid_size, BLK_SIZE>>>(Array, n, numElements, finish_offset);
        num_blocks *= BLK_SIZE*2;
        n /= BLK_SIZE*2;
        if (finish_offset == 1) break;  // stride 1 level just ran: done
    }
}
// declaration, forward
void runTest(int argc, char** argv);
template<typename T>
void computeGold(T* reference, T* idata, const unsigned int len);
template<typename T>
int compare(const T* reference, const T* data, const unsigned int len);
// Program main
// Program entry point: delegate to the scan test harness.
int main(int argc, char** argv)
{
    // cudaSetDevice(1);   // optionally pin a specific device first
    runTest(argc, argv);
    return EXIT_SUCCESS;
}
//! Run a scan test for CUDA
//! Run a scan test for CUDA: CPU reference vs custom kernel vs Thrust,
//! with timing for each and correctness comparison against the reference.
void runTest(int argc, char** argv)
{
    float time, time1, time2;
    cudaEvent_t start1,start2,start3,stop1,stop2,stop3;
    cudaEventCreate(&start1);
    cudaEventCreate(&stop1);
    cudaEventCreate(&start2);
    cudaEventCreate(&stop2);
    cudaEventCreate(&start3);
    cudaEventCreate(&stop3);
    // Element count from argv[1], else the default.
    int num_elements = 0;
    if (argc == 1) {
        num_elements = DEFAULT_NUM_ELEMENTS;
    } else if (argc == 2) {
        num_elements = atoi(argv[1]);
    } else {
        fprintf(stderr, "Usage: %s <# Elements>\n", argv[0]);
        exit(1);
    }
    // allocate host memory (pinned, for faster async-capable transfers)
    size_t mem_size = sizeof(double) * num_elements;
    double *h_data,*host_data;
    cudaMallocHost(&h_data, mem_size); // use pinned memory
    cudaMallocHost(&host_data, mem_size); // use pinned memory
    // initialize the input data (small ints so sums stay exact in double)
    for(int i = 0; i < num_elements; ++i)
    {
        h_data[i] = (int)(rand() % MAX_RAND);
        host_data[i] = h_data[i];
    }
    //CPU Timing Starts
    cudaEventRecord(start3, NULL);
    // compute reference solution
    double* reference = (double*) malloc(mem_size);
    printf("Processing %d elements...\n", num_elements);
    computeGold(reference, h_data, num_elements);
    cudaEventRecord(stop3, NULL);
    cudaEventSynchronize(stop3);
    cudaEventElapsedTime(&time2, start3, stop3);
    //CPU Timing Ends
    printf("\n CPU time is: %f", time2);
    // pad to the next power of two (the scan kernels require it)
    int N = 0;
    while (num_elements > ( (unsigned int)1 << N ) ) {
        N++;
    }
    int d_num_elements = (unsigned int)1 << N;
    size_t d_mem_size = sizeof(double) * d_num_elements;
    printf("Padded to %d elements for GPU\n", d_num_elements);
    // allocate device memory
    double* d_data = NULL;
    if (cudaMalloc((void**) &d_data, d_mem_size) != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc fails. Exit ...\n");
        exit(1);
    }
    // Timing starts
    cudaEventRecord(start1, NULL);
    // Run once to remove startup overhead for more accurate performance measurement
    prescanArray(d_data, 16);
    // copy data
    cudaMemcpy(d_data, h_data, mem_size, cudaMemcpyHostToDevice);
    // launch kernel
    prescanArray(d_data, d_num_elements);
    // copy result back, only copy un-padded part back
    cudaMemcpy(h_data, d_data, mem_size, cudaMemcpyDeviceToHost);
    cudaEventRecord(stop1, NULL);
    cudaEventSynchronize(stop1);
    cudaEventElapsedTime(&time1, start1, stop1);
    //Timing Ends
    printf("\n GPU time is: %f", time1);
    // Check correctness
    int result_regtest = compare(reference, h_data, num_elements);
    if (result_regtest == 0)
        fprintf(stdout, "Test PASSED\n");
    else
        fprintf(stdout, "Test Failed: %d errors\n", result_regtest);
    // Thrust comparison run over the same input
    thrust::host_vector<double> h_vec(host_data, host_data+num_elements);
    cudaEventRecord(start2, NULL);
    //copy host vector to device
    thrust::device_vector<double> d_vec = h_vec;
    thrust::exclusive_scan(d_vec.begin(), d_vec.end(), d_vec.begin());
    //copy back to host
    thrust::copy(d_vec.begin(), d_vec.end(),host_data);
    cudaEventRecord(stop2, NULL);
    cudaEventSynchronize(stop2);
    cudaEventElapsedTime(&time, start2, stop2);
    printf("\n GPU Thrust Time is : %f", time);
    // Check correctness
    int result_thrust = compare(reference, host_data, num_elements);
    if (result_thrust == 0)
        fprintf(stdout, "Test PASSED\n");
    else
        fprintf(stdout, "Test Failed: %d errors\n", result_thrust);
    cudaFreeHost(h_data);
    cudaFreeHost(host_data);   // FIX: this pinned buffer was leaked
    free(reference);
    cudaFree(d_data);
    // FIX: all six events were leaked
    cudaEventDestroy(start1);
    cudaEventDestroy(stop1);
    cudaEventDestroy(start2);
    cudaEventDestroy(stop2);
    cudaEventDestroy(start3);
    cudaEventDestroy(stop3);
}
// Sequential exclusive prefix-sum reference: reference[i] = sum(idata[0..i-1]).
// A separately accumulated running total probes for precision loss.
template <typename T>
void computeGold( T* reference, T* idata, const unsigned int len)
{
    reference[0] = 0;
    T running = 0;
    for (unsigned int i = 1; i < len; ++i)
    {
        running += idata[i-1];
        reference[i] = reference[i-1] + idata[i-1];
    }
    // Two accumulation orders disagreeing means rounding error crept in.
    if (running != reference[len-1])
        fprintf(stderr, "Warning: exceeding single-precision accuracy. Scan will be inaccurate.\n");
}
// Count elements where |reference[i] - data[i]| exceeds a length-scaled
// epsilon (eps grows with log2(len): deeper scans accumulate more rounding).
template <typename T>
int compare(const T* reference, const T* data, const unsigned int len)
{
    int num_errors = 0;
    // FIX: eps is loop-invariant — hoisted out of the per-element loop.
    T eps = (T)(0.0001 * log((double)len) / log(2.0));
    for (unsigned int i = 0; i < len; i++) {   // FIX: unsigned, matches len
        T error = reference[i] - data[i];
        // FIX: the original called abs(error); for floating T that resolves
        // to the integer abs(), silently truncating fractional differences.
        if (error < 0) error = -error;
        if (error > eps) {
            num_errors++;
        }
    }
    return num_errors;
}
|
2,178
|
#include <stdio.h>
#include <math.h>
// Print all rows*cols*4 lattice values, with a '|' separator after every
// 9th value (fixed width, independent of cols).
void printMatrix(const int *A, int rows, int cols) {
    const int total = rows * cols * 4;
    for (int k = 0; k < total; k++) {
        printf("%d ", A[k]);
        printf(" ");
        if ((k + 1) % 9 == 0) {
            printf("|");
        }
    }
    printf("\n");
};
/* Read a Structure-of-Arrays lattice from `filename`.
 * File format: "<rows> <cols>" followed by rows*cols*4 integers.
 * On success *Soa points to a malloc'd buffer owned by the caller;
 * on open failure *Soa is set to NULL (the original dereferenced NULL). */
void readInput_soa(const char *filename, int **Soa, int *rows, int *cols) {
    FILE *file = fopen(filename, "r");
    if (!file) {
        fprintf(stderr, "readInput_soa: cannot open %s\n", filename);
        *Soa = NULL;
        return;
    }
    fscanf(file, "%d %d", rows, cols);
    int * A_F0 = (int *) malloc(*rows * (*cols)* (4) * sizeof(int));
    for(int i = 0; i < *rows*(*cols)*(4); i++) {
        fscanf(file, "%d ", &A_F0[i]);
    }
    fclose(file);
    *Soa = A_F0;
};
/* Read the same SoA-layout file as readInput_soa, but interleave the four
 * direction planes into Array-of-Structures order: element e of plane j
 * lands at A[e*4 + j]. On open failure *Aos is set to NULL
 * (the original dereferenced NULL). Caller owns the malloc'd buffer. */
void readInput_aos(const char *filename, int **Aos, int *rows, int *cols) {
    FILE *file = fopen(filename, "r");
    if (!file) {
        fprintf(stderr, "readInput_aos: cannot open %s\n", filename);
        *Aos = NULL;
        return;
    }
    fscanf(file, "%d %d", rows, cols);
    int * A_F1 = (int *) malloc(*rows * (*cols)* (4) * sizeof(int));
    for(int j = 0; j < 4; j++) {
        int counter = 0;   // restart at slot j for each plane
        for(int i = 0; i < *cols*(*rows); i++){
            fscanf(file, "%d ", &A_F1[counter +j]);
            counter = counter + 4;
        }
    }
    fclose(file);
    *Aos = A_F1;
}
/* One HPP lattice-gas step with periodic boundaries, Structure-of-Arrays
 * layout: plane p of size rows*cols at array[p*rows*cols], with
 * p=0 right, p=1 top, p=2 left, p=3 bottom.
 * NOTE(review): threads write neighbor cells with no synchronization, and
 * y is computed as tId/rows (not tId/cols) — this appears to rely on a
 * square lattice (rows == cols); confirm with the driver. */
__global__ void step_periodic_Soa(int * array,int rows, int cols){
    int tId = threadIdx.x + blockIdx.x * blockDim.x;
    if (tId < rows*cols){
        int x = tId%(cols);
        int y = (int) tId/rows;
        // Collision: two head-on particles scatter into the perpendicular axis.
        if (array[tId] == 1 && array[tId + 2*rows*cols] == 1){
            if(array[tId + rows*cols] == 0 && array[tId+rows*cols*3] == 0){
                array[tId] = 0;
                array[tId + 2*rows*cols] = 0;
                array[tId + rows*cols] = 1;
                array[tId+rows*cols*3] = 1;
            }
        }
        if (array[tId + rows*cols] == 1 && array[tId+rows*cols*3] == 1){
            if(array[tId] == 0 && array[tId + 2*rows*cols] == 0){
                array[tId + rows*cols] = 0;
                array[tId+rows*cols*3] = 0;
                array[tId] = 1;
                array[tId + 2*rows*cols] = 1;
            }
        }
        // Streaming: mark arriving particles with 2 so they are
        // distinguishable from not-yet-streamed 1s.
        // right (plane 0)
        int c_aux = x + 1;
        if (c_aux == cols){
            c_aux = 0;
        }
        if (array[tId] == 1){
            array[(y*rows + c_aux)] = array[tId]*2;
        }
        // left (plane 2)
        c_aux = x - 1;
        if (c_aux < 0){
            c_aux = cols -1;
        }
        if (array[tId+ 2*rows*cols] == 1){
            array[(y*rows + c_aux) + 2*rows*cols] = array[tId+ 2*rows*cols]*2;
        }
        // top (plane 1)
        c_aux = y + 1;
        if (c_aux == rows){
            c_aux = 0;
        }
        if (array[tId + rows*cols] == 1){
            array[(c_aux*rows + x) + rows*cols] = array[tId+ rows*cols]*2;
        }
        // bottom (plane 3)
        // FIX: the original used y + 1 here (copy-paste of "top"), so the
        // wrap test (c_aux < 0) could never fire and down-moving particles
        // streamed upward; the neighbor below is y - 1.
        c_aux = y - 1;
        if (c_aux < 0){
            c_aux = rows-1;
        }
        if (array[tId+ 3*rows*cols] == 1){
            array[(c_aux*rows + x)+ 3*rows*cols] = array[tId+ 3*rows*cols]*2;
        }
        // Correction: 1 -> departed (clear), 2 -> arrived (becomes 1).
        if(array[tId] == 1){
            array[tId] = 0;
        }
        if(array[tId] == 2){
            array[tId] = 1;
        }
        if(array[tId+ rows*cols] == 1){
            array[tId+ rows*cols] = 0;
        }
        if(array[tId+ rows*cols] == 2){
            array[tId+ rows*cols] = 1;
        }
        if(array[tId+ 2*rows*cols] == 1){
            array[tId+ 2*rows*cols] = 0;
        }
        if(array[tId+ 2*rows*cols] == 2){
            array[tId+ 2*rows*cols] = 1;
        }
        if(array[tId+ 3*rows*cols] == 1){
            array[tId+ 3*rows*cols] = 0;
        }
        if(array[tId+ 3*rows*cols] == 2){
            array[tId+ 3*rows*cols] = 1;
        }
    }
};
//Periodic boundaries condition Array of Structures
__global__ void step_periodic_Aos(int * array,int rows, int cols){
    // Same HPP-style step as step_periodic_Soa but on array-of-structures
    // layout: plane p of cell tId lives at array[tId*4 + p]
    // (p = 0 right, 1 top, 2 left, 3 bottom).
    // NOTE(review): streaming writes to neighbouring cells race across
    // blocks, as in the original design.
    int tId = threadIdx.x + blockIdx.x * blockDim.x;
    if (tId < rows*cols){
        int x = tId%(cols);
        int y = (int) tId/rows;   // NOTE(review): divides by rows — only correct for square grids
        // Collision: horizontal pair -> vertical pair.
        if (array[tId*4+0] == 1 && array[tId*4+2] == 1){
            if(array[tId*4+1] == 0 && array[tId*4+3] == 0){
                array[tId*4+0] = 0;
                array[tId*4+2] = 0;
                array[tId*4+1] = 1;
                array[tId*4+3] = 1;
            }
        }
        // Collision: vertical pair -> horizontal pair.
        if (array[tId*4+1] == 1 && array[tId*4+3] == 1){
            if(array[tId*4+0] == 0 && array[tId*4+2] == 0){
                array[tId*4+1] = 0;
                array[tId*4+3] = 0;
                array[tId*4+0] = 1;
                array[tId*4+2] = 1;
            }
        }
        // Streaming: arrivals marked as 2, normalised in the correction pass.
        // right
        int c_aux = x + 1;
        if (c_aux == cols){
            c_aux = 0;
        }
        if (array[tId*4] == 1){
            array[(y*rows + c_aux)*4] = array[tId*4]*2;
        }
        // left
        c_aux = x - 1;
        if (c_aux < 0){
            c_aux = cols -1;
        }
        if (array[tId*4+2] == 1){
            array[(y*rows + c_aux)*4 + 2] = array[tId*4+2]*2;
        }
        // top
        c_aux = y + 1;
        if (c_aux == rows){
            c_aux = 0;
        }
        if (array[tId*4+1] == 1){
            array[(c_aux*rows + x)*4 + 1] = array[tId*4+1]*2;
        }
        // bottom
        c_aux = y - 1;   // fix: was y + 1, which can never satisfy the < 0 wrap below
        if (c_aux < 0){
            c_aux = rows-1;
        }
        if (array[tId*4+3] == 1){
            array[(c_aux*rows + x)*4 + 3] = array[tId*4+3]*2;
        }
        // Correction: 1 -> 0 (departed), 2 -> 1 (arrived) in every plane.
        for(int i = 0; i < 4; i++){
            if(array[tId*4+i] == 1){
                array[tId*4+i] = 0;
            }
            if(array[tId*4+i] == 2){
                array[tId*4+i] = 1;
            }
        };
    }
};
int main(int argc, char const *argv[])
{
    // Driver: load the initial lattice in both layouts, run 1000 SoA steps
    // on the GPU, and copy the results back.
    int rows, cols;
    int *Aos, *Soa;
    int *d_Aos, *d_Soa;
    readInput_aos("initial.txt", &Aos, &rows, &cols);
    readInput_soa("initial.txt", &Soa, &rows, &cols);
    int n = (int)(rows*cols);
    int block_size = 256;
    int grid_size = (int) ceil((float)n / block_size);
    cudaMalloc(&d_Aos, 4 * rows * cols * sizeof(int));
    cudaMalloc(&d_Soa, 4 * rows * cols * sizeof(int));
    cudaMemcpy(d_Aos, Aos, 4 * rows * cols * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_Soa, Soa, 4 * rows * cols * sizeof(int), cudaMemcpyHostToDevice);
    for(int k = 0; k < 1000; k++){
        //step_periodic_Aos<<<grid_size, block_size>>>(d_Aos, rows, cols);
        step_periodic_Soa<<<grid_size, block_size>>>(d_Soa, rows, cols);
    }
    // The blocking cudaMemcpy below also synchronises with the kernels.
    cudaMemcpy(Aos, d_Aos, 4 * rows * cols * sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(Soa, d_Soa, 4 * rows * cols * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_Aos);
    cudaFree(d_Soa);
    free(Aos);   // fix: host buffers from readInput_* were leaked
    free(Soa);
    return 0;
}
|
2,179
|
#include <cstdio>
#include <ostream>
#include <sstream>
#include <iostream>
int main(){
    // Report basic capabilities of CUDA device 0.
    cudaDeviceProp deviceProps{};
    cudaGetDeviceProperties(&deviceProps, 0);
    // Keep the numeric fields as doubles so the report uses the stream's
    // default floating-point formatting.
    const std::string deviceName    = deviceProps.name;
    const double coreClockKHz       = deviceProps.clockRate;
    const double globalMemBytes     = deviceProps.totalGlobalMem;
    const double memClockKHz        = deviceProps.memoryClockRate;
    const double threadsPerBlockMax = deviceProps.maxThreadsPerBlock;
    const double blocksPerSMMax     = deviceProps.maxBlocksPerMultiProcessor;
    // Emit the summary.
    std::cout
        << "Name: "<< deviceName << std::endl
        << "Clock Rate: "<< coreClockKHz << std::endl
        << "Memory Available: "<< globalMemBytes << std::endl
        << "Memory Frequency: "<< memClockKHz << std::endl
        << "Max Threads: "<< threadsPerBlockMax << std::endl
        << "Max Blocks: "<< blocksPerSMMax << std::endl;
    printf("done");
}
|
2,180
|
#include "includes.h"
//Udacity HW 4
//Radix Sorting
// Radix-sort scatter step: thread 0 of each block walks its block's chunk of
// `in` in order and moves each element (and its companion position in
// `in_pos`) to the next free slot for its digit bin. Slots come from the
// scanned per-block histogram `d_histScan`, laid out bin-major:
// index = blockIdx.x + bin * gridDim.x. Processing serially per block keeps
// the scatter stable. `nBins` is accepted but unused in this kernel.
__global__ void scatter(unsigned int *in,unsigned int *in_pos, unsigned int *out, unsigned int *out_pos, unsigned int n, unsigned int *d_histScan, unsigned int mask, unsigned int current_bits, unsigned int nBins)
{
if (threadIdx.x == 0)
{
// This block's chunk covers [start, min(n, start + blockDim.x)).
unsigned int start = blockIdx.x*blockDim.x;
for (int i = start; i < min(n, start + blockDim.x) ; i++)
{
// Extract the digit currently being sorted on.
unsigned int bin = (in[i] >> current_bits) & mask;
out[d_histScan[blockIdx.x + bin*gridDim.x]] = in[i];
out_pos[d_histScan[blockIdx.x + bin*gridDim.x]] = in_pos[i];
// Bump the write cursor for this (block, bin) pair.
d_histScan[blockIdx.x + bin*gridDim.x]++;
}
}
}
|
2,181
|
#include <stdio.h>
#include <stdlib.h>
#define BLOCKS 4
#define THREADSPERBLOCK 4
#define VECSIZE 16
__global__ void vector_add(int *a, int *b, int *c) {
    // One thread per element; the launch configuration must supply exactly
    // one thread per vector entry (BLOCKS * THREADSPERBLOCK elements).
    const int element = (blockIdx.x * THREADSPERBLOCK) + threadIdx.x;
    c[element] = a[element] + b[element];
}
void random_ints(int* a, int n) {
    // Fill a[0..n-1] with pseudo-random values in [0, n).
    for (int idx = 0; idx < n; ++idx)
        a[idx] = rand() % n;
}
void show_vector(int* vec, int size) {
    // Print the vector tab-separated on a single line.
    for (int pos = 0; pos < size; ++pos)
        printf("%d\t", vec[pos]);
    printf("\n");
}
int main(void) {
    // Add two random int vectors on the GPU and print inputs and result.
    int vecA[VECSIZE];
    int vecB[VECSIZE];
    int vecSum[VECSIZE];
    int *devA, *devB, *devSum;
    const int bytes = sizeof(int) * VECSIZE;
    // Device buffers for the two inputs and the output.
    cudaMalloc((void **) &devA, bytes);
    cudaMalloc((void **) &devB, bytes);
    cudaMalloc((void **) &devSum, bytes);
    // Random host inputs.
    random_ints(vecA, VECSIZE);
    random_ints(vecB, VECSIZE);
    // Upload, compute, download (the D2H copy synchronises with the kernel).
    cudaMemcpy(devA, &vecA, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(devB, &vecB, bytes, cudaMemcpyHostToDevice);
    vector_add<<<BLOCKS, THREADSPERBLOCK>>> (devA, devB, devSum);
    cudaMemcpy(&vecSum, devSum, bytes, cudaMemcpyDeviceToHost);
    // Release device memory.
    cudaFree(devA);
    cudaFree(devB);
    cudaFree(devSum);
    show_vector(vecA, VECSIZE);
    show_vector(vecB, VECSIZE);
    printf("\n");
    show_vector(vecSum, VECSIZE);
    printf("\n");
    return 0;
}
|
2,182
|
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <curand_kernel.h>
#include <iostream>
#include <iomanip>
#include <numeric>
#include <time.h>
// Monte-Carlo pi: each thread throws n_try darts at the unit square and
// stores its hit fraction (quarter-circle estimate) in d[index].
// NOTE(review): there is no bounds check on index, so d must be sized for
// the whole grid (gridDim.x * blockDim.x entries) — confirm at the launch
// site that the buffer is large enough.
__global__ void mc_pi(float *d, int seed, int n_try){
int index = blockIdx.x * blockDim.x + threadIdx.x;
curandState s;
// Same seed for every thread, distinct subsequence per thread.
curand_init(seed, index, 0, &s);
int inside = 0;
for(int i=0; i<n_try; i++){
float x = curand_uniform(&s);
float y = curand_uniform(&s);
if(x*x + y*y < 1){
inside++;
}
}
d[index] = inside / (float)n_try;
}
int main(void){
    // Estimate pi by averaging per-thread Monte-Carlo hit fractions.
    const int THREADS = 512;
    std::cout << "input N: ";
    int N;
    std::cin >> N;
    std::cout << "input n_try: ";
    int n_try;
    std::cin >> n_try;
    // fix: the kernel is launched with (N/THREADS)+1 blocks and has no
    // bounds check, so up to THREADS-1 threads past N also write their
    // slot. Size the device buffer for the full grid to avoid the
    // out-of-bounds writes the N-element buffer allowed, then average only
    // the first N entries so the reported estimate is unchanged.
    const int blocks = (N / THREADS) + 1;
    thrust::device_vector<float> D((size_t)blocks * THREADS);
    float* d = thrust::raw_pointer_cast(D.data());
    mc_pi<<<blocks, THREADS>>>(d, time(NULL), n_try);
    thrust::host_vector<float> H = D;
    std::cout << "PI: ";
    std::cout << std::setprecision(16);
    std::cout << 4.0 * std::accumulate(H.begin(), H.begin() + N, 0.0)/N << std::endl;
    return 0;
}
|
2,183
|
#include <iostream>
#include <stdio.h>
#include <time.h>
#include <cuda.h>
using namespace std;
// Per-warp work descriptor consumed by the pairhmm kernel: each warp reads
// its own entry and uses the two fields as loop trip counts.
struct Address
{
int numa;   // outer-loop trip count for this warp
int numb;   // inner-loop trip count for this warp
};
// Synthetic per-warp workload: every thread accumulates sum_{i<numa, j<numb}
// (i + 2*j) twice (two "rounds"), where numa/numb come from this warp's
// Address record. Lane 0 of each warp prints progress, and the elapsed
// device clock() ticks are printed at the end. Results land in
// result_d[threadIdx.x] (one slot per thread of the single block).
__global__ void pairhmm( Address * address, int * result_d)
{
clock_t start_time=clock();
// All 32 lanes of a warp share one work descriptor.
int warp_index=threadIdx.x/32;
int numa=address[warp_index]. numa;
int numb=address[warp_index]. numb;
int result=0;
int round=0;
for(round=0;round<2;round++)
{
for(int i=0;i<numa;i++)
{
if(threadIdx.x%32==0) printf("round=%d warp %d numa=%d i=%d \n",round, warp_index, numa,i);
for(int j=0;j<numb;j++)
{
if(threadIdx.x%32==0) printf("warp %d numb=%d j=%d \n", warp_index, numb,j);
result+=i+j*2;
}
}
if(threadIdx.x%32==0) printf("round=%d warp %d endendend \n",round, warp_index);
// Written every round; the final value is the two-round total.
result_d[threadIdx.x]=result;
}
clock_t finish_time=clock();
// Elapsed device clock ticks for this thread's work (printed by lane 0).
int time=(int)( finish_time-start_time);
if(threadIdx.x%32==0) printf("%d\n", time);
}
int main()
{
    // Build four per-warp work descriptors and run the pairhmm kernel with
    // one 64-thread block (two warps use descriptors 0 and 1).
    Address * address;
    address=(Address *)malloc(sizeof(Address)* 4);
    address[0].numa=2;
    address[0].numb=2;
    address[1].numa=4;
    address[1].numb=4;
    address[2].numa=6;
    address[2].numb=6;
    address[3].numa=8;
    address[3].numb=8;
    Address * address_d;
    // fix: allocate exactly the 4 Address records that are copied and read,
    // instead of the unrelated sizeof(int)*100 byte count.
    cudaMalloc( (Address **)&address_d, sizeof(Address) * 4 );
    cudaMemcpy(address_d,address,4*sizeof(Address), cudaMemcpyHostToDevice);
    int blocksize=64;
    int gridsize=1;
    int *result_h;
    int *result_d;
    result_h=(int *) malloc( sizeof(int)* 128);
    cudaMalloc( (int **)&result_d,sizeof(int) *128);
    pairhmm<<<gridsize,blocksize>>>(address_d,result_d);
    cudaMemcpy(result_h,result_d,128*sizeof(int), cudaMemcpyDeviceToHost);
    // for(int i=0;i<128;i++)
    //   printf("index= %d  %d\n", i, result_h[i]);
    cudaDeviceSynchronize();
    // fix: release device and host allocations (previously all leaked).
    cudaFree(address_d);
    cudaFree(result_d);
    free(address);
    free(result_h);
    return 0;
}
|
2,184
|
#include "includes.h"
__global__ void set_stretch_kernel(int samps, float mean, float *d_input) {
    // Fill d_input[0..samps-1] with a constant, one thread per sample.
    const int sample = blockIdx.x * blockDim.x + threadIdx.x;
    if (sample >= 0 && sample < samps) {
        d_input[sample] = mean;
    }
}
|
2,185
|
////////////////////////////////////////////////////////////////////////////
// Calculate scalar products of VectorN vectors of ElementN elements on CPU.
// Straight accumulation in double precision.
////////////////////////////////////////////////////////////////////////////
#include <iostream>
#include <cmath>
using namespace std;
void Kernel_1_CPU(int *A, int *B, int& Max_CPU, int LA, int LB, int sim, int dissim, int Gop, int Gex)
{
    // CPU reference for a Smith-Waterman-style local alignment score with
    // affine gap penalties. A (length LA) and B (length LB) are compared
    // cell by cell; Max_CPU receives the best score > 1 seen anywhere in
    // the matrix (0 if none). sim/dissim are the match/mismatch scores,
    // Gop/Gex the gap-open/gap-extend penalties.
    int i, j, S1, H1, EF, H_Max;
    int *F = new int [LA];           // gap-in-A running scores
    int *E = new int [LA+1];         // gap-in-B running scores
    int *H = new int [(LA+1)*2];     // two rolling rows of the score matrix
    Max_CPU = 0;
    for (j=0; j<LA; j++){
        F[j]=0;
        E[j]=0;
    }
    E[LA]=0;   // fix: was E[LA+1], one element past the end of new int[LA+1]
    for (j=0; j<(2*(LA+1)); j++)
        H[j]=0;
    for (i=0; i<LB; i++)
    {
        for (j=0; j<LA; j++)
        {
            // Diagonal move: match/mismatch against the previous row.
            if (A[j]==B[i])
                S1=H[j]+sim;
            else
                S1=H[j]+dissim;
            H1=std::max(S1,0);   // local alignment: never below zero
            // Affine gap recurrences (extend vs. open).
            F[j]=std::max(F[j]-Gex,H[j+1]-Gop);
            E[j+1]=std::max(E[j]-Gex,H[(LA+1)+j]-Gop);
            EF=std::max(F[j],E[j+1]);
            H[(LA+1)+j+1]=std::max(EF,H1);
        }
        // Roll the current row up and track the running maximum.
        for (j=1; j<(LA+1); j++)
        {
            H[j]=H[(LA+1)+j];
            H_Max = H[j];
            if ((H_Max>1) && (H_Max>Max_CPU))
            {
                Max_CPU=H_Max;
            }
        }
    }
    delete [] F;   // fix: the three work arrays were leaked
    delete [] E;
    delete [] H;
}
/* printf("No. Sim. Val. Seq. A Seq. B \n");
printf("-------------------------------- \n");
for( i = 1; i < (K1_Max_Report+1); ++i)
{
printf(" %i %i %i %i \n", i, Max_CPU[K1_Max_Report-i],int(fmod(1.0*End_Point[K1_Max_Report-i],(LA+1))), int(1.0*(End_Point[K1_Max_Report-i])/(LA+1)));
} */
// cout<<A[j]<<"-----"<<B[i]<<"-----"<<F[j]<<"------"<<E[j]<<endl;
|
2,186
|
/**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <utility>
#include <iostream>
#include <bitset>
#include <math.h>
#include <time.h>
#include <chrono>
#include <cuda.h>
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*
* See cuda.h for error code descriptions.
*/
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 1024
#endif
#ifndef ELAPSED_TIME
#define ELAPSED_TIME 0
#endif
#ifndef EXP_BITS_SIZE
#define EXP_BITS_SIZE 10
#endif
void print(int* h_data, int n) {
    // Dump n ints, space-separated, framed by blank lines.
    std::cout << "\n";
    for (int idx = 0; idx < n; ++idx)
        std::cout << h_data[idx] << " ";
    std::cout << "\n";
}
// Abort the program with a diagnostic if a CUDA runtime call failed.
// NOTE(review): __LINE__ expands inside this function, so the reported line
// number is always this function's, not the failing call site's.
void cudaTest(cudaError_t error) {
if (error != cudaSuccess) {
printf("cuda returned error %s (code %d), line(%d)\n",
cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
}
/*
* 256 threads per block
* 4 elements per thread
* = 1024 elements per block
* = n/1024 blocks
*/
// Per-segment LSD binary radix sort in shared memory: one block sorts its
// segment d_vec[d_seg[bx] .. d_seg[bx+1]) in BLOCK_SIZE-sized chunks. For
// each of BITS_NUMBER bit positions the chunk is stably partitioned using
// inclusive prefix sums of the 1-bits and 0-bits.
// NOTE(review): threads with threadIndexGlobal >= end skip the body and
// therefore the __syncthreads() calls inside it — barrier use in divergent
// control flow is undefined when a segment chunk is smaller than
// BLOCK_SIZE. The scan loop also reads and writes s_pref_sum_* from
// different threads between barriers. Confirm against a CUB BlockRadixSort
// reference before relying on partial-chunk behaviour.
template<int BITS_NUMBER = 64>
__global__ void radix_sort(int *d_vec, int *d_seg, int num_segments) {
int bx = blockIdx.x;
int tx = threadIdx.x;
// Segment bounds for this block.
int begin = d_seg[bx];
int end = d_seg[bx + 1];
int size = end - begin;
__shared__ int s_vec[BLOCK_SIZE];            // chunk being sorted
__shared__ int s_aux[BLOCK_SIZE];            // scatter target for each pass
__shared__ int s_pref_sum_one[BLOCK_SIZE];   // running count of 1-bits
__shared__ int s_pref_sum_zero[BLOCK_SIZE];  // running count of 0-bits
for (int k = 0; k < size; k += BLOCK_SIZE) {
int threadIndexGlobal = begin + k + tx;
// block = number of valid elements in this chunk (tail may be short).
int block = BLOCK_SIZE;
if(BLOCK_SIZE+k > size)
block = size-k;
if (threadIndexGlobal < end) {
s_vec[tx] = d_vec[threadIndexGlobal];
__syncthreads();
int i, j;
int exp = 0;
for (j = 0; j < BITS_NUMBER; j++) {
// Flag this element's current bit and seed both scans.
int x = (s_vec[tx] >> exp) & 1;
s_pref_sum_one[tx] = x;
s_pref_sum_zero[tx] = 1-x;
__syncthreads();
// Hillis-Steele-style inclusive scan over the chunk.
for (i = 1; i < block; i*=2) {
int index = tx + i;
if (index < block) {
int one = s_pref_sum_one[tx] + s_pref_sum_one[index];
int zero = s_pref_sum_zero[tx] + s_pref_sum_zero[index];
__syncthreads();
s_pref_sum_one[index] = one;
s_pref_sum_zero[index] = zero;
__syncthreads();
}
}
// Stable partition: zeros keep order at the front, ones follow.
x = (s_vec[tx] >> exp) & 1;
int index = (x) * (s_pref_sum_one[tx] + s_pref_sum_zero[block-1] - 1)
+ (1 - x) * (s_pref_sum_zero[tx] - 1);
s_aux[index] = s_vec[tx];
__syncthreads();
s_vec[tx] = s_aux[tx];
__syncthreads();
exp++;
}
d_vec[threadIndexGlobal] = s_aux[tx];
}
}
}
// Driver: reads segment offsets and data from stdin, sorts each segment on
// the GPU with radix_sort (one block per segment, EXP_BITS_SIZE bit passes),
// and prints the sorted data unless ELAPSED_TIME is set.
// Input format: num_segments, then num_segments+1 offsets, then
// num_elements, then the elements.
int main(int argc, char **argv) {
int num_of_segments;
int num_of_elements;
int i;
scanf("%d", &num_of_segments);
// Offsets array has one extra entry: segment i is [h_seg[i], h_seg[i+1]).
int mem_size_seg = sizeof(int) * (num_of_segments + 1);
int *h_seg = (int *) malloc(mem_size_seg);
for (i = 0; i < num_of_segments + 1; i++)
scanf("%d", &h_seg[i]);
scanf("%d", &num_of_elements);
int mem_size_vec = sizeof(int) * num_of_elements;
int *h_vec = (int *) malloc(mem_size_vec);
for (i = 0; i < num_of_elements; i++)
scanf("%d", &h_vec[i]);
// print(h_seg, num_of_segments + 1); print(h_vec, num_of_elements);
// Allocate device memory
int *d_seg, *d_vec;
cudaTest(cudaMalloc((void **) &d_seg, mem_size_seg));
cudaTest(cudaMalloc((void **) &d_vec, mem_size_vec));
//cudaTest(cudaMalloc((void **) &d_aux, mem_size_vec));
// copy host memory to device
cudaTest(cudaMemcpy(d_seg, h_seg, mem_size_seg, cudaMemcpyHostToDevice));
cudaTest(cudaMemcpy(d_vec, h_vec, mem_size_vec, cudaMemcpyHostToDevice));
// Setup execution parameters
//	int devID = 0;
//	cudaDeviceProp deviceProp;
//	cudaTest(cudaGetDeviceProperties(&deviceProp, devID));
//	unsigned int multiprocessor_number = deviceProp.multiProcessorCount;
//	//unsigned int grid_blocks_max_x = deviceProp.maxGridSize[0];
//	//unsigned int sharedMemoryTotal = deviceProp.sharedMemPerBlock/(sizeof(int));
//
int blocksize = BLOCK_SIZE; //num_of_elements / num_of_segments;
//if (blocksize > 1024)
//	blocksize = 1024;
dim3 threads(blocksize, 1);
//dim3 grid(num_of_segments / blocksize + 1, 1);
// One block per segment.
dim3 grid(num_of_segments, 1);
// Wall-clock timing around kernel + sync.
std::chrono::high_resolution_clock::time_point start =
std::chrono::high_resolution_clock::now();
// Template argument = number of bit passes (EXP_BITS_SIZE, default 10).
radix_sort<EXP_BITS_SIZE> <<<grid, threads>>>(d_vec, d_seg,
num_of_segments);
cudaDeviceSynchronize();
std::chrono::high_resolution_clock::time_point stop =
std::chrono::high_resolution_clock::now();
cudaTest(cudaPeekAtLastError());
std::chrono::duration<double> time_span = std::chrono::duration_cast<
std::chrono::duration<double>>(stop - start);
cudaTest(cudaMemcpy(h_seg, d_seg, mem_size_seg, cudaMemcpyDeviceToHost));
cudaTest(cudaMemcpy(h_vec, d_vec, mem_size_vec, cudaMemcpyDeviceToHost));
//print(h_seg, num_of_segments + 1);
//print(h_vec, num_of_elements);
//print(h_seg, 10);
//print(h_vec, 1000);
// NOTE(review): the timing printout is commented out, leaving an empty
// statement when ELAPSED_TIME == 1; output is only produced otherwise.
if (ELAPSED_TIME == 1)
//		std::cout << "It took me " << time_span.count() * 1000
//				<< " miliseconds.\n";
;
else
print(h_vec, num_of_elements);
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
cudaDeviceReset();
free(h_seg);
free(h_vec);
cudaFree(d_seg);
cudaFree(d_vec);
return 0;
}
/**
* // cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
cudaDeviceReset();
*/
/*
printf("thread=%d | aux=%d %d %d %d\n", bx * blockDim.x + tx, d_aux[begin], d_aux[begin + 1], d_aux[begin + 2], d_aux[begin + 3]);
int devID = 0;
cudaDeviceProp deviceProp;
cudaTest(cudaGetDeviceProperties(&deviceProp, devID));
unsigned int multiprocessorNumber = deviceProp.multiProcessorCount;
unsigned int sharedMemoryTotal = deviceProp.sharedMemPerBlock/(sizeof(int));
std::cout << "multiprocessorNumber: " << multiprocessorNumber << "\n";
std::cout << "sharedMemoryTotal: " << sharedMemoryTotal << "\n";
std::cout << "numberOfSegmentsPerBlock: " << sharedMemoryTotal << "\n";
*/
|
2,187
|
//#include <helper_cuda.h>
#include "kernel.cuh"
__global__ void my_first_kernel(float *x)
{
    // Each thread records its own intra-block index; the launch must supply
    // exactly one thread per element of x.
    const int globalIdx = threadIdx.x + blockDim.x * blockIdx.x;
    x[globalIdx] = (float) threadIdx.x;
}
void my_first(float *x, int nblocks, int nthreads)
{
    // Host wrapper: launch my_first_kernel with the requested configuration
    // (nblocks * nthreads threads, one per element of x).
    my_first_kernel<<<nblocks, nthreads>>>(x);
}
|
2,188
|
#include <stdio.h>
#include <stdlib.h>
/*
 Defines the function to be integrated.
*/
double funcion(double x){
    // Integrand 4 / (1 + x^2); its integral over [0, 1] is pi.
    const double denominator = 1 + x * x;   // change this body to integrate a different function
    return 4 / denominator;
}
// Riemann-sum integral approximation: thread i adds f(a + i*(b-a)/n) to
// *result for i < n.
// NOTE(review): `*result += ...` is a non-atomic read-modify-write from
// many threads — results are racy; an atomicAdd or a reduction is needed.
// NOTE(review): f must be a device-callable function pointer; the call
// site passes the address of a host function, which is invalid on the
// device. The locals on the first line are unused.
__global__
void aproxIntegral( double (*f)(double), double a, double b , long n, double *result ){
double suma, tiempoInicio, tiempoEjecucion, resultado;
int i = blockDim.x * blockIdx.x + threadIdx.x;
if ( i < n ) *result+=(*f)(a+i*(b-a)/n);
}
// Driver for aproxIntegral over [0, 1] with N sample points.
// NOTE(review): this launch is broken as written — it passes the address
// of the host function `funcion` and the address of the host stack
// variable `a` to the kernel; neither is valid device memory/code, and `a`
// is printed without ever being initialised or copied back. Fixing this
// requires a __device__ function pointer symbol and a cudaMalloc'd result
// plus cudaMemcpy, which are outside this block.
int main(){
int N = 10;
int nblocks = (N+255) / 256;
double a;
aproxIntegral<<<nblocks, 256>>>(funcion, 0, 1, N, &a);
printf("> Result %lf", a );
return 0;
}
|
2,189
|
// Modified CUDA Add Example
// This example takes 2 float arrays of size 1M and adds them together.
// Prints out Total Runtime.
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <iomanip>
#include <chrono>
cudaError_t addWithCuda(float *c, const float *a, const float *b, unsigned int size);
// Elementwise c[i] = a[i] + b[i], indexed by threadIdx.x only.
// NOTE(review): because blockIdx is ignored, only the first blockDim.x
// elements are ever computed regardless of grid size — the <<<1, 256>>>
// launch in addWithCuda therefore touches just 256 of the 1M elements.
// Extending coverage needs a size parameter plus blockIdx-based indexing.
__global__ void addKernel(float *c, const float *a, const float *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
//printf("ThreadIdx.x : %d	" , i );
//printf("a[i] = %.2f", a[i]);
//printf("b[i] = %.2f", b[i]);
//printf("c[i] = %.2f \n", c[i]);
}
int main()
{
    // Add two 1M-element float arrays via addWithCuda and report wall time.
    const int arraySize = 1 << 20; // 1 Million elements
    float *x = new float[arraySize]; // Input 1
    float *y = new float[arraySize]; // Input 2
    float *z = new float[arraySize]; // Output
    // Initialize
    for (int i = 0; i < arraySize; i++)
    {
        x[i] = 1.0f;
        y[i] = 2.0f;
        z[i] = 0.0f;
    }
    //Timing Variables
    auto start = std::chrono::high_resolution_clock::now();
    std::ios_base::sync_with_stdio(false);
    // Add vectors in parallel.
    cudaError_t cudaStatus = addWithCuda(z, x, y, arraySize);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addWithCuda failed!");
        delete[] x; delete[] y; delete[] z;   // fix: free before early exit
        return 1;
    }
    // Calculating total time taken by the program.
    auto end = std::chrono::high_resolution_clock::now();
    double time_taken = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
    time_taken *= 1e-9;
    std::cout << "Time taken by program: " << std::fixed << time_taken << std::setprecision(9) << " sec. \n" << std::endl;
    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        delete[] x; delete[] y; delete[] z;   // fix: free before early exit
        return 1;
    }
    //Print arrays
    printf("{x[0], x[1], x[2], x[3],x[4], ...} + {y[0], y[1], y[2], y[3], y[4], ...} = {%.2f,%.2f,%.2f,%.2f,%.2f, ...}\n",
        z[0], z[1], z[2], z[3], z[4]);
    printf("Complete.\n");
    delete[] x;   // fix: the three new[] arrays were leaked
    delete[] y;
    delete[] z;
    return 0;
}
// Helper function for using CUDA to add vectors in parallel.
// Helper function for using CUDA to add vectors in parallel: allocates
// device buffers, copies a and b up, launches addKernel, and copies the
// result into c. Returns the first failing cudaError_t (cudaSuccess on
// success); device buffers are always freed via the Error label.
cudaError_t addWithCuda(float *c, const float *a, const float *b, unsigned int size)
{
float *dev_a = 0;
float *dev_b = 0;
float *dev_c = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output)    .
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
// NOTE(review): a single block of 256 threads is launched while `size` is
// 1M at the call site, and addKernel indexes by threadIdx.x only — so only
// elements 0..255 are ever computed. The kernel takes no size parameter,
// so fixing coverage requires changing addKernel's signature as well.
addKernel<<<1, 256>>>(dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(float), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
|
2,190
|
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#define N 1024*1024
#define THREADS_PER_BLOCK 512
__global__ void multiply(float *a, float *b, int n);
void random_floats(float *x, int Num);
float* CPU_big_dot(float *a, float *b, int Num);
float* GPU_big_dot(float *A, float *B, int Num);
long long start_timer();
long long stop_timer(long long start_time, char *name);
int main(void) {
    // Compare a serial and a GPU dot product of two random N-vectors,
    // verify they agree, and report the CPU/GPU speedup.
    float *a, *b; // host copies of a, b, c
    int size = N * sizeof(float);
    // Alloc space for host copies of a, b, c and setup input values
    a = (float *) malloc(size);
    random_floats(a, N);
    b = (float *) malloc(size);
    random_floats(b, N);
    float *result_cpu, *result_gpu;
    long long cpu_start, cpu_time, gpu_start, gpu_time;
    char cpu_task_name[] = "CPU time usage";
    char gpu_task_name[] = "GPU time usage";
    cpu_start = start_timer();
    result_cpu = CPU_big_dot(a, b, N);
    cpu_time = stop_timer(cpu_start, cpu_task_name);
    gpu_start = start_timer();
    result_gpu = GPU_big_dot(a, b, N);
    gpu_time = stop_timer(gpu_start, gpu_task_name);
    printf("\ncpu result: %f\n", *result_cpu);
    printf("gpu result: %f\n", *result_gpu);
    float diff = *result_cpu - *result_gpu;
    if (diff < 0) diff = -diff; // fix: compare the magnitude — a large
                                // negative difference previously passed
    if (diff <= 1.0e-6)
        printf("difference between 2 results is: %f < 1.0e-6 ===> correct.\n", diff);
    else{
        printf("difference between 2 results is: %f > 1.0e-6 ===> incorrect.\nExit Now!\n", diff);
        exit(-1);
    }
    printf("\nCPU/GPU speedup: %f\n", 1.0 * cpu_time / gpu_time);
    free(a);
    free(b);
    free(result_cpu);
    free(result_gpu);
    return 0;
}
__global__ void multiply(float *a, float *b, int n)
{
    // Elementwise product written back into a: a[i] *= b[i], one thread per
    // element, guarded against the grid's ragged tail.
    const int gid = threadIdx.x + blockIdx.x * blockDim.x;
    if (gid < n)
        a[gid] = a[gid] * b[gid];
}
void random_floats(float *x, int Num)
{
    // Fill x[0..Num-1] with uniform pseudo-random values in [0, 1].
    for (int idx = 0; idx < Num; ++idx)
        x[idx] = (float)rand() / RAND_MAX;
}
float* CPU_big_dot(float *a, float *b, int Num)
{
    // Serial dot product of a and b; the caller owns (and must free) the
    // returned heap-allocated float.
    float *result = (float *) malloc(sizeof(float));
    *result = 0;
    for (int k = 0; k < Num; ++k)
        *result += a[k] * b[k];
    return result;
}
float* GPU_big_dot(float *A, float *B, int Num)
{
    // GPU-assisted dot product: elementwise products on the device, final
    // reduction on the host. Caller owns (and must free) the returned float.
    float *sum;
    sum = (float *) malloc(sizeof(float));
    (*sum) = 0;
    float *d_A, *d_B; // device copies of A, B
    int size = Num * sizeof(float);
    // Allocate space for device copies of a, b
    cudaMalloc((void **) &d_A, size);
    cudaMalloc((void **) &d_B, size);
    // Copy inputs to device
    cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);
    // Elementwise products computed in-place in d_A.
    multiply<<<(Num + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(d_A, d_B, Num);
    // fix: copy the products into a scratch buffer instead of clobbering the
    // caller's input array A (the old code overwrote A with a[i]*b[i]).
    float *products = (float *) malloc(size);
    cudaMemcpy(products, d_A, size, cudaMemcpyDeviceToHost);
    for (int i = 0; i < Num; i++)
        (*sum) += products[i];
    // Cleanup
    free(products);
    cudaFree(d_A);
    cudaFree(d_B);
    return sum;
}
long long start_timer() {
    // Current wall-clock time in microseconds.
    struct timeval now;
    gettimeofday(&now, NULL);
    return now.tv_sec * 1000000 + now.tv_usec;
}
long long stop_timer(long long start_time, char *name) {
    // Print the elapsed seconds since start_time under the given label and
    // return the elapsed time in microseconds.
    struct timeval now;
    gettimeofday(&now, NULL);
    long long finish = now.tv_sec * 1000000 + now.tv_usec;
    printf("%s: %.5f sec\n", name, ((float)(finish - start_time))/(1000*1000));
    return finish - start_time;
}
|
2,191
|
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <time.h>
#include <math.h>
#define TIMER_CREATE(t) \
cudaEvent_t t##_start, t##_end; \
cudaEventCreate(&t##_start); \
cudaEventCreate(&t##_end);
#define TIMER_START(t) \
cudaEventRecord(t##_start); \
cudaEventSynchronize(t##_start); \
#define TIMER_END(t) \
cudaEventRecord(t##_end); \
cudaEventSynchronize(t##_end); \
cudaEventElapsedTime(&t, t##_start, t##_end); \
cudaEventDestroy(t##_start); \
cudaEventDestroy(t##_end);
#define TILE_SIZE 16
#define CUDA_TIMING
unsigned char *input_gpu;
unsigned char *output_gpu;
/*******************************************************/
/* Cuda Error Function */
/*******************************************************/
// Pass-through CUDA error check: in DEBUG builds aborts with a diagnostic
// when `result` is not cudaSuccess; in release builds the check compiles
// away and the error code is simply returned to the caller.
inline cudaError_t checkCuda(cudaError_t result) {
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
exit(-1);
}
#endif
return result;
}
// GPU kernel and functions
// GPU kernel and functions
// 3x3 Sobel edge detector with thresholding: one thread per pixel; pixels
// whose squared gradient magnitude exceeds 100 become 255, others 0.
__global__ void kernel(unsigned char *input,
                       unsigned char *output,
                       unsigned int height,
                       unsigned int width){
    int x = blockIdx.x*TILE_SIZE+threadIdx.x;
    int y = blockIdx.y*TILE_SIZE+threadIdx.y;
    int i,j;
    int sumx,sumy;
    unsigned long int sum;
    int hx[9] = {1,0,-1,2,0,-2,1,0,-1};   // horizontal Sobel mask
    int hy[9] = {1,2,1,0,0,0,-1,-2,-1};   // vertical Sobel mask
    if (x < width && y < height ){
        sumx=0;
        sumy=0;
        // Accumulate the 3x3 neighbourhood, skipping out-of-image taps.
        for(j=x-1;j<x+2;j++)
            for(i=y-1;i<y+2;i++)
                if(j>=0 && j<width && i>=0 && i<height)
                {
                    sumx += input[i*width+j]*hx[(y-i+1)*3+x-j+1];
                    sumy += input[i*width+j]*hy[(y-i+1)*3+x-j+1];
                }
        // fix: the magnitude/threshold/store steps were outside the bounds
        // check above, so out-of-range threads wrote past the image and
        // used uninitialized sums. They now run only for in-bounds pixels.
        sum = sumx*sumx+sumy*sumy;
        if(sum>100)
            output[y*width+x] = 255;
        else
            output[y*width+x] = 0;
    }
}
// Runs the Sobel edge kernel over a height x width 8-bit image: pads the
// grid up to TILE_SIZE multiples, copies the input to the GPU, launches the
// kernel (optionally timed), and copies the result back.
// NOTE(review): despite the name, this does not transpose — it applies the
// edge-detection kernel above.
// NOTE(review): the device buffers are the padded size but only
// height*width bytes are copied in, so the padded tail of input_gpu is
// uninitialized — confirm the kernel's bounds guard keeps it unread.
void transpose_img(unsigned char *in_mat,
unsigned char *out_mat,
unsigned int height,
unsigned int width){
// Ceil-divide the image into TILE_SIZE x TILE_SIZE blocks.
int gridXSize = 1 + (( width - 1) / TILE_SIZE);
int gridYSize = 1 + ((height - 1) / TILE_SIZE);
int XSize = gridXSize*TILE_SIZE;
int YSize = gridYSize*TILE_SIZE;
// Both are the same size (CPU/GPU).
int size = XSize*YSize;
// Allocate arrays in GPU memory
checkCuda(cudaMalloc((void**)&input_gpu , size*sizeof(unsigned char)));
checkCuda(cudaMalloc((void**)&output_gpu , size*sizeof(unsigned char)));
checkCuda(cudaMemset(output_gpu , 0 , size*sizeof(unsigned char)));
// Copy data to GPU
checkCuda(cudaMemcpy(input_gpu,
in_mat,
height*width*sizeof(char),
cudaMemcpyHostToDevice));
checkCuda(cudaDeviceSynchronize());
// Execute algorithm
dim3 dimGrid(gridXSize, gridYSize);
dim3 dimBlock(TILE_SIZE, TILE_SIZE);
#if defined(CUDA_TIMING)
float Ktime;
TIMER_CREATE(Ktime);
TIMER_START(Ktime);
#endif
// Kernel Call
kernel<<<dimGrid, dimBlock>>>(input_gpu, output_gpu, height, width);
checkCuda(cudaDeviceSynchronize());
#if defined(CUDA_TIMING)
TIMER_END(Ktime);
printf("Kernel Execution Time: %f ms\n", Ktime);
#endif
// Retrieve results from the GPU
checkCuda(cudaMemcpy(out_mat,
output_gpu,
height*width*sizeof(unsigned char),
cudaMemcpyDeviceToHost));
// Free resources and end the program
checkCuda(cudaFree(output_gpu));
checkCuda(cudaFree(input_gpu));
}
|
2,192
|
#include <fstream>
#include <iostream>
#include <stdlib.h>
#define show(x) std::cout << #x ": " << x << std::endl;
// Explicit second-order finite-difference solver for the 1-D wave equation
// u_tt = c^2 u_xx on x in [0, X], t in [0, T], with a Gaussian initial
// profile, zero boundary values (u[.,0] and u[.,nx-1] stay 0), and zero
// initial velocity (first two time rows equal). Writes the full space-time
// grid to output.txt, one time row per line.
// NOTE(review): uses exp() without an explicit <cmath>/<math.h> include —
// presumably pulled in transitively; confirm it compiles on the target
// toolchain.
int main()
{
const float T = 5.0f;
const float X = 1.0f;
const float c = 1.0f;
float dt = 0.010;
float dx = 0.025;
int nt = (int)(T/dt) + 1;
int nx = (int)(X/dx) + 1;
// Recompute the steps so the grid covers [0,T] x [0,X] exactly.
dt = T / (nt-1);
dx = X / (nx-1);
// Courant number; the scheme's stability requires r <= 1.
float r = c * dt / dx;
float *u = (float *)calloc(nx * nt, sizeof(float));
// fill in the two first rows
for (int j = 1; j < nx - 1; j++) {
float x = j*dx;
float y = x - 0.5;
// Gaussian bump centred at x = 0.5; equal first rows => zero velocity.
u[j + nx] = u[j] = exp(-40 * y*y);
}
// Leapfrog update: row i+1 from rows i and i-1.
for (int i = 1; i < nt-1; i++) {
for (int j = 1; j < nx - 1; j++) {
int id = j + nx * (i + 1);
float uijp = u[j + 1 + nx * i];
float uij0 = u[j + 0 + nx * i];
float uijn = u[j - 1 + nx * i];
float uinj = u[j + nx * (i - 1)];
u[id] = r*r*uijp + 2 * (1 - r*r)*uij0 + r*r*uijn - uinj;
}
}
// Dump the whole grid, tab-separated, one time level per line.
std::ofstream ofs("output.txt");
for (int i = 0; i < nt; i++) {
for (int j = 0; j < nx; j++)
ofs << u[i*nx + j] << "\t";
ofs << std::endl;
}
ofs.close();
free(u);
return 0;
}
|
2,193
|
#include "includes.h"
__global__ void matmul(double *a, double *b, double *c, int n)
{
    // Naive dense n x n matrix multiply, one thread per output element.
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    // Guard the grid's ragged edge.
    if (row >= n || col >= n)
        return;
    // Accumulate the dot product of row of a and column of b in a register
    // before the single store to c.
    double acc = 0.0;
    for (int k = 0; k < n; ++k)
        acc += a[row * n + k] * b[k * n + col];
    c[row * n + col] = acc;
}
|
2,194
|
#include<iostream>
#include<vector>
#include<cstdlib>
__global__ void convolution_kernel(double *arr, double *mask, double *output, int N, int M){
    // 1-D convolution of arr (length N) with a centred mask (length M),
    // treating values outside the array as zero.
    auto i = blockDim.x*blockIdx.x+threadIdx.x;
    if (i >= N)   // fix: the ceil-divided grid's last block wrote past output
        return;
    auto start = i - (M/2);
    auto temp = 0.0;
    for(auto k = 0; k < M; k++){
        // Unsigned wraparound for start < 0 makes the < N test fail, which
        // also skips the left-of-array taps.
        if((start+k >=0) && (start+k <N)){
            temp += arr[start+k]*mask[k];
        }
    }
    output[i] = temp;
}
int main(){
    // Convolve a random 2^20-element signal with a random 7-tap mask on the GPU.
    int signalLen = 1048576; // size of the array = 2^20
    size_t signalBytes = signalLen*sizeof(double);
    int maskLen = 7; // size of the mask
    size_t maskBytes = maskLen*sizeof(double);
    std::vector<double> hostSignal(signalLen);
    std::vector<double> hostMask(maskLen);
    std::vector<double> hostResult(signalLen);
    // Random inputs (signal first, then mask, to keep the rand() stream order).
    for(auto& v:hostSignal){v = rand()%100;}
    for(auto& v:hostMask){v = rand()%10;}
    double *devSignal, *devMask, *devResult;
    cudaMalloc(&devSignal, signalBytes);
    cudaMalloc(&devResult, signalBytes);
    cudaMalloc(&devMask, maskBytes);
    cudaMemcpy(devSignal, hostSignal.data(), signalBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(devMask, hostMask.data(), maskBytes, cudaMemcpyHostToDevice);
    // Ceil-divide the signal over 32-thread blocks.
    int threadsPerBlock = 32;
    int blocksPerGrid = (signalLen+threadsPerBlock-1)/threadsPerBlock;
    convolution_kernel<<<blocksPerGrid, threadsPerBlock>>>(devSignal, devMask, devResult, signalLen, maskLen);
    cudaMemcpy(hostResult.data(), devResult, signalBytes, cudaMemcpyDeviceToHost);
    // Uncomment to print the output
    // for(auto& v:hostResult){std::cout << v << std::endl;}
    cudaFree(devSignal);
    cudaFree(devResult);
    cudaFree(devMask);
    return 0;
}
|
2,195
|
#define _NTHREAD 512
#define _NBLOCK 65535
#include<cuda.h>
__global__ void _AFFINE_KERNEL(int* ,int ,int* ,int ,int ,int ,int ,int ,int ,int ,int ,int ,int );
#include<stdio.h>
#include<stdlib.h>
// Generated driver: initialises a 20x20 matrix XP1 and vector XS3, picks a
// thread/block shape for 400 logical threads (tiling if the grid limit
// would be exceeded), and launches _AFFINE_KERNEL for each 3-row strip.
// NOTE(review): `k` is unused; `j` is passed to the kernel with whatever
// value the init loops left in it (20) — the kernel's CUDA_j parameter
// appears unused, but confirm against the generator's intent.
int main()
{
int XP1[20][20],XS3[20],i,j,k;
for(i=0;i<20;i++)
for(j=0;j<20;j++)
{
XP1[i][j]=i+j;
XS3[i]=2*i;
}
// Host-side dimension bookkeeping for the flattened device arrays.
int _SZ_XS3_1 = 20;
int _SZ_XP1_2 = 20;
int _SZ_XP1_1 = 20;
int *_DEV_XS3;
cudaMalloc((void**) &_DEV_XS3, sizeof(int)*_SZ_XS3_1);
cudaMemcpy(_DEV_XS3, XS3, sizeof(int)*_SZ_XS3_1, cudaMemcpyHostToDevice);
int *_DEV_XP1;
cudaMalloc((void**) &_DEV_XP1, sizeof(int)*_SZ_XP1_2*_SZ_XP1_1);
cudaMemcpy(_DEV_XP1, XP1, sizeof(int)*_SZ_XP1_2*_SZ_XP1_1, cudaMemcpyHostToDevice);
// 400 logical threads fit in one block here, so the else-branch tiling
// logic is dead for these sizes.
float _NUM_THREADS = 400,_NUM_BLOCKS=1;
int _NUM_TILE=1;
dim3 _THREADS(512);
dim3 _BLOCKS(1);
if(_NUM_THREADS < _NTHREAD)
{
_THREADS.x=20;
_THREADS.y=20;
}
else {
_NUM_BLOCKS=_NUM_THREADS/256;
_BLOCKS.x=_BLOCKS.y=ceil(sqrt(_NUM_BLOCKS));
_THREADS.x=_THREADS.y=ceil(sqrt(400.0/(_BLOCKS.x*_BLOCKS.y)));
int temp=_NUM_BLOCKS;
if(_NUM_BLOCKS>_NBLOCK)
_NUM_TILE=(temp % _NBLOCK == 0)?(_NUM_BLOCKS/_NBLOCK):((_NUM_BLOCKS/_NBLOCK)+1);
}
int _CUDA_TILE;
// One launch per tile for each 3-row strip of the iteration space.
for(i=0;i<20;i+=3)
for(_CUDA_TILE=0;_CUDA_TILE<_NUM_TILE;_CUDA_TILE++)
_AFFINE_KERNEL<<<_BLOCKS,_THREADS>>>(_DEV_XS3, _SZ_XS3_1, _DEV_XP1, _SZ_XP1_2, _SZ_XP1_1, 2, i, j, 0, 20, 0, 20, _CUDA_TILE);
cudaDeviceSynchronize();
cudaMemcpy(XP1, _DEV_XP1, sizeof(int)*_SZ_XP1_2*_SZ_XP1_1, cudaMemcpyDeviceToHost);
return 0;
}
// Generated affine-loop kernel: for global indices i in
// [CUDA_i, min(CUDA_i+3, CUDA_U_i)) and j in [CUDA_L_j, CUDA_U_j),
// computes XP1[i][j] = XP1[i+3][j+4] + XS3[i] on the flattened arrays.
// NOTE(review): XP1[(i+3)*_SZ_XP1_1 + j+4] reads row i+3 (up to 22) and
// column j+4 (up to 23) of a 20x20 matrix — out-of-bounds reads occur for
// i >= 17 or j >= 16; confirm the generator intended clamped bounds here.
// CUDA_j and phi_count are accepted but unused.
__global__ void _AFFINE_KERNEL(int* XS3,int _SZ_XS3_1,int* XP1,int _SZ_XP1_2,int _SZ_XP1_1,int phi_count, int CUDA_i, int CUDA_j, int CUDA_L_i,int CUDA_U_i, int CUDA_L_j,int CUDA_U_j, int _CUDA_TILE)
{
// Global (i, j) including the tile offset for grids split across launches.
int i = gridDim.x*blockDim.x*_CUDA_TILE + blockDim.x*blockIdx.x + threadIdx.x;
int j = gridDim.y*blockDim.y*_CUDA_TILE + blockDim.y*blockIdx.y + threadIdx.y;
if((CUDA_i<=i)&&(i<(CUDA_i+3))&&(i<CUDA_U_i)){
if((CUDA_L_j<=j)&&(j<CUDA_U_j)){
XP1[i*_SZ_XP1_1+j]=XP1[(i+3)*_SZ_XP1_1+j+4]+XS3[i];
}}}
|
2,196
|
#include <bits/stdc++.h>
using namespace std;
// Kernel for matrix multiplication: ans += x * y for N x N row-major
// matrices. Note the "+=": ans must be zeroed before the first launch, and
// repeated launches accumulate onto the previous result (the host driver
// relies on this when timing four consecutive runs).
__global__
void GPUmatmul(int N, double *x, double *y, double *ans)
{
    // Flat thread ID within the (3D) block.
    int t= (blockDim.x*blockDim.y)*threadIdx.z+(threadIdx.y*blockDim.x)+(threadIdx.x);
    // Flat block ID within the (3D) grid.
    int b= (gridDim.x*gridDim.y)*blockIdx.z+(blockIdx.y*gridDim.x)+(blockIdx.x);
    // Total threads per block.
    int T= blockDim.x*blockDim.y*blockDim.z;
    // Total blocks in the grid.
    int B= gridDim.x*gridDim.y*gridDim.z;
    /*
     * Work distribution: block b handles rows i = b, b+B, ...; within a
     * block, thread t handles columns j = t, t+T, ... Cells assigned to
     * different threads never overlap, so no synchronization is needed.
     * Each thread does O(N * #assigned cells) work.
     */
    for (int i=b;i<N;i+=B)
    {
        for(int j=t;j<N;j+=T)
        {
            for(int k=0;k<N;k++)
            {
                ans[i*N+j]+=(x[i*N+k]*y[k*N+j]);
            }
        }
    }
}
// Naive triple-loop host matrix multiply: ans += x * y, where x, y, ans are
// N x N row-major matrices. Note the accumulation — ans is NOT zeroed here,
// so repeated calls add onto the previous result.
void CPUmatmul(int N,double *x, double *y, double *ans)
{
    for (int row = 0; row < N; ++row)
    {
        for (int col = 0; col < N; ++col)
        {
            // Accumulate into a local, then store once per cell.
            double acc = ans[row * N + col];
            for (int k = 0; k < N; ++k)
                acc += x[row * N + k] * y[k * N + col];
            ans[row * N + col] = acc;
        }
    }
}
// Verifies that every entry of the N x N matrix `ans` equals 20.0 exactly
// (5 * identity accumulated over the driver's four runs).
// Returns false as soon as any entry differs.
bool check(int N,double *ans)
{
    const int total = N * N;
    for (int idx = 0; idx < total; ++idx)
    {
        if (ans[idx] != 20.0)
            return false;
    }
    return true;
}
// Fill x with 5s, y with the identity matrix, and zero the accumulator.
// (Extracted: the original main duplicated this loop verbatim.)
static void initData(int N, double *x, double *y, double *ans)
{
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
        {
            x[i*N+j] = 5;
            y[i*N+j] = (i == j ? 1 : 0);
            ans[i*N+j] = 0.0;
        }
    }
}

// Benchmarks CPU vs GPU matrix multiply on a 512x512 problem. Each side runs
// four accumulating multiplies (the first is a warm-up excluded from the
// average), after which every entry should be 4 * 5 = 20 — checked by check().
// Fixes vs. the original: `ans` is now cudaFree'd (it was leaked), and the
// "Strting" typo in both progress messages is corrected.
int main(void)
{
    // Matrix dimension: 2^9 = 512.
    int N = 1<<9;
    double *x, *y, *ans;

    // Allocate Unified Memory – accessible from CPU or GPU.
    cudaMallocManaged(&x, N*N*sizeof(double));
    cudaMallocManaged(&y, N*N*sizeof(double));
    cudaMallocManaged(&ans, N*N*sizeof(double));

    initData(N, x, y, ans);

    clock_t t;
    double avg = 0;
    cout << "Starting CPU computation" << endl;
    for (int i = 0; i <= 3; i++)
    {
        t = clock();
        CPUmatmul(N, x, y, ans);
        t = clock() - t;
        if (i) avg += t; // ignore the first (warm-up) run
        printf ("It took CPU-%d %f ms.\n",i,(((double)t)/CLOCKS_PER_SEC)*1000);
    }
    avg /= 3;
    avg /= CLOCKS_PER_SEC;
    avg *= 1000;
    printf ("It took %lf ms on avg.\n",avg);
    if (check(N, ans)) cout << "RUN OK." << endl;
    else cout << "RUN NOT OK." << endl;

    // Reset inputs/accumulator before the GPU runs.
    initData(N, x, y, ans);

    avg = 0;
    cout << "Starting GPU computation" << endl;
    // Run kernel on GPU: 16^3 blocks of 16*8*8 threads.
    for (int i = 0; i <= 3; i++)
    {
        t = clock();
        GPUmatmul<<<dim3(16,16,16), dim3(16,8,8)>>>(N, x, y, ans);
        cudaDeviceSynchronize();  // kernel launch is async; wait before timing
        t = clock() - t;
        if (i) avg += t; // ignore the first (warm-up) run
        printf ("It took GPU-%d %f ms.\n",i,(((double)t)/CLOCKS_PER_SEC)*1000);
    }
    avg /= 3;
    avg /= CLOCKS_PER_SEC;
    avg *= 1000;
    printf ("It took %lf ms on avg.\n",avg);
    if (check(N, ans)) cout << "RUN OK." << endl;
    else cout << "RUN NOT OK." << endl;

    // Free memory (fix: `ans` was previously leaked).
    cudaFree(x);
    cudaFree(y);
    cudaFree(ans);
    return 0;
}
|
2,197
|
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <vector>
#include <math.h>
#include <stdlib.h>
#include <time.h>
#include <algorithm>
using namespace std;
vector< vector<double> > PointValues;
vector< vector<double> > KCentroids;
vector<int> ClusteringValues;
void printClusters();
void updateCentroids(int total_values);
bool updatePointDistances();
void CheckCudaError(char sms[], int line);
// K-means driver: reads problem sizes and point coordinates from stdin,
// seeds K clusters with distinct random points, computes the initial
// centroids, prints them, and reports the time spent in updateCentroids.
// The iterative refinement loop is currently commented out, so only one
// centroid update is timed.
int main(int argc, char** argv) {
    // CUDA events used purely as host-side timers.
    cudaEvent_t E1, E2, E3, E4, E5;
    float TiempoTotal, TiempoUpdateCentroids, TiempoUpdatePointDistances;
    cudaEventCreate(&E1);
    cudaEventCreate(&E2);
    cudaEventCreate(&E3);
    cudaEventCreate(&E4);
    cudaEventCreate(&E5);

    // Input format: #points #values-per-point K max_iterations, then the
    // coordinates of each point.
    int total_points, total_values, K, max_iterations;
    cin >> total_points >> total_values >> K >> max_iterations;
    if(K > total_points)
        cout << "INPUT ERROR";  // NOTE(review): prints but does not exit — execution continues
    ClusteringValues.resize(total_points);
    for(int i = 0; i < total_points; i++) {
        vector<double> values;
        for(int j = 0; j < total_values; j++)
        {
            double value;
            cin >> value;
            values.push_back(value);
        }
        PointValues.push_back(values);
    }

    // Choose K distinct random points as the initial cluster members;
    // the fixed seed keeps initialisation deterministic between runs.
    vector<int> prohibited_indexes;
    srand(1);
    for(int i = 0; i < K; i++)
    {
        while(true)
        {
            int index_point = rand() % total_points;
            if(find(prohibited_indexes.begin(), prohibited_indexes.end(),
                    index_point) == prohibited_indexes.end())
            {
                prohibited_indexes.push_back(index_point);
                ClusteringValues[index_point] = i;
                break;
            }
        }
    }
    KCentroids = vector<vector<double> >(K, vector<double>(total_values));

    // Time the first centroid computation between events E1 and E2.
    cudaEventRecord(E1, 0);
    cudaEventSynchronize(E1);
    updateCentroids(total_values);
    cudaEventRecord(E2, 0);
    cudaEventSynchronize(E2);
    printClusters();

    // Disabled iterative refinement loop (kept for reference):
    /*int counter = 0;
    cudaEventRecord(E3, 0);
    cudaEventSynchronize(E3);
    bool yeray = updatePointDistances();
    cudaEventRecord(E4, 0);
    cudaEventSynchronize(E4);
    while (yeray and counter <= max_iterations) {
        ++counter;
        updateCentroids(total_values);
        yeray = updatePointDistances();
    }
    cout << "LLAMADAS A UPDATECENTROIDS: " << counter << endl;
    cout << "LLAMADAS A UPDATEPOINTDISTANCES: " << counter+1 << endl;
    cudaEventRecord(E5, 0);
    cudaEventSynchronize(E5);*/

    cudaEventElapsedTime(&TiempoUpdateCentroids, E1, E2);
    //cudaEventElapsedTime(&TiempoUpdatePointDistances, E3, E4);
    //cudaEventElapsedTime(&TiempoTotal, E1, E5);
    printf("Tiempo UpdateCentroids function: %4.6f milseg\n",
           TiempoUpdateCentroids);
    //printf("Tiempo UpdatePointDistances function: %4.6f milseg\n",
    //       TiempoUpdatePointDistances);
    //printf("Tiempo Global: %4.6f milseg\n", TiempoTotal);
    cudaEventDestroy(E1);
    cudaEventDestroy(E2); cudaEventDestroy(E3);
    cudaEventDestroy(E4); cudaEventDestroy(E5);
}
// Reassigns every point to its nearest centroid (Euclidean distance).
// Returns true iff at least one point changed cluster, false otherwise.
bool updatePointDistances(){
    bool anyChange = false;
    for (int p = 0; p < PointValues.size(); ++p) {
        double bestDist = 0.0;
        int bestK;
        for (int c = 0; c < KCentroids.size(); ++c) {
            // Squared Euclidean distance from point p to centroid c.
            double sq = 0.0;
            for (int d = 0; d < PointValues[p].size(); ++d) {
                sq += pow(KCentroids[c][d] - PointValues[p][d], 2.0);
            }
            double dist = sqrt(sq);
            // First centroid always wins; afterwards keep the minimum.
            if (c == 0 || bestDist > dist) {
                bestDist = dist;
                bestK = c;
            }
        }
        if (ClusteringValues[p] != bestK) {
            ClusteringValues[p] = bestK;
            anyChange = true;
        }
    }
    return anyChange;
}
// Recomputes each centroid as the mean of the points currently assigned to
// it (per ClusteringValues). Centroids whose cluster is empty keep their
// previous value.
// Fix vs. the original: removed a dead outer `KUpdated` vector that was
// immediately shadowed by the loop-local one and never used.
void updateCentroids(int total_values){
    // updatingK[c] holds the concatenated coordinates of every point
    // currently assigned to cluster c.
    vector<vector<double> > updatingK;
    updatingK.resize(KCentroids.size());
    for (int i = 0; i<ClusteringValues.size(); ++i) {
        vector<double> AddingK;
        for (int j = 0; j<PointValues[i].size(); ++j) {
            AddingK.push_back(PointValues[i][j]);
        }
        for (int j = 0; j<AddingK.size(); ++j) {
            updatingK[ClusteringValues[i]].push_back(AddingK[j]);
        }
    }
    for (int i = 0; i<updatingK.size(); ++i) {
        // Per-dimension sums of cluster i's points.
        vector<double> KUpdated(total_values,0);
        for (int j = 0; j<updatingK[i].size(); ++j) {
            KUpdated[j%total_values] += updatingK[i][j];
        }
        if (updatingK[i].size() > 0) {
            // Divide by the point count (= total coordinates / dimensions).
            for (int j = 0; j<KUpdated.size(); ++j) {
                KUpdated[j] /= (updatingK[i].size()/total_values);
            }
            KCentroids[i] = KUpdated;
        }
    }
}
// Dumps every centroid's coordinates, then every point's coordinates
// together with its assigned cluster, to stdout.
void printClusters() {
    for (int c = 0; c < KCentroids.size(); ++c) {
        cout << "Centroid " << c << ": ";
        for (int d = 0; d < KCentroids[c].size(); ++d) {
            cout << KCentroids[c][d] << " ";
        }
        cout << endl;
    }
    for (int p = 0; p < PointValues.size(); ++p) {
        cout << "Point " << p << ": ";
        for (int d = 0; d < PointValues[p].size(); ++d) {
            cout << PointValues[p][d] << " ";
        }
        cout << "is located on cluster: " << ClusteringValues[p] << endl;
    }
}
// Returns 1 if b differs from a by more than 1e-6 in relative terms, else 0.
// Fix: the original called abs() on floats, which can resolve to the C int
// overload (header-dependent) and truncate sub-1.0 differences to zero;
// fabs() keeps full float precision. The denominator is also fabs(a) so a
// negative reference value cannot make the ratio spuriously negative.
int error(float a, float b) {
    if (fabs(a - b) / fabs(a) > 0.000001) return 1;
    else return 0;
}
// Aborts the process with a diagnostic if the most recent CUDA call left a
// pending error (via cudaGetLastError, which also clears the error state).
// `sms` is a caller-supplied context label; `line` is the call-site line
// number (the message reports __FILE__ of this translation unit).
void CheckCudaError(char sms[], int line) {
    cudaError_t error;
    error = cudaGetLastError();
    if (error) {
        printf("(ERROR) %s - %s in %s at line %d\n", sms, cudaGetErrorString(error), __FILE__, line);
        exit(EXIT_FAILURE);
    }
}
|
2,198
|
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <errno.h>
#include <time.h>
#include <math.h>
#include <float.h>
#include <cuda.h>
const unsigned long WIDTH = 8192;
const unsigned long HEIGHT = 8192;
#define THREADS 32
// Element-wise matrix addition c = a + b over the WIDTH x HEIGHT row-major
// matrices; expects a 2D launch covering the matrix (extra threads exit).
__global__ void add(int* a, int* b, int* c)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    int idy = threadIdx.y + blockIdx.y * blockDim.y;
    // Fix: the guard must be >=, not >. With '>' a thread at idx == WIDTH
    // (or idy == HEIGHT) slipped through and accessed one element past the
    // valid range.
    if (idx >= WIDTH || idy >= HEIGHT) return;
    c[idy * WIDTH + idx] = a[idy * WIDTH + idx] + b[idy * WIDTH + idx];
}
// Returns a nanosecond timestamp for elapsed-time measurement; exits with a
// diagnostic on failure.
// Fix: uses CLOCK_MONOTONIC instead of the bare clock id 0 (CLOCK_REALTIME),
// which can jump on NTP/clock adjustments and skew measured durations.
unsigned long get_time()
{
    struct timespec ts;
    if (clock_gettime(CLOCK_MONOTONIC, &ts) < 0) {
        fprintf(stderr, "Error calc time... %s\n", strerror(errno));
        exit(1);
    }
    return ts.tv_sec * 1000000000L + ts.tv_nsec;
}
// Fills the WIDTH x HEIGHT row-major matrix h_v with the constant `numb`.
void init(int* h_v, int numb) {
    for (int i = 0; i < HEIGHT; i++) {
        for (int j = 0; j < WIDTH; ++j) {
            // Fix: row-major index is i * WIDTH + j. The original used
            // i * HEIGHT + j, which only worked because WIDTH == HEIGHT.
            h_v[i * WIDTH + j] = numb;
        }
    }
}
// Prints the WIDTH x HEIGHT row-major matrix `result` to stderr, one row
// per line. (Note: WIDTH*HEIGHT is 64M entries here — intended for small
// debug configurations only.)
void print_results(const int *result)
{
    fprintf(stderr, "%s\n", "Result...");
    for (int i = 0; i < HEIGHT; ++i) {
        for (int j = 0; j < WIDTH; ++j) {
            // Fix: row-major index is i * WIDTH + j (was i * HEIGHT + j,
            // correct only because WIDTH == HEIGHT in this program).
            fprintf(stderr, " %d ", result[i * WIDTH + j]);
        }
        fprintf(stderr, "%s\n", "");
    }
    fprintf(stderr, "%s\n", "");
}
// Driver: builds two WIDTH x HEIGHT matrices of constants (7 and 2), adds
// them on the GPU with a 2D launch, copies the sum back, and reports the
// total wall time in nanoseconds on stderr.
int main( void ) {
    unsigned long start_ns = get_time();

    const int bytes = WIDTH * HEIGHT * sizeof(int);

    // Host buffers.
    int *host_sum = (int*) malloc( bytes );
    int *host_a   = (int*) malloc( bytes );
    int *host_b   = (int*) malloc( bytes );
    init(host_a, 7);
    init(host_b, 2);

    // Device buffers.
    int *dev_a, *dev_b, *dev_c;
    cudaMalloc( &dev_a, bytes );
    cudaMalloc( &dev_b, bytes );
    cudaMalloc( &dev_c, bytes );

    // Transfer the inputs to device memory; zero the output buffer.
    cudaMemcpy( dev_a, host_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy( dev_b, host_b, bytes, cudaMemcpyHostToDevice);
    cudaMemset(dev_c, 0, bytes);

    // 2D launch covering the whole matrix (ceiling division per axis).
    dim3 th(THREADS, THREADS);
    dim3 blocks((WIDTH + th.x - 1) / th.x , (HEIGHT + th.y - 1) / th.y);
    add<<<blocks, th>>>(dev_a, dev_b, dev_c);

    // Copy the result back to the host (blocking, so no explicit sync needed).
    cudaMemcpy(host_sum, dev_c, bytes, cudaMemcpyDeviceToHost);

    free(host_a), free(host_b), free(host_sum);
    cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c);

    fprintf(stderr, "Time %lu\n", get_time() - start_ns);
    return 0;
}
|
2,199
|
#include <cuda_runtime_api.h>
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <random>
#include <cooperative_groups.h>
/*
WRITE CUDA KERNEL FOR COUNT HERE
*/
// Counts how many times x can be halved (float division) before it drops
// to <= 1 — i.e. ceil(log2(x)) for positive x; non-powers of two still
// terminate because the division is floating-point.
__device__
int log_2(float x){
    int halvings = 0;
    for (; x > 1; x /= 2)
        ++halvings;
    return halvings;
}
// Hillis-Steele style scan over `data`, accumulating into output_data;
// `tmp_data` is accepted but unused. The host code launches this with a
// single 1024-thread block.
// NOTE(review): __syncthreads() is a *block-level* barrier — the original
// "sync threads across all blocks" comment does not hold if gridDim.x > 1,
// so correctness appears to depend on launching exactly one block; verify
// before changing the launch configuration.
__global__
void parallel_implementation(int * data, int vals,int * tmp_data, int * output_data){
    int blocks = gridDim.x;
    // This thread's first element, and the (exclusive) end of this block's slice.
    int start_index = threadIdx.x + (vals*blockIdx.x)/blocks;
    int end_index = (vals*(blockIdx.x+1))/blocks;
    // Number of doubling steps needed to cover `vals` elements.
    int layers = log_2(vals);
    // `data` holds the previous layer; `output_data` the layer being built.
    for(int layer = 0; layer < layers; layer++){
        // Each thread walks its slice in strides of the block size.
        for(int i = start_index;i<end_index;i+=blockDim.x){
            // NOTE(review): pow() is double-precision math used to form an
            // integer offset 2^layer; an integer shift (1 << layer) would
            // avoid any rounding concern.
            int neighbor_i = i-pow(2,layer);
            if(neighbor_i>=0){
                output_data[i]+=data[neighbor_i]; // add neighbour from previous layer
            }
        }
        __syncthreads();
        // Publish this layer: copy current results back into `data`.
        for(int i = start_index;i<end_index;i+=blockDim.x){
            data[i] = output_data[i];
        }
        __syncthreads(); // barrier before the next layer (block-local only)
    }
    __syncthreads();
    // NOTE(review): ad-hoc correction the author labelled "bug removal fix
    // later" — subtracts data[0] at indices 2047, 6143, ...; presumably
    // compensating for a boundary error in the scan. Verify before reuse.
    if(blockIdx.x==0 && threadIdx.x==0)for(int i = 2047; i<vals;i+=4096) output_data[i]-=data[0];
}
// Exclusive prefix sum (scan) of `data`, computed sequentially on the host:
// output[i] = sum of data[0..i-1], output[0] = 0. Returns a malloc'd array
// of `vals` ints; the caller owns the buffer and must free() it.
int * serial_implementation(int * data, int vals) {
    int * prefix = (int *)malloc(sizeof(int) * vals);
    int running = 0;
    for (int i = 0; i < vals; i++) {
        prefix[i] = running;
        running += data[i];
    }
    return prefix;
}
// Driver: fills a vector with deterministic pseudo-random ints, runs the GPU
// scan kernel and a serial reference exclusive scan, times the kernel with
// CUDA events, and reports any element-wise mismatches.
int main(int argc, char ** argv) {
    assert(argc == 2);
    int values = atoi(argv[1]); // Values is guaranteed to be no more than 10000000
    int * data = (int *)malloc(sizeof(int) * values);

    // Generate "random" vector (fixed seed keeps runs deterministic).
    std::mt19937 gen(13);
    std::uniform_int_distribution<> dist(0, 50);
    for (int i = 0; i < values; i++) {
        data[i] = dist(gen);
    }

    cudaStream_t stream;
    cudaEvent_t begin, end;
    cudaStreamCreate(&stream);
    cudaEventCreate(&begin);
    cudaEventCreate(&end);

    // Holds the exclusive scan: index 0 is forced to 0 on the host, the GPU's
    // (inclusive-style) result is copied in starting at index 1.
    int * h_output = (int *)malloc(sizeof(int) * (1+values));

    // Device buffers: input, output, and (unused by the kernel) scratch.
    int * gpu_data;
    int * gpu_output_data;
    int * gpu_tmp_data;
    int array_size = sizeof(int)*values;
    cudaMalloc(&gpu_data, array_size);
    cudaMalloc(&gpu_output_data, array_size);
    cudaMalloc(&gpu_tmp_data, array_size);
    // All three buffers start as copies of the input (the kernel accumulates
    // in place on gpu_output_data while reading layers from gpu_data).
    cudaMemcpy(gpu_data,data,array_size,cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_output_data,data,array_size,cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_tmp_data,data,array_size,cudaMemcpyHostToDevice);

    cudaEventRecord(begin, stream);
    // Single 1024-thread block — the kernel's __syncthreads barriers only
    // hold within one block (see parallel_implementation).
    int THREADS_PER_BLOCK = 1024;//128 ideal
    int BLOCKS = 1;//16 ideal
    dim3 block(THREADS_PER_BLOCK);
    dim3 grid(BLOCKS);
    parallel_implementation<<<grid,block,0,stream>>>(gpu_data,values,gpu_tmp_data,gpu_output_data);
    cudaEventRecord(end, stream);

    // Shift by one to turn the device result into an exclusive scan.
    cudaMemcpy(h_output+1, gpu_output_data, array_size, cudaMemcpyDeviceToHost);
    h_output[0] = 0;
    cudaStreamSynchronize(stream);

    float ms;
    cudaEventElapsedTime(&ms, begin, end);
    printf("Elapsed time: %f ms\n", ms);

    cudaFree(gpu_data);
    cudaFree(gpu_output_data);
    cudaFree(gpu_tmp_data);

    // Compare against the serial reference; mismatches are reported but do
    // not abort the run.
    int * reference_output = serial_implementation(data, values);
    for (int i = 0; i < values; i++) {
        if (reference_output[i] != h_output[i]) {
            printf("ERROR: %d != %d at index %d. Off by %d\n", reference_output[i], h_output[i], i,reference_output[i]- h_output[i]);
            //abort();
        }
    }

    cudaEventDestroy(begin);
    cudaEventDestroy(end);
    cudaStreamDestroy(stream);
    free(data);
    free(reference_output);
    free(h_output);
    return 0;
}
|
2,200
|
// Shifts every track coordinate in x[0..num_tracks) by `distance`.
// Grid-stride loop: any launch configuration covers all elements.
__global__ void move(const int num_tracks, double distance, double* x) {
    const int stride = blockDim.x * gridDim.x;
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    while (idx < num_tracks) {
        x[idx] += distance;
        idx += stride;
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.