serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
21,001 | #include "includes.h"
/*
* file name: mm_omp_vs_cuda.cu
*
* mm_omp_vs_cuda.cu contains the code that realize some common used matrix operations in CUDA, and
* an implementation of matrix multiplication speedup via openmp, this is a practice to compare the
* of performance of cuda and openmp, as well as a trail of using cuda and openmp in the same program
*
* this is a toy program for learning CUDA, some functions are reusable in other project
* note:
* compile: nvcc -Xcompiler \-fopenmp -lgomp mm_omp_vs_cuda.cu
*/
#define BLOCK_SIZE 16
/*
*********************************************************************
function name: gpu_matrix_mult
description: dot product of two matrix (not only square)
parameters:
&a GPU device pointer to a m X n matrix (A)
&b GPU device pointer to a n X k matrix (B)
&c GPU device output purpose pointer to a m X k matrix (C)
to store the result
Note:
grid and block should be configured as:
dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
further sppedup can be obtained by using shared memory to decrease global memory access times
return: none
*********************************************************************
*/
/*
*********************************************************************
function name: cpu_matrix_mult
description: dot product of two matrix (not only square) in CPU,
for validating GPU results
parameters:
&a CPU device pointer to a n X n matrix (A)
&b CPU device pointer to a n X n matrix (B)
&c CPU device output purpose pointer to a n X n matrix (C)
to store the result
Note:
grid and block should be configured as:
dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1);
dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1);
return: none
*********************************************************************
*/
/*
*********************************************************************
function name: gpu_matrix_transpose
description: matrix transpose
parameters:
&mat_in GPU device pointer to a rows X cols matrix
&mat_out GPU device output purpose pointer to a cols X rows matrix
to store the result
Note:
grid and block should be configured as:
dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1);
dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1);
return: none
*********************************************************************
*/
/*
*********************************************************************
function name: cpu_matrix_mult
description: dot product of two matrix (not only square) in CPU,
for validating GPU results
parameters:
&a CPU host pointer to a m X n matrix (A)
&b CPU host pointer to a n X k matrix (B)
&c CPU host output purpose pointer to a m X k matrix (C)
to store the result
return: none
*********************************************************************
*/
__global__ void gpu_matrix_mult(int *a, int *b, int *c, int m, int n, int k)
{
    // One thread computes one element C[row][col] of the m x k product.
    // Expected launch: 2-D grid of BLOCK_SIZE x BLOCK_SIZE blocks covering k x m.
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard the grid tail: the grid is usually larger than the matrix.
    if (row >= m || col >= k)
        return;
    int acc = 0;
    for (int i = 0; i < n; ++i)
        acc += a[row * n + i] * b[i * k + col];
    c[row * k + col] = acc;
}
21,002 | //this program will assume a 98x98x98 grid with 2 cells of zero padding for the E fields
//the padded zeros act as PEC boundaries
//The H fields will be 99x99x99 (offset by half cell, inside the PEC boundary)
#include <vector>
#include <stdio.h>
#include <iostream>
#include <sstream>
#include <string>
#include <fstream>
#include <math.h>
#include <time.h>
#include <sys/time.h>
#define EX_SIZE 100
#define EY_SIZE 100
#define EZ_SIZE 100
#define HX_SIZE 99
#define HY_SIZE 99
#define HZ_SIZE 99
#define DX 1.0
#define DY 1.0
#define DZ 1.0
#define BLOCK 1024
using namespace std;
// __global__ functions are called by the host and invoke a kernel (must be void)
// __device__ functions are called by the device and are local to the gpu (can have a return value)
// int tid = blockIdx.x * blockDim.x + threadIdx.x; is the most common way to keep track of thread id
// each block can have at most 1024 threads where multiple of 32 threads are allocated per block are ideal
// blockIdx.x depends on how many numBlocks are passed
// blockDim.x refers to the size of each block that was assigned
// threadIdx.x can range from 0 to threadsPerBlock that was assigned, can have multiple dimensions
__global__ void InitWall(double* ey, double init, int size) {
    // Write `init` into every slot of the wall slice; one thread per element.
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= size)
        return;  // grid tail guard
    ey[tid] = init;
}
//calculate e/h{i,j,k} for the next time step
//depends on: e/h{i,j,k} for the current time step, hz of adjacent cells, hy of adj. cells,
//the time step, epsilon, and the cell steps
//ended up using this as the general calculation for all E and H components
// Generic field-update step shared by all E and H components:
//   new = old + (dt / perm) * ((hzp - hzn)/d1 - (hyp - hyn)/d2)
// i.e. a finite-difference curl term scaled by the time step and the
// material constant (mu or eps), added onto the previous value `exn`.
__device__ double Calc(double exn, double hzp, double hzn, double hyp, double hyn, double d1, double d2, double perm, double dt) {
    const double curl1 = (hzp - hzn) / d1;
    const double curl2 = (hyp - hyn) / d2;
    return dt * (curl1 - curl2) / perm + exn;
}
// conversion for the 1D array
// Convert a flat E-grid index to the flat H-grid index of the same (i,j,k)
// cell: decompose with E strides, re-flatten with H strides.
__device__ int E2H(int index) {
    const int i   = index / (EY_SIZE * EZ_SIZE);
    const int rem = index % (EY_SIZE * EZ_SIZE);
    const int j   = rem / EZ_SIZE;
    const int k   = rem % EZ_SIZE;
    return (i * HY_SIZE + j) * HZ_SIZE + k;
}
// conversion for the 1D array
// Convert a flat H-grid index to the flat E-grid index of the same (i,j,k)
// cell: decompose with H strides, re-flatten with E strides.
__device__ int H2E(int index) {
    const int i   = index / (HY_SIZE * HZ_SIZE);
    const int rem = index % (HY_SIZE * HZ_SIZE);
    const int j   = rem / HZ_SIZE;
    const int k   = rem % HZ_SIZE;
    return (i * EY_SIZE + j) * EZ_SIZE + k;
}
// Advance Hx one time step: differences of Ey along z (stride 1) and
// Ez along y (stride EZ_SIZE), fed through the shared Calc() update.
__global__ void Set_H_X(double* hx, double* ey, double* ez, double mu, int size, double dt) {
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= size)
        return;  // ignore threads past the H-grid
    const int edex = H2E(tid);  // matching E-grid cell
    hx[tid] = Calc(hx[tid],
                   ey[edex + 1], ey[edex],
                   ez[edex + EZ_SIZE], ez[edex],
                   DZ, DY, mu, dt);
}
// Advance Hy one time step: differences of Ez along x (stride EY*EZ) and
// Ex along z (stride 1), fed through the shared Calc() update.
__global__ void Set_H_Y(double* hy, double* ez, double* ex, double mu, int size, double dt) {
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= size)
        return;  // ignore threads past the H-grid
    const int x_stride = EY_SIZE * EZ_SIZE;
    const int edex = H2E(tid);  // matching E-grid cell
    hy[tid] = Calc(hy[tid],
                   ez[edex + x_stride], ez[edex],
                   ex[edex + 1], ex[edex],
                   DX, DZ, mu, dt);
}
// Advance Hz one time step: differences of Ex along y (stride EZ_SIZE) and
// Ey along x (stride EY*EZ), fed through the shared Calc() update.
__global__ void Set_H_Z(double* hz, double* ex, double* ey, double mu, int size, double dt) {
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= size)
        return;  // ignore threads past the H-grid
    const int x_stride = EY_SIZE * EZ_SIZE;
    const int y_stride = EZ_SIZE;
    const int edex = H2E(tid);  // matching E-grid cell
    hz[tid] = Calc(hz[tid],
                   ex[edex + y_stride], ex[edex],
                   ey[edex + x_stride], ey[edex],
                   DY, DX, mu, dt);
}
// Advance Ex one time step on interior cells only (inner_indices excludes
// the PEC boundary layer): backward differences of Hz along y and Hy along z.
// NOTE(review): the step sizes passed are (DY, DX) while the Hy difference is
// along z — looks inconsistent with Set_E_Y/Set_E_Z; preserved as-is, confirm.
__global__ void Set_E_X(double* ex, double* hz, double* hy, double eps, int size, double dt, int* inner_indices) {
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= size)
        return;  // respect the interior-cell count
    const int index = inner_indices[tid];  // flat E-grid index of this interior cell
    const int hdex  = E2H(index);          // matching H-grid cell
    const int y_stride = HZ_SIZE;
    ex[index] = Calc(ex[index],
                     hz[hdex], hz[hdex - y_stride],
                     hy[hdex], hy[hdex - 1],
                     DY, DX, eps, dt);
}
// Advance Ey one time step on interior cells only: backward differences of
// Hx along z (stride 1) and Hz along x (stride HY*HZ).
__global__ void Set_E_Y(double* ey, double* hx, double* hz, double eps, int size, double dt, int* inner_indices) {
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= size)
        return;  // respect the interior-cell count
    const int index = inner_indices[tid];  // flat E-grid index of this interior cell
    const int hdex  = E2H(index);          // matching H-grid cell
    const int x_stride = HY_SIZE * HZ_SIZE;
    ey[index] = Calc(ey[index],
                     hx[hdex], hx[hdex - 1],
                     hz[hdex], hz[hdex - x_stride],
                     DZ, DX, eps, dt);
}
// Advance Ez one time step on interior cells only: backward differences of
// Hy along x (stride HY*HZ) and Hx along y (stride HZ_SIZE).
__global__ void Set_E_Z(double* ez, double* hy, double* hx, double eps, int size, double dt, int* inner_indices) {
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= size)
        return;  // respect the interior-cell count
    const int index = inner_indices[tid];  // flat E-grid index of this interior cell
    const int hdex  = E2H(index);          // matching H-grid cell
    const int x_stride = HY_SIZE * HZ_SIZE;
    const int y_stride = HZ_SIZE;
    ez[index] = Calc(ez[index],
                     hy[hdex], hy[hdex - x_stride],
                     hx[hdex], hx[hdex - y_stride],
                     DX, DY, eps, dt);
}
// Used for time keeping independent of the clock
// Wall-clock time in seconds via gettimeofday, independent of the CPU clock.
// Returns 0 if the syscall fails.
double get_wall_time(){
    struct timeval tv;
    if (gettimeofday(&tv, NULL) != 0)
        return 0;  // error: caller gets a zero timestamp
    return (double)tv.tv_sec + (double)tv.tv_usec * .000001;
}
// This is source term
// the argument is time value
//
// Gaussian excitation pulse: peaks at 1.0 when t = 5e-7 s, with a width
// set by the 1e-15 denominator. Used to drive the injected plane wave.
double source(double t) {
    const double shifted = t - 5e-7;
    return exp(-(shifted * shifted) / 1e-15);
}
//use existing procedures for all calculations
//the various calc_enijk procedures are the exact same math, we will just use one
//depends on: desired quantity, Ex array, Ey array, Ez array, Hx array, Hy array,
//Hz array (all of which are pointers), dx, dy, dz, i, j, k
//type values: 0, 1, 2, 3, 4, 5 = ex, ey, ez, hx, hy, hz
// double calc_int(int type, double ex[][ny][nz], double ey[][ny][nz], double ez[][ny][nz],
// double hx[][ny-1][nz-1], double hy[][ny-1][nz-1], double hz[][ny-1][nz-1],
// double dx, double dy, double dz,
// double dt, int i, int j, int k)
//function to calculate the magnitude of a 3-vector
//used to write out results, not part of simulation
// Euclidean norm of the 3-vector (x, y, z).
// Only used when writing results out, not in the simulation itself.
double magn(double x, double y, double z) {
    return sqrt(x * x + y * y + z * z);
}
// frunction to write out the magnitude of the E-field to a file
// Append one tab-separated line to `f`: the time, |E| sampled every `stride`
// cells along x at row `ind` (middle z slice), then the indices used.
int write_to(ofstream& f, double t, int ind, int stride, double* ex, double* ey, double* ez) {
    f << t;
    const int z_mid = EZ_SIZE / 2 - 1;  // middle z index for the 100-cell grid
    int i = 0;
    for (; i < EX_SIZE; i += stride) {
        const int index = ind * EY_SIZE * EZ_SIZE + i * EZ_SIZE + z_mid;
        f << "\t" << magn(ex[index], ey[index], ez[index]);
    }
    f << "\t" << ind << "\t" << i << "\t" << z_mid;
    f << endl;
    return 0;
}
// primary simulation chunk
// Primary simulation driver: allocates host and device field arrays, builds
// the interior-index table, then runs the FDTD time loop (H update, then E
// update), dumping e-field slices and probe values to files every 10 steps.
int main() {
double eps = 8.85e-12;  // vacuum permittivity (F/m)
double mu = 1.257e-6;   // vacuum permeability (H/m)
double dt = 1e-9;       // time step (s); see courant note below
double tf = 1e-6;       // final time -> 1000 steps at dt = 1e-9
double t = 0.0;
int out_index = 0;      // step counter driving the every-10-steps output
double *ex, *ey, *ez, *hx, *hy, *hz;
int *inner_indices;
// Flat array sizes: full E grid, full H grid, E interior (borders excluded),
// and one x-slice of the E grid (the source wall).
int e_size = EX_SIZE*EY_SIZE*EZ_SIZE;
int h_size = HX_SIZE*HY_SIZE*HZ_SIZE;
int i_size = (EX_SIZE-2)*(EY_SIZE-2)*(EZ_SIZE-2);
int s_size = EY_SIZE*EZ_SIZE;
ex = (double *)malloc((e_size)*sizeof(double));
ey = (double *)malloc((e_size)*sizeof(double));
ez = (double *)malloc((e_size)*sizeof(double));
hx = (double *)malloc((h_size)*sizeof(double));
hy = (double *)malloc((h_size)*sizeof(double));
hz = (double *)malloc((h_size)*sizeof(double));
inner_indices = (int *)malloc((i_size)*sizeof(int));
// initialize to zero
for (int i = 0; i < e_size; i++) {
ex[i] = 0.0;
ey[i] = 0.0;
ez[i] = 0.0;
}
for (int i = 0; i < h_size; i++) {
hx[i] = 0.0;
hy[i] = 0.0;
hz[i] = 0.0;
}
// cuda variables
double *d_ex, *d_ey, *d_ez, *d_hx, *d_hy, *d_hz;
int *d_inner;
// allocate memory for the cuda variables
// NOTE(review): none of the cudaMalloc/cudaMemcpy/kernel calls below check
// for errors; failures would surface only as wrong output.
cudaMalloc((void **)&d_ex, sizeof(double) * (e_size));
cudaMalloc((void **)&d_ey, sizeof(double) * (e_size));
cudaMalloc((void **)&d_ez, sizeof(double) * (e_size));
cudaMalloc((void **)&d_hx, sizeof(double) * (h_size));
cudaMalloc((void **)&d_hy, sizeof(double) * (h_size));
cudaMalloc((void **)&d_hz, sizeof(double) * (h_size));
cudaMalloc((void **)&d_inner, sizeof(int) * (i_size));
// copy memory from host to device
cudaMemcpy(d_ex, ex, sizeof(double) * (e_size), cudaMemcpyHostToDevice);
cudaMemcpy(d_ey, ey, sizeof(double) * (e_size), cudaMemcpyHostToDevice);
cudaMemcpy(d_ez, ez, sizeof(double) * (e_size), cudaMemcpyHostToDevice);
cudaMemcpy(d_hx, hx, sizeof(double) * (h_size), cudaMemcpyHostToDevice);
cudaMemcpy(d_hy, hy, sizeof(double) * (h_size), cudaMemcpyHostToDevice);
cudaMemcpy(d_hz, hz, sizeof(double) * (h_size), cudaMemcpyHostToDevice);
// cout << "middle element is: " << ex[49][49][49] << endl;
//the courant condition for 1 meter is 1.9e-9
//final time be 1e-6 (for 1000 time steps)
// One output file per x-slice written by write_to (slices 0,10,...,HY_SIZE).
ofstream outFiles[11];
stringstream fname;
for (int it = 0; it < 11; it++) {
fname.str("");
fname << "paraOut/output" << it << ".txt";
outFiles[it].open(fname.str());
};
int outind;
ofstream probef;
probef.open("paraOut/test.txt");
ofstream probef2;
probef2.open("paraOut/test_h.txt");
double difference, w_start, w_finish;
w_start = get_wall_time();
// Ceil-style launch sizing: one extra block covers the grid tail.
int numBlocksH = h_size/BLOCK+1; // set the numblock size to at least one
int numBlocksI = i_size/BLOCK+1;
int numBlocksS = s_size/BLOCK+1;
dim3 threadsPerBlock(BLOCK, 1); // Max one dimensional block
int count = 0;
// keep track of the inner indices to respect the boundaries of the E-field
// (flat E-grid indices of all cells with i,j,k in [1, 98]).
for (int i = 1; i < HX_SIZE; i++) {
for (int j = 1; j < HY_SIZE; j++) {
for (int k = 1; k < HZ_SIZE; k++) {
inner_indices[count] = i*EY_SIZE*EZ_SIZE+j*EZ_SIZE+k;
count++;
}
}
}
cudaMemcpy(d_inner, inner_indices, sizeof(int) * (i_size), cudaMemcpyHostToDevice);
// Main time loop: inject source, (every 10th step) dump fields, then update
// all H components followed by all E components.
while (t<tf) {
cout << "t = " <<t <<endl;
// set the source value for the incoming plane wave at x boundary
double ey_init = source(t);
InitWall<<<numBlocksS,threadsPerBlock>>>(d_ey, ey_init, s_size);
// Every tenth time step, write out slices of e-field values to a set of files
if (!(out_index%10)) {
cudaMemcpy(ex, d_ex, sizeof(double) * e_size, cudaMemcpyDeviceToHost);
cudaMemcpy(ey, d_ey, sizeof(double) * e_size, cudaMemcpyDeviceToHost);
cudaMemcpy(ez, d_ez, sizeof(double) * e_size, cudaMemcpyDeviceToHost);
cudaMemcpy(hy, d_hy, sizeof(double) * h_size, cudaMemcpyDeviceToHost);
for (int fn = 0; fn < 11; fn++) {
outind = fn*10;
if (outind > HY_SIZE) {
outind = HY_SIZE;
}
write_to(outFiles[fn], t, outind, 10, ex, ey, ez);
}
probef << t;
// write to a couple of debug probes placed in the center of the box
for (int y = 45; y < 55; y+=1) {
int ex_index = 49*EY_SIZE*EZ_SIZE+49*EZ_SIZE+y;
int hy_index = y*HY_SIZE*HZ_SIZE+49*HZ_SIZE+49;
probef << "\t" << ex[ex_index];
probef2 << "\t" << hy[hy_index];
};
probef << endl;
probef2 << endl;
};
Set_H_X<<<numBlocksH, threadsPerBlock>>>(d_hx, d_ey, d_ez, mu, h_size, dt);
Set_H_Y<<<numBlocksH, threadsPerBlock>>>(d_hy, d_ez, d_ex, mu, h_size, dt);
Set_H_Z<<<numBlocksH, threadsPerBlock>>>(d_hz, d_ex, d_ey, mu, h_size, dt);
cudaDeviceSynchronize(); // waits for kernels to return before continuing on the CPU
Set_E_X<<<numBlocksI, threadsPerBlock>>>(d_ex, d_hz, d_hy, eps, i_size, dt, d_inner);
Set_E_Y<<<numBlocksI, threadsPerBlock>>>(d_ey, d_hx, d_hz, eps, i_size, dt, d_inner);
Set_E_Z<<<numBlocksI, threadsPerBlock>>>(d_ez, d_hy, d_hx, eps, i_size, dt, d_inner);
cudaDeviceSynchronize();
t += dt; // time step counter
out_index += 1; // printing counter
}
w_finish = get_wall_time();
difference = w_finish - w_start;
cout << "Parallel: " << difference << " seconds\n";
// probe clean up
probef.flush();
probef.close();
probef2.flush();
probef2.close();
for (int it = 0; it < 11; it++) {
outFiles[it].flush();
outFiles[it].close();
};
// memory clean up
free(ex);
free(ey);
free(ez);
free(hx);
free(hy);
free(hz);
free(inner_indices);
cudaFree(d_ex);
cudaFree(d_ey);
cudaFree(d_ez);
cudaFree(d_hx);
cudaFree(d_hy);
cudaFree(d_hz);
cudaFree(d_inner);
return 0;
}
|
// Element-wise vector addition: c[i] = a[i] + b[i] for i in [0, taille).
__global__ void somme( int taille, float * a, float * b, float *c ){
    const int i = threadIdx.x + blockDim.x * blockIdx.x;
    if (i < taille)
        c[i] = a[i] + b[i];
}
// Scalar multiply: c[i] = a[i] * b for i in [0, taille).
__global__ void prod( int taille, float * a, float b, float *c ){
    const int i = threadIdx.x + blockDim.x * blockIdx.x;
    if (i < taille)
        c[i] = a[i] * b;
}
21,004 | #pragma region License
/*
The MIT License
Copyright (c) 2009 Sky Morey
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#pragma endregion
/*
//http://supercomputingblog.com/cuda/cuda-tutorial-1-getting-started/
//http://people.eku.edu/ritchisong/301notes2.htm
typedef struct {
int state;
} Element0;
__global__ void In(char** lookup)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
Element0* element0 = (Element0* )lookup[0][x];
}
__global__ void Box(char** lookup)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
Element0* element0 = (Element0* )lookup[0][x];
int s = element0->state;
}
*/ |
21,005 |
// Convert one RGB pixel (0-255 per channel) to HSV, each output in [0, 1].
// Adapted and simplified from https://github.com/jakebesworth/Simple-Color-Conversions
__device__ void rgb_hsv_single(unsigned char rc, unsigned char gc, unsigned char bc, float *h, float *s, float *v)
{
    float min, max, delta;
    float r, g, b;
    // FIX: use float literals throughout. The original double literals
    // (255.0, 0.0001, ...) promoted every expression to double arithmetic,
    // which is needlessly slow in a float device function.
    r = (float) rc / 255.0f;
    g = (float) gc / 255.0f;
    b = (float) bc / 255.0f;
    min = r < g ? r : g;
    min = min < b ? min : b;
    max = r > g ? r : g;
    max = max > b ? max : b;
    delta = max - min;
    *v = max;  // value = brightest channel
    // Guard against division by zero on (near-)black pixels.
    *s = max < 0.0001f ? 0.0f : delta / max;
    // Hue: choose the sextant from which channel is max/min, then scale to [0,1].
    if(*s < 0.001f) *h = 0.0f;  // greyscale: hue undefined, report 0
    else if(r == max) *h = g == min ? 5.0f + (max - b) / delta : 1.0f - (max - g) / delta;
    else if(g == max) *h = b == min ? 1.0f + (max - r) / delta : 3.0f - (max - b) / delta;
    else if(b == max && r == min) *h = 3.0f + (max - g) / delta;
    else *h = 5.0f - (max - r) / delta;
    *h /= 6.0f;
    *h = *h < 1.0f ? *h : 1.0f;  // clamp to [0, 1]
}
// Convert an interleaved RGB image to interleaved HSV; one thread per pixel.
// Expected launch: 2-D grid/blocks covering width x height.
__global__ void rgb_hsv(unsigned char *img, float *result, int width, int height)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= width || row >= height)
        return;  // grid tail guard
    const int base = (col + row * width) * 3;  // 3 channels per pixel
    rgb_hsv_single(img[base], img[base + 1], img[base + 2],
                   &result[base], &result[base + 1], &result[base + 2]);
}
|
21,006 | #include "includes.h"
// One thread per match: evalue = K * dbSize * queryLength * exp(-lambda * score).
// queryIDArray holds absolute query ids; q_begin rebases them into
// queryLengthArray (which presumably starts at query q_begin — confirm caller).
__global__ void calculateEvalue( const int q_begin, const int matchSize, const double totalDatabaseSize, const double K, const double lambda, const int* queryLengthArray, const int* queryIDArray, const int* scoreArray, double* evalueArray) {
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= matchSize)
        return;  // grid tail guard
    const int queryLength = queryLengthArray[queryIDArray[idx] - q_begin];
    const int score = scoreArray[idx];
    evalueArray[idx] = K * totalDatabaseSize * queryLength * exp(-lambda * score);
}
21,007 | #include "includes.h"
// Second component (.y) of a float2 — named for the imaginary part of a
// complex number stored as (re, im) in a float2.
__device__ __forceinline__ float imag(const float2& val)
{
    return val.y;
}
// Fill a w x h float image with `value`; one thread per pixel.
// (cudaMemset cannot fill non-zero float values, hence this kernel.)
__global__ void MemsetKernel(const float value, int w, int h, float *image)
{
    const int row = threadIdx.y + blockDim.y * blockIdx.y;
    const int col = threadIdx.x + blockDim.x * blockIdx.x;
    if (row < h && col < w)
        image[row * w + col] = value;
}
21,008 | #include <math.h>
#include <stdio.h>
static const int blockSize = 1024;
static const int gridSize = 24; //this number is hardware-dependent; usually #SM*2 is a good number.
// Integer power of *number by repeated multiplication.
// degree == 0 returns 1; negative degrees return the reciprocal of the
// positive power (divides by zero -> inf when *number == 0).
__device__ float myPower(float* number, int degree) {
    if (degree == 0)
        return 1.0;
    const int negative = (degree < 0);
    if (negative)
        degree = -degree;
    float result = 1.0;
    while (degree-- > 0)
        result *= (*number);
    return negative ? 1 / result : result;
}
// Evaluate the polynomial sum_{p=0}^{degree} coefficients[p] * (*x)^p,
// term by term via myPower.
__device__ float function(float* x, float* coefficients, unsigned int polynomialDegree) {
    float total = 0;
    for (unsigned int p = 0; p <= polynomialDegree; p++)
        total += coefficients[p] * myPower(x, p);
    return total;
}
// CPU-side twin of the device `function`: evaluates
// sum_{p=0}^{degree} coefficients[p] * x^p using libm pow.
__host__ float functionHost(float x, float* coefficients, unsigned int polynomialDegree) {
    float total = 0;
    for (unsigned int p = 0; p <= polynomialDegree; p++)
        total += coefficients[p] * pow(x, p);
    return total;
}
// Evaluate the polynomial at every sample point: y[i] = f(x[i]), i < N.
__global__ void numericalIntegrationArray(float* coefficients, unsigned int polynomialDegree, float* xArray_device, float* yArray_device, int N) {
    const int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i >= N)
        return;  // grid tail guard
    yArray_device[i] = function(&xArray_device[i], coefficients, polynomialDegree);
}
// Grid-stride partial sum of gArr into gOut[blockIdx.x] using a shared-memory
// tree reduction. Intended usage: launch once with gridSize blocks, then once
// with a single block over the partials (see sumArray).
__global__ void sumCommMultiBlock(const float *gArr, float arraySize, float *gOut) {
    int thIdx = threadIdx.x;
    int gthIdx = thIdx + blockIdx.x*blockSize;
    const int gridSize = blockSize*gridDim.x;
    // BUG FIX: the accumulator was declared `int`, silently truncating every
    // float element to an integer before summing. It must be a float.
    float sum = 0;
    for (int i = gthIdx; i < arraySize; i += gridSize)
        sum += gArr[i];
    __shared__ float shArr[blockSize];
    shArr[thIdx] = sum;
    __syncthreads();  // all partials written before the tree reduction reads them
    for (int size = blockSize/2; size>0; size/=2) { //uniform
        if (thIdx<size)
            shArr[thIdx] += shArr[thIdx+size];
        __syncthreads();  // barrier outside the divergent if: whole block reaches it
    }
    if (thIdx == 0)
        gOut[blockIdx.x] = shArr[0];
}
// Sum a host array on the GPU: copy up, reduce to gridSize partials, then
// fold the partials with a single block, and copy the scalar result back.
__host__ float sumArray(float* arr, int numberOfPoints) {
    float *dev_arr;
    float *dev_out;
    cudaMalloc((void**)&dev_arr, numberOfPoints * sizeof(float));
    cudaMemcpy(dev_arr, arr, numberOfPoints * sizeof(float), cudaMemcpyHostToDevice);
    cudaMalloc((void**)&dev_out, sizeof(float) * gridSize);
    // Pass 1: dev_out holds gridSize partial sums.
    sumCommMultiBlock<<<gridSize, blockSize>>>(dev_arr, numberOfPoints, dev_out);
    // Pass 2: one block folds the partials; dev_out[0] holds the total.
    sumCommMultiBlock<<<1, blockSize>>>(dev_out, gridSize, dev_out);
    cudaDeviceSynchronize();
    float out;
    cudaMemcpy(&out, dev_out, sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(dev_arr);
    cudaFree(dev_out);
    return out;
}
// Rectangle-rule integration of a polynomial over [low, high) with step
// `precision`, evaluated on the GPU with nThx threads per block.
// *result receives the integral; returns 0.
int GPU_Integration(float* coefficients, unsigned int polynomialDegree, float low, float high, float precision, float *result, int nThx) {
    int numberOfPoints = (int) (high-low) / precision;
    int sizeOfArray = sizeof(float)*numberOfPoints;   // byte count, NOT element count
    float *array, *xArray_device, *yArray_device;
    cudaEvent_t start, stop;
    // Sample points: low, low+precision, ...
    array = (float*) malloc(sizeOfArray);
    for(int i = 0; i < numberOfPoints; i++) {
        array[i] = low+i*precision;
    }
    cudaMalloc((void**)&xArray_device, sizeOfArray);
    cudaMemcpy(xArray_device, array, sizeOfArray,cudaMemcpyHostToDevice);
    cudaMalloc((void**)&yArray_device, sizeOfArray);
    float* coefficients_d;
    cudaMalloc((void**) &coefficients_d, sizeof(float)*(polynomialDegree+1));
    cudaMemcpy(coefficients_d, coefficients, sizeof(float)*(polynomialDegree+1), cudaMemcpyHostToDevice);
    int nBLK = (int)(numberOfPoints+nThx-1)/nThx;  // ceil-div launch sizing
    printf(" GPU integral with parameter : \n");
    printf(" Number of blocks: %d\n", nBLK);
    printf(" Number of thread per block: %d\n", nThx);
    printf(" Precision of integral calculation %f\n", precision);
    float time;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    // BUG FIX: the kernel's element count was passed as sizeOfArray (bytes,
    // 4x too large), which disabled the bounds guard and let tail threads
    // write past yArray_device. Pass the element count instead.
    numericalIntegrationArray<<<nBLK,nThx>>>(coefficients_d,polynomialDegree,xArray_device,yArray_device,numberOfPoints);
    cudaDeviceSynchronize();
    printf("sizeofArray = %d", sizeOfArray);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf(" GPU time is %f ms\n", time);
    cudaMemcpy(array, yArray_device, sizeOfArray, cudaMemcpyDeviceToHost);
    // FIX: destroy the timing events (they were leaked on every call).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(coefficients_d);
    cudaFree(xArray_device);
    cudaFree(yArray_device);
    // Riemann sum: sum of f(x_i) scaled by the step width.
    *result = sumArray(array,numberOfPoints);
    *result *= precision;
    free(array);
    return 0;
}
// Driver: integrate two test polynomials over [0, 4] at two precisions and
// two block sizes each, printing every result. Output order and format are
// unchanged from the original unrolled call sequence.
int main(void) {
    const unsigned int polynomial1Size = 2;
    const unsigned int polynomial2Size = 5;
    float polynomial1[3] = {1.25, 2.5, 1.0};
    float polynomial2[6] = {3.1, 2.5, 1.3, 10.1, 54.0, 1.25};
    float lowData = 0.0;
    float highData = 4.0;
    const float precisions[2] = {0.001, 0.0001};
    const int threadCounts[2] = {128, 256};
    float result = 0.0;
    // Polynomial 1: (prec1,128), (prec1,256), (prec2,128), (prec2,256)
    for (int p = 0; p < 2; p++) {
        for (int t = 0; t < 2; t++) {
            printf("Function 1:");
            GPU_Integration(polynomial1, polynomial1Size, lowData, highData, precisions[p], &result, threadCounts[t]);
            printf(" Result %f\n", result);
        }
    }
    // Polynomial 2: same sweep
    for (int p = 0; p < 2; p++) {
        for (int t = 0; t < 2; t++) {
            printf("Function 2:");
            GPU_Integration(polynomial2, polynomial2Size, lowData, highData, precisions[p], &result, threadCounts[t]);
            printf(" Result %f\n", result);
        }
    }
}
|
21,009 | /*
#v1
Ideia: Transformar as matrizes em transpostas para nao precisar fazer ler dois ponteiros, apenas usar o deslocamento
Resultado: Aumento de performance. Tempo 1/8 vezes o anterior #8.2 -> 1.1
#v2
Ideia: Transformar matriz em vetor para preparar para CUDA
Resultado: Perda de desempenho. Tempo 2.4 vezes o anterior #1.1 -> 2.4
#v2.1
Ideia: otimizar o codigo antes do CUDA procurando por calculos repetidos e os atribuindo a auxiliares
Resultado: Ganho de desempenho. Tempo 10/15 vezes o anterior #2.4 -> 1.55
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define NTRANS 0
#define TRANS 1
// Row-major integer matrix: n rows, m columns, elements in the flat buffer
// `cont` where element (i, j) lives at cont[i * m + j]. Some call sites
// deliberately store a matrix transposed in this layout (see lerMatriz).
struct _matriz
{
int n;
int m;
int *cont;
}; typedef struct _matriz Matriz;
/* Allocate an n x m matrix with uninitialized contents.
 * Returns NULL on allocation failure instead of leaving a half-built object. */
Matriz *criarMatriz(int n, int m)
{
    Matriz *mat = (Matriz*) malloc(sizeof(Matriz));
    if (mat == NULL)
        return NULL;
    mat->n = n;
    mat->m = m;
    /* BUG FIX: the element buffer was sized with sizeof(int*) instead of
     * sizeof(int), doubling the allocation on 64-bit platforms. */
    mat->cont = (int*) malloc((size_t)n * m * sizeof(int));
    if (mat->cont == NULL)
    {
        free(mat);
        return NULL;
    }
    return mat;
}
/* Release a matrix created by criarMatriz: element buffer first, then the
 * struct itself. */
void liberarMatriz(Matriz *m)
{
    free(m->cont);   /* contents */
    free(m);         /* header   */
}
/* Build an n x m matrix filled with pseudo-random values in [0, 100),
 * consuming rand() in row-major order. */
Matriz *gerarMatriz(int n, int m)
{
    Matriz *mat = criarMatriz(n, m);
    const int total = n * m;
    for (int idx = 0; idx < total; idx++)
        mat->cont[idx] = rand() % 100;
    return mat;
}
/* Print the matrix to stdout, one row per line, values separated by spaces. */
void printarMatriz(Matriz *mat)
{
    const int cols = mat->m;
    for (int i = 0; i < mat->n; i++)
    {
        for (int j = 0; j < cols; j++)
            printf("%d ", mat->cont[i * cols + j]);
        printf("\n");
    }
}
/*
 * c = a * B, where the operand `b` holds B TRANSPOSED (see the TRANS flag in
 * lerMatriz and the swapped dimensions in lerInput): b->cont[j * b->m + k]
 * is element (k, j) of the logical B. Storing B transposed makes both inner
 * accesses sequential in memory, which is the point of this version (#v1).
 * Assumes c was created as a->n x b->n and a->m == b->m — TODO confirm callers.
 */
void multiplicarMatrizes(Matriz *a, Matriz *b, Matriz *c)
{
int aux;
for(int i = 0; i < a->n; i++)
for(int j = 0; j < b->n; j++)
{
aux = i * c->m + j;  /* flat index of c[i][j], hoisted out of the k loop */
c->cont[aux] = 0;
for(int k = 0; k < b->m; k++)
c->cont[aux] += a->cont[i * a->m + k] * b->cont[j * b->m + k];
}
}
/*
 * Read an n x m matrix of ints from the file `nome`. With trans nonzero the
 * matrix is stored transposed (as m x n) so multiplication can stream it.
 * Returns NULL when the file cannot be opened (the original dereferenced the
 * NULL FILE* and crashed).
 */
Matriz *lerMatriz(char *nome, int n, int m, short int trans)
{
    FILE *f = fopen(nome, "r");
    if (f == NULL)
        return NULL;  /* missing or unreadable input file */
    Matriz *mat;
    if (trans)
    {
        mat = criarMatriz(m, n);
        for (int i = 0; i < n; i++)
            for (int j = 0; j < m; j++)
                fscanf(f, " %d", &(mat->cont[j * n + i]));
    }
    else
    {
        mat = criarMatriz(n, m);
        for (int i = 0; i < n; i++)
            for (int j = 0; j < m; j++)
                fscanf(f, " %d", &(mat->cont[i * m + j]));
    }
    fclose(f);
    return mat;
}
/*
 * Write `mat` to a file named "<seq>-<rows>x<cols>.txt"; the static counter
 * gives each saved matrix a distinct sequence number across calls.
 * With trans set, the matrix is stored transposed in memory and is written
 * back in its logical (untransposed) orientation.
 */
void salvarMatriz(Matriz *mat, short int trans)
{
static int i = 0;  /* file sequence number; shadowed by the loop `i` below */
char nome[100];
if(trans) sprintf(nome, "%d-%dx%d.txt", i, mat->m, mat->n);
else sprintf(nome, "%d-%dx%d.txt", i, mat->n, mat->m);
FILE *f = fopen(nome, "w");
if(trans)
for(int i = 0; i < mat->m; i++)
{
for(int j = 0; j < mat->n; j++)
fprintf(f, "%d ", mat->cont[j * mat->m + i]);
fprintf(f, "\n");
}
else
for(int i = 0; i < mat->n; i++)
{
for(int j = 0; j < mat->m; j++)
fprintf(f, "%d ", mat->cont[i * mat->m + j]);
fprintf(f, "\n");
}
fclose(f);
i++;  /* bump the outer static counter for the next file */
}
// Parsed command line: operands A and B (B stored transposed), the result
// buffer C, and whether generated inputs should be written to disk.
struct _input
{
Matriz *a;
Matriz *b;
Matriz *c;
short int salvar;
}; typedef struct _input Input;
/*
 * Parse argv into an Input. Usage:
 *   prog g n1 m1 n2 m2 [s]          generate random matrices ('s' = save them)
 *   prog f n1 m1 n2 m2 fileA fileB  read matrices from files
 * B is always stored transposed. Returns NULL on any error.
 */
Input *lerInput(int argc, char **argv)
{
    if (argc < 6)
    {
        printf("Argumentos invalidos!\n");
        return NULL;
    }
    int n1, m1, n2, m2;
    char op = argv[1][0];
    sscanf(argv[2], " %d", &n1);
    sscanf(argv[3], " %d", &m1);
    sscanf(argv[4], " %d", &n2);
    sscanf(argv[5], " %d", &m2);
    if (m1 != n2)
    {
        printf("Matrizes Incompativeis!\n");
        return NULL;
    }
    Input *i = (Input *) malloc(sizeof(Input));
    i->salvar = 0;
    Matriz *a, *b;
    switch (op)
    {
    case 'g':
        srand(time(NULL));
        a = gerarMatriz(n1, m1);
        b = gerarMatriz(m2, n2); /* dimensions swapped: B is stored transposed */
        if (argc == 7 && argv[6][0] == 's')
            i->salvar = 1;
        break;
    case 'f':
        /* BUG FIX: the original read argv[6] and argv[7] without checking
         * they exist, walking past argv on short command lines. */
        if (argc < 8)
        {
            free(i);
            printf("Argumentos invalidos!\n");
            return NULL;
        }
        a = lerMatriz(argv[6], n1, m1, NTRANS);
        b = lerMatriz(argv[7], n2, m2, TRANS);
        break;
    default:
        /* BUG FIX: the original returned 0 here (from an Input* function)
         * and leaked the freshly allocated Input. */
        free(i);
        return NULL;
    }
    i->a = a;
    i->b = b;
    i->c = criarMatriz(n1, m2);
    return i;
}
/* Run the multiplication on input i and return the elapsed CPU seconds. */
double medirTempoExecMul(Input *i)
{
    const clock_t inicio = clock();
    multiplicarMatrizes(i->a, i->b, i->c);
    const clock_t decorrido = clock() - inicio;
    return (double) decorrido / CLOCKS_PER_SEC;
}
/* Save matrices (A and B only when requested, C always — B written back
 * untransposed), then free everything including the Input itself. */
void salvarELiberarMatrizes(Input *i)
{
    Matriz *mats[3] = { i->a, i->b, i->c };
    const short int modo[3] = { NTRANS, TRANS, NTRANS };
    for (int k = 0; k < 3; k++)
        if (k == 2 || i->salvar)   /* C is always written; A/B only on request */
            salvarMatriz(mats[k], modo[k]);
    for (int k = 0; k < 3; k++)
        liberarMatriz(mats[k]);
    free(i);
}
/*
 * Parse input, time the multiplication, save/free everything.
 * BUG FIX: lerInput returns NULL (or previously 0) on bad arguments; the
 * original passed it straight into medirTempoExecMul and crashed. Bail out
 * with a nonzero exit status instead.
 */
int main(int argc, char ** argv)
{
    clock_t tempo = clock();
    Input *i = lerInput(argc, argv);
    if (i == NULL)
        return 1;  /* lerInput already printed the reason */
    printf("Tempo de criacao: %lf\n", (((double) clock() - tempo) / CLOCKS_PER_SEC));
    printf("Tempo de execucao: %lf\n", medirTempoExecMul(i));
    salvarELiberarMatrizes(i);
    return 0;
}
|
21,010 | #include <iostream>
#include "memory.h"
// Abstract animal interface: speak() prints a description, clone() performs
// a virtual copy (subclasses use covariant return types). The destructor is
// virtual so deleting through an aAnimal* runs the derived destructor.
struct aAnimal
{
virtual void speak()const = 0;
virtual aAnimal* clone() const=0;
virtual ~aAnimal(){
std::cout << "Animal Destructor\n";
}
};
//Yes pure virtual functions can have a definition
// Shared prefix of every speak() implementation; subclasses call it first.
void aAnimal::speak()const{ std::cout << "I am ";}
// Concrete animal: prints "I am  a mouse!" and clones itself via the
// copy constructor (covariant Mouse* return).
struct Mouse: public aAnimal
{
virtual void speak()const{
aAnimal::speak();
std::cout << " a mouse!\n";
}
virtual Mouse* clone()const{
std::cout << "Mouse is cloned!\n";
return new Mouse(*this);}
};
// Concrete animal: prints "I am  a cat!" and clones itself via the
// copy constructor (covariant Cat* return).
struct Cat : public aAnimal
{
virtual void speak()const {
aAnimal::speak();
std::cout << " a cat!\n";
}
virtual Cat* clone()const{
std::cout << "Cat is cloned!\n";
return new Cat(*this);}
};
// Exercise dg::ClonePtr (a cloning smart pointer — behavior defined in the
// dg library, not visible here) and dg::Buffer with the animal hierarchy.
// Output is checked by eye against the printed clone/destructor messages.
int main()
{
{
std::cout << "Test correct behaviour of handle: cat and mouse\n";
dg::ClonePtr<aAnimal> h0, h1(new Mouse()); //default and pointer constructor
dg::ClonePtr<aAnimal> h2(*h1); //reference constructor
dg::ClonePtr<aAnimal> h3(h0); // copy an empty object
aAnimal* ptr = new Cat();
h0.reset(ptr); //pointer reset
h1.reset( *h2); //reference reset
*h1=*h2;//reference test
using std::swap;
swap(h1,h0); //swap test
// After the swap: h0 presumably holds the mouse, h1 the cat — confirm
// against ClonePtr's swap semantics.
h0->speak();
h1->speak();
h2->speak();
h1 = h0;
h1->speak();
h0.reset(nullptr);
{
// Assigning a stack object should clone it into the handle, so using
// h0 after `cat` dies is expected to be safe here.
Cat cat;
h0 = cat;
}
std::cout<< "Are you a cat?\n";
h0->speak();
}
{
std::cout << "Test correct behaviour of buffer class with mouse\n";
dg::Buffer<Mouse> buffer;
buffer.data().speak();
dg::Buffer<Mouse> buffer2 = buffer;
buffer2.data().speak();
std::swap( buffer, buffer2);
}
return 0;
}
|
21,011 | #include "includes.h"
// Grid-stride loop over len elements: target[i] becomes 1 when the
// thresholded prediction (p[i] >= cutoff) agrees with mat[i] (presumably a
// 0/1 label — confirm with callers), else 0.
__global__ void kCorrectPreds(float* mat, float* p, float* target, unsigned int len, float cutoff) {
    const unsigned int first  = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = first; i < len; i += stride) {
        // Keep both comparisons explicit so NaN predictions behave exactly
        // as before (both comparisons false -> target 0 for positive labels).
        const float ge = (p[i] >= cutoff) ? 1.0f : 0.0f;
        const float lt = (p[i] <  cutoff) ? 1.0f : 0.0f;
        target[i] = mat[i] * ge + (1 - mat[i]) * lt;
    }
}
21,012 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
// output[i] = sin(input1[i]) + cos(input2[i]) with a fully general
// 3-D grid / 3-D block flattening to a single linear element id.
__global__ void process_kernel1(float *input1, float *input2, float *output, int datasize){
    const int blockId  = blockIdx.x + gridDim.x * (blockIdx.y + gridDim.y * blockIdx.z);
    const int localId  = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);
    const int i = blockId * (blockDim.x * blockDim.y * blockDim.z) + localId;
    if (i < datasize)
        output[i] = sin(input1[i]) + cos(input2[i]);
}
// output[i] = log(input[i]); same 3-D grid / 3-D block flattening as kernel1.
__global__ void process_kernel2(float *input, float *output, int datasize){
    const int blockId  = blockIdx.x + gridDim.x * (blockIdx.y + gridDim.y * blockIdx.z);
    const int localId  = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);
    const int i = blockId * (blockDim.x * blockDim.y * blockDim.z) + localId;
    if (i < datasize)
        output[i] = log(input[i]);
}
// output[i] = sqrt(input[i]); same 3-D grid / 3-D block flattening as kernel1.
__global__ void process_kernel3(float *input, float *output, int datasize){
    const int blockId  = blockIdx.x + gridDim.x * (blockIdx.y + gridDim.y * blockIdx.z);
    const int localId  = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);
    const int i = blockId * (blockDim.x * blockDim.y * blockDim.z) + localId;
    if (i < datasize)
        output[i] = sqrt(input[i]);
}
// Reads N and two float arrays from stdin, then computes
// sqrt(log(sin(a[i]) + cos(b[i]))) on the GPU via three chained kernels and
// prints the result.
// NOTE(review): the three launch configurations are fixed (each covers exactly
// 16*1024 = 16384 threads), so inputs with numElements > 16384 are only
// partially processed — confirm the assignment intends a fixed grid.
int main(void)
{
    // Error code to check return values for CUDA calls
    cudaError_t err = cudaSuccess;
    int numElements = 0;
    scanf("%d",&numElements);
    size_t size = numElements * sizeof(float);
    // Host buffers for the two inputs and the final result.
    float *h_input1 = (float *)malloc(size);
    float *h_input2 = (float *)malloc(size);
    float *h_output = (float *)malloc(size);
    // Verify that allocations succeeded
    if (h_input1 == NULL || h_input2 == NULL || h_output == NULL)
    {
        fprintf(stderr, "Failed to allocate host vectors!\n");
        exit(EXIT_FAILURE);
    }
    //Trying for random values
    /*for (int i = 0; i < numElements; ++i)
    {
        h_input1[i] = rand()/(float)RAND_MAX;
        h_input2[i] = rand()/(float)RAND_MAX;
    }*/
    //taking inputs
    printf("Enter input1 elements: \n");
    for (int i = 0; i < numElements; ++i)
        scanf("%f", &h_input1[i]);
    printf("Enter input2 elements: \n");
    for (int i = 0; i < numElements; ++i)
        scanf("%f", &h_input2[i]);
    // Allocate the device input vector A
    float *d_input1 = NULL;
    err = cudaMalloc((void **)&d_input1, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Allocate the device input vector B
    float *d_input2 = NULL;
    err = cudaMalloc((void **)&d_input2, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Allocate the device output vector C
    // NOTE(review): the error messages below all say "vector C" even for the
    // second and third output buffers (copy-paste of the message text).
    float *d_output1 = NULL;
    err = cudaMalloc((void **)&d_output1, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    float *d_output2 = NULL;
    err = cudaMalloc((void **)&d_output2, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    float *d_output3 = NULL;
    err = cudaMalloc((void **)&d_output3, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Copy the host input vectors input1 and input2 in host memory to the device input vectors in
    // device memory
    printf("Copy input data from the host memory to the CUDA device\n");
    err = cudaMemcpy(d_input1, h_input1, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector input1 from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMemcpy(d_input2, h_input2, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector input2 from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Stage 1: output1 = sin(input1) + cos(input2).
    // 3D grid (4,2,2) of 2D blocks (32,32) = 16384 threads.
    dim3 threadsPerBlock1(32,32,1);
    dim3 blocksPerGrid1(4,2,2);
    process_kernel1<<<blocksPerGrid1, threadsPerBlock1>>>(d_input1, d_input2, d_output1, numElements);
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to launch process_kernel1 kernel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Stage 2: output2 = log(output1).  16 blocks of 1024 threads again,
    // just shaped differently to exercise 3D indexing.
    dim3 threadsPerBlock2(8,8,16);
    dim3 blocksPerGrid2(2,8,1);
    process_kernel2<<<blocksPerGrid2, threadsPerBlock2>>>(d_output1, d_output2, numElements);
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to launch process_kernel2 kernel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Stage 3: output3 = sqrt(output2).
    dim3 threadsPerBlock3(128,8,1);
    dim3 blocksPerGrid3(16,1,1);
    process_kernel3<<<blocksPerGrid3, threadsPerBlock3>>>(d_output2, d_output3, numElements);
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to launch process_kernel3 kernel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Copy the device result vector in device memory to the host result vector
    // in host memory.  (cudaMemcpy is blocking, so no explicit sync is needed.)
    printf("Copy output data from the CUDA device to the host memory\n");
    err = cudaMemcpy(h_output, d_output3, size, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector output from device to host (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Print the final results.
    for (int i = 0; i < numElements; ++i)
    {
        printf("%.2f ",h_output[i]);
    }
    // Free device global memory
    err = cudaFree(d_input1);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device vector input1 (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaFree(d_input2);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device vector input2 (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaFree(d_output1);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device vector output1 (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaFree(d_output2);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device vector output2 (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaFree(d_output3);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device vector output3 (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Free host memory
    free(h_input1);
    free(h_input2);
    free(h_output);
    err = cudaDeviceReset();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    return 0;
}
21,013 | #include "includes.h"
// Fused 1x1-convolution / fully-connected layer: C = ReLU(scale * (A x B) + bias)
// for a tile of 4 rows with 1024 input channels and 256 output channels.
// Appears to expect blockDim = (256, 4): threadIdx.x = output channel,
// threadIdx.y = row within the tile — TODO confirm against the launch site.
// Dynamic shared memory layout (floats):
//   input   [4 x 1024]  at shared_
//   weights [16 x 256]  at shared_ + 4096 (double-buffered per 16-channel chunk)
//   output  [4 x 256]   after weights
//   bias    [256], scale[256] after output
// Total: 9728 floats (~38 KB) of dynamic shared memory.
__global__ void kernel_1024_one_256(float *A, float *B, float *bnBias, float *bnScale, float *C) {
    int tile = blockIdx.x, in_channel = threadIdx.x, line = threadIdx.y;
    // Flat thread id within the (256,4) block, 0..1023.
    int ind = line*256 + in_channel;
    extern __shared__ float shared_[];
    float *weights = shared_ + 1024*4, *output = weights + 256*16, *input = shared_;
    float *bias = output + 4*256, *scale = bias + 256;
    // Stage this block's 4x1024 input slice into shared memory (4 rows, each
    // thread copies one element per iteration).
    for (int i = 0; i < 4; i++)
        input[ind + i*1024] = A[tile*4096 + i*1024 + ind];
    // Per-output-channel batch-norm parameters.
    bias[in_channel] = bnBias[in_channel];
    scale[in_channel] = bnScale[in_channel];
    output[ind] = 0.0f;
    __syncthreads();
    // Accumulate over the 1024 input channels in chunks of 16.
    for (int k = 0; k < 1024; k += 16) {
        // Load the 16x256 weight chunk for input channels [k, k+16).
        float *B_start = B + k*256;
        for (int i = 0; i < 4; i++)
            weights[ind + i*1024] = B_start[i*1024 + ind];
        __syncthreads();
        float *A_start = input + k;
        for (int p = 0; p < 16; p++) {
            output[ind] += A_start[line*1024 + p] * weights[in_channel + p*256];
        }
        __syncthreads();
    }
    // Affine (batch-norm style) transform followed by ReLU, then write back.
    float *C_start = C + tile*1024, res = scale[in_channel] * output[ind] + bias[in_channel];
    C_start[ind] = res > 0 ? res : 0;
}
21,014 | #include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
int WIDTH = 400;
int HEIGHT = 300;
__device__ double translatex(int x)
{
    // Map a grid x-index to the real axis of the complex plane:
    // index 0 -> -2.0, one unit per 0.01.
    const double scaled = (double)x / 100.0;
    return scaled - 2.0;
}
__device__ double translatey(int y)
{
    // Map a grid y-index to the imaginary axis of the complex plane:
    // index 0 -> -1.5, one unit per 0.01.
    const double scaled = (double)y / 100.0;
    return scaled - 1.5;
}
// Mandelbrot escape-time kernel: one block per pixel (launched <<<grid, 1>>>).
// Stores the iteration count at which |z| exceeded 2 (i+1, so values are in
// [1, tmax+1]; tmax+1 means the point never escaped).
// NOTE(review): the output stride is hardcoded to 400 while the launch grid is
// (HEIGHT=300, WIDTH=400), i.e. the device layout is [blockIdx.x][blockIdx.y]
// with row length 400; also x is derived from blockIdx.x (which spans HEIGHT)
// and y from blockIdx.y (which spans WIDTH) — confirm the intended axis
// orientation against the host's cpu_t[WIDTH][HEIGHT] array.
__global__ void mandel(int* gpu_t)
{
    int tmax = 100;
    // c = x + iy for this pixel.
    double x = translatex(blockIdx.x);
    double y = translatey(blockIdx.y);
    // z = a + ib, starting at 0.
    double a = 0.0;
    double b = 0.0;
    int i;
    for(i=0; i<tmax; i++)
    {
        // z <- z^2 + c
        double anew = a*a - b*b + x;
        double bnew = 2*a*b + y;
        a = anew;
        b = bnew;
        // Escaped once |z|^2 > 4.
        if(a*a + b*b > 4.0)
            break;
    }
    *(gpu_t + 400*blockIdx.x + blockIdx.y) = i+1;
}
// Renders the Mandelbrot iteration counts on the GPU and prints them one per
// line.  No CUDA error checking is performed.
int main(int argc, char const *argv[])
{
    int *gpu_t;
    cudaMalloc((void**)&gpu_t, HEIGHT*WIDTH*sizeof(int));
    // WIDTH/HEIGHT are runtime globals, so this is a ~469 KB variable-length
    // array on the stack (GNU extension) — may overflow small stack limits.
    int cpu_t[WIDTH][HEIGHT];
    // One single-thread block per pixel: grid.x spans HEIGHT, grid.y WIDTH.
    dim3 grid(HEIGHT, WIDTH);
    mandel<<<grid, 1>>>(gpu_t);
    cudaMemcpy(cpu_t, gpu_t, HEIGHT*WIDTH*sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(gpu_t);
    int x;
    int y;
    // NOTE(review): the kernel writes with a row stride of 400 (= WIDTH) per
    // blockIdx.x, while cpu_t is typed [WIDTH][HEIGHT] (row stride 300), so
    // this prints the buffer reinterpreted/transposed — confirm intended.
    for(x = 0; x<WIDTH; x++)
    {
        for(y = 0;y< HEIGHT; y++)
        {
            printf("%d\n", cpu_t[x][y]);
        }
    }
}
|
21,015 | /**********************************************************************\
* Author: Jose A. Iglesias-Guitian *
* C/C++ code *
* Introduction to CUDA *
/**********************************************************************/
// Instructions: How to compile this program.
// nvcc 3_add_parallel_threads.cu -L /usr/local/cuda/lib -lcudart -o 3_add_parallel_threads
// Multiple blocks, multiple threads per block (128 threads/block, with a tail guard)
#include <stdio.h>
__global__ void add(int *a, int *b, int *c, int N) {
    // Element-wise vector addition: c = a + b, one element per thread.
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    // Tail guard: the last block may extend past N.
    if (i >= N) return;
    c[i] = a[i] + b[i];
}
// Adds a[i] = i and b[i] = -(i-1) on the GPU; every pair sums to 1, so the
// total over N elements should equal N — used as a self-check.
int main() {
    // Vector size
    int N = 100000;
    // Host vectors
    int *a, *b;
    int *c; // output vector
    // Device vectors
    int *d_a, *d_b;
    int *d_c; // device copies
    // Size in bytes of each vector
    size_t size = N*sizeof(int);
    // Allocate host memory
    a = (int*)malloc(size);
    b = (int*)malloc(size);
    c = (int*)malloc(size);
    // Allocate device memory
    cudaMalloc((void **) &d_a, size);
    cudaMalloc((void **) &d_b, size);
    cudaMalloc((void **) &d_c, size);
    // Initialize host vectors: a[i] + b[i] == 1 for every i.
    for( int i = 0; i < N; i++) {
        a[i] = i;
        b[i] = -(i-1);
    }
    // Copy host input vectors to device
    cudaMemcpy( d_a, a, size, cudaMemcpyHostToDevice );
    cudaMemcpy( d_b, b, size, cudaMemcpyHostToDevice );
    // Number of threads per block
    int threadCount = 128;
    // Number of blocks per grid (ceiling division so the tail is covered).
    int blockCount = (int)ceil((float)N/threadCount);
    // Launch add() on GPU
    add<<<blockCount,threadCount>>>(d_a, d_b, d_c, N);
    // Copy result to host (blocking copy doubles as synchronization).
    cudaMemcpy( c, d_c, size, cudaMemcpyDeviceToHost);
    // Results should sum up to N
    int sum = 0;
    for (int i = 0; i < N; i++) {
        if (i < 5) {
            printf("%d + %d = %d\n", a[i], b[i], c[i]);
        }
        sum += c[i];
    }
    printf("...\n");
    printf("Should be %d\nResults: %d\n", N,sum);
    // Cleanup host
    free(a);
    free(b);
    free(c);
    // Cleanup device
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
|
21,016 | #include <cuda_runtime.h>
#include <stdio.h>
#include <iostream>
using namespace std;
/* Mirror operations */
__global__
void mirror(uchar4* inputChannel, uchar4* outputChannel, int numRows, int numCols, bool vertical)
{
    // Converts each pixel to grayscale and writes it to the mirrored position:
    // left-right flip when `vertical` is true, top-bottom flip otherwise.
    //
    // BUG FIX: the original partitioned pixels with an integer-division
    // "stripe" (numPixels / totalThreads), silently dropping the remainder
    // pixels whenever numPixels was not divisible by the thread count.
    // A grid-stride loop covers every pixel for any launch configuration.
    const int numPixels = numRows * numCols;
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < numPixels; i += stride)
    {
        // Rec.601 luma weights (kept byte-identical to the original mixed
        // float/double literals so per-pixel rounding is unchanged).
        unsigned char Y = 0.299f * inputChannel[i].x + 0.587 * inputChannel[i].y + 0.114 * inputChannel[i].z;
        const int row = i / numCols;
        const int col = i % numCols;
        if (vertical)
            // Mirror within the row (left-right).
            outputChannel[row * numCols + (numCols - col) - 1] = make_uchar4(Y, Y, Y, 255);
        else
            // Mirror across rows (top-bottom).
            outputChannel[(numRows - row - 1) * numCols + col] = make_uchar4(Y, Y, Y, 255);
    }
}
// Runs the mirror kernel on a device image and returns a freshly malloc'd
// host copy of the result (caller owns and must free() it).
// NOTE(review): this function cudaFree()s the caller's d_inputImageRGBA —
// the input pointer is invalid after this call; confirm callers expect that.
uchar4* mirror_ops(uchar4 *d_inputImageRGBA, size_t numRows, size_t numCols, bool vertical)
{
    //Creat Timing Event
    cudaEvent_t start, stop;
    cudaEventCreate (&start);
    cudaEventCreate (&stop);
    //Set reasonable block size (i.e., number of threads per block)
    // NOTE(review): 6 blocks x 9 threads = 54 threads total for the whole
    // image — tiny; the kernel divides work into per-thread stripes.
    dim3 blockSize(9);
    //Calculate Grid SIze
    dim3 gridSize(6);
    //Calculate number of pixels
    size_t numPixels = numRows * numCols;
    //Allocate Memory Space on Device for output image
    uchar4 *d_outputImageRGBA;
    cudaMalloc(&d_outputImageRGBA, sizeof(uchar4) * numPixels);
    //start Timer
    cudaEventRecord(start, 0);
    //Call mirror kernel.
    mirror<<<gridSize, blockSize>>>(d_inputImageRGBA, d_outputImageRGBA, numRows, numCols, vertical);
    //Stop Timer
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaDeviceSynchronize();
    //Initialize memory on host for output uchar4*
    uchar4* h_out;
    h_out = (uchar4*)malloc(sizeof(uchar4) * numPixels);
    //copy output from device to host
    cudaMemcpy(h_out, d_outputImageRGBA, sizeof(uchar4) * numPixels, cudaMemcpyDeviceToHost);
    //Cleanup memory on device (including the caller's input buffer).
    cudaFree(d_inputImageRGBA);
    cudaFree(d_outputImageRGBA);
    //Calculate Elapsed Time between the two recorded events.
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("GPU time = %5.2f ms\n", elapsedTime);
    //return h_out
    return h_out;
}
|
21,017 | /**
* Main CUDA file for running parallel cellular automaton.
* @author Logan Apple
* @date 5/15/2020
*/
#include "gol.cuh"
// What if I just passed the grid instead?
// Counts the live cells among the (up to 8) Moore neighbors of (x, y).
// Returns 0 for a center that lies outside the grid; off-grid neighbors are
// treated as dead.  Callable from both host and device code.
__host__ __device__ uint8_t count_neighbors(int x, int y,
                                            int width, int height,
                                            uint8_t* cells) {
    // An out-of-bounds center has no neighbors by definition.
    if (x < 0 || x >= width || y < 0 || y >= height) {
        return 0;
    }
    uint8_t alive = 0;
    // Scan the 3x3 neighborhood, skipping the center cell itself.
    for (int dy = -1; dy <= 1; ++dy) {
        for (int dx = -1; dx <= 1; ++dx) {
            if (dx == 0 && dy == 0) {
                continue;
            }
            const int nx = x + dx;
            const int ny = y + dy;
            if (nx >= 0 && nx < width && ny >= 0 && ny < height) {
                alive += cells[ny * width + nx];
            }
        }
    }
    return alive;
}
// One Game-of-Life generation: reads `cells`, writes `updated_cells`.
// Grid-stride in both dimensions, so any launch configuration covers the
// whole board.
__global__ void naive_update_kernel(int width, int height,
                                    uint8_t* cells, uint8_t* updated_cells) {
    const int num_threads_x = blockDim.x * gridDim.x;
    const int num_threads_y = blockDim.y * gridDim.y;
    // This thread's starting column, remembered so the inner loop can restart
    // from it on every row.
    const int start_x = blockIdx.x * blockDim.x + threadIdx.x;
    // BUG FIX: the original declared tidx once outside the row loop, so after
    // the first row pass tidx stayed >= width and every later row was skipped.
    for (int tidy = blockIdx.y * blockDim.y + threadIdx.y; tidy < height;
         tidy += num_threads_y) {
        for (int tidx = start_x; tidx < width; tidx += num_threads_x) {
            uint8_t neighbors = count_neighbors(tidx, tidy, width, height, cells);
            // Any live cell with two or three neighbors survives.
            if (cells[tidy * width + tidx] == 1 &&
                (neighbors == 2 || neighbors == 3)) {
                updated_cells[tidy * width + tidx] = 1;
            }
            // Any dead cell with three live neighbors comes to life.
            else if (cells[tidy * width + tidx] == 0 && neighbors == 3) {
                updated_cells[tidy * width + tidx] = 1;
            }
            // Any other cells die.
            else {
                updated_cells[tidy * width + tidx] = 0;
            }
        }
    }
}
// Shared-memory Game-of-Life step.  Each block stages a (blockDim.x+2) x
// (blockDim.y+2) tile — its own cells plus a one-cell halo ring — so neighbor
// counting never touches global memory.  The launch must supply
// (blockDim.x+2)*(blockDim.y+2) bytes of dynamic shared memory (as
// call_cuda_gol_update does for square blocks).
//
// BUG FIX: the original indexed shmem with the full grid `width` as the row
// stride (overrunning the small shared allocation) and never loaded the halo
// cells, so threads on block edges read uninitialized shared memory.
__global__ void optimized_update_kernel(int width, int height,
                                        uint8_t* cells, uint8_t* updated_cells) {
    extern __shared__ uint8_t shmem[];
    const int tile_w = blockDim.x + 2;
    const int tile_h = blockDim.y + 2;
    const int tidx = blockIdx.x * blockDim.x + threadIdx.x;
    const int tidy = blockIdx.y * blockDim.y + threadIdx.y;
    // Top-left global coordinate of the halo-extended tile.
    const int base_x = blockIdx.x * blockDim.x - 1;
    const int base_y = blockIdx.y * blockDim.y - 1;
    // Cooperatively fill the whole tile; off-grid cells count as dead.
    for (int s = threadIdx.y * blockDim.x + threadIdx.x;
         s < tile_w * tile_h;
         s += blockDim.x * blockDim.y) {
        const int sy = s / tile_w;
        const int sx = s % tile_w;
        const int gy = base_y + sy;
        const int gx = base_x + sx;
        shmem[s] = (gx >= 0 && gx < width && gy >= 0 && gy < height)
                       ? cells[gy * width + gx] : 0;
    }
    __syncthreads();  // outside any divergent branch: all threads reach it
    if (tidx < width && tidy < height) {
        // Local coordinates shifted by +1 to skip the halo ring.
        const int i = threadIdx.y + 1;
        const int j = threadIdx.x + 1;
        uint8_t neighbors = 0;
        // Take advantage of loop unrolling to make this faster.
        #pragma unroll
        for (int y = -1; y <= 1; ++y) {
            #pragma unroll
            for (int x = -1; x <= 1; ++x) {
                if (x != 0 || y != 0) {
                    neighbors += shmem[(i + y) * tile_w + (j + x)];
                }
            }
        }
        const uint8_t current = shmem[i * tile_w + j];
        // Survive on 2-3 neighbors, birth on exactly 3, die otherwise.
        updated_cells[tidy * width + tidx] =
            (neighbors == 3 || (neighbors == 2 && current == 1)) ? 1 : 0;
    }
}
// Bit-packed Game-of-Life step: each byte of `cells` holds 8 horizontally
// adjacent cells (bit k = cell 8*byte_col + k), and `width` is measured in
// BYTES.  Reads `cells`, writes `updated_cells` (same packing).
//
// BUG FIXES vs. the original:
//  * tidx was declared once outside the row loop, so only the first row pass
//    did any work (tidx never reset).
//  * `1 << (k - 1)` was evaluated with k == 0 in the left-neighbor fallback
//    branches (tidx == 0), a negative shift — undefined behavior.
//  * Output bits were only OR-ed in and never cleared, leaving stale bits
//    when the destination buffer was not pre-zeroed; the whole byte is now
//    recomputed and stored.
__global__ void optimized_update_kernel_bitwise(int width, int height,
                                                uint8_t* cells, uint8_t* updated_cells) {
    const int num_threads_x = blockDim.x * gridDim.x;
    const int num_threads_y = blockDim.y * gridDim.y;
    const int start_x = blockIdx.x * blockDim.x + threadIdx.x;
    // Fetch the cell at bit `b` of byte column `x`, row `y`.  A bit index
    // outside [0,7] rolls into the neighboring byte; anything off-grid reads
    // as dead (0).  This replaces the original's eight hand-unrolled
    // byte-boundary special cases.
    auto bit_at = [&](int x, int y, int b) -> uint8_t {
        if (b < 0)      { b += 8; --x; }
        else if (b > 7) { b -= 8; ++x; }
        if (x < 0 || x >= width || y < 0 || y >= height) return 0;
        return (uint8_t)((cells[y * width + x] >> b) & 1);
    };
    for (int tidy = blockIdx.y * blockDim.y + threadIdx.y; tidy < height;
         tidy += num_threads_y) {
        for (int tidx = start_x; tidx < width; tidx += num_threads_x) {
            uint8_t result = 0;
            // Evaluate all 8 packed cells of this byte.
            for (int k = 0; k < 8; ++k) {
                const uint8_t current = bit_at(tidx, tidy, k);
                const uint8_t neighbors =
                    bit_at(tidx, tidy - 1, k - 1) + bit_at(tidx, tidy - 1, k) +
                    bit_at(tidx, tidy - 1, k + 1) +
                    bit_at(tidx, tidy,     k - 1) + bit_at(tidx, tidy,     k + 1) +
                    bit_at(tidx, tidy + 1, k - 1) + bit_at(tidx, tidy + 1, k) +
                    bit_at(tidx, tidy + 1, k + 1);
                // Survive on 2-3 neighbors, birth on exactly 3.
                if (neighbors == 3 || (neighbors == 2 && current == 1)) {
                    result |= (uint8_t)(1 << k);
                }
            }
            updated_cells[tidy * width + tidx] = result;
        }
    }
}
// Host dispatcher for one Game-of-Life generation.  Picks the kernel:
// naive (optimized == false), bit-packed (optimized and width divisible by 8,
// with width reinterpreted as bytes), or the shared-memory tile kernel.
void call_cuda_gol_update(int num_threads,
                          int width, int height,
                          uint8_t* cells, uint8_t* updated_cells,
                          bool optimized) {
    // The bitwise kernel packs 8 cells per byte, shrinking its logical width.
    const bool packed = (width % 8 == 0);
    const int actual_width = packed ? width / 8 : width;
    dim3 block_size(num_threads, num_threads);
    dim3 grid_size((actual_width + num_threads - 1) / num_threads,
                   (height + num_threads - 1) / num_threads);
    if (!optimized) {
        naive_update_kernel<<<grid_size, block_size>>>(width, height,
                                                       cells, updated_cells);
        return;
    }
    if (packed) {
        optimized_update_kernel_bitwise<<<grid_size, block_size>>>
            (actual_width, height, cells, updated_cells);
    } else {
        // Shared tile = block plus a one-cell halo ring on every side.
        const size_t shmem_bytes =
            (num_threads + 2) * (num_threads + 2) * sizeof(uint8_t);
        optimized_update_kernel<<<grid_size, block_size, shmem_bytes>>>
            (width, height, cells, updated_cells);
    }
}
21,018 | #include <iostream>
#include <string>
#include <sstream>
#include <fstream>
#include <algorithm>
#include <chrono>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <cuda.h>
#include <unistd.h>
//#define _DEBUG_
//#define _TIME_MEASURE_
#ifdef _DEBUG_
#include <string>
#include <sstream>
int __print_step = 0;
// Debug logger: prints `h_` (header, itself a printf format consuming the
// leading varargs) followed by `f_` and a newline, then bumps the global
// step counter.
void __pt_log(const char *h_, const char *f_, ...){
    // Build the combined format string, then forward the varargs to vprintf.
    std::string fmt = std::string(h_) + f_ + '\n';
    va_list va;
    va_start(va, f_);
    vprintf(fmt.c_str(), va);
    va_end(va);
    ++__print_step;
}
#define VA_ARGS(...) , ##__VA_ARGS__
#define LOG(f_, ...) __pt_log(\
"[LOG] Step %3d: ", (f_), \
__print_step VA_ARGS(__VA_ARGS__))
#else
#define LOG(f_, ...)
#endif
#define INF 1000000000
#define CEIL(a, b) (( (a) - 1 ) / (b) + 1 )
int **Dist;
int *data;
int block_size;
int vert, edge;
int vert2;
// Allocates the vert x vert distance matrix as one contiguous buffer plus a
// row-pointer table, initializes all pairs to INF except the zero diagonal,
// and clamps the global block_size to the matrix dimension.
inline void init(){
    vert2 = vert*vert;
    data = new int[vert2];
    Dist = new int*[vert];
    // Every pair starts unreachable...
    std::fill(data, data + vert2, INF);
    for(int row = 0; row < vert; ++row){
        Dist[row] = data + row*vert;
        // ...except each vertex to itself.
        Dist[row][row] = 0;
    }
    // A tile larger than the matrix makes no sense; shrink it.
    if(block_size > vert){
        block_size = vert;
    }
}
// Releases the row-pointer table and the contiguous distance buffer that
// init() allocated.
inline void finalize(){
    delete[] Dist;
    delete[] data;
}
// Reads a text graph file ("vert edge" header followed by `edge` lines of
// "i j w"), allocates the distance matrix via init(), and fills in the edge
// weights.  The whole file is slurped through a stringstream before parsing.
// Note: init() clamps the global block_size, so block_size must already be
// set before this is called (see main()).
void dump_from_file_and_init(const char *file){
    std::ifstream fin(file);
    std::stringstream ss;
    ss << fin.rdbuf();
    ss >> vert >> edge;
    LOG("vert: %d, edge: %d", vert, edge);
    init();
    int i, j, w;
    // Directed edge i -> j with weight w; later duplicates overwrite earlier.
    while(--edge >=0){
        ss >> i >> j >> w;
        Dist[i][j] = w;
    }
    fin.close();
}
// Writes the full vert x vert distance matrix as raw little-endian int32
// binary (vert2 ints) — note the output format is binary even though the
// input file is text.
void dump_to_file(const char *file){
    std::ofstream fout(file);
    fout.write((char*)data, sizeof(int)*vert2);
    fout.close();
}
// No-op warm-up kernel: computes an index and returns.  Presumably launched
// once so CUDA context/JIT setup cost is paid before the timed phases —
// TODO confirm intent.
__global__ void init_gpu(int reps){
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx >= reps) return;
}
// Blocked Floyd-Warshall, phase 1: relax the pivot block (round, round)
// against itself.  Launched as a single block_size x block_size thread block;
// dynamic shared memory must hold at least block_size^2 ints.  `width` is the
// padded row stride of `dist`, so even padding cells (>= vert) index within
// the allocation.
__global__ void phase_one(int32_t* const dist, int block_size, int round, int width, int vert){
    extern __shared__ int s[];
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;
    // Global (row, column) of this thread's cell inside the pivot block.
    const int c = block_size * round + ty;
    const int r = block_size * round + tx;
    const int cell = c*width+r;
    const int s_cell = ty*block_size+tx;
    // Padding cells beyond the real vertex count are treated as unreachable.
    if(c >= vert || r >= vert){
        s[s_cell] = INF;
    }else{
        s[s_cell] = dist[cell];
    }
    __syncthreads();
    int n, k;
    for(k=0;k<block_size;++k){
        // min(dist[ty][tx], dist[ty][i] + dist[i][tx])
        n = s[ty*block_size+k] + s[k*block_size+tx];
        if(n < s[s_cell]){
            s[s_cell] = n;
        }
        __syncthreads();
    }
    dist[cell] = s[s_cell];
}
// Blocked Floyd-Warshall, phase 2: relax the pivot row and pivot column
// blocks against the pivot block.  Grid is (Round-1, 2): blockIdx.y == 0
// handles the pivot row ("horizontal"), blockIdx.y == 1 the pivot column
// ("vertical"); blockIdx.x skips over the pivot itself.  Needs 2*block_size^2
// ints of dynamic shared memory (the launch passes 3 tiles; the extra one is
// unused here).
__global__ void phase_two(int32_t* const dist, int block_size, int round, int width, int vert){
    extern __shared__ int s2[];
    int* const s_m = s2; //main(block)
    int* const s_c = s2 + block_size*block_size; //center(pivot)
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int mc, mr; //main
    int cc, cr; //center(pivot)
    // Skip the pivot block column/row: bx indexes the Round-1 non-pivot blocks.
    if(bx >= round)++bx; //shift
    if(by == 0){ //horizontal: block (round, bx), pivot (round, round)
        mc = block_size * round + ty;
        mr = block_size * bx + tx;
        cc = mc;
        cr = block_size * round + tx;
    }else{ //vertical: block (bx, round), pivot (round, round)
        mc = block_size * bx + ty;
        mr = block_size * round + tx;
        cc = block_size * round + ty;
        cr = mr;
    }
    int m_cell = mc * width + mr;
    int c_cell = cc * width + cr;
    int s_cell = ty * block_size + tx;
    // Load both tiles; padding cells read as unreachable.
    if(mc >= vert || mr >= vert) s_m[s_cell] = INF;
    else s_m[s_cell] = dist[m_cell];
    if(cc >= vert || cr >= vert) s_c[s_cell] = INF;
    else s_c[s_cell] = dist[c_cell];
    __syncthreads();
    int n, k;
    // Relax through all k in the pivot block; operand order depends on
    // whether the pivot sits to the left (horizontal) or above (vertical).
    if(by == 0){
        for(k=0;k<block_size;++k){
            n = s_c[ty*block_size+k] + s_m[k*block_size+tx];
            if(n < s_m[s_cell]){
                s_m[s_cell] = n;
            }
            __syncthreads();
        }
    }else{
        for(k=0;k<block_size;++k){
            n = s_m[ty*block_size+k] + s_c[k*block_size+tx];
            if(n < s_m[s_cell]){
                s_m[s_cell] = n;
            }
            __syncthreads();
        }
    }
    dist[m_cell] = s_m[s_cell];
}
// Blocked Floyd-Warshall, phase 3: relax every remaining block (by, bx)
// through the pivot row block (by, round) and pivot column block (round, bx).
// Grid is (Round-1, Round-1); both indices skip the pivot.  Needs
// 3*block_size^2 ints of dynamic shared memory.
__global__ void phase_three(int32_t* const dist, int block_size, int round, int width, int vert){
    int bs2 = block_size*block_size;
    extern __shared__ int s3[];
    int* const s_m = s3;            // this block's tile
    int* const s_l = s3 + bs2;      // pivot-column tile (left operand)
    int* const s_r = s_l + bs2;     // pivot-row tile (right operand)
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;
    int bx = blockIdx.x;
    int by = blockIdx.y;
    if(bx >= round)++bx; //shift x
    if(by >= round)++by; //shift y
    const int mc = block_size * by + ty;
    const int mr = block_size * bx + tx;
    const int lc = mc;
    const int lr = block_size * round + tx;
    const int rc = block_size * round + ty;
    const int rr = mr;
    const int m_cell = mc*width + mr;
    const int l_cell = lc*width + lr;
    const int r_cell = rc*width + rr;
    const int s_cell = ty*block_size + tx;
    // Load the three tiles; padding cells read as unreachable.
    if(mc >= vert || mr >= vert) s_m[s_cell] = INF;
    else s_m[s_cell] = dist[m_cell];
    if(lc >= vert || lr >= vert) s_l[s_cell] = INF;
    else s_l[s_cell] = dist[l_cell];
    if(rc >= vert || rr >= vert) s_r[s_cell] = INF;
    else s_r[s_cell] = dist[r_cell];
    __syncthreads();
    int n, k;
    for(k=0;k<block_size;++k){
        // min(dist[m], dist[m->pivot col] + dist[pivot row->m]) through k.
        n = s_l[ty*block_size+k] + s_r[k*block_size+tx];
        if(n<s_m[s_cell]){
            s_m[s_cell] = n;
        }
        __syncthreads();
    }
    dist[m_cell] = s_m[s_cell];
}
extern __shared__ int S[];
// Runs blocked Floyd-Warshall on the GPU: pads the matrix to a multiple of
// block_size, uploads it, executes Round iterations of the three phases, and
// copies the result back into the host `data` buffer.
// NOTE(review): when Round == 1 the phase-2/3 grids have a zero dimension,
// which makes those launches fail silently (no error checking here) — in that
// case phase_one alone happens to do all the work; confirm acceptable.
void block_FW(){
#ifdef _TIME_MEASURE_
    auto start = std::chrono::high_resolution_clock::now();
#endif
    cudaStream_t init_stream;
    cudaStreamCreate(&init_stream);
    // Number of tile rounds; device matrix is padded to Round*block_size.
    int Round = CEIL(vert, block_size);
    int padded_size = Round * block_size;
    size_t vert_w_bytes = vert * sizeof(int);
    size_t padded_w_bytes = padded_size * sizeof(int);
    int32_t *device_ptr;
    //size_t pitch;
    dim3 p2b(Round-1, 2, 1); //phase 2 block
    dim3 p3b(Round-1, Round-1, 1); //phase 3 block
    dim3 dimt(block_size, block_size, 1); //thread
    //cudaMallocPitch(&device_ptr, &pitch, vert_byte, vert_byte, vert);
    cudaMalloc(&device_ptr, padded_w_bytes * padded_size);
    //size_t pitch_int = pitch / sizeof(int);
    //LOG("pitch => %zu bytes (%zu words)", pitch, pitch_int);
    LOG("the number of blocks: %d", Round);
    // Warm-up launch overlapped with the async host-to-device 2D copy below.
    init_gpu<<< 1, dimt >>>(32);
    //dst_ptr, dst_pitch, src, src_pitch, w, h, kind
    cudaMemcpy2DAsync(device_ptr, padded_w_bytes, data, vert_w_bytes,
            vert_w_bytes, vert, cudaMemcpyHostToDevice, init_stream);
    // Shared memory budget: three block_size x block_size int tiles
    // (phase 3 needs all three; phases 1-2 use less).
    size_t bs2b3 = block_size * block_size * sizeof(int) * 3;
    cudaDeviceSynchronize();
    for(int r=0; r < Round; ++r){
        LOG("Round %d/%d", r+1, Round);
        phase_one<<< 1   , dimt , bs2b3 >>>(device_ptr, block_size, r, padded_size, vert);
        phase_two<<< p2b , dimt , bs2b3 >>>(device_ptr, block_size, r, padded_size, vert);
        phase_three<<< p3b , dimt , bs2b3 >>>(device_ptr, block_size, r, padded_size, vert);
    }
    // Blocking copy back (also synchronizes with the kernel stream).
    cudaMemcpy2D(data, vert_w_bytes, device_ptr, padded_w_bytes,
            vert_w_bytes, vert, cudaMemcpyDeviceToHost);
    cudaFree(device_ptr);
    cudaStreamDestroy(init_stream);
#ifdef _TIME_MEASURE_
    auto end = std::chrono::high_resolution_clock::now();
    std::chrono::duration<double> diff = end - start;
    double elapsed_time = diff.count() * 1000;
    printf("Total time: %f ms (%f GFLOPS)\n", elapsed_time, 2*vert*vert*vert / (elapsed_time * 1e6));
#endif
}
// Usage: <exe> <input-graph> <output-file> <block-size>
int main(int argc, char **argv){
    // Guard against missing arguments (argv[3] was dereferenced blindly).
    if(argc < 4){
        fprintf(stderr, "usage: %s <input> <output> <block_size>\n", argv[0]);
        return 1;
    }
    // BUG FIX: block_size must be set BEFORE loading the graph, because
    // init() (called from dump_from_file_and_init) clamps block_size to the
    // vertex count.  It was previously parsed afterwards, so the clamp always
    // ran against the zero-initialized global and never took effect.
    block_size = std::atoi(argv[3]);
    dump_from_file_and_init(argv[1]);
    block_FW();
    dump_to_file(argv[2]);
    finalize();
    return 0;
}
|
21,019 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <time.h>
#include <curand.h>
#include <curand_kernel.h>
#define NUM_THREADS 1000
#define NUM_BLOCKS 100
#define HH 1e7
typedef struct {
int width;
int height;
double* elements;
} Matrix;
extern "C" void fkpaths(double *domain, Matrix SITES, Matrix OXY, Matrix UV, double *KXY, Matrix FKSOL);
#define CUDA_CALL(x) do { if ((x) != cudaSuccess) { \
printf("Error at %s : %d \n",__FILE__, __LINE__);\
return EXIT_FAILURE;}} while(0)
#define CURAND_CALL(x) do { if ((x) != CURAND_STATUS_SUCCESS) { \
printf("Error at %s : %d\n",__FILE__, __LINE__);\
return EXIT_FAILURE;}} while(0)
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Helper behind the gpuErrchk macro: prints the CUDA error with the call
// site's file/line and (by default) terminates with the error code as the
// process exit status.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}
//###################################################################
// Initializes one cuRAND state per thread.  The `state` array must hold
// gridDim.x * blockDim.x entries, since `thread` ranges over the whole grid.
__global__ void setup_kernel(curandState *state)
{
    int bid=blockIdx.x;
    int tid=threadIdx.x;
    int NPATHS = blockDim.x;
    // Global thread id = site index * paths-per-site + path index.
    int thread=bid*NPATHS+tid;
    // each thread gets same seed, different seq number, no offset
    curand_init(1234,thread,0,&state[thread]);
}
//##################################################################
// Feynman-Kac path sampler: one block per observation site, one thread per
// sample path.  Each thread runs an Euler-Maruyama random walk
//   x += HH*u + sqrt(2*Kx*HH)*N(0,1)   (and likewise for y)
// from its site until the walk leaves the rectangular domain
// [domain[0],domain[1]] x [domain[2],domain[3]], then records
// OXY(exit cell) * exp(-lam * exit_time) in FKSOL[site][path].
// NOTE(review): dev_UV appears to pack u and v side by side in one row
// (v read at column offset N), i.e. UV.width == 2*N — confirm with callers.
__global__ void gpu_fkpaths(curandState *state, double *dev_domain, Matrix dev_OXY, Matrix dev_UV, double *dev_KXY, Matrix dev_SITES, Matrix dev_FKSOL){
    int bid = blockIdx.x;
    int tid = threadIdx.x;
    int NPATHS = blockDim.x;
    int thread = bid*NPATHS + tid;
    // copy state to local memory for efficiency;
    curandState localState=state[thread];
    // Grid resolution of the velocity/observation fields.
    int M,N;
    M = dev_OXY.height;
    N = dev_OXY.width;
    double xc,yc; //x,y current
    double xn,yn; //x,y new
    double tau=0.0;
    // Cell sizes of the uniform grid over the domain.
    double delx;
    delx=(dev_domain[1]-dev_domain[0])/(double)(N-1);
    double dely;
    dely=(dev_domain[3]-dev_domain[2])/(double)(M-1);
    // Starting position: this block's observation site (x, y columns).
    xc = dev_SITES.elements[bid*dev_SITES.width + 0];
    yc = dev_SITES.elements[bid*dev_SITES.width + 1];
    int i,j;
    double uc,vc;
    tau = 0.0;
    // Walk while (xc, yc) is strictly inside the domain rectangle
    // (each product is negative only between the two bounds).
    while( ((xc - dev_domain[0])*(xc - dev_domain[1])<0) && ((yc - dev_domain[2])*(yc - dev_domain[3])<0) ){
        //find uvindx: grid cell containing the current point, clamped.
        j = ceil( (xc - dev_domain[0])/delx );
        i = ceil( (yc - dev_domain[2])/dely );
        if (j<0){
            j=0;
        }
        else{
            if (j>(N-1)) j=N-1;
        }
        if (i<0){
            i=0;
        }
        else{
            if (i>(M-1)) i=M-1;
        }
        // Negated local velocity (walk is run backwards in time — presumably;
        // TODO confirm the sign convention with the model).
        uc=-dev_UV.elements[i*dev_UV.width+j];
        vc=-dev_UV.elements[i*dev_UV.width+N+j];
        // Euler-Maruyama step of size HH with diffusivities KXY[0], KXY[1].
        xn = xc + HH * uc + sqrt(HH)*sqrt(2*dev_KXY[0])*curand_normal(&localState);
        yn = yc + HH * vc + sqrt(HH)*sqrt(2*dev_KXY[1])*curand_normal(&localState);
        xc=xn;
        yc=yn;
        tau = tau + HH;
    }
    // Decay constant applied over the accumulated exit time.
    double lam = 1e-11;
    int II,JJ;
    // Clamp the exit point back onto the grid to sample OXY.
    JJ = ceil( (xc - dev_domain[0])/delx );
    II = ceil( (yc - dev_domain[2])/dely );
    if (JJ<0){
        JJ=0;
    }
    else{
        if (JJ>(N-1)) JJ=N-1;
    }
    if (II<0){
        II=0;
    }
    else{
        if (II>(M-1)) II=M-1;
    }
    dev_FKSOL.elements[bid * dev_FKSOL.width + tid] = dev_OXY.elements[II * dev_OXY.width + JJ] * exp(-lam*tau);
    //dev_FKSOL.elements[bid * dev_FKSOL.width + tid] = (double)bid;
    // copy state back to global memory
    state[thread]=localState;
}
// Host wrapper: uploads the domain, fields, and sites, seeds one cuRAND state
// per (site, path) thread, runs gpu_fkpaths, and copies the per-path results
// back into FKSOL.elements.
void fkpaths(double *domain, Matrix SITES, Matrix OXY, Matrix UV, double *KXY, Matrix FKSOL){
    int NOBS;
    int NPATHS;
    NOBS = FKSOL.height;   // one block per observation site
    NPATHS = FKSOL.width;  // one thread per sample path
    double *dev_domain;
    gpuErrchk( cudaMalloc( (void **)&dev_domain, 4*sizeof(double)) );
    gpuErrchk( cudaMemcpy(dev_domain, domain, 4*sizeof(double), cudaMemcpyHostToDevice) );
    Matrix dev_OXY;
    dev_OXY.height=OXY.height;
    dev_OXY.width=OXY.width;
    gpuErrchk( cudaMalloc( (void **)&dev_OXY.elements, dev_OXY.height*dev_OXY.width*sizeof(double)) );
    gpuErrchk( cudaMemcpy(dev_OXY.elements, OXY.elements, dev_OXY.height*dev_OXY.width*sizeof(double), cudaMemcpyHostToDevice) );
    Matrix dev_UV;
    dev_UV.height=UV.height;
    dev_UV.width=UV.width;
    gpuErrchk( cudaMalloc( (void **)&dev_UV.elements, dev_UV.height*dev_UV.width*sizeof(double)) );
    gpuErrchk( cudaMemcpy(dev_UV.elements, UV.elements, dev_UV.height*dev_UV.width*sizeof(double), cudaMemcpyHostToDevice) );
    double *dev_KXY;
    gpuErrchk( cudaMalloc( (void **)&dev_KXY, 2*sizeof(double)) );
    gpuErrchk( cudaMemcpy(dev_KXY, KXY, 2*sizeof(double), cudaMemcpyHostToDevice) );
    Matrix dev_SITES;
    dev_SITES.height = SITES.height;
    dev_SITES.width = SITES.width;
    gpuErrchk( cudaMalloc( (void **)&dev_SITES.elements, dev_SITES.height*dev_SITES.width*sizeof(double) ) );
    gpuErrchk( cudaMemcpy(dev_SITES.elements, SITES.elements, dev_SITES.height * dev_SITES.width * sizeof(double), cudaMemcpyHostToDevice) );
    Matrix dev_FKSOL;
    dev_FKSOL.height=NOBS;
    dev_FKSOL.width=NPATHS;
    gpuErrchk( cudaMalloc( (void **)&dev_FKSOL.elements, dev_FKSOL.height*dev_FKSOL.width*sizeof(double)) );
    gpuErrchk( cudaPeekAtLastError() );
    curandState *devStates;
    // BUG FIX: both kernels index states by bid*NPATHS+tid, i.e. NOBS*NPATHS
    // entries; the original allocated only NPATHS states, so every block past
    // the first wrote and read out of bounds.
    gpuErrchk ( cudaMalloc( (void **)&devStates, (size_t)NOBS*NPATHS*sizeof(curandState)) );
    setup_kernel<<<NOBS, NPATHS>>>(devStates);
    gpuErrchk( cudaPeekAtLastError() );
    gpu_fkpaths<<<NOBS,NPATHS>>>(devStates, dev_domain, dev_OXY, dev_UV, dev_KXY, dev_SITES, dev_FKSOL);
    gpuErrchk( cudaPeekAtLastError() );
    // Blocking copy-back also synchronizes with the kernels above.
    gpuErrchk( cudaMemcpy(FKSOL.elements, dev_FKSOL.elements, FKSOL.height * FKSOL.width * sizeof(double), cudaMemcpyDeviceToHost) );
    gpuErrchk( cudaPeekAtLastError() );
    //free device buffers (the original leaked devStates).
    cudaFree(dev_domain);
    cudaFree(dev_OXY.elements);
    cudaFree(dev_UV.elements);
    cudaFree(dev_KXY);
    cudaFree(dev_SITES.elements);
    cudaFree(dev_FKSOL.elements);
    cudaFree(devStates);
}
|
21,020 | #include "includes.h"
// Strided gather/scatter: arr2[i*step2] = arr1[i*step1] for i in [0, n_elem).
__global__ void BaseNeuronGetIntArray(int *arr1, int *arr2, int n_elem, int step1, int step2)
{
    const int i = threadIdx.x + blockIdx.x * blockDim.x;
    // Tail guard: the grid may cover more threads than elements.
    if (i >= n_elem) return;
    arr2[i*step2] = arr1[i*step1];
}
21,021 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <curand_kernel.h>
// 1-D "same" convolution with zero padding:
//   output[i] = sum_j input[i - k/2 + j] * kernel[j], indices outside [0, l)
// contribute nothing.  One output element per thread.
__global__ void conv1d(int *input, int *kernel, int *output, int l, int k) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // BUG FIX (robustness): guard the tail so a grid that over-covers l does
    // not write past the end of output (the original wrote unconditionally).
    if (tid >= l) return;
    int r = k / 2;
    int start = tid - r;
    int temp = 0;
    for (int j = 0; j < k; j++) {
        int idx = start + j;
        if (idx >= 0 && idx < l) {
            temp += input[idx] * kernel[j];
        }
    }
    output[tid] = temp;
}
// Benchmarks a 1-D integer convolution on the GPU with pinned host buffers.
int main() {
    int l = 20480;  // signal length
    int k = 7;      // kernel width
    int i;
    int *input, *kernel, *output;
    int *dev_input, *dev_kernel, *dev_output;
    cudaMalloc((void**)&dev_input, sizeof(int) * l);
    cudaMalloc((void**)&dev_kernel, sizeof(int) * k);
    cudaMalloc((void**)&dev_output, sizeof(int) * l);
    // Pinned host memory for faster transfers.
    cudaMallocHost((void**)&input, sizeof(int) * l);
    cudaMallocHost((void**)&kernel, sizeof(int) * k);
    cudaMallocHost((void**)&output, sizeof(int) * l);
    // Fill with pseudo-random ints (the original wrapped rand() in round(),
    // a no-op on an integer value).  rand() is deliberately left unseeded
    // for a reproducible sequence.
    for (i = 0; i < l; i++) {
        input[i] = rand();
    }
    for (i = 0; i < k; i++) {
        kernel[i] = rand();
    }
    printf("Start convolution\n");
    clock_t start_time = clock();
    cudaMemcpy(dev_input, input, sizeof(int) * l, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_kernel, kernel, sizeof(int) * k, cudaMemcpyHostToDevice);
    int block = 256;
    int grid = (l + block - 1) / block;
    // BUG FIX: the kernel was launched with the HOST pointers (input, kernel,
    // output); dereferencing them on the device is an illegal address error
    // and the copied-back output was never written.  Pass the device buffers.
    conv1d<<<grid, block>>>(dev_input, dev_kernel, dev_output, l, k);
    // Blocking copy-back also synchronizes with the kernel before timing ends.
    cudaMemcpy(output, dev_output, sizeof(int) * l, cudaMemcpyDeviceToHost);
    clock_t end_time = clock();
    printf("Time consuming of 1D convolution of %d array with %d kernel is %f ms.\n", l, k, static_cast<double>(end_time - start_time)/CLOCKS_PER_SEC*1000);
    cudaFree(dev_input);
    cudaFree(dev_kernel);
    cudaFree(dev_output);
    cudaFreeHost(input);
    cudaFreeHost(kernel);
    cudaFreeHost(output);
    return 0;
}
|
21,022 |
//This benchmark measures the maximum read bandwidth of GPU memory
//Compile this file using the following command to disable L1 cache:
// nvcc -Xptxas -dlcm=cg -Xptxas -dscm=wt l2_bw.cu
//This code have been tested on Volta V100 architecture
//You can check the mem BW from the NVPROF (dram_read_throughput+dram_write_throughput)
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define BLOCKS_NUM 160
#define THREADS_NUM 1024 //thread number/block
#define TOTAL_THREADS (BLOCKS_NUM*THREADS_NUM)
#define ARRAY_SIZE 8388608 //Array size has to exceed L2 size to avoid L2 cache residence
#define WARP_SIZE 32
#define L2_SIZE 1572864 //number of floats L2 can store
#define clock_freq_MHZ 1132
// GPU error check: wrap every CUDA API call, e.g. gpuErrchk( cudaMalloc(...) ).
// FIX: do { } while (0) makes the expansion a single statement, so the macro
// is safe inside unbraced if/else bodies (the bare { } form of the original
// breaks `if (cond) gpuErrchk(e); else ...`).
#define gpuErrchk(ans) do { gpuAssert((ans), __FILE__, __LINE__); } while (0)
// Print the decoded CUDA error with its call site; terminate unless abort=false.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true){
if (code != cudaSuccess) {
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/*
Four Vector Addition using flost4 types
Send as many as float4 read requests on the flight to increase Row buffer locality of DRAM and hit the max BW
*/
// Streaming 5-input vector add over float4 lanes: C = A + B + D + E + F.
// Each thread records the SM %clock register before/after its loop so the
// host can estimate bytes per cycle from one thread's delta. Reads use
// 16-byte float4 loads to keep many wide, coalesced requests in flight.
// NOTE(review): arrays are assumed 16-byte aligned (required for float4
// reinterpretation) and ARRAY_SIZE divisible by 4 — holds for the malloc'd
// buffers and the 8388608 constant in this file.
__global__ void mem_bw (float* A, float* B, float* C, float* D, float* E, float* F, uint32_t *startClk, uint32_t *stopClk){
// block and thread index
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// block-wide barrier so all timers in a block start together
asm volatile ("bar.sync 0;");
// start timing: raw per-SM cycle counter
uint32_t start = 0;
asm volatile ("mov.u32 %0, %%clock;" : "=r"(start) :: "memory");
// grid-stride loop over float4 (16-byte) elements
for(int i = idx; i < ARRAY_SIZE/4; i += blockDim.x * gridDim.x) {
float4 a1 = reinterpret_cast<float4*>(A)[i];
float4 b1 = reinterpret_cast<float4*>(B)[i];
float4 d1 = reinterpret_cast<float4*>(D)[i];
float4 e1 = reinterpret_cast<float4*>(E)[i];
float4 f1 = reinterpret_cast<float4*>(F)[i];
float4 c1;
c1.x = a1.x + b1.x + d1.x + e1.x + f1.x;
c1.y = a1.y + b1.y + d1.y + e1.y + f1.y;
c1.z = a1.z + b1.z + d1.z + e1.z + f1.z;
c1.w = a1.w + b1.w + d1.w + e1.w + f1.w;
reinterpret_cast<float4*>(C)[i] = c1;
}
// block-wide barrier so all timers stop after every thread finished
asm volatile ("bar.sync 0;");
// stop timing
uint32_t stop = 0;
asm volatile("mov.u32 %0, %%clock;" : "=r"(stop) :: "memory");
// write per-thread start/stop cycle counts back to global memory
startClk[idx] = start;
stopClk[idx] = stop;
}
int main(){
    // Host driver for the bandwidth benchmark: allocate six ARRAY_SIZE float
    // arrays, run mem_bw once, and report bytes/cycle (from thread 0's clock
    // delta) and GB/s (from CUDA event timing).
    uint32_t *startClk = (uint32_t*) malloc(TOTAL_THREADS*sizeof(uint32_t));
    uint32_t *stopClk = (uint32_t*) malloc(TOTAL_THREADS*sizeof(uint32_t));
    float *A = (float*) malloc(ARRAY_SIZE*sizeof(float));
    float *B = (float*) malloc(ARRAY_SIZE*sizeof(float));
    float *C = (float*) malloc(ARRAY_SIZE*sizeof(float));
    float *D = (float*) malloc(ARRAY_SIZE*sizeof(float));
    float *E = (float*) malloc(ARRAY_SIZE*sizeof(float));
    float *F = (float*) malloc(ARRAY_SIZE*sizeof(float));
    uint32_t *startClk_g;
    uint32_t *stopClk_g;
    float *A_g;
    float *B_g;
    float *C_g;
    float *D_g;
    float *E_g;
    float *F_g;
    // Simple ramp inputs; C is produced by the kernel.
    for (uint32_t i=0; i<ARRAY_SIZE; i++){
        A[i] = (float)i;
        B[i] = (float)i;
        D[i] = (float)i;
        E[i] = (float)i;
        F[i] = (float)i;
    }
    gpuErrchk( cudaMalloc(&startClk_g, TOTAL_THREADS*sizeof(uint32_t)) );
    gpuErrchk( cudaMalloc(&stopClk_g, TOTAL_THREADS*sizeof(uint32_t)) );
    gpuErrchk( cudaMalloc(&A_g, ARRAY_SIZE*sizeof(float)) );
    gpuErrchk( cudaMalloc(&B_g, ARRAY_SIZE*sizeof(float)) );
    gpuErrchk( cudaMalloc(&C_g, ARRAY_SIZE*sizeof(float)) );
    gpuErrchk( cudaMalloc(&D_g, ARRAY_SIZE*sizeof(float)) );
    gpuErrchk( cudaMalloc(&E_g, ARRAY_SIZE*sizeof(float)) );
    gpuErrchk( cudaMalloc(&F_g, ARRAY_SIZE*sizeof(float)) );
    gpuErrchk( cudaMemcpy(A_g, A, ARRAY_SIZE*sizeof(float), cudaMemcpyHostToDevice) );
    gpuErrchk( cudaMemcpy(B_g, B, ARRAY_SIZE*sizeof(float), cudaMemcpyHostToDevice) );
    gpuErrchk( cudaMemcpy(D_g, D, ARRAY_SIZE*sizeof(float), cudaMemcpyHostToDevice) );
    gpuErrchk( cudaMemcpy(E_g, E, ARRAY_SIZE*sizeof(float), cudaMemcpyHostToDevice) );
    gpuErrchk( cudaMemcpy(F_g, F, ARRAY_SIZE*sizeof(float), cudaMemcpyHostToDevice) );
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    mem_bw<<<BLOCKS_NUM,THREADS_NUM>>>(A_g, B_g, C_g, D_g, E_g, F_g, startClk_g, stopClk_g);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    gpuErrchk( cudaPeekAtLastError() );
    gpuErrchk( cudaMemcpy(startClk, startClk_g, TOTAL_THREADS*sizeof(uint32_t), cudaMemcpyDeviceToHost) );
    gpuErrchk( cudaMemcpy(stopClk, stopClk_g, TOTAL_THREADS*sizeof(uint32_t), cudaMemcpyDeviceToHost) );
    gpuErrchk( cudaMemcpy(C, C_g, ARRAY_SIZE*sizeof(float), cudaMemcpyDeviceToHost) );
    // FIX: renamed the local (was `mem_bw`), which shadowed the kernel name.
    float bytes_per_clk;
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    unsigned N = ARRAY_SIZE * 6 * 4; //6 arrays of floats types
    bytes_per_clk = (float)(N)/((float)(stopClk[0]-startClk[0]));
    printf("Mem BW= %f (Byte/Clk)\n", bytes_per_clk);
    printf("Mem BW= %f (GB/sec)\n", (float)N/milliseconds/1e6);
    printf("Total Clk number = %u \n", stopClk[0]-startClk[0]);
    // FIX: the original leaked every host/device allocation and both events.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(startClk_g); cudaFree(stopClk_g);
    cudaFree(A_g); cudaFree(B_g); cudaFree(C_g);
    cudaFree(D_g); cudaFree(E_g); cudaFree(F_g);
    free(startClk); free(stopClk);
    free(A); free(B); free(C); free(D); free(E); free(F);
    return 0;
}
|
21,023 | #include "stdio.h"
#include <cuda_runtime.h>
static const int N=100000;
/*
Saxpy: Z = a * X + Y, where
- all variables are single precision,
- a is a constant
- X is a vector
- Y is a vector
*/
/*
 * Single-precision a*X + Y: z[i] = a * x[i] + y[i] for i in [0, N).
 * One thread per element; threads beyond N are idle (grid-tail guard).
 */
__global__ void saxpy (float a, float *x, float *y, float *z, int N) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N) {
        return;
    }
    z[idx] = a * x[idx] + y[idx];
}
/* Check the status of the most recent CUDA call stored in the local `err`;
 * on failure print `str` plus the decoded error and exit.
 * FIX: wrapped in do { } while (0) so the macro expands to exactly one
 * statement and is safe in unbraced if/else bodies (the original bare
 * if-block broke `if (cond) CUDA_CHECK("..."); else ...`). */
#define CUDA_CHECK(str) \
  do { \
    if (err != cudaSuccess) { \
      fprintf(stderr, str); \
      fprintf(stderr, "\n (error code %s)\n", cudaGetErrorString(err)); \
      exit(-1); \
    } \
  } while (0)
int main (int argc, char **argv) {
    // Host driver: fill X/Y, run saxpy on the device, verify against a CPU
    // reference with a mixed absolute/relative tolerance.
    float *hostX = (float*) malloc(N*sizeof(float));
    float *hostY = (float*) malloc(N*sizeof(float));
    for (int i = 0; i < N; i++) {
        hostX[i] = i;
        hostY[i] = i*i;
    }
    cudaError_t err = cudaSuccess;
    // Allocate device arrays
    float *devX, *devY, *devZ;
    err = cudaMalloc((void**)&devX, N*sizeof(float));
    CUDA_CHECK("Failed to allocate device vector X!");
    err = cudaMalloc((void**)&devY, N*sizeof(float));
    CUDA_CHECK("Failed to allocate device vector Y!");
    err = cudaMalloc((void**)&devZ, N*sizeof(float));
    CUDA_CHECK("Failed to allocate device vector Z!");
    // Copy host array contents to device
    err = cudaMemcpy(devX, hostX, N*sizeof(float), cudaMemcpyHostToDevice);
    CUDA_CHECK("Failed to copy vector X from host to device!");
    err = cudaMemcpy(devY, hostY, N*sizeof(float), cudaMemcpyHostToDevice);
    CUDA_CHECK("Failed to copy vector Y from host to device!");
    // Launch saxpy kernel (ceil-div grid)
    int threadsPerBlock = 256;
    int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
    float a = 3.49230;
    saxpy<<<blocksPerGrid, threadsPerBlock>>>(a, devX, devY, devZ, N);
    err = cudaGetLastError();
    CUDA_CHECK("Failed to launch saxpy kernel!");
    // Copy result back to host (blocking copy also synchronizes the kernel)
    float *hostZ = (float*) malloc(N*sizeof(float));
    err = cudaMemcpy(hostZ, devZ, N*sizeof(float), cudaMemcpyDeviceToHost);
    CUDA_CHECK("Failed to copy vector Z from device to host!");
    // Verify correctness of result.
    // FIX: the original used a pure relative error (hostZ - ex) / ex; at i==0
    // both X and Y are 0, so ex == 0 and 0/0 produces NaN, which compares
    // false against every threshold and silently passes. Use an absolute
    // tolerance floor alongside the relative one.
    for (int i = 0; i < N; i++) {
        float ex = (a * hostX[i] + hostY[i]);
        float diff = hostZ[i] - ex;
        if (diff < 0) diff = -diff;
        float tol = 1e-6f * (ex < 0 ? -ex : ex);
        if (tol < 1e-6f) tol = 1e-6f;   // absolute floor for ex near 0
        if (diff > tol) {
            fprintf(stderr, "Result failed at element %d (dev: %f, host: %f, error: %f)\n", i, hostZ[i], ex, diff);
            exit(-1);
        }
    }
    // Free all allocated memory
    free(hostX); free(hostY); free(hostZ);
    cudaFree(devX); cudaFree(devY); cudaFree(devZ);
}
21,024 | #include "includes.h"
__global__ void count(int *data,int input, int *result)
{
    // One thread per element: atomically bump *result for every element of
    // `data` equal to `input`. NOTE: no bounds guard — the launch must cover
    // exactly the array length.
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (data[idx] == input)
    {
        atomicAdd(result, 1);
    }
}
21,025 | ///////////////////////////////////////////////////
//////////// Multi-Node Kernels ///////////////////
///////////////////////////////////////////////////
// Force / potential-energy / kinetic-energy pass of one MD step, work-shared
// across a 2-lane "warp" (threads alternate particles by threadIdx.x % 2).
// Packed-parameter layout implied by the pointer arithmetic below:
//   [np:long][nd:long][mass:double][pos:np*nd][vel][acc][f][pe][ke]
// (pos = mass + 1 places pos at double offset 3, which matches
// UpdatePosVelAccel_Multi below — TODO confirm against the packing code).
__device__ void ComputeParticles_Multi(void* params){
//CUDA Threads
int warp_size = 2;
int tid = threadIdx.x%warp_size;
//Extract all the values.
long int np = *((long int*) params);
long int nd = *(((long int*) params)+1);
int size = np * nd;
double *mass = (((double*) params)+2);
double *pos = mass + 1;
double *vel = pos + size;
double *acc = vel + size;
double *f = acc + size;
double *pe = f + size;
double *ke = pe + size;
int i, j;
double d, d2;
double PI2 = 3.141592653589793 / 2.0;
// Per-pair displacement scratch; fixed at 3 — assumes nd <= 3, TODO confirm.
double rij[3];
//Compute all the potential energy and forces.
//for(k=0; k<np; k++){
// tid doubles as the particle index; each lane strides by warp_size.
while(tid<np){
// Zero this particle's force accumulator before summing pair terms.
for(i=0; i<nd; i++){
f[i+tid*nd] = 0.0;
}
for(j=0; j<np; j++){
if(tid == j){ continue; }   // skip self-interaction
d = 0.0;
for(i=0; i<nd; i++){
rij[i] = pos[tid*nd+i] - pos[j*nd+i];
d += pow(rij[i], 2);
}
d = sqrt(d);
// Truncate the interaction distance at pi/2.
d2 = d < PI2? d : PI2;
// NOTE(review): pe (and ke below) are accumulated with +=/*= and never
// reset here — the caller must zero them between steps; verify.
pe[tid] += 0.5 * pow(sin(d2), 2);
for(i=0; i<nd; i++){
f[i+tid*nd] = f[i+tid*nd] - rij[i] *sin(1.0 * d2) / d;
}
}
tid += warp_size;
}
int tid2 = threadIdx.x%warp_size;
// for(k=0;k<np;k++){
while(tid2<np){
// compute kinetic
// Kinetic energy per particle: 0.5 * mass * |v|^2.
for(i=0; i<nd; i++){
ke[tid2] += vel[i+tid2*nd] * vel[i+tid2*nd];
}
ke[tid2] *= 0.5 * (*mass);
tid2 += warp_size;
}
}
// Velocity-Verlet-style update of positions, velocities and accelerations.
// The nd coordinates of each particle are strided across the 2-lane "warp".
// Uses the same packed-parameter layout as ComputeParticles_Multi above
// (mass at double offset 2, pos at offset 3).
__device__ void UpdatePosVelAccel_Multi(void* params){
//Unpack Table
long int np = *((long int*)params);
long int nd = *(((long int*)params) + 1);
int size = np * nd;
int warp_size = 2;
double mass = *(((double*)params) + 2);
double *pos = ((double*)params) + 3;
double *vel = pos + size;
double *acc = vel + size;
double *f = acc + size;
// Time step. NOTE(review): dt = 1 is very coarse for this potential; the
// commented-out .0001 suggests the value was being experimented with.
double dt = 1; //changes results
//double dt = .0001;
int i,j;
double rmass = 1.0 / mass;
// int tid2 = threadIdx.x%warp_size;
//O(np*nd)
//Begin computation
for(j=0; j<np; j++){
for ( i = threadIdx.x%warp_size; i < nd; i += warp_size){
// x += v*dt + a*dt^2/2 ;  v += dt*(F/m + a_old)/2 ;  a = F/m
pos[i+j*nd] = pos[i+j*nd] + vel[i+j*nd] * dt + 0.5 * acc[i+j*nd] * dt * dt;
vel[i+j*nd] += 0.5 * dt * (f[i+j*nd] * rmass + acc[i+j*nd]);
acc[i+j*nd] = f[i+j*nd] * rmass;
}
}
}
// MD driver: alternate the force/energy evaluation with the
// position/velocity/acceleration update for a fixed number of steps.
__device__ void MDProxy(void* params){
    const int num_steps = 8;
    for (int step = 0; step < num_steps; ++step) {
        ComputeParticles_Multi(params);
        UpdatePosVelAccel_Multi(params);
    }
}
|
21,026 | #include "includes.h"
// Zero one element of each of the three arrays per block.
// NOTE(review): the index mixes blockIdx.x * blockDim.x with blockIdx.y and
// never uses threadIdx, so it only covers every element for a launch shaped
// <<<dim3(rows, cols), threads>>> where blockDim.x happens to equal the row
// stride. Verify against the launch site — blockIdx.x * gridDim.y +
// blockIdx.y may have been intended.
__global__ void set_all_zero_kernel(double *ua_gpu, double *ub_gpu, double *uc_gpu)
{
ua_gpu[blockIdx.x * blockDim.x + blockIdx.y] = 0;
ub_gpu[blockIdx.x * blockDim.x + blockIdx.y] = 0;
uc_gpu[blockIdx.x * blockDim.x + blockIdx.y] = 0;
// TODO: sync CPU after this -> move to utils.cu file
}
21,027 | #include "input.hh"
#include "graph.hh"
#include "../runtime/graph.hh"
#include "../runtime/node.hh"
#include "../memory/alloc.hh"
namespace ops
{
namespace
{
// Monotonically increasing id, one per constructed Input node.
// NOTE(review): the static counter is not synchronized; assumes graph
// construction is single-threaded — confirm.
std::size_t unique_id()
{
static std::size_t res = 0;
return res++;
}
}
// Input op: a placeholder graph node whose tensor data is supplied at run
// time; it carries a unique id assigned at construction and owns no data
// until compile() allocates it.
Input::Input(const Shape& shape)
: Op("input", shape)
, id_(unique_id())
, data_(nullptr)
{}
// Returns the unique id assigned to this input at construction.
std::size_t Input::input_id() const
{
return id_;
}
// Allocates the backing tensor using the shape registered for this input in
// the graph, then registers the node as compiled (no predecessors, one
// output tensor, no kernel).
// NOTE(review): assumes input_shapes_get() contains an entry for `this`;
// find()->second would dereference end() otherwise — confirm that callers
// always register a shape before compiling.
void Input::compile()
{
auto& g = Graph::instance();
auto& inputs = Graph::instance().input_shapes_get();
auto shape = inputs.find(this)->second;
data_ = tensor_alloc(shape.total());
g.add_compiled(this, {}, {data_}, nullptr, shape, data_);
}
}
|
21,028 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define N 5
// Elementwise vector add: c[i] = a[i] + b[i], one block per element
// (block index doubles as the element index).
__global__ void Add(int *a, int*b, int *c){
    int idx = blockIdx.x;
    if (idx >= N) {
        return;
    }
    c[idx] = a[idx] + b[idx];
}
int main(){
    // Add two N-element vectors on the GPU and print the result.
    int a[N] = {1,2,3,4,5}, b[N] = {5,6,7,8,9};
    int c[N];
    int *dev_a, *dev_b, *dev_c;
    cudaMalloc((void**)&dev_a, N*sizeof(int));
    cudaMalloc((void**)&dev_b, N*sizeof(int));
    cudaMalloc((void**)&dev_c, N*sizeof(int));
    cudaMemcpy(dev_a, a, N*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, N*sizeof(int), cudaMemcpyHostToDevice);
    // FIX: launch one block per element. The original <<<2,1>>> ran only
    // blockIdx 0 and 1, so c[2..4] were copied back uninitialized.
    Add<<<N,1>>>(dev_a, dev_b, dev_c);
    cudaMemcpy(c, dev_c, N*sizeof(int), cudaMemcpyDeviceToHost);
    int i; printf("c[i] = ");
    for(i=0;i<N;i++){
        printf("%d ", c[i]);
    }
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    printf("\n");
    return 0;
}
21,029 | #include<iostream>
#include<math.h>
#include<stdint.h>
#include<stdlib.h>
#define N 16
#define M 16
// 5x5 Gaussian blur of the single pixel (i, j) of an N x M image; the
// weighted average is written to *val. Border pixels renormalize by the sum
// of the in-bounds kernel weights (denom), so edges are not darkened.
__global__
void convolve(uint8_t input[N][M], uint8_t *val, int i, int j)
{
// Unnormalized 5x5 Gaussian weights (sum 273).
int kernel[25] = { 1, 4, 7, 4, 1,
4,16,26,16, 4,
7,26,41,26, 7,
4,16,26,16, 4,
1, 4, 7, 4, 1 };
int k_pos = 0;   // linear index into the 5x5 kernel, kept in lockstep with (x, y)
int weight = 0;  // accumulated weighted pixel sum
int denom = 0;   // sum of the kernel weights actually applied (border handling)
for (int y = j-2; y<j+3; y++)
{
if (y>=0 && y<M)
{
for (int x = i-2; x<i+3; x++)
{
if (x>=0 && x<N)
{
//printf("(%d,%d)\nkpos = %d\n",x,y,k_pos);
int k = kernel[k_pos];
weight += k * int(input[x][y]);
denom += k;
}
// Advance even for out-of-bounds columns to keep kernel alignment.
k_pos++;
}
}
else
// Skip a whole kernel row when the image row is out of bounds.
k_pos+=5;
}
*val = uint8_t(weight/denom);
}
// Blur the whole image by running the single-pixel convolve kernel once per
// pixel through a managed scratch byte. NOTE: one <<<1,1>>> launch plus a
// device sync per pixel — simple, but extremely serialized.
void gauss(uint8_t input[N][M], uint8_t output[N][M])
{
    uint8_t *pixel;
    cudaMallocManaged(&pixel, sizeof(uint8_t));
    for (int row = 0; row < N; ++row)
    {
        for (int col = 0; col < M; ++col)
        {
            convolve<<<1,1>>>(input, pixel, row, col);
            cudaDeviceSynchronize();          // wait so *pixel is visible on host
            output[row][col] = *pixel;
        }
    }
    cudaFree(pixel);
}
// Dump an N x M byte image as comma/tab separated integers, one row per line.
void print(uint8_t image[N][M])
{
    for (int row = 0; row < N; ++row)
    {
        for (int col = 0; col < M; ++col)
        {
            std::cout<< int(image[row][col]) << ",\t";
        }
        std::cout<< "\n";
    }
}
int main()
{
    // Build a random N x M image in managed memory, print it, blur it, and
    // print the result.
    // FIX: seed with an explicit integer — the original passed NULL (a
    // pointer constant) to srand, which only compiled via conversion to 0.
    srand(0);
    uint8_t *image, blur[N][M];
    cudaMallocManaged(&image, N*M*sizeof(uint8_t));
    for (int i = 0; i<N; i++)
        for (int j = 0; j<M; j++)
            reinterpret_cast<uint8_t (*)[M]>(image)[i][j] = rand()% 256;
    print(reinterpret_cast<uint8_t (*)[M]>(image));
    gauss(reinterpret_cast<uint8_t (*)[M]>(image), blur);
    std::cout<<"\n";
    print(blur);
    cudaFree(image);
    // FIX: removed cudaFree(blur) — `blur` is a host stack array, and
    // cudaFree is only valid on pointers from cudaMalloc*/cudaMallocManaged.
    return 0;
}
|
21,030 | #include "includes.h"
/*
 * Temperature-scaled softmax over n elements read and written with `stride`:
 * output[i] = expf((input[i] - max) / temp) / sum. Subtracting the running
 * max before exponentiation keeps expf from overflowing.
 */
__device__ void softmax_device(float *input, int n, float temp, int stride, float *output)
{
    int i;
    float sum = 0;
    float largest = -INFINITY;
    for(i = 0; i < n; ++i){
        // FIX: track the max as float — the original stored it in an int,
        // truncating every value and weakening the overflow guard for expf.
        float val = input[i*stride];
        largest = (val>largest) ? val : largest;
    }
    for(i = 0; i < n; ++i){
        float e = expf(input[i*stride]/temp - largest/temp);
        sum += e;
        output[i*stride] = e;
    }
    for(i = 0; i < n; ++i){
        output[i*stride] /= sum;
    }
}
/*
 * Contiguous (stride-1) overload of the temperature-scaled softmax:
 * output[i] = expf((input[i] - max) / temp) / sum.
 */
__device__ void softmax_device(int n, float *input, float temp, float *output)
{
    int i;
    float sum = 0;
    float largest = -INFINITY;
    for(i = 0; i < n; ++i){
        // FIX: float max (the original used int, truncating each value).
        float val = input[i];
        largest = (val>largest) ? val : largest;
    }
    for(i = 0; i < n; ++i){
        // FIX: expf instead of double exp — avoids a needless double
        // round-trip in this float-only routine.
        float e = expf(input[i]/temp - largest/temp);
        sum += e;
        output[i] = e;
    }
    for(i = 0; i < n; ++i){
        output[i] /= sum;
    }
}
// One thread computes the softmax of one (batch, group) slice of length n.
// Threads are indexed over a 2-D grid flattened to batch*groups slices.
__global__ void softmax_kernel(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output)
{
    int tid = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (tid >= batch*groups) return;
    int b = tid / groups;
    int g = tid % groups;
    int offset = b*batch_offset + g*group_offset;
    softmax_device(input + offset, n, temp, stride, output + offset);
}
21,031 | #include "test.cu"
|
21,032 | #include <curand.h>
#include <curand_kernel.h>
#include <math.h>
#include <iostream>
#define ITERATIONS 104000
#define BATCH_SIZE 2097152
#define MAX_REGISTER 5
#define SEED 314159
#define MAX_ZEROES 3
#define MAX_TRANSFERS 7
#define MAX_JUMPS 4
#define MAX_INSTRUCTIONS 150
#define PROGRAM_LINES 10
#define PROGRAM_SIZE (40 * sizeof(unsigned char))
#define BLOCK_SIZE 256
#define TOTAL_BLOCKS (BATCH_SIZE / BLOCK_SIZE)
#define TOTAL_PROGRAMS_MEMORY (PROGRAM_SIZE * BATCH_SIZE * sizeof(unsigned char))
#define TOTAL_RESULTS_MEMORY (BATCH_SIZE * sizeof(unsigned char))
#define TOTAL_RANDOM_STATE_SIZE (BATCH_SIZE * sizeof(curandState))
__global__
// Seed every thread's curand generator with the same SEED but a distinct
// subsequence (its global thread index), so streams are independent yet
// reproducible across runs.
void initialize_states(curandState *states) {
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    curand_init(SEED, tid, 0, &states[tid]);
}
// Inclusive uniform integer in [min, max]. NOTE: refers by name to the local
// `state` variable of the enclosing function — only usable where one exists.
#define random(min, max) ((unsigned char)truncf(curand_uniform(&state) * (max - min + 0.999999f) + min))
// Each thread generates one random register-machine program (4 bytes per
// line: opcode, up to 3 operands; opcodes 0=Z zero, 1=S successor,
// 2=T transfer, 3=J conditional jump), subject to quotas on Z/J/T
// instructions and "no Z on the last line", then executes it for at most
// MAX_INSTRUCTIONS steps and records R1 and the step count iff it halted.
// NOTE(review): parameters `n` and `it` are unused; results[] and
// executedInstructions[] keep stale values from earlier iterations when a
// program does not halt — verify the host loop tolerates that.
__global__
void compute_program(int n, int it, unsigned char *programs, unsigned char *results, curandState *states, unsigned short *executedInstructions) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
// Get local copies of the RNG state and program buffer (registers/local mem).
unsigned char P[PROGRAM_SIZE];
curandState state = states[index];
memcpy(P, &programs[PROGRAM_SIZE * index], PROGRAM_SIZE);
// Generate program: remaining quotas for Z(zero)/J(jump)/T(transfer) ops;
// the opcode range is narrowed as quotas run out, and the final line can
// never be a Z (zeroes is forced to 0 for it).
int zeroes = MAX_ZEROES;
int jumps = MAX_JUMPS;
int transfers = MAX_TRANSFERS;
for (int i = 0; i < PROGRAM_LINES; i++) {
if (i + 1 == PROGRAM_LINES)
zeroes = 0;
if (zeroes == 0) {
if (jumps == 0) {
if (transfers == 0) {
P[i * 4] = 0;
} else {
P[i * 4] = random(1, 2);
}
} else {
if (transfers == 0) {
// Only S (1) or J (3) remain: 1 + {0,1}*2.
P[i * 4] = 1 + random(0, 1) * 2;
} else {
P[i * 4] = random(1, 3);
}
}
} else {
if (jumps == 0) {
if (transfers == 0) {
P[i * 4] = random(0, 2);
} else {
P[i * 4] = random(0, 2);
// Map 2 -> 3
if (P[i * 4] == 2)
P[i * 4]++;
} else {
P[i * 4] = random(0, 3);
}
}
}
// Fill operands: registers are 1-based in the encoding.
if (P[i * 4] == 0) {
zeroes--;
P[i * 4 + 1] = random(1, MAX_REGISTER);
} else if (P[i * 4] == 1) {
P[i * 4 + 1] = random(1, MAX_REGISTER);
} else if (P[i * 4] == 2) {
transfers--;
P[i * 4 + 1] = random(1, MAX_REGISTER);
P[i * 4 + 2] = random(2, MAX_REGISTER);
// Avoid self-transfer by redirecting the destination to R1.
if (P[i * 4 + 1] == P[i * 4 + 2]) {
P[i * 4 + 2] = 1;
}
} else if (P[i * 4] == 3) {
jumps--;
P[i * 4 + 1] = random(1, MAX_REGISTER);
P[i * 4 + 2] = random(1, MAX_REGISTER);
// Jump target may be PROGRAM_LINES + 1, i.e. one past the end = halt.
P[i * 4 + 3] = random(1, PROGRAM_LINES + 1);
}
}
// Execution: ip is the 0-based instruction pointer; leaving [0, PROGRAM_LINES)
// halts, as does exceeding the MAX_INSTRUCTIONS step budget.
unsigned short count = 0;
int ip = 0;
int R[MAX_REGISTER];
for (int i = 0; i < MAX_REGISTER; i++)
R[i] = 0;
while ((0 <= ip && ip < PROGRAM_LINES) && count < MAX_INSTRUCTIONS) {
count++;
int kind = P[ip * 4 + 0];
int p1 = P[ip * 4 + 1];
int p2 = P[ip * 4 + 2];
int p3 = P[ip * 4 + 3];
if (kind == 0)
R[p1 - 1] = 0;
else if (kind == 1)
R[p1 - 1]++;
else if (kind == 2)
R[p2 - 1] = R[p1 - 1];
else if (kind == 3 && (R[p1 - 1] == R[p2 - 1])) {
ip = p3 - 1;   // taken jump: targets are 1-based
continue;
}
ip += 1;
}
// Record the result only for programs that halted within the budget.
if (count < MAX_INSTRUCTIONS) {
results[index] = R[0];
executedInstructions[index] = count;
}
// Reload memory: persist RNG state and the generated program.
states[index] = state;
memcpy(&programs[PROGRAM_SIZE * index], P, PROGRAM_SIZE);
}
// Decode and print each 4-byte instruction of a program:
// Z(r) zero, S(r) successor, T(r1,r2) transfer, J(r1,r2,line) jump-if-equal.
void print_program(unsigned char *program) {
    for (int line = 0; line < PROGRAM_LINES; line++) {
        int kind = program[line * 4 + 0];
        int p1 = program[line * 4 + 1];
        int p2 = program[line * 4 + 2];
        int p3 = program[line * 4 + 3];
        switch (kind) {
        case 0:
            printf("Z(%d)\n", p1);
            break;
        case 1:
            printf("S(%d)\n", p1);
            break;
        case 2:
            printf("T(%d,%d)\n", p1, p2);
            break;
        case 3:
            printf("J(%d,%d,%d)\n", p1, p2, p3);
            break;
        }
    }
}
int main() {
    // Random search over register-machine programs: each iteration generates
    // and runs BATCH_SIZE random programs on the GPU, keeping the program
    // that leaves the largest value in R1 in the fewest executed instructions.
    unsigned char bestProgram[PROGRAM_SIZE];
    unsigned char bestProgramResult = 0;
    unsigned short bestExecutedInstructions = 1000;
    curandState *randomStates;
    unsigned char *programs, *results;
    unsigned short *executedInstructions;
    cudaMalloc(&randomStates, TOTAL_RANDOM_STATE_SIZE);
    cudaMallocManaged(&programs, TOTAL_PROGRAMS_MEMORY);
    cudaMallocManaged(&results, TOTAL_RESULTS_MEMORY);
    cudaMallocManaged(&executedInstructions, BATCH_SIZE * sizeof(unsigned short));
    int device;
    cudaGetDevice(&device);
    initialize_states<<<TOTAL_BLOCKS, BLOCK_SIZE>>>(randomStates);
    for (int i = 0; i < ITERATIONS; i++) {
        // Move the managed buffers to the GPU ahead of the launch to avoid
        // on-demand page faults.
        cudaMemPrefetchAsync(programs, TOTAL_PROGRAMS_MEMORY, device, NULL);
        cudaMemPrefetchAsync(results, TOTAL_RESULTS_MEMORY, device, NULL);
        cudaMemPrefetchAsync(executedInstructions, BATCH_SIZE * sizeof(unsigned short), device, NULL);
        compute_program<<<TOTAL_BLOCKS, BLOCK_SIZE>>>(BATCH_SIZE, i, programs, results, randomStates, executedInstructions);
        cudaDeviceSynchronize();   // results/executedInstructions read on host below
        for (int j = 0; j < BATCH_SIZE; j++) {
            if (results[j] > bestProgramResult || (results[j] == bestProgramResult && executedInstructions[j] < bestExecutedInstructions)) {
                cudaMemcpy(bestProgram, &programs[j * PROGRAM_SIZE], PROGRAM_SIZE, cudaMemcpyDeviceToHost);
                bestProgramResult = results[j];
                bestExecutedInstructions = executedInstructions[j];
                printf("Better program found: %2d in %3d instructions (@%d[%d])\n", bestProgramResult, bestExecutedInstructions, i, j);
                print_program(bestProgram);
            }
        }
    }
    cudaFree(randomStates);
    cudaFree(programs);
    cudaFree(results);
    cudaFree(executedInstructions);  // FIX: this buffer was leaked in the original
}
21,033 | /*
Copyright 2017 the arraydiff authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "common.cuh"
#include <cuda_runtime_api.h>
#include <stdint.h>
/* Broadcast add kernels: [a] . [an] -> [an] . */
/* Forward broadcast add [a] . [an] -> [an]:
 * y[c, b] = x[c, b] + shift[c], channel index c varying fastest.
 * Launched 1-D, one thread per output element. */
__global__ void bcast_add_I1a_I2an_O1an_fwd_f32_kernel(
    uint32_t chan_dim,
    uint32_t batch_sz,
    const float *shift,
    const float *x,
    float *y)
{
  uint32_t gid = threadIdx.x + blockDim.x * blockIdx.x;
  uint32_t c = gid % chan_dim;
  uint32_t b = gid / chan_dim;
  if (c >= chan_dim || b >= batch_sz) {
    return; // grid tail
  }
  y[gid] = x[gid] + shift[c];
}
/* Host launcher: ceil-div grid of 1024-thread blocks on `stream`. */
extern "C" void arraydiff_cuda_kernel_bcast_add_I1a_I2an_O1an_fwd_f32(
    size_t chan_dim,
    size_t batch_sz,
    const float *shift,
    const float *x,
    float *y,
    cudaStream_t stream)
{
  uint32_t total = chan_dim * batch_sz;
  uint32_t blocks = (total + 1023) / 1024;
  bcast_add_I1a_I2an_O1an_fwd_f32_kernel<<<blocks, 1024, 0, stream>>>(
      chan_dim, batch_sz, shift, x, y);
}
/* Accumulating forward broadcast add [a] . [an] -> [an]:
 * y[c, b] += x[c, b] + shift[c], channel index c varying fastest. */
__global__ void bcast_add_I1a_I2an_O1an_fwdaccum_f32_kernel(
    uint32_t chan_dim,
    uint32_t batch_sz,
    const float *shift,
    const float *x,
    float *y)
{
  uint32_t gid = threadIdx.x + blockDim.x * blockIdx.x;
  uint32_t c = gid % chan_dim;
  uint32_t b = gid / chan_dim;
  if (c >= chan_dim || b >= batch_sz) {
    return; // grid tail
  }
  y[gid] += x[gid] + shift[c];
}
/* Host launcher: ceil-div grid of 1024-thread blocks on `stream`. */
extern "C" void arraydiff_cuda_kernel_bcast_add_I1a_I2an_O1an_fwdaccum_f32(
    size_t chan_dim,
    size_t batch_sz,
    const float *shift,
    const float *x,
    float *y,
    cudaStream_t stream)
{
  uint32_t total = chan_dim * batch_sz;
  uint32_t blocks = (total + 1023) / 1024;
  bcast_add_I1a_I2an_O1an_fwdaccum_f32_kernel<<<blocks, 1024, 0, stream>>>(
      chan_dim, batch_sz, shift, x, y);
}
// Deterministic backward reduction for the shift input of the [a].[an] add:
// shift_grad[c] += sum over batch of y_grad[c, b].
// One 1024-thread block per channel; each thread accumulates a strided slice
// of the batch, then threadblock1024_reduce_sum_f32 (from common.cuh) folds
// the partials in shared memory. Deterministic because no atomics are used:
// the summation order is fixed by the round/thread layout.
__global__ void bcast_add_I1a_I2an_O1an_bwd_shift_deterministic_f32_kernel(
uint32_t num_rounds,
uint32_t chan_dim,
uint32_t batch_sz,
const float *y_grad,
float *shift_grad)
{
__shared__ float cache[1024];
uint32_t chan_idx = blockIdx.x;
float shift_grad_acc = 0.0f;
if (chan_idx < chan_dim) {
for (uint32_t round = 0; round < num_rounds; round++) {
uint32_t round_offset = round * blockDim.x;
uint32_t round_idx = round_offset + threadIdx.x;
uint32_t batch_idx = round_idx;
if (batch_idx < batch_sz) {
uint32_t idx = chan_idx + chan_dim * batch_idx;
float dy = y_grad[idx];
shift_grad_acc += dy;
}
}
}
// Every thread writes its partial (0 for inactive threads), so the barrier
// and the block-wide reduction below are reached uniformly.
cache[threadIdx.x] = shift_grad_acc;
__syncthreads();
threadblock1024_reduce_sum_f32(cache);
if (chan_idx < chan_dim) {
if (threadIdx.x == 0) {
shift_grad[blockIdx.x] += cache[0];
}
}
}
// Host launcher: one block per channel; rounds cover the batch in 1024-thread
// strides.
extern "C" void arraydiff_cuda_kernel_bcast_add_I1a_I2an_O1an_bwd_shift_deterministic_f32(
size_t chan_dim,
size_t batch_sz,
const float *y_grad,
float *shift_grad,
cudaStream_t stream)
{
uint32_t num_rounds = (batch_sz + 1024-1) / 1024;
uint32_t num_blocks = chan_dim;
bcast_add_I1a_I2an_O1an_bwd_shift_deterministic_f32_kernel<<<num_blocks, 1024, 0, stream>>>(
num_rounds, chan_dim, batch_sz, y_grad, shift_grad);
}
/* Backward pass to the x input of the [a].[an] add: the add is identity in
 * x, so x_grad[c, b] += y_grad[c, b] elementwise. */
__global__ void bcast_add_I1a_I2an_O1an_bwd_input_f32_kernel(
    uint32_t chan_dim,
    uint32_t batch_sz,
    const float *y_grad,
    float *x_grad)
{
  uint32_t gid = threadIdx.x + blockDim.x * blockIdx.x;
  uint32_t c = gid % chan_dim;
  uint32_t b = gid / chan_dim;
  if (c >= chan_dim || b >= batch_sz) {
    return; // grid tail
  }
  x_grad[gid] += y_grad[gid];
}
/* Host launcher: ceil-div grid of 1024-thread blocks on `stream`. */
extern "C" void arraydiff_cuda_kernel_bcast_add_I1a_I2an_O1an_bwd_input_f32(
    size_t chan_dim,
    size_t batch_sz,
    const float *y_grad,
    float *x_grad,
    cudaStream_t stream)
{
  uint32_t total = chan_dim * batch_sz;
  uint32_t blocks = (total + 1023) / 1024;
  bcast_add_I1a_I2an_O1an_bwd_input_f32_kernel<<<blocks, 1024, 0, stream>>>(
      chan_dim, batch_sz, y_grad, x_grad);
}
/* Broadcast add kernels: [a] . [xyan] -> [xyan] . */
/* Forward broadcast add [a] . [xyan] -> [xyan]:
 * y[p, c, b] = x[p, c, b] + shift[c], with the spatial prefix index p
 * varying fastest, then channel c, then batch b. */
__global__ void bcast_add_I1a_I2xyan_O1xyan_fwd_f32_kernel(
    uint32_t prefix_dim,
    uint32_t chan_dim,
    uint32_t batch_sz,
    const float *shift,
    const float *x,
    float *y)
{
  uint32_t gid = threadIdx.x + blockDim.x * blockIdx.x;
  uint32_t p = gid % prefix_dim;
  uint32_t rest = gid / prefix_dim;
  uint32_t c = rest % chan_dim;
  uint32_t b = rest / chan_dim;
  if (p >= prefix_dim || c >= chan_dim || b >= batch_sz) {
    return; // grid tail
  }
  y[gid] = x[gid] + shift[c];
}
/* Host launcher: ceil-div grid of 1024-thread blocks on `stream`. */
extern "C" void arraydiff_cuda_kernel_bcast_add_I1a_I2xyan_O1xyan_fwd_f32(
    size_t prefix_dim,
    size_t chan_dim,
    size_t batch_sz,
    const float *shift,
    const float *x,
    float *y,
    cudaStream_t stream)
{
  uint32_t total = prefix_dim * chan_dim * batch_sz;
  uint32_t blocks = (total + 1023) / 1024;
  bcast_add_I1a_I2xyan_O1xyan_fwd_f32_kernel<<<blocks, 1024, 0, stream>>>(
      prefix_dim, chan_dim, batch_sz, shift, x, y);
}
/* Accumulating forward broadcast add [a] . [xyan] -> [xyan]:
 * y[p, c, b] += x[p, c, b] + shift[c]. */
__global__ void bcast_add_I1a_I2xyan_O1xyan_fwdaccum_f32_kernel(
    uint32_t prefix_dim,
    uint32_t chan_dim,
    uint32_t batch_sz,
    const float *shift,
    const float *x,
    float *y)
{
  uint32_t gid = threadIdx.x + blockDim.x * blockIdx.x;
  uint32_t p = gid % prefix_dim;
  uint32_t rest = gid / prefix_dim;
  uint32_t c = rest % chan_dim;
  uint32_t b = rest / chan_dim;
  if (p >= prefix_dim || c >= chan_dim || b >= batch_sz) {
    return; // grid tail
  }
  y[gid] += x[gid] + shift[c];
}
/* Host launcher: ceil-div grid of 1024-thread blocks on `stream`. */
extern "C" void arraydiff_cuda_kernel_bcast_add_I1a_I2xyan_O1xyan_fwdaccum_f32(
    size_t prefix_dim,
    size_t chan_dim,
    size_t batch_sz,
    const float *shift,
    const float *x,
    float *y,
    cudaStream_t stream)
{
  uint32_t total = prefix_dim * chan_dim * batch_sz;
  uint32_t blocks = (total + 1023) / 1024;
  bcast_add_I1a_I2xyan_O1xyan_fwdaccum_f32_kernel<<<blocks, 1024, 0, stream>>>(
      prefix_dim, chan_dim, batch_sz, shift, x, y);
}
// Deterministic backward reduction for the shift input of the [a].[xyan] add:
// shift_grad[c] += sum over prefix and batch of y_grad[p, c, b].
// One 1024-thread block per channel; threads sweep the prefix*batch elements
// of that channel in fixed-order rounds, then the block-wide shared-memory
// reduction (threadblock1024_reduce_sum_f32, from common.cuh) folds partials.
// No atomics -> fixed summation order -> bitwise reproducible.
__global__ void bcast_add_I1a_I2xyan_O1xyan_bwd_shift_deterministic_f32_kernel(
uint32_t num_rounds,
uint32_t prefix_dim,
uint32_t chan_dim,
uint32_t batch_sz,
const float *y_grad,
float *shift_grad)
{
__shared__ float cache[1024];
uint32_t chan_idx = blockIdx.x;
float shift_grad_acc = 0.0f;
if (chan_idx < chan_dim) {
for (uint32_t round = 0; round < num_rounds; round++) {
uint32_t round_offset = round * blockDim.x;
uint32_t round_idx = round_offset + threadIdx.x;
uint32_t prefix_idx = round_idx % prefix_dim;
uint32_t batch_idx = round_idx / prefix_dim;
if (prefix_idx < prefix_dim && batch_idx < batch_sz) {
uint32_t idx = prefix_idx + prefix_dim * (chan_idx + chan_dim * batch_idx);
float dy = y_grad[idx];
shift_grad_acc += dy;
}
}
}
// Every thread writes its partial (0 when inactive), keeping the barrier and
// reduction below uniform across the block.
cache[threadIdx.x] = shift_grad_acc;
__syncthreads();
threadblock1024_reduce_sum_f32(cache);
if (chan_idx < chan_dim) {
if (threadIdx.x == 0) {
shift_grad[blockIdx.x] += cache[0];
}
}
}
// Host launcher: one block per channel; rounds cover prefix_dim * batch_sz
// elements in 1024-thread strides.
extern "C" void arraydiff_cuda_kernel_bcast_add_I1a_I2xyan_O1xyan_bwd_shift_deterministic_f32(
size_t prefix_dim,
size_t chan_dim,
size_t batch_sz,
const float *y_grad,
float *shift_grad,
cudaStream_t stream)
{
uint32_t num_rounds = (prefix_dim * batch_sz + 1024-1) / 1024;
uint32_t num_blocks = chan_dim;
bcast_add_I1a_I2xyan_O1xyan_bwd_shift_deterministic_f32_kernel<<<num_blocks, 1024, 0, stream>>>(
num_rounds, prefix_dim, chan_dim, batch_sz, y_grad, shift_grad);
}
/* Backward pass to the x input of the [a].[xyan] add: identity in x, so
 * x_grad += y_grad elementwise. */
__global__ void bcast_add_I1a_I2xyan_O1xyan_bwd_input_f32_kernel(
    uint32_t prefix_dim,
    uint32_t chan_dim,
    uint32_t batch_sz,
    const float *y_grad,
    float *x_grad)
{
  uint32_t gid = threadIdx.x + blockDim.x * blockIdx.x;
  uint32_t p = gid % prefix_dim;
  uint32_t rest = gid / prefix_dim;
  uint32_t c = rest % chan_dim;
  uint32_t b = rest / chan_dim;
  if (p >= prefix_dim || c >= chan_dim || b >= batch_sz) {
    return; // grid tail
  }
  x_grad[gid] += y_grad[gid];
}
/* Host launcher: ceil-div grid of 1024-thread blocks on `stream`. */
extern "C" void arraydiff_cuda_kernel_bcast_add_I1a_I2xyan_O1xyan_bwd_input_f32(
    size_t prefix_dim,
    size_t chan_dim,
    size_t batch_sz,
    const float *y_grad,
    float *x_grad,
    cudaStream_t stream)
{
  uint32_t total = prefix_dim * chan_dim * batch_sz;
  uint32_t blocks = (total + 1023) / 1024;
  bcast_add_I1a_I2xyan_O1xyan_bwd_input_f32_kernel<<<blocks, 1024, 0, stream>>>(
      prefix_dim, chan_dim, batch_sz, y_grad, x_grad);
}
/* Broadcast multiply-add kernels: [a] . [a] . [xyan] -> [xyan]. */
/* Forward broadcast multiply-add [a] . [a] . [xyan] -> [xyan]:
 * y[p, c, b] = scale[c] * x[p, c, b] + shift[c] (per-channel affine). */
__global__ void bcast_mult_add_I1a_I2a_I3xyan_O1xyan_fwd_f32_kernel(
    uint32_t prefix_dim,
    uint32_t chan_dim,
    uint32_t batch_sz,
    const float *scale,
    const float *shift,
    const float *x,
    float *y)
{
  uint32_t gid = threadIdx.x + blockDim.x * blockIdx.x;
  uint32_t p = gid % prefix_dim;
  uint32_t rest = gid / prefix_dim;
  uint32_t c = rest % chan_dim;
  uint32_t b = rest / chan_dim;
  if (p >= prefix_dim || c >= chan_dim || b >= batch_sz) {
    return; // grid tail
  }
  y[gid] = scale[c] * x[gid] + shift[c];
}
/* Host launcher: ceil-div grid of 1024-thread blocks on `stream`. */
extern "C" void arraydiff_cuda_kernel_bcast_mult_add_I1a_I2a_I3xyan_O1xyan_fwd_f32(
    size_t prefix_dim,
    size_t chan_dim,
    size_t batch_sz,
    const float *scale,
    const float *shift,
    const float *x,
    float *y,
    cudaStream_t stream)
{
  uint32_t total = prefix_dim * chan_dim * batch_sz;
  uint32_t blocks = (total + 1023) / 1024;
  bcast_mult_add_I1a_I2a_I3xyan_O1xyan_fwd_f32_kernel<<<blocks, 1024, 0, stream>>>(
      prefix_dim, chan_dim, batch_sz, scale, shift, x, y);
}
// Deterministic backward reduction for BOTH per-channel parameters of the
// multiply-add y = scale[c]*x + shift[c]:
//   scale_grad[c] += sum over prefix,batch of y_grad * x
//   shift_grad[c] += sum over prefix,batch of y_grad
// One 1024-thread block per channel. The shared cache is reused for the two
// block-wide reductions; the __syncthreads() between them prevents the second
// store from racing the first reduction's reads. No atomics -> fixed
// summation order -> reproducible.
__global__ void bcast_mult_add_I1a_I2a_I3xyan_O1xyan_bwd_scale_shift_deterministic_f32_kernel(
uint32_t num_rounds,
uint32_t prefix_dim,
uint32_t chan_dim,
uint32_t batch_sz,
const float *x,
const float *y_grad,
float *scale_grad,
float *shift_grad)
{
__shared__ float cache[1024];
uint32_t chan_idx = blockIdx.x;
float scale_grad_acc = 0.0f;
float shift_grad_acc = 0.0f;
if (chan_idx < chan_dim) {
for (uint32_t round = 0; round < num_rounds; round++) {
uint32_t round_offset = round * blockDim.x;
uint32_t round_idx = round_offset + threadIdx.x;
uint32_t prefix_idx = round_idx % prefix_dim;
uint32_t batch_idx = round_idx / prefix_dim;
if (prefix_idx < prefix_dim && batch_idx < batch_sz) {
uint32_t idx = prefix_idx + prefix_dim * (chan_idx + chan_dim * batch_idx);
float dy = y_grad[idx];
scale_grad_acc += dy * x[idx];
shift_grad_acc += dy;
}
}
}
// First reduction: scale gradient.
cache[threadIdx.x] = scale_grad_acc;
__syncthreads();
threadblock1024_reduce_sum_f32(cache);
if (chan_idx < chan_dim) {
if (threadIdx.x == 0) {
scale_grad[blockIdx.x] += cache[0];
}
}
// Barrier before reusing the cache for the second reduction.
__syncthreads();
cache[threadIdx.x] = shift_grad_acc;
__syncthreads();
threadblock1024_reduce_sum_f32(cache);
if (chan_idx < chan_dim) {
if (threadIdx.x == 0) {
shift_grad[blockIdx.x] += cache[0];
}
}
}
// Host launcher: one block per channel.
// NOTE(review): the exported name says "bwd_shift" although the kernel also
// reduces scale_grad — presumably a historical naming artifact; callers
// depend on this exact symbol, so it is kept as-is.
extern "C" void arraydiff_cuda_kernel_bcast_mult_add_I1a_I2a_I3xyan_O1xyan_bwd_shift_deterministic_f32(
size_t prefix_dim,
size_t chan_dim,
size_t batch_sz,
const float *x,
const float *y_grad,
float *scale_grad,
float *shift_grad,
cudaStream_t stream)
{
uint32_t num_rounds = (prefix_dim * batch_sz + 1024-1) / 1024;
uint32_t num_blocks = chan_dim;
bcast_mult_add_I1a_I2a_I3xyan_O1xyan_bwd_scale_shift_deterministic_f32_kernel<<<num_blocks, 1024, 0, stream>>>(
num_rounds, prefix_dim, chan_dim, batch_sz, x, y_grad, scale_grad, shift_grad);
}
/* Backward pass to the x input of the per-channel multiply-add:
 * d/dx of scale[c]*x + shift[c] is scale[c], so
 * x_grad[p, c, b] += scale[c] * y_grad[p, c, b]. */
__global__ void bcast_mult_add_I1a_I2a_I3xyan_O1xyan_bwd_input_f32_kernel(
    uint32_t prefix_dim,
    uint32_t chan_dim,
    uint32_t batch_sz,
    const float *scale,
    const float *y_grad,
    float *x_grad)
{
  uint32_t gid = threadIdx.x + blockDim.x * blockIdx.x;
  uint32_t p = gid % prefix_dim;
  uint32_t rest = gid / prefix_dim;
  uint32_t c = rest % chan_dim;
  uint32_t b = rest / chan_dim;
  if (p >= prefix_dim || c >= chan_dim || b >= batch_sz) {
    return; // grid tail
  }
  x_grad[gid] += scale[c] * y_grad[gid];
}
/* Host launcher: ceil-div grid of 1024-thread blocks on `stream`. */
extern "C" void arraydiff_cuda_kernel_bcast_mult_add_I1a_I2a_I3xyan_O1xyan_bwd_input_f32(
    size_t prefix_dim,
    size_t chan_dim,
    size_t batch_sz,
    const float *scale,
    const float *y_grad,
    float *x_grad,
    cudaStream_t stream)
{
  uint32_t total = prefix_dim * chan_dim * batch_sz;
  uint32_t blocks = (total + 1023) / 1024;
  bcast_mult_add_I1a_I2a_I3xyan_O1xyan_bwd_input_f32_kernel<<<blocks, 1024, 0, stream>>>(
      prefix_dim, chan_dim, batch_sz, scale, y_grad, x_grad);
}
/* Broadcast add kernels: [an] . [xyan] -> [xyan]. */
/* Forward broadcast add [an] . [xyan] -> [xyan]:
 * y[p, c, b] = x[p, c, b] + shift[c, b] — the shift varies per channel AND
 * per batch item, broadcast only over the spatial prefix. */
__global__ void bcast_add_I1an_I2xyan_O1xyan_fwd_f32_kernel(
    uint32_t prefix_dim,
    uint32_t chan_dim,
    uint32_t batch_sz,
    const float *shift,
    const float *x,
    float *y)
{
  uint32_t gid = threadIdx.x + blockDim.x * blockIdx.x;
  uint32_t p = gid % prefix_dim;
  uint32_t rest = gid / prefix_dim;
  uint32_t c = rest % chan_dim;
  uint32_t b = rest / chan_dim;
  if (p >= prefix_dim || c >= chan_dim || b >= batch_sz) {
    return; // grid tail
  }
  uint32_t shift_idx = c + chan_dim * b;
  y[gid] = x[gid] + shift[shift_idx];
}
/* Host launcher: ceil-div grid of 1024-thread blocks on `stream`. */
extern "C" void arraydiff_cuda_kernel_bcast_add_I1an_I2xyan_O1xyan_fwd_f32(
    size_t prefix_dim,
    size_t chan_dim,
    size_t batch_sz,
    const float *shift,
    const float *x,
    float *y,
    cudaStream_t stream)
{
  uint32_t total = prefix_dim * chan_dim * batch_sz;
  uint32_t blocks = (total + 1023) / 1024;
  bcast_add_I1an_I2xyan_O1xyan_fwd_f32_kernel<<<blocks, 1024, 0, stream>>>(
      prefix_dim, chan_dim, batch_sz, shift, x, y);
}
/* Deterministic backward pass w.r.t. the shift of the broadcast add:
   shift_grad[c,b] += sum over prefix of y_grad[p,c,b].
   One block per (chan, batch) pair; each block strides over the prefix
   dimension in `num_rounds` rounds of blockDim.x elements, accumulates a
   per-thread partial, then does a block-wide shared-memory reduction.
   Determinism comes from the fixed reduction order (no atomics).
   Must be launched with exactly 1024 threads per block (cache size). */
__global__ void bcast_add_I1an_I2xyan_O1xyan_bwd_shift_deterministic_f32_kernel(
    uint32_t num_rounds,
    uint32_t prefix_dim,
    uint32_t chan_dim,
    uint32_t batch_sz,
    const float *y_grad,
    float *shift_grad)
{
  __shared__ float cache[1024];
  // Decompose the block index into its (chan, batch) coordinates.
  uint32_t chan_idx = blockIdx.x % chan_dim;
  uint32_t batch_idx = blockIdx.x / chan_dim;
  float shift_grad_acc = 0.0f;
  if (chan_idx < chan_dim && batch_idx < batch_sz) {
    // Per-thread partial sum over this block's slice of the prefix axis.
    for (uint32_t round = 0; round < num_rounds; round++) {
      uint32_t round_offset = round * blockDim.x;
      //uint32_t block_dim = min(blockDim.x, prefix_dim - round_offset);
      uint32_t round_idx = round_offset + threadIdx.x;
      uint32_t prefix_idx = round_idx;
      if (prefix_idx < prefix_dim) {
        uint32_t idx = prefix_idx + prefix_dim * (chan_idx + chan_dim * batch_idx);
        float dy = y_grad[idx];
        shift_grad_acc += dy;
      }
    }
  }
  // All 1024 threads (including out-of-range blocks' threads, which hold 0)
  // must write and hit the barrier before the block reduction.
  cache[threadIdx.x] = shift_grad_acc;
  __syncthreads();
  // NOTE(review): assumed to reduce cache[0..1023] into cache[0] with its own
  // internal synchronization -- defined elsewhere in this file; confirm.
  threadblock1024_reduce_sum_f32(cache);
  if (chan_idx < chan_dim && batch_idx < batch_sz) {
    // Thread 0 accumulates the block's total into the [chan, batch] slot.
    if (threadIdx.x == 0) {
      shift_grad[blockIdx.x] += cache[0];
    }
  }
}
/* Host launcher for the deterministic shift-gradient kernel: one 1024-thread
   block per (chan, batch) pair; each block sweeps the prefix axis in
   ceil(prefix_dim/1024) rounds. */
extern "C" void arraydiff_cuda_kernel_bcast_add_I1an_I2xyan_O1xyan_bwd_shift_deterministic_f32(
    size_t prefix_dim,
    size_t chan_dim,
    size_t batch_sz,
    const float *y_grad,
    float *shift_grad,
    cudaStream_t stream)
{
  const uint32_t block_dim = 1024;
  const uint32_t rounds = (prefix_dim + block_dim - 1) / block_dim;  // ceil-div
  const uint32_t blocks = chan_dim * batch_sz;
  bcast_add_I1an_I2xyan_O1xyan_bwd_shift_deterministic_f32_kernel<<<blocks, block_dim, 0, stream>>>(
      rounds, prefix_dim, chan_dim, batch_sz, y_grad, shift_grad);
}
/* Backward pass of the broadcast add w.r.t. the input: since y = x + shift,
   x_grad simply accumulates y_grad elementwise over [prefix, chan, batch]. */
__global__ void bcast_add_I1an_I2xyan_O1xyan_bwd_input_f32_kernel(
    uint32_t prefix_dim,
    uint32_t chan_dim,
    uint32_t batch_sz,
    const float *y_grad,
    float *x_grad)
{
  const uint32_t idx = threadIdx.x + blockDim.x * blockIdx.x;
  const uint32_t prefix_idx = idx % prefix_dim;
  const uint32_t rest = idx / prefix_dim;
  const uint32_t chan_idx = rest % chan_dim;
  const uint32_t batch_idx = rest / chan_dim;
  // Guard the tail of the grid.
  if (prefix_idx >= prefix_dim || chan_idx >= chan_dim || batch_idx >= batch_sz)
    return;
  x_grad[idx] += y_grad[idx];
}
/* Host launcher for the input-gradient kernel of the broadcast add:
   one thread per element, 1024-thread blocks, on `stream`. */
extern "C" void arraydiff_cuda_kernel_bcast_add_I1an_I2xyan_O1xyan_bwd_input_f32(
    size_t prefix_dim,
    size_t chan_dim,
    size_t batch_sz,
    const float *y_grad,
    float *x_grad,
    cudaStream_t stream)
{
  const uint32_t block_dim = 1024;
  const uint32_t total = prefix_dim * chan_dim * batch_sz;
  const uint32_t grid_dim = (total + block_dim - 1) / block_dim;  // ceil-div
  bcast_add_I1an_I2xyan_O1xyan_bwd_input_f32_kernel<<<grid_dim, block_dim, 0, stream>>>(
      prefix_dim, chan_dim, batch_sz, y_grad, x_grad);
}
|
21,034 | #include "includes.h"
/* Elementwise vector addition: c[i] = a[i] + b[i] for i in [0, size). */
__global__ void addKernel(int* c, const int* a, const int* b, int size) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    // The grid may contain more threads than elements; guard the tail so we
    // never read or write past the end of the arrays.
    if (i >= size)
        return;
    c[i] = a[i] + b[i];
}
21,035 | #include "includes.h"
/* Zeroes every element of a height x length row-major buffer in place.
   NOTE(review): despite the name, no Bartlett (triangular) window weights
   are computed here -- the buffer is simply cleared. Confirm intent. */
__global__ void windowBartlett2d(float* idata, int length, int height)
{
    const int col = threadIdx.x + blockIdx.x * blockDim.x;
    const int row = threadIdx.y + blockIdx.y * blockDim.y;
    if (col >= length || row >= height)
        return;
    idata[row * length + col] = 0;
}
21,036 | #include "Constants.cu"
__global__ void CUDA_global_selfProd(svm_precision* b_selfProd, svm_precision* b_inputData);
__global__ void CUDA_global_test(svm_precision* b_output, svm_precision* b_selfProd, svm_precision* b_inputData, svm_precision* b_class, svm_precision* b_alpha);
__global__ void CUDA_global_errorCacheUpdate(svm_precision* b_output, svm_precision* b_selfProd, svm_precision* b_inputData);
__global__ void CUDA_global_SVMOutput(svm_precision* b_output, svm_precision* b_selfProd, svm_precision* b_inputData, svm_precision* b_class, svm_precision* b_alpha);
__host__ void CUDA_host_updateConstantBuffer(constantBuffer* src);
/* C-linkage shim: forwards an opaque pointer to the typed host-side
   constant-buffer updater. */
extern "C" void CUDA_updateConstantBuffer(void* src){
	constantBuffer* typed = (constantBuffer*)src;
	CUDA_host_updateConstantBuffer(typed);
}
/* Launches the self-product kernel with one worker per requested thread.
   Grid size rounds up via float divide + 1, matching the convention used by
   every CUDA_* entry point in this file. */
extern "C" void CUDA_selfProd(unsigned int num_threads,svm_precision* b_selfProd, svm_precision* b_inputData){
	dim3 threads(thread_group_size, 1, 1);
	dim3 grid(int(svm_precision(num_threads)/thread_group_size)+1, 1, 1);
	CUDA_global_selfProd<<< grid, threads >>>(b_selfProd, b_inputData);
}
/* Launches the test-instance evaluation kernel; same launch convention as
   the other CUDA_* wrappers. */
extern "C" void CUDA_testInstances(unsigned int num_threads, svm_precision* b_output, svm_precision* b_selfProd, svm_precision* b_inputData, svm_precision* b_class, svm_precision* b_alpha){
	dim3 threads(thread_group_size, 1, 1);
	dim3 grid(int(svm_precision(num_threads)/thread_group_size)+1, 1, 1);
	CUDA_global_test<<< grid, threads >>>(b_output, b_selfProd, b_inputData, b_class, b_alpha);
}
/* Launches the error-cache update kernel (Lagrange multiplier step); same
   launch convention as the other CUDA_* wrappers. */
extern "C" void CUDA_lagrangeUpdate(unsigned int num_threads, svm_precision* b_output, svm_precision* b_selfProd, svm_precision* b_inputData){
	dim3 threads(thread_group_size, 1, 1);
	dim3 grid(int(svm_precision(num_threads)/thread_group_size)+1, 1, 1);
	CUDA_global_errorCacheUpdate<<< grid, threads >>>(b_output, b_selfProd, b_inputData);
}
/* Launches the SVM output-evaluation kernel; same launch convention as the
   other CUDA_* wrappers. */
extern "C" void CUDA_SVMOutput(unsigned int num_threads, svm_precision* b_output, svm_precision* b_selfProd, svm_precision* b_inputData, svm_precision* b_class, svm_precision* b_alpha){
	dim3 threads(thread_group_size, 1, 1);
	dim3 grid(int(svm_precision(num_threads)/thread_group_size)+1, 1, 1);
	CUDA_global_SVMOutput<<< grid, threads >>>(b_output, b_selfProd, b_inputData, b_class, b_alpha);
}
21,037 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#define ROW 6
#define N (6*6) //2048*2048
#define THREADS_PER_BLOCK (ROW/2) //1024
#define RADIUS 3
#define BLOCK_SIZE (THREADS_PER_BLOCK-2*RADIUS)
// forward declaration
__global__ void dila(int *in, int *out);
/* Fills mat[0..n) with values drawn from {0, 10}.
   Seeds from wall-clock time on every call, so two calls within the same
   second will repeat the same sequence. */
void random_ints(int * mat, int n) {
	srand(time(0));
	for (int i = 0; i < n; ++i)
		mat[i] = 10 * (rand() % 2);
}
/* Prints mat as a ROW x ROW matrix, one row per line.
   NOTE(review): the `n` parameter is ignored; dimensions come entirely from
   the ROW macro -- callers pass N (total elements) which is never used. */
void printMatrix(int * mat, int n) {
	for (int r = 0; r < ROW; ++r) {
		for (int c = 0; c < ROW; ++c)
			printf("%d ", mat[r * ROW + c]);
		printf("\n");
	}
}
/* Driver: generates a random 0/10 matrix, runs the `dila` dilation kernel
   over it on the GPU, and prints input and output. */
int main(void) {
	const int size = N * sizeof(int);
	// Host buffers.
	int *in = (int *)malloc(size);
	int *out = (int *)malloc(size);
	random_ints(in, N);
	printf("IN\n");
	printMatrix(in, N);
	// Device buffers + upload.
	int *d_in, *d_out;
	cudaMalloc((void **)&d_in, size);
	cudaMalloc((void **)&d_out, size);
	cudaMemcpy(d_in, in, size, cudaMemcpyHostToDevice);
	// One thread per element (N divides evenly by THREADS_PER_BLOCK here).
	dila<<<N / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(d_in, d_out);
	// Blocking copy also synchronizes with the kernel.
	cudaMemcpy(out, d_out, size, cudaMemcpyDeviceToHost);
	printf("OUTZ\n");
	printMatrix(out, N);
	// Cleanup.
	free(in); free(out);
	cudaFree(d_in); cudaFree(d_out);
	return 0;
}
/* 1-D grayscale dilation of radius RADIUS: out[i] = max of in[i-R..i+R].
   Each block stages its elements plus a RADIUS-wide halo on each side in
   shared memory, then every thread takes the max over its window.
   NOTE(review): with the current macros (THREADS_PER_BLOCK = ROW/2 = 3,
   BLOCK_SIZE = THREADS_PER_BLOCK - 2*RADIUS = -3) the shared tile is smaller
   than lindex can reach -- the launch configuration itself needs review. */
__global__ void dila(int *in, int *out) {
	__shared__ float temp[BLOCK_SIZE + 2 * RADIUS];
	int gindex = threadIdx.x + blockIdx.x * blockDim.x;
	int lindex = threadIdx.x + RADIUS;
	// 1. Stage this thread's element, plus (for the first RADIUS threads)
	//    the left and right halos.
	//    Bug fix: the halo loads read in[gindex - RADIUS] and
	//    in[gindex + BLOCK_SIZE] unconditionally, which is out of bounds for
	//    the first and last blocks; clamp those reads to 0 (the identity for
	//    a max over the non-negative {0, 10} data used here).
	temp[lindex] = in[gindex];
	if (threadIdx.x < RADIUS) {
		temp[lindex - RADIUS] = (gindex >= RADIUS) ? in[gindex - RADIUS] : 0;
		temp[lindex + BLOCK_SIZE] =
			(gindex + BLOCK_SIZE < N) ? in[gindex + BLOCK_SIZE] : 0;
	}
	__syncthreads();
	// 2. Dilation: max over the (2*RADIUS + 1)-wide neighborhood.
	float result = 0.0f;  // identity element, assuming non-negative inputs
	for (int offset = -RADIUS; offset <= RADIUS; offset++) {
		result = fmaxf(result, temp[lindex + offset]);
	}
	// 3. Write the result back to global memory.
	out[gindex] = result;
}
|
21,038 | // RUN: %run_test hipify "%s" "%t" %hipify_args %clang_args
#include <iostream>
// CHECK: #include <hip/hip_runtime.h>
#include <cuda.h>
#define TOKEN_PASTE(X, Y) X ## Y
#define ARG_LIST_AS_MACRO a, device_x, device_y
#define KERNEL_CALL_AS_MACRO axpy<float><<<1, kDataLen>>>
#define KERNEL_NAME_MACRO axpy<float>
// CHECK: #define COMPLETE_LAUNCH hipLaunchKernelGGL(axpy, dim3(1), dim3(kDataLen), 0, 0, a, device_x, device_y)
#define COMPLETE_LAUNCH axpy<<<1, kDataLen>>>(a, device_x, device_y)
template<typename T>
// Scales x into y: y[i] = a * x[i] (note: no "+ y" term despite the axpy
// name -- this is part of the hipify test fixture, so the text is the spec).
// Launched with a single block of kDataLen threads; no bounds check needed.
__global__ void axpy(T a, T *x, T *y) {
y[threadIdx.x] = a * x[threadIdx.x];
}
// hipify lit-test driver: exercises many preprocessor-obscured kernel-launch
// spellings; the `// CHECK:` comments are FileCheck expectations, so the
// exact source text below is the test contract -- do not reformat.
int main(int argc, char* argv[]) {
const int kDataLen = 4;
float a = 2.0f;
float host_x[kDataLen] = {1.0f, 2.0f, 3.0f, 4.0f};
float host_y[kDataLen];
// Copy input data to device.
float* device_x;
float* device_y;
// CHECK: hipMalloc(&device_x, kDataLen * sizeof(float));
cudaMalloc(&device_x, kDataLen * sizeof(float));
#ifdef HERRING
// CHECK: hipMalloc(&device_y, kDataLen * sizeof(float));
cudaMalloc(&device_y, kDataLen * sizeof(float));
#else
// NOTE(review): sizeof(double) for a float* buffer looks mismatched but is
// presumably deliberate here -- the CHECK line pins the same text; confirm.
// CHECK: hipMalloc(&device_y, kDataLen * sizeof(double));
cudaMalloc(&device_y, kDataLen * sizeof(double));
#endif
// CHECK: hipMemcpy(device_x, host_x, kDataLen * sizeof(float), hipMemcpyHostToDevice);
cudaMemcpy(device_x, host_x, kDataLen * sizeof(float), cudaMemcpyHostToDevice);
// Launch the kernel in numerous different strange ways to exercise the prerocessor.
// CHECK: hipLaunchKernelGGL(axpy, dim3(1), dim3(kDataLen), 0, 0, a, device_x, device_y);
axpy<<<1, kDataLen>>>(a, device_x, device_y);
// CHECK: hipLaunchKernelGGL(axpy<float>, dim3(1), dim3(kDataLen), 0, 0, a, device_x, device_y);
axpy<float><<<1, kDataLen>>>(a, device_x, device_y);
// CHECK: hipLaunchKernelGGL(axpy<float>, dim3(1), dim3(kDataLen), 0, 0, a, TOKEN_PASTE(device, _x), device_y);
axpy<float><<<1, kDataLen>>>(a, TOKEN_PASTE(device, _x), device_y);
// CHECK: hipLaunchKernelGGL(axpy<float>, dim3(1), dim3(kDataLen), 0, 0, ARG_LIST_AS_MACRO);
axpy<float><<<1, kDataLen>>>(ARG_LIST_AS_MACRO);
// CHECK: hipLaunchKernelGGL(KERNEL_NAME_MACRO, dim3(1), dim3(kDataLen), 0, 0, ARG_LIST_AS_MACRO);
KERNEL_NAME_MACRO<<<1, kDataLen>>>(ARG_LIST_AS_MACRO);
// CHECK: hipLaunchKernelGGL(axpy<float>, dim3(1), dim3(kDataLen), 0, 0, ARG_LIST_AS_MACRO);
KERNEL_CALL_AS_MACRO(ARG_LIST_AS_MACRO);
// CHECK: COMPLETE_LAUNCH;
COMPLETE_LAUNCH;
// Copy output data to host.
// CHECK: hipDeviceSynchronize();
cudaDeviceSynchronize();
// CHECK: hipMemcpy(host_y, device_y, kDataLen * sizeof(float), hipMemcpyDeviceToHost);
cudaMemcpy(host_y, device_y, kDataLen * sizeof(float), cudaMemcpyDeviceToHost);
// Print the results.
for (int i = 0; i < kDataLen; ++i) {
std::cout << "y[" << i << "] = " << host_y[i] << "\n";
}
// CHECK: hipDeviceReset();
cudaDeviceReset();
return 0;
}
|
21,039 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <iostream>
#include <cuda.h>
/* GPU box (mean) filter: each thread averages the window_size x window_size
   neighborhood around its (row, col) pixel, clamping the window at the image
   borders, and writes the truncated mean to filtered_image.
   Launch with a 2-D grid covering img_width x img_height. */
__global__ void mean_filter_gpu(unsigned char* input_image, int img_height, int img_width,unsigned char* filtered_image, int window_size){
	int col = blockIdx.x * blockDim.x + threadIdx.x;
	int row = blockIdx.y * blockDim.y + threadIdx.y;
	// Window radius; window_size is assumed odd (3, 5, ...).
	int half = (window_size - 1) / 2;
	if (row < img_height && col < img_width) {
		// Clamp the window to the image bounds so edge pixels average over
		// a smaller neighborhood instead of reading out of bounds.
		int top_margin    = (row - half < 0) ? 0 : row - half;
		int bottom_margin = (row + half <= img_height - 1) ? row + half : img_height - 1;
		int left_margin   = (col - half >= 0) ? col - half : 0;
		int right_margin  = (col + half <= img_width - 1) ? col + half : img_width - 1;
		// Bug fix: `val` was read uninitialized (undefined behavior).
		double val = 0.0;
		for (int m = top_margin; m <= bottom_margin; m++) {
			for (int n = left_margin; n <= right_margin; n++) {
				// Bug fix: the row stride of a row-major image is img_width
				// (number of columns), not img_height; the original only
				// worked because the benchmark images are square.
				val += input_image[m * img_width + n];
			}
		}
		int cal_window_size = (bottom_margin - top_margin + 1) * (right_margin - left_margin + 1);
		// Mean, truncated on store to unsigned char.
		filtered_image[row * img_width + col] = val / cal_window_size;
	}
}
/* CPU reference box (mean) filter, mirroring mean_filter_gpu: every output
   pixel is the truncated mean of its window_size x window_size neighborhood,
   clamped at the image borders. Used to validate and time against the GPU. */
void mean_filter_cpu(unsigned char* input_image, int img_height,int img_width,unsigned char* filtered_image, int window_size){
	// Window radius; window_size is assumed odd (3, 5, ...).
	int half = (window_size - 1) / 2;
	for (int row = 0; row < img_height; row++) {
		// Clamp the window rows to the image.
		int top_margin    = (row - half < 0) ? 0 : row - half;
		int bottom_margin = (row + half <= img_height - 1) ? row + half : img_height - 1;
		for (int col = 0; col < img_width; col++) {
			// Clamp the window columns to the image.
			int left_margin  = (col - half >= 0) ? col - half : 0;
			int right_margin = (col + half <= img_width - 1) ? col + half : img_width - 1;
			// Bug fix: `val` was read uninitialized (undefined behavior).
			double val = 0.0;
			for (int m = top_margin; m <= bottom_margin; m++) {
				for (int n = left_margin; n <= right_margin; n++) {
					// Bug fix: row stride is img_width (columns per row),
					// not img_height; the two only coincide for square images.
					val += input_image[m * img_width + n];
				}
			}
			int cal_window_size = (bottom_margin - top_margin + 1) * (right_margin - left_margin + 1);
			// Mean, truncated on store to unsigned char.
			filtered_image[row * img_width + col] = val / cal_window_size;
		}
	}
}
/* Benchmark driver: mean-filters a 1280x1280 8-bit image on both GPU and
   CPU, averaging the runtimes over 30 iterations. */
int main(int argc, char *argv[]) {
	int img_width = 1280;
	int img_height = 1280;
	int img_size = img_width * img_height;
	int window_size = 5; // Assign variables as required for image resolution, and window size (640x640, 1280x1280, 3, 5)
	// Read the image. Bug fix: fopen was never checked, so a missing file
	// crashed inside fread instead of failing cleanly.
	FILE* f = fopen("image_1280x1280.bmp", "rb"); // Provide an image out of two (image_640x640.bmp, image_1280x1280.bmp)
	if (f == NULL) {
		fprintf(stderr, "failed to open input image\n");
		return 1;
	}
	// Host-side buffers.
	unsigned char* input_img_cpu = new unsigned char[img_size];
	unsigned char* filtered_output_img_cpu = new unsigned char[img_size];
	unsigned char* mean_device_image = new unsigned char[img_size];
	// Device-side buffers (allocated per iteration below).
	unsigned char* input_img_gpu;
	unsigned char* filtered_output_img_gpu;
	fread(input_img_cpu, sizeof(unsigned char), img_size, f);
	fclose(f); // Close image reading
	// 32x32 thread blocks tiling the (square) image.
	int block_size = 32;
	int grid_size = img_width / block_size;
	dim3 dimBlock(block_size, block_size, 1);
	dim3 dimGrid(grid_size, grid_size, 1);
	int total_iterations = 30; // Run code 30 times and get average time
	double total_cpu_time = 0;
	double total_gpu_time = 0;
	for (int current_iteration = 0; current_iteration < total_iterations; current_iteration++) {
		// Allocate device memory and upload the raw image.
		cudaMalloc((void **)&input_img_gpu, img_size * sizeof(unsigned char));
		cudaMalloc((void **)&filtered_output_img_gpu, img_size * sizeof(unsigned char));
		cudaMemcpy(input_img_gpu, input_img_cpu, img_size * sizeof(unsigned char), cudaMemcpyHostToDevice);
		// Time the GPU run; kernel launches are asynchronous, so a sync is
		// required before reading the clock. Bug fix: cudaThreadSynchronize
		// is deprecated -- use cudaDeviceSynchronize.
		clock_t start_d = clock();
		mean_filter_gpu <<< dimGrid, dimBlock >>> (input_img_gpu, img_height, img_width, filtered_output_img_gpu, window_size);
		cudaDeviceSynchronize();
		clock_t end_d = clock();
		// Time the CPU reference run.
		clock_t start_h = clock();
		mean_filter_cpu(input_img_cpu, img_height, img_width, filtered_output_img_cpu, window_size);
		clock_t end_h = clock();
		cudaMemcpy(mean_device_image, filtered_output_img_gpu, img_size * sizeof(unsigned char), cudaMemcpyDeviceToHost);
		total_gpu_time += (double)(end_d - start_d) / CLOCKS_PER_SEC;
		total_cpu_time += (double)(end_h - start_h) / CLOCKS_PER_SEC;
		// Free device memory each iteration (kept from the original timing
		// methodology, though allocating once outside the loop would be faster).
		cudaFree(input_img_gpu);
		cudaFree(filtered_output_img_gpu);
	}
	printf("Average GPU Time: %f\n", (total_gpu_time / total_iterations));
	printf("Average CPU Time: %f\n", (total_cpu_time / total_iterations));
	// Bug fix: the new[]-allocated host buffers were leaked.
	delete[] input_img_cpu;
	delete[] filtered_output_img_cpu;
	delete[] mean_device_image;
	return 0;
}
|
21,040 | #include "includes.h"
// Gradient of a per-pixel softmax against bounding-box-derived targets.
// For each element i of a [size, width, height, depth] tensor (grid-stride
// loop), it maps the pixel back into source-image coordinates, counts how
// many of that image's boxes cover the pixel and how many of those carry the
// pixel's depth as label, and emits grad = mat - target where target is the
// covered fraction (or a background indicator when no box covers the pixel).
// NOTE(review): the decomposition order below implies the layout is
// image-major fastest (i % size first) -- confirm against the caller.
__global__ void kBoundingBoxSoftMaxGrad( float* mat, int* bbox, int* label, int* seg, float* indices, float* width_offset, float* height_offset, int size, int width, int height, int depth, float scale_width, float scale_height, float* grad) {
const unsigned int len = width * height * depth * size;
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
int ind, image_id, source_depth, x1, y1, x2, y2, start,
end, src_image_id, num_bboxes, num_bboxes_of_this_depth, box_id, inside;
float source_x, source_y;
// Grid-stride loop over every tensor element.
for (unsigned int i = idx; i < len; i += numThreads) {
// Decompose the flat index into (image, x, y, depth) coordinates,
// scaling x/y back into source-image space.
ind = i;
image_id = ind % size; ind /= size;
source_x = scale_width * (ind % width); ind /= width;
source_y = scale_height * (ind % height); ind /= height;
source_depth = ind % depth;
// indices[] maps this batch slot to its source image; seg[] delimits that
// image's bbox range as [start, end).
src_image_id = (int)indices[image_id];
start = seg[src_image_id];
end = seg[src_image_id + 1];
num_bboxes = 0;
num_bboxes_of_this_depth = 0;
// Count boxes covering this pixel (boxes are stored as 4 ints each,
// hence the << 2 indexing), shifted by the per-image crop offsets.
for (box_id = start; box_id < end; box_id++) {
x1 = bbox[box_id << 2] - width_offset[image_id];
y1 = bbox[(box_id << 2) + 1] - height_offset[image_id];
x2 = bbox[(box_id << 2) + 2] - width_offset[image_id];
y2 = bbox[(box_id << 2) + 3] - height_offset[image_id];
inside = (source_x >= x1 && source_x <= x2 && source_y >= y1 && source_y <= y2) ? 1:0;
num_bboxes += inside;
num_bboxes_of_this_depth += (inside == 1 && label[box_id] == source_depth) ? 1: 0;
}
// Target: fraction of covering boxes labeled with this depth; if nothing
// covers the pixel, depth 0 acts as the background class (target 1).
grad[i] = mat[i] - ((num_bboxes > 0) ? ((float)num_bboxes_of_this_depth / num_bboxes) : (source_depth == 0 ? 1:0));
}
}
21,041 | #include "includes.h"
/* Scales n consecutive 3-vectors: result[3i .. 3i+2] = arr[3i .. 3i+2] * factor[i].
   const_arr1 / const_arr2 are referenced only by the disabled debug printfs. */
__global__ void arrayTest(int n, long *factor, long *arr, long *result, int *const_arr1, long *const_arr2)
{
	const int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i == 0) {
		// Disabled debug output for inspecting pointers and constant arrays.
		/*
		printf("In ArrayTest n=%d factor=%p arr=%p result=%p \n",n,factor,arr,result);
		printf("In const %d %d %d\n",const_arr1[0],const_arr1[1],const_arr1[2]);
		printf("In const %ld %ld %ld\n",const_arr2[0],const_arr2[1],const_arr2[2]);
		*/
	}
	if (i < n)
	{
		const int base = i * 3;
		const long f = factor[i];
		result[base] = arr[base] * f;
		result[base + 1] = arr[base + 1] * f;
		result[base + 2] = arr[base + 2] * f;
		/*
		printf("ArrayTest [%ld] * [%ld %ld %ld] = [%ld %ld %ld] \n", factor[i],
		arr[idx],arr[idx+1],arr[idx+2],
		result[idx],result[idx+1],result[idx+2]);
		*/
	}
}
21,042 | #include "includes.h"
/* Matrix transpose: c = a^T, where c is cr x cc row-major (so a is cc x cr).
   One thread per output element; launch with a 2-D grid covering cc x cr.
   Bug fix: the original looped i over all columns, writing c[y*cc + x+i] =
   a[x*cc + y+i] -- overlapping threads overwrote each other's elements and
   `a` was indexed with the wrong stride. A transpose needs exactly one read
   and one write per thread. */
__global__ void matrixTranspose(double *a, double *c, int cr, int cc){
	int x = blockIdx.x * blockDim.x + threadIdx.x; // output column
	int y = blockIdx.y * blockDim.y + threadIdx.y; // output row
	if (x < cc && y < cr) {
		// a has cr columns per row, so a[x][y] lives at x*cr + y.
		c[y * cc + x] = a[x * cr + y];
	}
}
21,043 | /* SorensonPar.cu
Parallel Implementation of Algorithm 4.1
as discussed in Sorenson and Parberry's
1994 paper "Two Fast Parallel Prime Number
Sieves".
Authors:
Daniel Anzaldo
David Frank
Antonio Lanfranchi
*/
// Visual Studio Dependencies (Can be commented out)
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// C dependencies
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
// C++ dependencies
#include <algorithm>
typedef unsigned long long big;
// GLOBAL VARIABLES--------------------------------------
typedef struct Wheel_t // Struct-of-Arrays Wheel
{
bool * rp; // Numbers relatively prime to m
big * dist; // D s.t. x + d is the smallest integer >dist[x] relatively prime to m
} Wheel_k;
bool * S; // Global shared bit array of numbers up to N
int P; // Global number of processors
bool check_cuda_status = false; // turn to false when running on circe
/* These are for tracking time */
struct timezone myTimezone;
struct timeval startTime, endTime;
// HOST FUNCTION HEADERS---------------------------------
/* gcd
Host version of the Euclidean Method
*/
__host__ big gcd(big u, big v);
/* EratosthenesSieve
HELPER: for Algorithm 4.1 Sequential Portion
The most basic form of generating primes.
Used to help find the first k primes.
Returns the k-th prime.
*/
big EratosthenesSieve(long double x);
/* Algorithm 4.1 Sequential Portion
Running Time: O(sqrt(n))
Space: O(sqrt(n)) up to O(sqrt(n)/log log n)
*/
cudaError_t algorithm4_1(big n);
/* Algorithm 4.1 Helper: Parallel Sieve
All CUDA-related functionality goes here.
This code will change for different kernel versions.
*/
cudaError_t parallelSieve(
big n, big k, big m, const Wheel_k &wheel, big range);
/* Frees the memory allocated on the device and returns any errors*/
cudaError_t cleanup(bool *d_S, Wheel_k &wheel, cudaError_t cudaStatus);
/* Set a checkpoint and show the total running time in seconds */
double report_running_time(const char *arr);
// DEVICE MATH FUNCTIONS---------------------------------
/* gcd_d
Device version of the Euclidean Method
find number c such that: a = sc, b = tc
*/
/* gcd_d
   Device Euclidean GCD: repeatedly replace (a, b) with (b, a mod b);
   when b reaches zero, a holds the greatest common divisor.
*/
__device__ big gcd_d(big a, big b)
{
	while (b != 0)
	{
		big r = a % b;
		a = b;
		b = r;
	}
	return a;
}
/* gcd_d
Device version of the Binary Method
with bit arithmetic
*/
/*
__device__ big gcd_d(big u, big v)
{
big g = 1;
while ((u % 2 == 0) && (v % 2 == 0))
{
g <<= 1;
u >>= 1;
v >>= 1;
}
while (u != 0 && v != 0)
if (u % 2 == 0) u >>= 1;
else if (v % 2 == 0) v >>= 1;
else if (u > v) u = (u - v) >> 1;
else v = (v - u) >> 1;
return (g * (u + v));
}
*/
/* sqrt_d
Device version of the Square Root Function
Babylonian Method
*/
/* sqrt_d
   Device integer square root via 10 fixed Babylonian (Newton) iterations.
   Bug fix: for a < 2 the initial guess a/2 is 0, so the update divided by
   zero; 0 and 1 are their own square roots, so return them directly.
   Also replaced the 0.5 * (...) double round-trip with an exact integer
   average, which avoids precision loss for large 64-bit inputs.
*/
__device__ big sqrt_d(big a)
{
	if (a < 2) return a;
	big root = a / 2;
	for (big n = 0; n < 10; n++)
	{
		root = (root + a / root) / 2;
	}
	return root;
}
/* Device two-way minimum for the 64-bit big type. */
__device__ big min_d(big a, big b)
{
	if (a < b) return a;
	return b;
}
/* Device two-way maximum for the 64-bit big type. */
__device__ big max_d(big a, big b)
{
	if (a > b) return a;
	return b;
}
// ALGORITHM 4.1 KERNEL VERSIONS-------------------------
/* Algorithm 4.1: Parallel Sieve Kernel version 1
Parallelization: O(sqrt(n)) processors
Space: O(sqrt(n)) up to O(sqrt(n)/log log n)
PRAM Mode: Exclusive Read, Exclusive Write (EREW)
Remarks: No optimizations yet performed.
For n = 1 billion, it uses 31623 threads
*/
// Each thread sieves its own range [L, R] of the number line: it first marks
// wheel residues (numbers relatively prime to m), then strikes multiples of
// every prime q in [k, sqrt(n)) using the wheel's distance table to skip
// non-residues. d_wheel is passed by value; its rp/dist members must be
// DEVICE pointers (see the launch site).
__global__ void parallelSieveKernel(
big n, big k, big m, Wheel_k d_wheel, big range, bool *d_S)
{
big sqrt_N = sqrt_d(n);
// Express the sieve in thread mode.
big i = threadIdx.x + blockIdx.x * blockDim.x;
// Threads beyond n will not do work.
// NOTE(review): the bound is i <= n but only ~n/range threads own a range;
// threads with range*i+1 > n do nothing useful -- confirm intended bound.
if (i <= n)
{
big L = range * i + 1;
big R = min_d(range * (i + 1), n);
/* Range Sieving */
for (big x = L; x < R; x++)
d_S[x] = d_wheel.rp[x % m];
/* For every prime from prime[k] up to sqrt(N) */
for (big q = k; q < sqrt_N; q++)
{
if (d_S[q])
{
/* Compute smallest f s.t.
gcd_d(qf, m) == 1,
qf >= max_d(L, q^2) */
big f = max_d(q - 1, (big)((L / q) - 1));
/* f = f + W_k[f mod m].dist */
f += d_wheel.dist[f % m];
/* Remove the multiples of current prime */
while ((q * f) <= R)
{
// EREW Precaution. May need to be atomic operation.
// NOTE(review): this only writes false to cells that are ALREADY false,
// so composites are never actually cleared -- the condition looks
// inverted (should likely be unconditional d_S[q*f] = false). Confirm.
if (!(d_S[q * f])) d_S[q * f] = false;
f += d_wheel.dist[f % m];
}
}
}
}
}
/* TODO: Algorithm 4.1: Parallel Sieve Kernel version 2
Remarks: Prime table S within [0, sqrt(n)] migrated to const memory
Wheel completely migrated to const memory
Beware that const memory is only 64kB.
Benchmark with the Profiler first before creating this!
*/
__global__ void parallelSieveKernel2(
big n, big k, big m, Wheel_k d_wheel, big range, bool *d_S);
/* TODO: Algorithm 4.1: Parallel Sieve Kernel version 3
Remarks: Prime table S within [0, sqrt(n)] migrated to const memory
Wheel completely migrated to const memory
Probable use of the shared memory
Probable use of registers
Beware that register is only 4B or 32b.
Beware that const memory is only 64kB.
Benchmark with the Profiler first before creating this!
*/
__global__ void parallelSieveKernel3(
big n, big k, big m, Wheel_k d_wheel, big range, bool *d_S);
/* MAIN
To run this add the ff. args:
1. N = the number up to which you're sieving
*/
/* MAIN
   To run this add the ff. args:
   1. N = the number up to which you're sieving
*/
int main(int argc, char **argv)
{
	// Bug fix: argv[1] was dereferenced without checking argc.
	if (argc < 2) {
		fprintf(stderr, "usage: %s N\n", argv[0]);
		return 1;
	}
	big N = (big)strtoull(argv[1], NULL, 10);
	S = new bool[N]; //(bool*)malloc(N * sizeof(bool));
	printf("Find primes up to: %llu\n\n", N);
	/* start counting time */
	gettimeofday(&startTime, &myTimezone);
	cudaError_t x = algorithm4_1(N);
	/* check the total running time */
	report_running_time("Algorithm 4.1");
	if (check_cuda_status)
	{
		if (x != cudaSuccess) {
			printf("Algorithm 4.1 failed to execute!");
			return 1;
		}
	}
	// Display the primes. Bug fix: the loop index was `int`, which both
	// overflows for N > INT_MAX and mismatches the %llu format specifier
	// (undefined behavior in printf); use the 64-bit big type.
	for (big i = 0; i < N; i++)
		if (S[i]) printf("%llu ", i);
	delete[] S;
	return 0;
}
// HOST FUNCTION DEFINITIONS-----------------------------
// Euclidean Method
/* Host Euclidean GCD: reduce (u, v) -> (v, u mod v) until v hits zero;
   u then holds the greatest common divisor. */
__host__ big gcd(big u, big v)
{
	while (v != 0)
	{
		big r = u % v;
		u = v;
		v = r;
	}
	return u;
}
// Binary Method
/*
__host__ big gcd(big u, big v)
{
big g = 1;
while ((u % 2 == 0) && (v % 2 == 0))
{
g <<= 1;
u >>= 1;
v >>= 1;
}
while (u != 0 && v != 0)
if (u % 2 == 0) u >>= 1;
else if (v % 2 == 0) v >>= 1;
else if (u > v) u = (u - v) >> 1;
else v = (v - u) >> 1;
return (g * (u + v));
}
*/
/* Classic sieve of Eratosthenes over the global bit array S[0..n), followed
   by a downward scan from index k that returns the smallest prime >= 3 at or
   below k (0 if none).
   NOTE(review): callers pass k = log10(n)/4; the header comment's "k-th
   prime" wording does not match what the downward scan computes -- confirm. */
big EratosthenesSieve(long double k, big n)
{
	big kthPrime = 0;
	// 0 and 1 are non-primes.
	S[0] = S[1] = false;
	for (big i = 2; i < n; i++)
		S[i] = true;
	// Simple sieving operation. Bug fix: the inner index was declared `int`,
	// which overflows once i*i exceeds INT_MAX for large n; use big.
	for (big i = 2; i < (big)sqrtl(n); i++)
		if (S[i])
		{
			for (big j = i * i; j < n; j += i)
				S[j] = false;
		}
	// Scan downward so the last assignment wins (the smallest prime found).
	for (big i = k; i > 2; i--)
		if (S[i]) kthPrime = i;
	return kthPrime;
}
// Sequential portion of Algorithm 4.1: sieve small primes, build the k-th
// wheel (residues and distance table mod m), then hand off to the parallel
// sieve. Returns the CUDA status from the parallel phase.
cudaError_t algorithm4_1(big n)
{
/* VARIABLES */
big range;
big sqrt_N = (big)sqrtl((long double)n);
Wheel_k wheel;
/* Allocation of wheel */
// NOTE(review): the wheel is periodic modulo m, so size n is far larger
// than necessary (m entries would do) -- confirm memory budget.
wheel.rp = new bool[n];
wheel.dist = new big[n];
/* Find the first k primes
K = maximal s.t. S[K] <= (log N) / 4
Find primes up to sqrt(N) */
big k = EratosthenesSieve(log10l((long double)n) / 4, n);
/* Find the product of the first k primes m */
// NOTE(review): this multiplies every prime VALUE below index k, which only
// loosely matches "product of the first k primes" -- confirm intent.
big m = 1;
for (big ii = 0; ii < k; ii++)
if (S[ii]) m *= ii;
/* Compute k-th wheel W_k
FUTURE OPTIMIZATION: Delegate kernel for computation */
for (big x = 0; x < n; x++)
{
// True if rp[x] is relatively prime to m
wheel.rp[x] = (gcd(x, m) == 1);
/* This is d s.t. x + d is
the smallest integer >dist[x]
relatively prime to m */
int d = 0;
while (gcd(x + d, m) != 1)
d++;
wheel.dist[x] = d;
}
/* Delta = ceil(n/p) */
// NOTE(review): P is a global that is never assigned in this file's visible
// code -- division by zero here if it stays 0; confirm initialization.
range = (big)ceill(n / (long double)P);
/* PARALLEL PART */
cudaError_t parallelStatus = parallelSieve(n, k, m, wheel, range);
if (check_cuda_status)
{
if (parallelStatus != cudaSuccess) {
fprintf(stderr, "parallelSieve() failed!");
}
}
/* FREE */
delete[] wheel.rp;
delete[] wheel.dist;
return parallelStatus;
}
/* Parallel phase of Algorithm 4.1: copies the number field S and the wheel
   tables to the device, launches parallelSieveKernel, and copies S back.
   Returns the last CUDA status; all device allocations are released via
   cleanup() on every exit path after allocation begins. */
cudaError_t parallelSieve(
	big n, big k, big m, const Wheel_k &wheel, big range)
{
	cudaError_t cudaStatus;
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	// Device-side number field (global memory).
	bool * d_S = NULL;
	// Device-side wheel tables (global memory).
	Wheel_k d_wheel;
	d_wheel.rp = NULL;
	d_wheel.dist = NULL;
	// Choose which GPU to run on, change this on a multi-GPU system.
	cudaStatus = cudaSetDevice(0);
	if (check_cuda_status && cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
		return cudaStatus;
	}
	// Measure start time for CUDA portion.
	cudaEventRecord(start, 0);
	// Device allocations.
	cudaStatus = cudaMalloc((void**)&d_S, n * sizeof(bool));
	if (check_cuda_status && cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMalloc failed on number field S!\n");
		return cleanup(d_S, d_wheel, cudaStatus);
	}
	cudaStatus = cudaMalloc((void**)&(d_wheel.rp), n * sizeof(bool));
	if (check_cuda_status && cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMalloc failed on wheel.rp!\n");
		return cleanup(d_S, d_wheel, cudaStatus);
	}
	cudaStatus = cudaMalloc((void**)&(d_wheel.dist), n * sizeof(big));
	if (check_cuda_status && cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMalloc failed on wheel.dist!\n");
		return cleanup(d_S, d_wheel, cudaStatus);
	}
	// Host -> device copies.
	cudaStatus = cudaMemcpy(d_S, S, n * sizeof(bool), cudaMemcpyHostToDevice);
	if (check_cuda_status && cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMemcpy failed! S->d_S.\n");
		return cleanup(d_S, d_wheel, cudaStatus);
	}
	cudaStatus = cudaMemcpy(d_wheel.rp, wheel.rp, n * sizeof(bool), cudaMemcpyHostToDevice);
	if (check_cuda_status && cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMemcpy failed! wheel.rp->d_wheel.rp\n");
		return cleanup(d_S, d_wheel, cudaStatus);
	}
	cudaStatus = cudaMemcpy(d_wheel.dist, wheel.dist, n * sizeof(big), cudaMemcpyHostToDevice);
	if (check_cuda_status && cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMemcpy failed! wheel.dist->d_wheel.dist\n");
		return cleanup(d_S, d_wheel, cudaStatus);
	}
	// Kernel call: one thread per sqrt(n)-sized range, 256 threads per block.
	dim3 gridSize(ceill(ceill(sqrt(n))/256), 1, 1);
	dim3 blockSize(256, 1, 1);
	// Bug fix: the kernel was launched with `wheel`, the HOST-side struct,
	// so the device dereferenced host pointers; pass the device copy.
	parallelSieveKernel<<<gridSize, blockSize>>>(n, k, m, d_wheel, range, d_S);
	// Catch launch-configuration errors.
	cudaStatus = cudaGetLastError();
	if (check_cuda_status && cudaStatus != cudaSuccess) {
		fprintf(stderr, "parallelSieveKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
		return cleanup(d_S, d_wheel, cudaStatus);
	}
	// Catch asynchronous execution errors.
	cudaStatus = cudaDeviceSynchronize();
	if (check_cuda_status && cudaStatus != cudaSuccess) {
		// Bug fix: the message named the wrong kernel ("addKernel").
		fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching parallelSieveKernel!\n", cudaStatus);
		return cleanup(d_S, d_wheel, cudaStatus);
	}
	// Device -> host copy of the sieved field.
	cudaStatus = cudaMemcpy(S, d_S, n * sizeof(bool), cudaMemcpyDeviceToHost);
	if (check_cuda_status && cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMemcpy failed! d_S->S.\n");
		return cleanup(d_S, d_wheel, cudaStatus);
	}
	// Measure stop time for CUDA portion.
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	float elapsedTime;
	cudaEventElapsedTime(&elapsedTime, start, stop);
	printf("Time to generate: %0.5f ms\n", elapsedTime);
	// Bug fix: the timing events were never destroyed (resource leak).
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	return cleanup(d_S, d_wheel, cudaStatus);
}
/* Releases all device allocations and passes the caller's status through so
   it can be used directly in a return statement. cudaFree(NULL) is a no-op,
   so partially-initialized state is safe here. */
cudaError_t cleanup(bool *d_S, Wheel_k &wheel, cudaError_t cudaStatus)
{
	cudaFree(wheel.rp);
	cudaFree(wheel.dist);
	cudaFree(d_S);
	return cudaStatus;
}
/*
set a checkpoint and show the (natural) running time in seconds
*/
/* Records a checkpoint against the global startTime and prints/returns the
   elapsed wall-clock time in seconds for the phase named by `arr`. */
double report_running_time(const char *arr) {
	gettimeofday(&endTime, &myTimezone);
	long sec_diff = endTime.tv_sec - startTime.tv_sec;
	long usec_diff = endTime.tv_usec - startTime.tv_usec;
	// Borrow a second when the microsecond part went negative.
	if (usec_diff < 0) {
		sec_diff -= 1;
		usec_diff += 1000000;
	}
	printf("Running time for %s: %ld.%06ld sec\n\n", arr, sec_diff, usec_diff);
	return sec_diff * 1.0 + usec_diff / 1000000.0;
}
|
21,044 | #include "includes.h"
/* Remaps every mask pixel to its new object id (via maskNewIds) and, for each
   new object, atomically accumulates the sum of pixel columns, rows, and a
   pixel count into `centers` (row stride centers_columns) so centroids can
   be derived afterwards. */
__global__ void CumulatePositionOfNewObjects(float* mask , float* maskNewIds , float* maskOut, int mask_size, int mask_cols, float* centers, int centers_size, int centers_columns){
	const int idx = blockDim.x * blockIdx.y * gridDim.x + blockDim.x * blockIdx.x + threadIdx.x;
	const int icol = idx % mask_cols;
	const int irow = idx / mask_cols;
	if (idx >= mask_size)
		return;
	const int i_mask = mask[idx];          // float label truncated to int
	const int i_obj = maskNewIds[i_mask];  // remap old id -> new id
	maskOut[idx] = i_obj;
	// Accumulate only when the (x, y, count) triple fits inside `centers`.
	if (i_obj * centers_columns + 2 < centers_size) {
		float* entry = centers + i_obj * centers_columns;
		atomicAdd(entry + 0, (float)icol);
		atomicAdd(entry + 1, (float)irow);
		atomicAdd(entry + 2, 1.0f);
	}
}
21,045 | /*
* 线性内存通常使用cudaMalloc()分配,并使用cudaFree()释放,
* 并且主机内存和设备内存之间的数据传输通常使用cudaMemcpy()完成。
* 在内核的向量加法代码示例中,需要将向量从主机存储器复制到设备存储器:
*/
#include <stdio.h>
// Kernel definition
// Kernel definition: elementwise sum over a single block, C[i] = A[i] + B[i].
// One thread per element; no bounds check, so the launch must not use more
// threads than the arrays hold.
__global__ void VecAdd(float* A, float* B, float* C)
{
    int i = threadIdx.x;
    // Bug fix: the addition was performed and stored twice; the duplicate
    // statement was redundant and has been removed.
    C[i] = A[i] + B[i];
}
// Demo driver: adds two 10-element vectors but deliberately launches only 5
// threads, then labels each printed element by whether it was computed on
// the device (Pthread) or came back as the uploaded h_C contents (MemoryV).
int main()
{
// Kernel invocation uses data space of M elements.
int M = 10;
M = 10;
int i, N;
size_t size = M * sizeof(float);
float* h_A = (float*)malloc(size);
float* h_B = (float*)malloc(size);
// NOTE(review): h_C is never initialized before being copied to the device,
// so elements N..M-1 of the output are indeterminate (technically UB to
// read); this appears intentional for the demo -- confirm.
float* h_C = (float*)malloc(size);
for (i = 0; i < M; i++) h_A[i] = (float )i + 1.00;
for (i = 0; i < M; i++) h_B[i] = (float )i + 100.00;
//*h_A = 100;
//*h_B = 200;
// Device allocations and uploads (including the uninitialized h_C).
float* d_A;
cudaMalloc(&d_A, size);
float* d_B;
cudaMalloc(&d_B, size);
float* d_C;
cudaMalloc(&d_C, size);
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_C, h_C, size, cudaMemcpyHostToDevice);
N=5;
// Kernel invocation with N threads
// (only the first N of the M elements are actually computed).
VecAdd<<<1, N>>>(d_A, d_B, d_C);
// Blocking copy; also synchronizes with the kernel.
cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
for(i = 0; i < M; i++)
{
if (h_C[i] > 0)
printf("[Pthread%d]\t%.2f + %.2f = %.2f\n", i, h_A[i], h_B[i], h_C[i]);
else
printf("[MemoryV%d]\t%.2f + %.2f = %.2f\n", i, h_A[i], h_B[i], h_C[i]);
}
// Free device memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
// Free host memory
free(h_A);
free(h_B);
free(h_C);
}
|
21,046 | #include "includes.h"
/* Scalar refresh from two precomputed dot products (presumably the L-BFGS
   rho = 1/(y.s) and initial Hessian scale H0 = (y.s)/(y.y) -- confirm):
   H0 is only updated when yDotY is large enough to avoid dividing by a
   (near-)zero denominator. Intended for a single-thread launch. */
__global__ void update3(float *rho_out, float *H0_out, const float *yDotS, const float *yDotY)
{
	*rho_out = 1.0f / *yDotS;
	if (*yDotY > 1e-5) {
		*H0_out = *yDotS / *yDotY;
	}
}
21,047 | // Steps to be followed while creating a new table
// 1. Parse the query - Create TableName
// 2. Check if TableName already exists in Database, if not add it to Database.
// 3. Create TableName.data and TableName.mdata inside DB
// 4. Poppulate TableName.mdata using specifics in the query
// Sample Query - "CREATE TABLE Persons (
// ID int NOT NULL,
// LastName varchar(255) NOT NULL,
// FirstName varchar(255),
// Age int,
// PRIMARY KEY (ID)
// );"
#include "sql_create.cuh"
#define invalidQuery(query) {utils::invalidQuery(query); return;}
using namespace std;
namespace create {
// True for the punctuation the CREATE-TABLE parser treats as stand-alone
// tokens: parentheses, comma, and semicolon.
bool isSpecialChar(char ch) {
    switch (ch) {
        case '(':
        case ')':
        case ',':
        case ';':
            return true;
        default:
            return false;
    }
}
// Parse and execute a "CREATE TABLE ..." statement.
// Steps: pad the special characters with spaces so they tokenize on their
// own, verify the CREATE TABLE prefix, reject an already-existing table
// name, then read column definitions (plus an optional PRIMARY KEY clause)
// into a Metadata object, which is committed and registered on success.
// Any malformed token triggers the invalidQuery macro, which reports the
// error and returns from this function early.
void execute(string query) {
    // Surround '(' ')' ',' ';' with spaces so stringstream extraction
    // yields them as stand-alone tokens; i += 2 skips past the pair of
    // inserted spaces so the same character is not processed again.
    for (int i = 0; i < query.size(); ++i) {
        if (isSpecialChar(query[i])) {
            query.insert(i, " ");
            query.insert(i + 2, " ");
            i += 2;
        }
    }
    string word;
    stringstream iss(query);
    iss >> word;
    utils::toLower(word);
    if (word != "create")
        invalidQuery(query);
    iss >> word;
    utils::toLower(word);
    if (word != "table")
        invalidQuery(query);
    //table name (lower-cased; must not already exist in the database)
    iss >> word;
    utils::toLower(word);
    if (utils::tableExists(word))
        invalidQuery(query);
    Metadata m(word);
    iss >> word;
    if (word != "(")
        invalidQuery(query);
    string col_name, col_type, key;
    string varchar_size;
    // Each pass consumes either "PRIMARY KEY ( <col> )" or a column
    // definition "<name> <type>[ ( <size> ) ]", followed by ',' or the
    // closing ')'.
    while (true) {
        iss >> col_name;
        utils::toLower(col_name);
        if (col_name == "primary") {
            iss >> word;
            utils::toLower(word);
            if (word != "key")
                invalidQuery(query);
            iss >> word;
            if (word != "(")
                invalidQuery(query);
            iss >> key;
            //make this column primary key
            m.appendKey(key);
            iss >> word;
            if (word != ")")
                invalidQuery(query);
        } else {
            iss >> col_type;
            if (col_type == "varchar") {
                // Reassemble "varchar(<n>)" from its spaced-out tokens.
                iss >> word;
                if (word != "(")
                    invalidQuery(query);
                iss >> varchar_size;
                iss >> word;
                if (word != ")")
                    invalidQuery(query);
                col_type += "(" + varchar_size + ")";
            }
            ColType c = newColType(col_type);
            m.append(col_name, c, false);
        }
        iss >> word;
        if (word != "," && word != ")")
            invalidQuery(query);
        if (word == ")")
            break;
    }
    // Persist the metadata file and register the table in the database.
    m.commit();
    utils::addTable(m.tableName);
}
} |
21,048 | #include <stdio.h>
// NHWC -> NCHW layout conversion for a batch of images.
// Launch contract (see main): blockIdx.y indexes the image (gridDim.y =
// batch size), blockIdx.z indexes the channel (gridDim.z = channels), and
// blockDim.x * gridDim.x threads tile the H*W pixel plane.
// Each participating thread copies exactly one (n, h, w, c) element from
// the NHWC source into its NCHW destination slot.
__global__ void transpose(unsigned char *odata, const unsigned char *idata,
int H, int W)
{
    int C = gridDim.z;          // number of channels
    int c = blockIdx.z;         // this thread's channel
    int n = blockIdx.y;         // this thread's image within the batch
    // Flat pixel index within one H*W plane. The grid may overshoot the
    // plane, so excess threads simply do nothing.
    long plane = (long)threadIdx.x * gridDim.x + blockIdx.x;
    if (plane >= (long)H * W)
        return;
    int h = (int)(plane / W);
    int w = (int)(plane % W);
    long src_idx = (long)n * (H * W * C) +  // NHWC source offset
                   (long)h * (W * C) +
                   (long)w * C +
                   c;
    long dst_idx = (long)n * (C * H * W) +  // NCHW destination offset
                   (long)c * (H * W) +
                   (long)h * W +
                   w;
    odata[dst_idx] = idata[src_idx];
}
int main(){
    // Layout (from the original author's notes): 1024-thread 1-D blocks,
    // grid.x tiles the H*W pixel plane, grid.y = batch, grid.z = channels.
    int BATCH = 10;
    int HEIGHT = 50;
    int WIDTH = 50;
    int C = 3;
    int SIZE = HEIGHT * WIDTH * C;   // elements in one NHWC image
    cudaStream_t stream1;
    cudaStreamCreate ( &stream1) ;
    dim3 dimBlock(1024, 1, 1);
    dim3 dimGrid(int(SIZE/C/1024)+1,BATCH,C);
    // Host buffer doubles as NHWC source before the upload and as the NCHW
    // result after the download.
    unsigned char host[SIZE*BATCH];
    // init src image: use the channel index as an easily-recognizable pattern
    for(int i = 0; i < SIZE*BATCH; i++){
        host[i] = (i%C);
    }
    for(int i = 0; i < 30*3; i+=3){ // N H W C
        printf("%d\n",host[i]);
    }
    printf("============================\n");
    // init device arrays
    unsigned char *device_src, *device_dst;
    cudaMalloc((unsigned char **)&device_src, SIZE* BATCH* sizeof(unsigned char));
    cudaMalloc((unsigned char **)&device_dst, SIZE* BATCH* sizeof(unsigned char));
    cudaMemcpy(device_src , host , SIZE * BATCH * sizeof(unsigned char), cudaMemcpyHostToDevice);
    // run kernel
    transpose<<<dimGrid, dimBlock, 0, stream1>>>(device_dst, device_src, HEIGHT, WIDTH);
    cudaDeviceSynchronize();
    // take out output
    cudaMemcpy(host, device_dst, SIZE * BATCH * sizeof(unsigned char), cudaMemcpyDeviceToHost);
    // DEBUG : print first 10 values of each channel plane per image (N C H W).
    for(int n = 0; n<SIZE*BATCH ; n+=SIZE){
        for(int c = 0; c<SIZE ; c+=HEIGHT*WIDTH){ // N C H W
            for(int i = 0 ; i < 10; i++){
                printf("batch: %d, idx: %d, count: %d, value: %d\n", n/SIZE, n+c+i, i, host[n+c+i]);
            }
        }
        printf("------------------------------\n");
    }
    // deinit GPU — fixed: the stream was created but never destroyed (leak).
    cudaStreamDestroy(stream1);
    cudaFree(device_src);
    cudaFree(device_dst);
    return 0;
}
// clear && clear && nvcc NHWC2NCHW_free.cu -o trans.o && ./trans.o |
21,049 | #include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#include<string.h>
#include<math.h>
#define NUM 10000000
#define CUDA_ERROR_EXIT(str) do{\
cudaError err = cudaGetLastError();\
if( err != cudaSuccess){\
printf("Cuda Error: '%s' for %s\n", cudaGetErrorString(err), str);\
exit(-1);\
}\
}while(0);
#define TDIFF(start, end) ((end.tv_sec - start.tv_sec) * 1000000UL + (end.tv_usec - start.tv_usec))
// One sweep of a pairwise XOR reduction over gpu_mem[start..end].
// At this sweep, `nthreads` strides of width `pwr` remain; thread t works
// on the stride beginning at start + t*pwr. Even-numbered threads fold the
// stride's last element into its first slot; odd-numbered threads fold the
// first element into the last slot, so adjacent strides leave their
// combined value at the boundary picked up by the next (doubled-pwr) sweep.
// `end` is unused here; kept for the caller's launch signature.
__global__ void calculate(long * gpu_mem,long nthreads,long pwr, long start, long end)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
// Guard: the grid is rounded up to whole 1024-thread blocks.
if(tid >= nthreads)
return;
if(tid%2==0){
gpu_mem[start+tid*pwr]=gpu_mem[start+tid*pwr]^gpu_mem[start+tid*pwr+pwr-1];
}
else{
gpu_mem[start+tid*pwr+pwr-1]=gpu_mem[start+tid*pwr]^gpu_mem[start+tid*pwr+pwr-1];
}
}
int main(int argc, char **argv)
{
// XOR-reduce `num` pseudo-random longs on the GPU, processing the largest
// power-of-two prefix of the remaining data on each pass of the while loop.
// Usage: ./a.out [count] [seed]; defaults: NUM elements, seed 0.
struct timeval t_start, t_end;
long i;
long *gpu_mem;
unsigned long num = NUM; /*Default value of num from MACRO*/
int SEED = 0; /* fixed: was uninitialized when argc != 3 — UB fed to srand() */
long blocks;
if(argc == 3){
num = atoi(argv[1]); /*Update after checking*/
if(num <= 0)
num = NUM;
SEED=atoi(argv[2]);
}
unsigned long n=num;
/* Allocate host (CPU) memory and initialize*/
srand(SEED);
long * ar = (long *)malloc(num*sizeof(long));
for(i=0; i<num; ++i){
*(ar+i)= (long)(random());
}
gettimeofday(&t_start, NULL);
/* Allocate GPU memory and copy from CPU --> GPU.
   Each pass folds a 2^lg-element chunk to ar[start] via log2-many
   kernel sweeps, then XORs that into the running answer. */
long start=0;
long end=-1;
long ans=0;
while(1){
cudaMalloc(&gpu_mem, n * sizeof(long));
CUDA_ERROR_EXIT("cudaMalloc");
cudaMemcpy(gpu_mem, ar, n * sizeof(long) , cudaMemcpyHostToDevice);
CUDA_ERROR_EXIT("cudaMemcpy");
if(num==0)break;
start=end+1;
long lg=floor(log(num)*1.00/log(2));
end=(long)(start+pow(2,lg)-1);
if(num==1){
ans=(ans^ar[start]);
break;
}
long j;
long size=end+1-start;
long times=(long)(log(size)/log(2));
for(j=1;j<=times;j++){
long nthreads=size/((long)(pow(2,j)));
long pwr=(long)(pow(2,j));
blocks=nthreads/1024;
if(nthreads%1024)blocks++;
calculate<<<blocks, 1024>>>(gpu_mem,nthreads,pwr,start,end);
CUDA_ERROR_EXIT("kernel invocation");
}
cudaMemcpy(ar, gpu_mem, n * sizeof(long) , cudaMemcpyDeviceToHost);
CUDA_ERROR_EXIT("memcpy");
cudaFree(gpu_mem);
ans=(ans^ar[start]);
num=num-(long)(pow(2,lg));
}
gettimeofday(&t_end, NULL);
printf("Total time = %ld microsecs\n", TDIFF(t_start, t_end));
cudaFree(gpu_mem);
printf("result = %ld\n", ans);
free(ar);
}
|
21,050 | #include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
// If the most recent CUDA API call or kernel launch left an error behind,
// print "CUDA error : <message>, <error string>" and abort the process.
void check_error (const char* message) {
    cudaError_t status = cudaGetLastError ();
    if (status == cudaSuccess)
        return;
    printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (status));
    exit(-1);
}
// Machine-generated (CSE'd) curvilinear stencil kernel: accumulates into r1
// a 4th-order cross-derivative contribution built from u1/u2/u3, material
// fields mu/la, metric terms met1..met4 and stretching vectors strx/stry.
// Assumes all 3-D fields are laid out as dense 304x304x304 double arrays.
// Expected launch: 2-D grid/block with x covering k and y covering j; each
// thread serially marches the interior range of i. Valid work is limited to
// 2 <= j,k <= N-3. The _t_NN_ temporaries come from the generator's common-
// subexpression elimination; their order is significant — do not reorder.
__global__ void __launch_bounds__(256,1) curvi_1 (double * __restrict__ in_r1, double *__restrict__ in_u1, double * __restrict__ in_u2, double *__restrict__ in_u3, double * __restrict__ in_mu, double * __restrict__ in_la, double * __restrict__ in_met1, double * __restrict__ in_met2, double * __restrict__ in_met3, double * __restrict__ in_met4, double * strx, double * stry, double c1, double c2, int N) {
//Determine the block's indices
int blockdim_k= (int)(blockDim.x);
int k0 = (int)(blockIdx.x)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
// Reinterpret the flat buffers as fixed-size 304^3 arrays for 3-D indexing.
double (*u1)[304][304] = (double (*)[304][304])in_u1;
double (*u2)[304][304] = (double (*)[304][304])in_u2;
double (*u3)[304][304] = (double (*)[304][304])in_u3;
double (*mu)[304][304] = (double (*)[304][304])in_mu;
double (*la)[304][304] = (double (*)[304][304])in_la;
double (*r1)[304][304] = (double (*)[304][304])in_r1;
double (*met1)[304][304] = (double (*)[304][304])in_met1;
double (*met2)[304][304] = (double (*)[304][304])in_met2;
double (*met3)[304][304] = (double (*)[304][304])in_met3;
double (*met4)[304][304] = (double (*)[304][304])in_met4;
// NOTE(review): '&' is bitwise-and of bool operands here — functionally
// equivalent to '&&' for these 0/1 values, though '&&' is conventional.
if (j>=2 & k>=2 & j<=N-3 & k<=N-3) {
for (int i=2; i<=N-3; i++) {
// Generator-emitted scalar temporaries (CSE values); declaration order
// mirrors first use below.
double _t_21_;
double _t_153_;
double _t_150_;
double _t_162_;
double _t_156_;
double _t_16_;
double _t_134_;
double _t_173_;
double _t_192_;
double _t_189_;
double _t_46_;
double _t_201_;
double _t_195_;
double _t_41_;
double _t_170_;
double _t_34_;
double _t_182_;
double _t_176_;
double _t_29_;
double _t_131_;
double _t_9_;
double _t_143_;
double _t_137_;
double _t_4_;
double _t_177_;
double _t_118_;
double _t_100_;
double _t_196_;
double _t_81_;
double _t_127_;
double _t_63_;
double _t_138_;
double _t_157_;
double _t_132_;
double _t_74_;
double _t_56_;
double _t_111_;
double _t_93_;
double _t_129_;
double _t_151_;
double _t_148_;
double _t_171_;
double _t_168_;
double _t_190_;
double _t_187_;
double _t_144_;
double _t_86_;
double _t_68_;
double _t_123_;
double _t_142_;
double _t_105_;
double _t_163_;
double _t_161_;
double _t_183_;
double _t_181_;
double _t_202_;
double _t_200_;
// Running accumulator for r1[i][j][k]; written back once at loop end.
double r1ic0jc0kc0 = r1[i][j][k];
double _t_113_;
double _t_58_;
double _t_76_;
double _t_95_;
double _t_117_;
double _t_90_;
double _t_122_;
double _t_110_;
double _t_99_;
double _t_104_;
double _t_92_;
double _t_51_;
double _t_62_;
double _t_53_;
double _t_67_;
double _t_55_;
double _t_80_;
double _t_85_;
double _t_73_;
double _t_10_;
double _t_0_;
double _t_22_;
double _t_35_;
double _t_47_;
double _t_17_;
double _t_14_;
double _t_42_;
double _t_39_;
double _t_30_;
double _t_27_;
double _t_5_;
double _t_2_;
_t_21_ = met1[i][j][k-2] * la[i][j][k-2] * met2[i][j][k-2];
_t_153_ = 2.0 * mu[i][j][k-2];
_t_153_ += la[i][j][k-2];
_t_150_ = met1[i][j][k-2] * _t_153_ * met2[i][j][k-2];
_t_162_ = met1[i][j][k-2] * mu[i][j][k-2] * met4[i][j][k-2];
_t_156_ = met1[i][j][k-2] * mu[i][j][k-2] * met3[i][j][k-2];
_t_16_ = met1[i][j][k-2] * mu[i][j][k-2] * met3[i][j][k-2];
_t_134_ = 2.0 * mu[i][j][k+2];
_t_134_ += la[i][j][k+2];
_t_173_ = 2.0 * mu[i][j][k+1];
_t_173_ += la[i][j][k+1];
_t_192_ = 2.0 * mu[i][j][k-1];
_t_192_ += la[i][j][k-1];
_t_189_ = met1[i][j][k-1] * _t_192_ * met2[i][j][k-1];
_t_46_ = met1[i][j][k-1] * la[i][j][k-1] * met2[i][j][k-1];
_t_201_ = met1[i][j][k-1] * mu[i][j][k-1] * met4[i][j][k-1];
_t_195_ = met1[i][j][k-1] * mu[i][j][k-1] * met3[i][j][k-1];
_t_41_ = met1[i][j][k-1] * mu[i][j][k-1] * met3[i][j][k-1];
_t_170_ = met1[i][j][k+1] * _t_173_ * met2[i][j][k+1];
_t_34_ = met1[i][j][k+1] * la[i][j][k+1] * met2[i][j][k+1];
_t_182_ = met1[i][j][k+1] * mu[i][j][k+1] * met4[i][j][k+1];
_t_176_ = met1[i][j][k+1] * mu[i][j][k+1] * met3[i][j][k+1];
_t_29_ = met1[i][j][k+1] * mu[i][j][k+1] * met3[i][j][k+1];
_t_131_ = met1[i][j][k+2] * _t_134_ * met2[i][j][k+2];
_t_9_ = met1[i][j][k+2] * la[i][j][k+2] * met2[i][j][k+2];
_t_143_ = met1[i][j][k+2] * mu[i][j][k+2] * met4[i][j][k+2];
_t_137_ = met1[i][j][k+2] * mu[i][j][k+2] * met3[i][j][k+2];
_t_4_ = met1[i][j][k+2] * mu[i][j][k+2] * met3[i][j][k+2];
// First contribution block: mixed i/k differences of u1, u2, u3.
_t_177_ = -c1 * u2[i-1][j][k+1];
_t_118_ = c1 * u2[i-1][j][k+1];
_t_177_ += c1 * u2[i+1][j][k+1];
_t_100_ = c1 * u2[i+1][j][k+1];
_t_118_ -= c1 * u2[i-1][j][k-1];
_t_196_ = -c1 * u2[i-1][j][k-1];
_t_100_ -= c1 * u2[i+1][j][k-1];
_t_196_ += c1 * u2[i+1][j][k-1];
_t_196_ -= c2 * u2[i-2][j][k-1];
_t_81_ = -c1 * u2[i-2][j][k-1];
_t_81_ += c1 * u2[i-2][j][k+1];
_t_177_ -= c2 * u2[i-2][j][k+1];
_t_196_ += c2 * u2[i+2][j][k-1];
_t_127_ = c1 * _t_195_ * _t_196_;
_t_63_ = -c1 * u2[i+2][j][k-1];
_t_63_ += c1 * u2[i+2][j][k+1];
_t_177_ += c2 * u2[i+2][j][k+1];
_t_127_ += c1 * _t_176_ * _t_177_;
_t_81_ += c2 * u2[i-2][j][k+2];
_t_138_ = -c2 * u2[i-2][j][k+2];
_t_118_ += c2 * u2[i-1][j][k+2];
_t_138_ -= c1 * u2[i-1][j][k+2];
_t_100_ += c2 * u2[i+1][j][k+2];
_t_138_ += c1 * u2[i+1][j][k+2];
_t_63_ += c2 * u2[i+2][j][k+2];
_t_138_ += c2 * u2[i+2][j][k+2];
_t_127_ += c2 * _t_137_ * _t_138_;
_t_81_ -= c2 * u2[i-2][j][k-2];
_t_157_ = -c2 * u2[i-2][j][k-2];
_t_118_ -= c2 * u2[i-1][j][k-2];
_t_157_ -= c1 * u2[i-1][j][k-2];
_t_100_ -= c2 * u2[i+1][j][k-2];
_t_157_ += c1 * u2[i+1][j][k-2];
_t_63_ -= c2 * u2[i+2][j][k-2];
_t_157_ += c2 * u2[i+2][j][k-2];
_t_127_ += c2 * _t_156_ * _t_157_;
_t_132_ = -c2 * u1[i-2][j][k+2];
_t_74_ = c2 * u1[i-2][j][k+2];
_t_132_ += c2 * u1[i+2][j][k+2];
_t_56_ = c2 * u1[i+2][j][k+2];
_t_132_ -= c1 * u1[i-1][j][k+2];
_t_111_ = c2 * u1[i-1][j][k+2];
_t_132_ += c1 * u1[i+1][j][k+2];
_t_93_ = c2 * u1[i+1][j][k+2];
_t_129_ = strx[i] * _t_131_ * _t_132_;
_t_127_ += c2 * _t_129_ * stry[j];
_t_74_ -= c2 * u1[i-2][j][k-2];
_t_151_ = -c2 * u1[i-2][j][k-2];
_t_111_ -= c2 * u1[i-1][j][k-2];
_t_151_ -= c1 * u1[i-1][j][k-2];
_t_93_ -= c2 * u1[i+1][j][k-2];
_t_151_ += c1 * u1[i+1][j][k-2];
_t_56_ -= c2 * u1[i+2][j][k-2];
_t_151_ += c2 * u1[i+2][j][k-2];
_t_148_ = strx[i] * _t_150_ * _t_151_;
_t_127_ += c2 * _t_148_ * stry[j];
_t_74_ += c1 * u1[i-2][j][k+1];
_t_171_ = -c2 * u1[i-2][j][k+1];
_t_111_ += c1 * u1[i-1][j][k+1];
_t_171_ -= c1 * u1[i-1][j][k+1];
_t_93_ += c1 * u1[i+1][j][k+1];
_t_171_ += c1 * u1[i+1][j][k+1];
_t_56_ += c1 * u1[i+2][j][k+1];
_t_171_ += c2 * u1[i+2][j][k+1];
_t_168_ = strx[i+2] * _t_170_ * _t_171_;
_t_127_ += c1 * _t_168_ * stry[j];
_t_74_ -= c1 * u1[i-2][j][k-1];
_t_190_ = -c2 * u1[i-2][j][k-1];
_t_111_ -= c1 * u1[i-1][j][k-1];
_t_190_ -= c1 * u1[i-1][j][k-1];
_t_93_ -= c1 * u1[i+1][j][k-1];
_t_190_ += c1 * u1[i+1][j][k-1];
_t_56_ -= c1 * u1[i+2][j][k-1];
_t_190_ += c2 * u1[i+2][j][k-1];
_t_187_ = strx[i-2] * _t_189_ * _t_190_;
_t_127_ += c1 * _t_187_ * stry[j];
_t_144_ = -c2 * u3[i-2][j][k+2];
_t_86_ = c2 * u3[i-2][j][k+2];
_t_144_ += c2 * u3[i+2][j][k+2];
_t_68_ = c2 * u3[i+2][j][k+2];
_t_144_ -= c1 * u3[i-1][j][k+2];
_t_123_ = c2 * u3[i-1][j][k+2];
_t_144_ += c1 * u3[i+1][j][k+2];
_t_142_ = _t_143_ * _t_144_;
_t_127_ += c2 * _t_142_ * stry[j];
_t_105_ = c2 * u3[i+1][j][k+2];
_t_86_ -= c2 * u3[i-2][j][k-2];
_t_163_ = -c2 * u3[i-2][j][k-2];
_t_123_ -= c2 * u3[i-1][j][k-2];
_t_163_ -= c1 * u3[i-1][j][k-2];
_t_105_ -= c2 * u3[i+1][j][k-2];
_t_163_ += c1 * u3[i+1][j][k-2];
_t_68_ -= c2 * u3[i+2][j][k-2];
_t_163_ += c2 * u3[i+2][j][k-2];
_t_161_ = _t_162_ * _t_163_;
_t_127_ += c2 * _t_161_ * stry[j];
_t_86_ += c1 * u3[i-2][j][k+1];
_t_183_ = -c2 * u3[i-2][j][k+1];
_t_123_ += c1 * u3[i-1][j][k+1];
_t_183_ -= c1 * u3[i-1][j][k+1];
_t_105_ += c1 * u3[i+1][j][k+1];
_t_183_ += c1 * u3[i+1][j][k+1];
_t_68_ += c1 * u3[i+2][j][k+1];
_t_183_ += c2 * u3[i+2][j][k+1];
_t_181_ = _t_182_ * _t_183_;
_t_127_ += c1 * _t_181_ * stry[j];
_t_86_ -= c1 * u3[i-2][j][k-1];
_t_202_ = -c2 * u3[i-2][j][k-1];
_t_123_ -= c1 * u3[i-1][j][k-1];
_t_202_ -= c1 * u3[i-1][j][k-1];
_t_105_ -= c1 * u3[i+1][j][k-1];
_t_202_ += c1 * u3[i+1][j][k-1];
_t_68_ -= c1 * u3[i+2][j][k-1];
_t_202_ += c2 * u3[i+2][j][k-1];
_t_200_ = _t_201_ * _t_202_;
_t_127_ += c1 * _t_200_ * stry[j];
r1ic0jc0kc0 += _t_127_;
// Second contribution block: i-offset material/metric products.
_t_113_ = 2.0 * mu[i-1][j][k];
_t_58_ = 2.0 * mu[i+2][j][k];
_t_76_ = 2.0 * mu[i-2][j][k];
_t_95_ = 2.0 * mu[i+1][j][k];
_t_76_ += la[i-2][j][k];
_t_113_ += la[i-1][j][k];
_t_95_ += la[i+1][j][k];
_t_58_ += la[i+2][j][k];
_t_117_ = met1[i-1][j][k] * la[i-1][j][k] * met3[i-1][j][k];
_t_90_ = stry[j] * _t_117_ * _t_118_;
_t_122_ = met1[i-1][j][k] * la[i-1][j][k] * met4[i-1][j][k];
_t_90_ += _t_122_ * _t_123_;
_t_110_ = met1[i-1][j][k] * _t_113_ * met2[i-1][j][k];
_t_90_ += strx[i] * _t_110_ * _t_111_;
_t_99_ = met1[i+1][j][k] * la[i+1][j][k] * met3[i+1][j][k];
_t_90_ += stry[j] * _t_99_ * _t_100_;
_t_104_ = met1[i+1][j][k] * la[i+1][j][k] * met4[i+1][j][k];
_t_90_ += _t_104_ * _t_105_;
_t_92_ = met1[i+1][j][k] * _t_95_ * met2[i+1][j][k];
_t_90_ += strx[i] * _t_92_ * _t_93_;
_t_51_ = stry[j] * c1 * _t_90_;
_t_62_ = met1[i+2][j][k] * la[i+2][j][k] * met3[i+2][j][k];
_t_53_ = stry[j] * _t_62_ * _t_63_;
_t_67_ = met1[i+2][j][k] * la[i+2][j][k] * met4[i+2][j][k];
_t_53_ += _t_67_ * _t_68_;
_t_55_ = met1[i+2][j][k] * _t_58_ * met2[i+2][j][k];
_t_53_ += strx[i] * _t_55_ * _t_56_;
_t_80_ = met1[i-2][j][k] * la[i-2][j][k] * met3[i-2][j][k];
_t_53_ += stry[j] * _t_80_ * _t_81_;
_t_85_ = met1[i-2][j][k] * la[i-2][j][k] * met4[i-2][j][k];
_t_53_ += _t_85_ * _t_86_;
_t_73_ = met1[i-2][j][k] * _t_76_ * met2[i-2][j][k];
_t_53_ += strx[i] * _t_73_ * _t_74_;
_t_51_ += stry[j] * c2 * _t_53_;
r1ic0jc0kc0 += _t_51_;
// Third contribution block: j-direction differences at k offsets.
_t_10_ = -c2 * u2[i][j-2][k+2];
_t_10_ -= c1 * u2[i][j-1][k+2];
_t_10_ += c1 * u2[i][j+1][k+2];
_t_10_ += c2 * u2[i][j+2][k+2];
_t_0_ = c2 * _t_9_ * _t_10_;
_t_22_ = -c2 * u2[i][j-2][k-2];
_t_22_ -= c1 * u2[i][j-1][k-2];
_t_22_ += c1 * u2[i][j+1][k-2];
_t_22_ += c2 * u2[i][j+2][k-2];
_t_0_ += c2 * _t_21_ * _t_22_;
_t_35_ = -c2 * u2[i][j-2][k+1];
_t_35_ -= c1 * u2[i][j-1][k+1];
_t_35_ += c1 * u2[i][j+1][k+1];
_t_35_ += c2 * u2[i][j+2][k+1];
_t_0_ += c1 * _t_34_ * _t_35_;
_t_47_ = -c2 * u2[i][j-2][k-1];
_t_47_ -= c1 * u2[i][j-1][k-1];
_t_47_ += c1 * u2[i][j+1][k-1];
_t_47_ += c2 * u2[i][j+2][k-1];
_t_0_ += c1 * _t_46_ * _t_47_;
_t_17_ = -c2 * u1[i][j-2][k-2];
_t_17_ -= c1 * u1[i][j-1][k-2];
_t_17_ += c1 * u1[i][j+1][k-2];
_t_17_ += c2 * u1[i][j+2][k-2];
_t_14_ = stry[j] * _t_16_ * _t_17_;
_t_0_ += c2 * _t_14_ * strx[i];
_t_42_ = -c2 * u1[i][j-2][k-1];
_t_42_ -= c1 * u1[i][j-1][k-1];
_t_42_ += c1 * u1[i][j+1][k-1];
_t_42_ += c2 * u1[i][j+2][k-1];
_t_39_ = stry[j] * _t_41_ * _t_42_;
_t_0_ += c1 * _t_39_ * strx[i];
_t_30_ = -c2 * u1[i][j-2][k+1];
_t_30_ -= c1 * u1[i][j-1][k+1];
_t_30_ += c1 * u1[i][j+1][k+1];
_t_30_ += c2 * u1[i][j+2][k+1];
_t_27_ = stry[j-2] * _t_29_ * _t_30_;
_t_0_ += c1 * _t_27_ * strx[i];
_t_5_ = -c2 * u1[i][j-2][k+2];
_t_5_ -= c1 * u1[i][j-1][k+2];
_t_5_ += c1 * u1[i][j+1][k+2];
_t_5_ += c2 * u1[i][j+2][k+2];
_t_2_ = stry[j+2] * _t_4_ * _t_5_;
_t_0_ += c2 * _t_2_ * strx[i];
r1ic0jc0kc0 += _t_0_;
r1[i][j][k] = r1ic0jc0kc0;
}
}
}
// Companion pass to curvi_1: adds the remaining mixed j/k and i/j stencil
// contributions into r1. Machine-generated, CSE'd; statement order matters.
// Same dense 304x304x304 layout assumption as curvi_1.
// Expected launch: full 3-D grid/block (x->k, y->j, z->i); one thread per
// interior grid point, valid for 2 <= i,j,k <= N-3.
__global__ void __launch_bounds__(128,4) curvi_2 (double * __restrict__ in_r1, double *__restrict__ in_u1, double * __restrict__ in_u2, double *__restrict__ in_u3, double * __restrict__ in_mu, double * __restrict__ in_la, double * __restrict__ in_met1, double * __restrict__ in_met2, double * __restrict__ in_met3, double * __restrict__ in_met4, double * strx, double * stry, double c1, double c2, int N) {
//Determine the block's indices
int blockdim_k= (int)(blockDim.x);
int k0 = (int)(blockIdx.x)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_i= (int)(blockDim.z);
int i0 = (int)(blockIdx.z)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.z);
// Reinterpret the flat buffers as fixed-size 304^3 arrays for 3-D indexing.
double (*u1)[304][304] = (double (*)[304][304])in_u1;
double (*u2)[304][304] = (double (*)[304][304])in_u2;
double (*u3)[304][304] = (double (*)[304][304])in_u3;
double (*mu)[304][304] = (double (*)[304][304])in_mu;
double (*la)[304][304] = (double (*)[304][304])in_la;
double (*r1)[304][304] = (double (*)[304][304])in_r1;
double (*met1)[304][304] = (double (*)[304][304])in_met1;
double (*met2)[304][304] = (double (*)[304][304])in_met2;
double (*met3)[304][304] = (double (*)[304][304])in_met3;
double (*met4)[304][304] = (double (*)[304][304])in_met4;
// NOTE(review): '&' is bitwise-and of bool operands — works like '&&' here.
if (j>=2 & k>=2 & j<=N-3 & k<=N-3 & i>=2 & i<=N-3) {
// Generator-emitted scalar temporaries (CSE values).
double _t_17_;
double _t_10_;
double _t_22_;
double _t_35_;
double _t_47_;
double _t_30_;
double _t_42_;
double _t_5_;
double _t_21_;
double _t_0_;
double _t_16_;
double _t_58_;
double _t_14_;
double _t_46_;
double _t_41_;
double _t_39_;
double _t_69_;
double _t_34_;
double _t_29_;
double _t_64_;
double _t_27_;
double _t_9_;
double _t_4_;
double _t_53_;
double _t_2_;
// Running accumulator for r1[i][j][k]; written back once at the end.
double r1ic0jc0kc0 = r1[i][j][k];
double _t_54_;
double _t_81_;
double _t_59_;
double _t_76_;
double _t_65_;
double _t_87_;
double _t_70_;
double _t_92_;
double _t_51_;
double _t_75_;
double _t_80_;
double _t_86_;
double _t_91_;
// First contribution block: j/k cross differences of u1 and u2.
_t_17_ = -c2 * u1[i][j-2][k-2];
_t_17_ += c2 * u1[i][j-2][k+2];
_t_17_ -= c1 * u1[i][j-2][k-1];
_t_17_ += c1 * u1[i][j-2][k+1];
_t_10_ = -c2 * u2[i][j+2][k-2];
_t_10_ -= c1 * u2[i][j+2][k-1];
_t_10_ += c1 * u2[i][j+2][k+1];
_t_10_ += c2 * u2[i][j+2][k+2];
_t_22_ = -c2 * u2[i][j-2][k-2];
_t_22_ -= c1 * u2[i][j-2][k-1];
_t_22_ += c1 * u2[i][j-2][k+1];
_t_22_ += c2 * u2[i][j-2][k+2];
_t_35_ = -c2 * u2[i][j+1][k-2];
_t_35_ -= c1 * u2[i][j+1][k-1];
_t_35_ += c1 * u2[i][j+1][k+1];
_t_35_ += c2 * u2[i][j+1][k+2];
_t_47_ = -c2 * u2[i][j-1][k-2];
_t_47_ -= c1 * u2[i][j-1][k-1];
_t_47_ += c1 * u2[i][j-1][k+1];
_t_47_ += c2 * u2[i][j-1][k+2];
_t_30_ = -c2 * u1[i][j+1][k-2];
_t_30_ -= c1 * u1[i][j+1][k-1];
_t_30_ += c1 * u1[i][j+1][k+1];
_t_30_ += c2 * u1[i][j+1][k+2];
_t_42_ = -c2 * u1[i][j-1][k-2];
_t_42_ -= c1 * u1[i][j-1][k-1];
_t_42_ += c1 * u1[i][j-1][k+1];
_t_42_ += c2 * u1[i][j-1][k+2];
_t_5_ = -c2 * u1[i][j+2][k-2];
_t_5_ -= c1 * u1[i][j+2][k-1];
_t_5_ += c1 * u1[i][j+2][k+1];
_t_5_ += c2 * u1[i][j+2][k+2];
_t_21_ = met1[i][j-2][k] * mu[i][j-2][k] * met2[i][j-2][k];
_t_0_ = c2 * _t_21_ * _t_22_;
_t_16_ = met1[i][j-2][k] * mu[i][j-2][k] * met3[i][j-2][k];
_t_58_ = met1[i][j-2][k] * mu[i][j-2][k] * met1[i][j-2][k];
_t_14_ = stry[j] * _t_16_ * _t_17_;
_t_0_ += c2 * _t_14_ * strx[i];
_t_46_ = met1[i][j-1][k] * mu[i][j-1][k] * met2[i][j-1][k];
_t_0_ += c1 * _t_46_ * _t_47_;
_t_41_ = met1[i][j-1][k] * mu[i][j-1][k] * met3[i][j-1][k];
_t_39_ = stry[j] * _t_41_ * _t_42_;
_t_0_ += c1 * _t_39_ * strx[i];
_t_69_ = met1[i][j-1][k] * mu[i][j-1][k] * met1[i][j-1][k];
_t_34_ = met1[i][j+1][k] * mu[i][j+1][k] * met2[i][j+1][k];
_t_0_ += c1 * _t_34_ * _t_35_;
_t_29_ = met1[i][j+1][k] * mu[i][j+1][k] * met3[i][j+1][k];
_t_64_ = met1[i][j+1][k] * mu[i][j+1][k] * met1[i][j+1][k];
_t_27_ = stry[j-1] * _t_29_ * _t_30_;
_t_0_ += c1 * _t_27_ * strx[i];
_t_9_ = met1[i][j+2][k] * mu[i][j+2][k] * met2[i][j+2][k];
_t_0_ += c2 * _t_9_ * _t_10_;
_t_4_ = met1[i][j+2][k] * mu[i][j+2][k] * met3[i][j+2][k];
_t_53_ = met1[i][j+2][k] * mu[i][j+2][k] * met1[i][j+2][k];
_t_2_ = stry[j+1] * _t_4_ * _t_5_;
_t_0_ += c2 * _t_2_ * strx[i];
r1ic0jc0kc0 += _t_0_;
// Second contribution block: i/j cross differences of u2.
_t_54_ = -c2 * u2[i-2][j+2][k];
_t_81_ = c2 * u2[i-2][j+2][k];
_t_81_ -= c2 * u2[i-2][j-2][k];
_t_59_ = -c2 * u2[i-2][j-2][k];
_t_59_ += c2 * u2[i+2][j-2][k];
_t_76_ = -c2 * u2[i+2][j-2][k];
_t_54_ += c2 * u2[i+2][j+2][k];
_t_76_ += c2 * u2[i+2][j+2][k];
_t_65_ = -c2 * u2[i-2][j+1][k];
_t_81_ += c1 * u2[i-2][j+1][k];
_t_65_ += c2 * u2[i+2][j+1][k];
_t_76_ += c1 * u2[i+2][j+1][k];
_t_59_ += c1 * u2[i+1][j-2][k];
_t_87_ = -c2 * u2[i+1][j-2][k];
_t_65_ += c1 * u2[i+1][j+1][k];
_t_87_ += c1 * u2[i+1][j+1][k];
_t_54_ += c1 * u2[i+1][j+2][k];
_t_87_ += c2 * u2[i+1][j+2][k];
_t_70_ = -c2 * u2[i-2][j-1][k];
_t_81_ -= c1 * u2[i-2][j-1][k];
_t_70_ += c1 * u2[i+1][j-1][k];
_t_87_ -= c1 * u2[i+1][j-1][k];
_t_70_ += c2 * u2[i+2][j-1][k];
_t_76_ -= c1 * u2[i+2][j-1][k];
_t_59_ -= c1 * u2[i-1][j-2][k];
_t_92_ = -c2 * u2[i-1][j-2][k];
_t_51_ = c2 * _t_58_ * _t_59_;
_t_70_ -= c1 * u2[i-1][j-1][k];
_t_51_ += c1 * _t_69_ * _t_70_;
_t_92_ -= c1 * u2[i-1][j-1][k];
_t_65_ -= c1 * u2[i-1][j+1][k];
_t_51_ += c1 * _t_64_ * _t_65_;
_t_92_ += c1 * u2[i-1][j+1][k];
_t_54_ -= c1 * u2[i-1][j+2][k];
_t_51_ += c2 * _t_53_ * _t_54_;
_t_92_ += c2 * u2[i-1][j+2][k];
_t_75_ = met1[i+2][j][k] * la[i+2][j][k] * met1[i+2][j][k];
_t_51_ += c2 * _t_75_ * _t_76_;
_t_80_ = met1[i-2][j][k] * la[i-2][j][k] * met1[i-2][j][k];
_t_51_ += c2 * _t_80_ * _t_81_;
_t_86_ = met1[i+1][j][k] * la[i+1][j][k] * met1[i+1][j][k];
_t_51_ += c1 * _t_86_ * _t_87_;
_t_91_ = met1[i-1][j][k] * la[i-1][j][k] * met1[i-1][j][k];
_t_51_ += c1 * _t_91_ * _t_92_;
r1ic0jc0kc0 += _t_51_;
r1[i][j][k] = r1ic0jc0kc0;
}
}
// Host driver for the curvi_1/curvi_2 stencil passes.
// Uploads the ten N^3 fields and the two N-length stretch vectors, runs both
// kernels, and copies the updated r1 back to h_r1.
// Fixed: the original leaked every device allocation (no cudaFree), and did
// not check the kernel launches for errors.
extern "C" void host_code (double *h_r1, double *h_u1, double *h_u2, double *h_u3, double *h_mu, double *h_la, double *h_met1, double *h_met2, double *h_met3, double *h_met4, double *h_strx, double *h_stry, double c1, double c2, int N) {
    double *r1;
    cudaMalloc (&r1, sizeof(double)*N*N*N);
    check_error ("Failed to allocate device memory for r1\n");
    cudaMemcpy (r1, h_r1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
    double *u1;
    cudaMalloc (&u1, sizeof(double)*N*N*N);
    check_error ("Failed to allocate device memory for u1\n");
    cudaMemcpy (u1, h_u1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
    double *u2;
    cudaMalloc (&u2, sizeof(double)*N*N*N);
    check_error ("Failed to allocate device memory for u2\n");
    cudaMemcpy (u2, h_u2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
    double *u3;
    cudaMalloc (&u3, sizeof(double)*N*N*N);
    check_error ("Failed to allocate device memory for u3\n");
    cudaMemcpy (u3, h_u3, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
    double *mu;
    cudaMalloc (&mu, sizeof(double)*N*N*N);
    check_error ("Failed to allocate device memory for mu\n");
    cudaMemcpy (mu, h_mu, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
    double *la;
    cudaMalloc (&la, sizeof(double)*N*N*N);
    check_error ("Failed to allocate device memory for la\n");
    cudaMemcpy (la, h_la, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
    double *met1;
    cudaMalloc (&met1, sizeof(double)*N*N*N);
    check_error ("Failed to allocate device memory for met1\n");
    cudaMemcpy (met1, h_met1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
    double *met2;
    cudaMalloc (&met2, sizeof(double)*N*N*N);
    check_error ("Failed to allocate device memory for met2\n");
    cudaMemcpy (met2, h_met2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
    double *met3;
    cudaMalloc (&met3, sizeof(double)*N*N*N);
    check_error ("Failed to allocate device memory for met3\n");
    cudaMemcpy (met3, h_met3, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
    double *met4;
    cudaMalloc (&met4, sizeof(double)*N*N*N);
    check_error ("Failed to allocate device memory for met4\n");
    cudaMemcpy (met4, h_met4, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
    double *strx;
    cudaMalloc (&strx, sizeof(double)*N);
    check_error ("Failed to allocate device memory for strx\n");
    cudaMemcpy (strx, h_strx, sizeof(double)*N, cudaMemcpyHostToDevice);
    double *stry;
    cudaMalloc (&stry, sizeof(double)*N);
    check_error ("Failed to allocate device memory for stry\n");
    cudaMemcpy (stry, h_stry, sizeof(double)*N, cudaMemcpyHostToDevice);
    // curvi_1: 2-D decomposition (x->k, y->j); each thread marches i.
    dim3 blockconfig (16, 8);
    dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y), 1);
    curvi_1 <<<gridconfig, blockconfig>>> (r1, u1, u2, u3, mu, la, met1, met2, met3, met4, strx, stry, c1, c2, N);
    check_error ("curvi_1 kernel launch failed\n");
    // curvi_2: 3-D decomposition (x->k, y->j, z->i).
    dim3 blockconfig_1 (16, 2, 2);
    dim3 gridconfig_1 (ceil(N, blockconfig_1.x), ceil(N, blockconfig_1.y), ceil(N, blockconfig_1.z));
    curvi_2 <<<gridconfig_1, blockconfig_1>>> (r1, u1, u2, u3, mu, la, met1, met2, met3, met4, strx, stry, c1, c2, N);
    check_error ("curvi_2 kernel launch failed\n");
    // Blocking copy-back also synchronizes with the kernels above.
    cudaMemcpy (h_r1, r1, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost);
    // Release all device allocations (previously leaked).
    cudaFree (r1);
    cudaFree (u1);
    cudaFree (u2);
    cudaFree (u3);
    cudaFree (mu);
    cudaFree (la);
    cudaFree (met1);
    cudaFree (met2);
    cudaFree (met3);
    cudaFree (met4);
    cudaFree (strx);
    cudaFree (stry);
}
|
#include <cstdio>
#include<iostream>
#define N 33*256
#define threadPerBlock 256
#define blockPerGrid 32
// Block-wise dot product: each block accumulates partial products in shared
// memory, tree-reduces them, and writes one partial sum per block to
// c[blockIdx.x]; the host adds the per-block results.
// Requires threadPerBlock to be a power of two (the halving reduction
// assumes it).
__global__ void dot(float *a,float *b,float *c)
{
    __shared__ float partial[threadPerBlock];
    int lane = threadIdx.x;
    // Grid-stride accumulation over the N-element vectors.
    float acc = 0;
    for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < N;
         idx += blockDim.x * gridDim.x)
    {
        acc += a[idx] * b[idx];
    }
    partial[lane] = acc;
    __syncthreads();
    // Shared-memory tree reduction; the barrier stays outside the divergent
    // branch so every thread in the block reaches it.
    for (int half = blockDim.x / 2; half != 0; half /= 2)
    {
        if (lane < half)
            partial[lane] += partial[lane + half];
        __syncthreads();
    }
    if (lane == 0)
        c[blockIdx.x] = partial[0];
}
// Host driver: fills two N-element vectors with ones, runs the block-wise
// dot kernel, then sums the blockPerGrid partial results on the CPU.
// Expected output: "sum is :" followed by N.
int main()
{
    size_t vec_bytes = sizeof(float) * N;
    size_t part_bytes = sizeof(float) * blockPerGrid;
    float *h_a = (float *)malloc(vec_bytes);
    float *h_b = (float *)malloc(vec_bytes);
    float *h_partial = (float *)malloc(part_bytes);
    float *d_a;
    float *d_b;
    float *d_partial;
    cudaMalloc((void**)&d_a, vec_bytes);
    cudaMalloc((void**)&d_b, vec_bytes);
    cudaMalloc((void**)&d_partial, part_bytes);
    for (int i = 0; i < N; i++) {
        h_a[i] = 1;
        h_b[i] = 1;
    }
    cudaMemcpy(d_a, h_a, vec_bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, vec_bytes, cudaMemcpyHostToDevice);
    dot<<<blockPerGrid,threadPerBlock>>>(d_a, d_b, d_partial);
    // Blocking copy-back synchronizes with the kernel.
    cudaMemcpy(h_partial, d_partial, part_bytes, cudaMemcpyDeviceToHost);
    float sum = 0;
    for (int i = 0; i < blockPerGrid; i++)
        sum += h_partial[i];
    printf("sum is :%f \n", sum);
    free(h_a);
    free(h_b);
    free(h_partial);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_partial);
}
|
21,052 | #include<stdio.h>
#include<cuda.h>
# define s 1000
// Parallel minimum of a[0..blockDim.x) into the device scalar *c.
// Fixed: the original raced — every thread wrote *c = a[0] and then did an
// unguarded read-modify-write, so the result depended on scheduling.
// Now thread 0 seeds the result, all threads synchronize, and each thread
// folds its element in with atomicMin. Requires a single-block launch
// (it is invoked as <<<1, s>>>), since __syncthreads() is block-scoped.
__global__ void min(int *a,int *c)
{
    int id=threadIdx.x;
    if (id == 0)
        *c = a[0];
    __syncthreads();
    atomicMin(c, a[id]);
}
// Parallel maximum of a[0..blockDim.x) into the device scalar *d.
// Fixed: same data race as min() — unguarded read-modify-write on *d.
// Thread 0 seeds the result, a barrier orders it, then atomicMax combines.
// Requires a single-block launch (invoked as <<<1, s>>>).
__global__ void max(int *a,int *d)
{
    int id=threadIdx.x;
    if (id == 0)
        *d = a[0];
    __syncthreads();
    atomicMax(d, a[id]);
}
int main()
{
    // Find the min and max of s random ints in [1, 1000] on the GPU.
    int i,a[s],c,d;
    int *dev_a,*dev_c,*dev_d;
    cudaMalloc((void **) &dev_a, s*sizeof(int));
    // dev_c / dev_d each hold one scalar result — allocate one int each.
    // Fixed: the original allocated s ints for each result and, worse,
    // copied s*sizeof(int) bytes back into a single int on the host stack
    // (a 4 KB stack buffer overflow). Copy exactly sizeof(int).
    cudaMalloc((void **) &dev_c, sizeof(int));
    cudaMalloc((void **) &dev_d, sizeof(int));
    for(i=0;i<s;i++)
    {
        a[i]=rand()%1000+1;
    }
    cudaMemcpy(dev_a,a,s*sizeof(int),cudaMemcpyHostToDevice);
    min<<<1,s>>>(dev_a,dev_c);
    max<<<1,s>>>(dev_a,dev_d);
    cudaMemcpy(&c, dev_c, sizeof(int),cudaMemcpyDeviceToHost);
    cudaMemcpy(&d, dev_d, sizeof(int),cudaMemcpyDeviceToHost);
    printf("min=%d",c);
    printf("max=%d",d);
    cudaFree(dev_a);
    cudaFree(dev_c);
    cudaFree(dev_d);
    printf(" ");
    return 0;
}
/*
OUTPUT
cpllab00@cpllab:~$ nvcc minmax.cu
cpllab00@cpllab:~$ ./a.out
min=251 max=578 */
|
21,053 | #include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
double wtime(void)
{
double now_time;
struct timeval etstart;
struct timezone tzp;
if (gettimeofday(&etstart, &tzp) == -1)
perror("Error: calling gettimeofday() not successful.\n");
now_time = ((double)etstart.tv_sec) + /* in seconds */
((double)etstart.tv_usec) / 1000000.0; /* in microseconds */
return now_time;
}
#ifdef _TESTING_
/* Standalone smoke test: compile with -D_TESTING_ to print the current
 * time of day once and exit. */
int main(int argc, char **argv) {
    double time;
    time = wtime();
    printf("time of day = %10.4f\n", time);
    return 0;
}
#endif
|
21,054 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define MAX_DIM 32
#define SCALING_FACTOR 10.0
#define TILE_DIM 32
#define NUM_THREADS 1024
#define MOD_BASE 256
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a failed CUDA call as "GPUassert: <error> <file> <line>" on stderr;
// exits with the error code unless abort is false. Used via the gpuErrchk
// macro, which supplies __FILE__ and __LINE__.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code == cudaSuccess)
        return;
    fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// Return the largest value in arr[0..num_elem), or 0 for an empty array.
// Fixed: the running maximum was initialized to 0, which returned a wrong
// answer when every element is negative; seed it from the first element.
int find_max(int * arr, int num_elem)
{
    if (num_elem <= 0) {
        return 0;   // nothing to inspect
    }
    int max = arr[0];
    for (int i = 1; i < num_elem; i++) {
        if (arr[i] > max) {
            max = arr[i];
        }
    }
    return max;
}
// Allocate and return an array of k random matrix dimensions, each drawn
// from [1, MAX_DIM] via rand(). The caller owns (and must free) the buffer.
// rand() is intentionally left unseeded here, matching the original code.
int * def_mat_dim(int k)
{
    int * dims = (int *) malloc(k * sizeof(int));
    for (int idx = 0; idx < k; idx++)
    {
        dims[idx] = (rand() % MAX_DIM) + 1;
    }
    return dims;
}
// Allocate a dimX x dimY matrix of random doubles in [0, SCALING_FACTOR],
// reseeding rand() from the current time. The caller owns the buffer.
double * creat_mat(int dimX, int dimY)
{
    double * mat = (double *) malloc(dimX * dimY * sizeof(double));
    srand(time(NULL));
    int total = dimX * dimY;
    for (int idx = 0; idx < total; idx++) {
        mat[idx] = float(rand()) / float(RAND_MAX) * SCALING_FACTOR;
    }
    return mat;
}
// Compare two rows x cols row-major matrices element-wise and print whether
// they are equal (exact double comparison, as in the original).
// Fixed: the element index used `rows` as the row stride; a row of a
// row-major rows x cols matrix is `cols` long, so the old code compared the
// wrong elements (and read past the end) whenever rows != cols.
void if_mats_equal(double * A, double * B, int rows, int cols)
{
    for (int i = 0; i < rows; i++) {
        for (int j = 0; j < cols; j++) {
            if (A[i * cols + j] != B[i * cols + j]) {
                printf("Matrices are not equal\n");
                return;
            }
        }
    }
    printf("Matrices are equal\n");
}
// Reference (host) matrix multiply: C = A x B, with every output entry
// truncated to int and reduced modulo MOD_BASE, then stored back as double.
// A is ARows x ACols, B is BRows x BCols (BRows is unused; ACols is taken
// as the inner dimension), C is ARows x BCols — all row-major.
void cpu_mat_mul(double* A, double* B, double* C, int ARows, int ACols, int BRows, int BCols)
{
    for (int r = 0; r < ARows; r++) {
        for (int col = 0; col < BCols; col++) {
            double acc = 0.0;
            for (int t = 0; t < ACols; t++) {
                acc += A[r * ACols + t] * B[t * BCols + col];
            }
            C[r * BCols + col] = double(int(acc) % MOD_BASE);
        }
    }
}
// Print a dimX x dimY row-major matrix, one row per line ("%2.2f " cells).
// Fixed: the element index used `dimX` (the row count) as the row stride;
// a row of a dimX x dimY matrix is `dimY` long, so non-square matrices
// printed the wrong elements (and read out of bounds).
void print_mat(double * mat, int dimX, int dimY)
{
    for (int i = 0; i < dimX; i++) {
        for (int j = 0; j < dimY; j++) {
            printf("%2.2f ", mat[i * dimY + j]);
        }
        printf("\n");
    }
}
// Multiply the chain mat_list[0] * mat_list[1] * ... * mat_list[num_dim-2]
// (matrix t has dimensions dim_list[t] x dim_list[t+1]) on the CPU,
// ping-ponging between two scratch buffers sized for the largest possible
// intermediate. Returns the buffer holding the final product (caller frees
// it); the other scratch buffer is freed here.
double * cpu_multi_mat_mult(int num_dim, int * dim_list, double ** mat_list) {
    int max_dim = find_max(dim_list, num_dim);
    // Two max_dim^2 scratch buffers cover any intermediate product shape.
    double * output_mat1 = (double *) calloc(max_dim * max_dim, sizeof(double));
    double * output_mat2 = (double *) calloc(max_dim * max_dim, sizeof(double));
    cpu_mat_mul(mat_list[0], mat_list[1], output_mat1, dim_list[0], dim_list[1], dim_list[1], dim_list[2]);
    int num_rows = dim_list[0];
    int num_cols = dim_list[2];
    //print_mat(output_mat1, num_rows, num_cols);
    int num_mult;
    // Alternate buffers each pass: odd passes read mat1 / write mat2,
    // even passes the reverse. num_rows never changes; num_cols tracks the
    // right-hand matrix of the latest multiply.
    for (num_mult = 1; num_mult < num_dim - 2; num_mult++) {
        if (num_mult % 2 == 1) {
            cpu_mat_mul(output_mat1, mat_list[num_mult + 1], output_mat2, num_rows, num_cols, dim_list[num_mult + 1] , dim_list[num_mult + 2]);
        }
        else {
            cpu_mat_mul(output_mat2, mat_list[num_mult + 1], output_mat1, num_rows, num_cols, dim_list[num_mult + 1] , dim_list[num_mult + 2]);
        }
        num_cols = dim_list[num_mult + 2];
    }
    //printf("%d %d\n", num_rows, num_cols);
    // The parity of the last executed pass decides which buffer holds the
    // result; free the other one.
    if (num_mult % 2 == 1) {
        free(output_mat2);
        return output_mat1;
    }
    else {
        free(output_mat1);
        return output_mat2;
    }
}
// NOTE(review): unimplemented device-side helper stub — the parameter list
// and body were never filled in; currently a no-op and has no callers in
// this file's visible code.
__device__
void MatMul(/* parameters */) {
}
/*
__global__
void matmult(double* A, double* B, double* C, int ARows, int ACols, int BRows, int BCols)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
double sum = 0.0;
if ((col < BCols) && (row < ARows)) {
for (int i = 0; i < ACols; i++) {
sum += A[row * ACols + i] * B[i * BCols + col];
}
C[row * BCols + col] = sum;
}
}
*/
// Computes C = A * B for row-major A (ARows x ACols) and B (BRows x BCols),
// giving C (ARows x BCols).  Intended for a single-block launch of
// NUM_THREADS threads: each thread strides over the flattened output.
// Fixed: output element (row, col) must be stored at row * BCols + col —
// C has BCols columns; the old code indexed with ACols and corrupted C
// whenever ACols != BCols.
__global__
void matmult_general(double* A, double* B, double* C, int ARows, int ACols, int BRows, int BCols)
{
    int num_elem_output = ARows * BCols;
    for (int n = threadIdx.x; n < num_elem_output; n += NUM_THREADS) {
        int C_elem_col = n % BCols;
        int C_elem_row = n / BCols;   // simplified from the old roundabout expression
        double sum = 0.0;
        for (int i = 0; i < ACols; i++) {
            sum += A[C_elem_row * ACols + i] * B[i * BCols + C_elem_col];
        }
        C[C_elem_row * BCols + C_elem_col] = sum;
    }
}
/*
__global__
void gpu_seq_multi_matmult(int num_dim, int * dim_list, double ** mat_list, double * output_mat1, double * output_mat2)
{
int grid_rows = (dim_list[0] + TILE_DIM - 1) / TILE_DIM;
int grid_cols = (dim_list[2] + TILE_DIM - 1) / TILE_DIM;
dim3 dimGrid(grid_cols, grid_rows);
dim3 dimBlock(TILE_DIM, TILE_DIM);
if (threadIdx.x == 0) {
matmult<<<dimGrid, dimBlock>>>(mat_list[0], mat_list[1], output_mat, dim_list[0], dim_list[1], dim_list[1], dim_list[2]);
cudaDeviceSynchronize();
}
__syncthreads();
//cudaThreadSynchronize();
}
*/
// Driver: builds a chain of num_dim-1 random matrices, stages them on the
// device, and computes the CPU reference chain product.  The GPU comparison
// path is currently commented out.
// NOTE(review): host/device allocations (mat_list, d_mat_dim, int_mat_list,
// d_out_mat, ...) are never freed — acceptable for a toy main, but worth
// cleaning up.
int main()
{
    int num_dim = 100;
    int num_mat = num_dim - 1;
    // mat_dim[k], mat_dim[k+1] give the shape of matrix k.
    int * mat_dim = def_mat_dim(num_dim);
    double ** mat_list = (double **) malloc((num_mat) * sizeof(double *));
    // printf("Copying matrix dimensions to device\n");
    int * d_mat_dim;
    cudaMalloc((void **)&d_mat_dim, num_dim * sizeof(int));
    cudaMemcpy(d_mat_dim, mat_dim, num_dim * sizeof(int), cudaMemcpyHostToDevice);
    // printf("Creating Matrix from on host\n");
    int k;
    // Fill each host matrix with random data.
    for (k = 0; k < num_mat; k++) {
        //printf("================= MATRIX %d ====================\n", k);
        //printf("%d %d\n", mat_dim[k], mat_dim[k+1]);
        mat_list[k] = creat_mat(mat_dim[k], mat_dim[k+1]);
    }
    // printf("Allocating space to store output matrix\n");
    // Final product has shape mat_dim[0] x mat_dim[num_dim-1].
    double * out_mat = (double *) malloc(mat_dim[0] * mat_dim[num_dim-1] * sizeof(double));
    double * d_out_mat;
    cudaMalloc((void **) &d_out_mat, mat_dim[0] * mat_dim[num_dim-1] * sizeof(double));
    // printf("Allocating space for each matrix, and storing pointer address of matrices on the host\n");
    // int_mat_list holds device pointers, staged on the host first.
    double ** int_mat_list = (double **) malloc(num_mat * sizeof(double *));
    for (k = 0; k < num_mat; k++) {
        cudaMalloc((void **)&int_mat_list[k], mat_dim[k] * mat_dim[k+1] * sizeof(double));
        cudaMemcpy(int_mat_list[k], mat_list[k], mat_dim[k] * mat_dim[k+1] * sizeof(double), cudaMemcpyHostToDevice);
    }
    // printf("Copying pointer addresses of matrices from host to device\n");
    // Copy the array of device pointers itself to the device.
    double ** d_mat_list;
    cudaMalloc(&d_mat_list, num_mat * sizeof(double *));
    cudaMemcpy(d_mat_list, int_mat_list, num_mat * sizeof(double *), cudaMemcpyHostToDevice);
    // Surface any error from the setup calls above.
    cudaError_t error = cudaGetLastError();
    if(error != cudaSuccess)
    {
        // print the CUDA error message and exit
        printf("CUDA error: %s\n", cudaGetErrorString(error));
        exit(-1);
    }
    /*
    for (k = 0; k < num_dim-1; k++) {
    printf("%d %d %d %d\n", k, mat_dim[k], mat_dim[k+1], &d_mat_list[k]);
    cudaMalloc((void **)&d_mat_list[k], mat_dim[k] * mat_dim[k+1] * sizeof(double));
    //cudaMemcpy(d_mat_list[k], mat_list[k], mat_dim[k] * mat_dim[k+1] * sizeof(double), cudaMemcpyHostToDevice);
    if(error != cudaSuccess)
    {
    // print the CUDA error message and exit
    printf("CUDA error: %s\n", cudaGetErrorString(error));
    exit(-1);
    }
    }
    printf("After d_mat_list\n");
    */
    // printf("At the kernel call\n");
    /*
    int grid_rows = (mat_dim[0] + TILE_DIM - 1) / TILE_DIM;
    int grid_cols = (mat_dim[2] + TILE_DIM - 1) / TILE_DIM;
    dim3 dimGrid(grid_cols, grid_rows);
    dim3 dimBlock(TILE_DIM, TILE_DIM);
    */
    // CPU reference result for the whole chain.
    double * cpu_mat = cpu_multi_mat_mult(num_dim, mat_dim, mat_list);
    printf("%d %d\n", mat_dim[0], mat_dim[num_dim-1]);
    print_mat(cpu_mat, mat_dim[0], mat_dim[num_dim-1]);
    printf("\n");
    /*
    printf("%d %d %d\n", mat_dim[0], mat_dim[1], mat_dim[2]);
    //matmult<<<dimGrid, dimBlock>>>(int_mat_list[0], int_mat_list[1], d_out_mat, mat_dim[0], mat_dim[1], mat_dim[1], mat_dim[2]);
    matmult_general<<<1, NUM_THREADS>>>(int_mat_list[0], int_mat_list[1], d_out_mat, mat_dim[0], mat_dim[1], mat_dim[1], mat_dim[2]);
    cudaThreadSynchronize();
    //multi_matmult<<<1, NUM_THREADS>>>(num_dim, d_mat_dim, d_mat_list, d_out_mat);
    //gpuErrchk(cudaPeekAtLastError());
    error = cudaGetLastError();
    if(error != cudaSuccess)
    {
    // print the CUDA error message and exit
    printf("CUDA error: %s\n", cudaGetErrorString(error));
    exit(-1);
    }
    cudaMemcpy(out_mat, d_out_mat, mat_dim[0] * mat_dim[num_dim-1] * sizeof(double), cudaMemcpyDeviceToHost);
    print_mat(out_mat, mat_dim[0], mat_dim[num_dim-1]);
    printf("\n");
    if_mats_equal(out_mat, cpu_mat, mat_dim[0], mat_dim[2]);
    */
    return 0;
}
|
21,055 | #define MAX_DWELL 256
/** a simple complex type usable from both host and device */
struct complex {
    // im defaults to 0 so a real number converts implicitly.
    __host__ __device__ complex(float re, float im = 0) {
        this->re = re;
        this->im = im;
    }
    /** real and imaginary part */
    float re, im;
}; // struct complex
// operator overloads for complex numbers
// component-wise addition
inline __host__ __device__ complex operator+
(const complex &a, const complex &b) {
    return complex(a.re + b.re, a.im + b.im);
}
// unary negation
inline __host__ __device__ complex operator-
(const complex &a) { return complex(-a.re, -a.im); }
// component-wise subtraction
inline __host__ __device__ complex operator-
(const complex &a, const complex &b) {
    return complex(a.re - b.re, a.im - b.im);
}
// complex multiplication: (a.re + i a.im)(b.re + i b.im)
inline __host__ __device__ complex operator*
(const complex &a, const complex &b) {
    return complex(a.re * b.re - a.im * b.im, a.im * b.re + a.re * b.im);
}
// squared magnitude |a|^2 (avoids the sqrt of abs())
inline __host__ __device__ float abs2(const complex &a) {
    return a.re * a.re + a.im * a.im;
}
// complex division via multiplication by the conjugate over |b|^2
inline __host__ __device__ complex operator/
(const complex &a, const complex &b) {
    float invabs2 = 1 / abs2(b);
    return complex((a.re * b.re + a.im * b.im) * invabs2,
                   (a.im * b.re - b.im * a.re) * invabs2);
}
/** computes the dwell (escape-time iteration count, capped at MAX_DWELL)
    for the single pixel (x, y) of a w x h image mapped linearly onto the
    complex rectangle [cmin, cmax] */
__device__ int pixel_dwell
(int w, int h, complex cmin, complex cmax, int x, int y) {
    complex dc = cmax - cmin;
    // Fractional position of the pixel inside the image.
    float fx = (float)x / w, fy = (float)y / h;
    complex c = cmin + complex(fx * dc.re, fy * dc.im);
    int dwell = 0;
    complex z = c;
    // Iterate z <- z^2 + c until escape (|z| > 2, i.e. |z|^2 > 4) or cap.
    while(dwell < MAX_DWELL && abs2(z) < 2 * 2) {
        z = z * z + c;
        dwell++;
    }
    return dwell;
} // pixel_dwell
/** computes the dwells for Mandelbrot image
	@param dwells the output array (w * h ints, row-major)
	@param w the width of the output image
	@param h the height of the output image
	@param cmin the complex value associated with the left-bottom corner of the
	image
	@param cmax the complex value associated with the right-top corner of the
	image
	Launch with a 2D grid whose thread count covers at least w x h.
 */
extern "C" __global__
void mandelbrot_k
(int *dwells, int w, int h, complex cmin, complex cmax) {
	// complex value to start iteration (c)
	int x = threadIdx.x + blockIdx.x * blockDim.x;
	int y = threadIdx.y + blockIdx.y * blockDim.y;
	// Fixed: guard the grid tail — launches rounded up to whole blocks
	// spawn threads past the image edge that must not write out of bounds.
	if (x < w && y < h) {
		int dwell = pixel_dwell(w, h, cmin, cmax, x, y);
		dwells[y * w + x] = dwell;
	}
} // mandelbrot_k
21,056 | extern "C"
// Softmax-style normalization of each row `col` of a states x states weight
// matrix: the first states-1 entries are exp() of the corresponding
// arguments, the last entry is an implicit 1, and the row is divided by its
// sum.  One (block, thread) combination handles one row.
// NOTE(review): the flattened index mixes gridDim and threadIdx in an
// unusual order — confirm it matches the host-side launch configuration.
__global__ void fMatrixExp(
    const float* arguments,
    float* results,
    const int states
) {
    const int X = gridDim.x;
    const int col = gridDim.y * X * threadIdx.x + X * blockIdx.y + blockIdx.x;
    if (col < states) {
        float sum = 0;
        // Exponentiate the states-1 free parameters of this row.
        for (int j = 0; j < states - 1; j++) {
            results[col * states + j] = expf(arguments[col * (states - 1) + j]);
            sum = sum + results[col * states + j];
        }
        // The last column is pinned to 1 (reference category).
        sum = sum + 1;
        results[col * states + states - 1] = 1;
        // Normalize the row so it sums to 1.
        for (int j = 0; j < states; j++) {
            results[col * states + j] = results[col * states + j] / sum;
        }
    }
}
extern "C"
#define BLOCK_DIM 1024
// Parallel version of fMatrixExp: one block per row, blockDim.x == states
// threads per row (states <= BLOCK_DIM required).  Each lane computes its
// exp() value (implicit 1 for the last lane), the block tree-reduces the
// row sum in shared memory, and every lane writes its normalized value.
__global__ void fMatrixReduce(
    const float* arguments,
    float* results
) {
    const int col = blockIdx.x;
    const int states = blockDim.x;
    const int tid = threadIdx.x;
    const int index = states * col + tid;
    __shared__ float sdata[BLOCK_DIM];
    __shared__ float res[BLOCK_DIM];
    // exp() of the first states-1 inputs; the last lane contributes 1.
    if (tid < (states - 1)) {
        const float f = expf(arguments[col * (states - 1) + tid]);
        sdata[tid] = f;
        res[tid] = f;
    } else {
        sdata[tid] = 1;
        res[tid] = 1;
    }
    __syncthreads();
    // Tree reduction of sdata[0..states-1].
    // Fixed: when blockDim.x < BLOCK_DIM, the slots sdata[states..BLOCK_DIM-1]
    // are never written, and the old unguarded sdata[tid + s] reads folded
    // uninitialized shared memory into the sum.  The extra tid + s < states
    // guard keeps the reduction exact for any block size <= BLOCK_DIM.
    for (int s = BLOCK_DIM / 2; s > 0; s >>= 1) {
        if (tid < s && tid + s < states) {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();
    }
    // Normalize: each lane's own value divided by the row sum.
    results[index] = res[tid] / sdata[0];
}
extern "C"
#define BLOCK_SIZE 32
// Per-block sum reduction: each block sums its blockDim.x-sized slice of
// `arguments` (bounded by n) into results[blockIdx.x] using dynamically
// sized shared memory.  Assumes blockDim.x == BLOCK_SIZE (a power of two).
__global__ void reduce5(const float* arguments, float* results, const int n) {
    extern __shared__ float sdata[];
    const int tid = threadIdx.x;
    const int i = blockIdx.x * blockDim.x + tid;
    // Fixed: lanes past n must contribute 0 — the old code left their
    // shared slots uninitialized and summed garbage.
    sdata[tid] = (i < n) ? arguments[i] : 0.0f;
    // Fixed: a barrier is required between populating shared memory and the
    // first cross-thread read in the reduction below.
    __syncthreads();
    for (int s = BLOCK_SIZE / 2; s > 0; s >>= 1) {
        if (tid < s) {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();
    }
    // One partial sum per block.
    if (tid == 0) {
        results[blockIdx.x] = sdata[0];
    }
}
extern "C"
// Writes `value` into each of the first `size` entries of `arguments`.
// One element per thread; threads past the end do nothing.
__global__ void fFill(
    float* arguments,
    const float value,
    const int size
) {
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < size) {
        arguments[idx] = value;
    }
}
extern "C"
// Accumulates into betaGrad the data-term gradient contribution for target
// column `to`.  The base gradient is 2 * weight * diff * distribution[i] *
// expectedValue[to]; it is multiplied by the derivative of the normalized
// weights w.r.t. the beta parameters — w*(1-w) on the matching column,
// -w_to * w_j otherwise.  One thread per entry of the states x (states-1)
// parameter matrix.
// NOTE(review): exact loss semantics inferred from the Jacobian structure —
// confirm against the host-side training code.
__global__ void fMatrixKernel1(
    const int states,
    const float weight,
    const float diff,
    const float* distribution,
    const float* expectedValue,
    float* betaGrad,
    const int to,
    const float* weights
) {
    const int index = blockDim.x * blockIdx.x + threadIdx.x;
    if (index < states * (states - 1)) {
        // Decompose the flat index into (row i, free-parameter column j).
        const int i = index / (states - 1);
        const int j = index % (states - 1);
        const float curW = weights[i * states + to];
        const float grad = 2 * weight * diff * distribution[i] * expectedValue[to];
        if (j == to) {
            // Diagonal of the normalization Jacobian: w * (1 - w).
            betaGrad[index] += grad * curW * (1 - curW);
        } else {
            // Off-diagonal: -w_to * w_j.
            betaGrad[index] += -grad * curW * weights[i * states + j];
        }
    }
}
extern "C"
// Regularization counterpart of fMatrixKernel1: accumulates lambda * w
// into betaGrad, propagated through the same normalization Jacobian
// (w*(1-w) on the matching column, -w_to * w_j otherwise).  One thread per
// entry of the states x (states-1) parameter matrix.
__global__ void fMatrixKernel2(
    const int states,
    const float lambda,
    float* betaGrad,
    const int to,
    const float* weights
) {
    const int index = blockDim.x * blockIdx.x + threadIdx.x;
    if (index < (states - 1) * states) {
        // Decompose the flat index into (row `from`, free-parameter column j).
        const int from = index / (states - 1);
        const int j = index % (states - 1);
        const float curW = weights[from * states + to];
        const float grad = lambda * curW;
        if (j == to) {
            betaGrad[index] += grad * curW * (1 - curW);
        } else {
            betaGrad[index] += -grad * curW * weights[from * states + j];
        }
    }
}
extern "C"
// Gradient-descent style coordinate update: for each coordinate whose
// lastGrad entry is non-zero, subtracts step * gradCoordinate * totalGrad
// averaged over sumSize samples from result; untouched coordinates keep
// their value.  The int divisor promotes to float, so no truncation occurs.
// NOTE(review): lastGrad is only used as an activity mask here — confirm
// that zero means "coordinate not visited this round".
__global__ void fVectorKernel1(
    const float* lastGrad,
    const float* gradCoordinate,
    const float* totalGrad,
    const float step,
    const int sumSize,
    float* result,
    const int size
) {
    const int index = blockDim.x * blockIdx.x + threadIdx.x;
    if(index < size && lastGrad[index] != 0) {
        result[index] += -step * gradCoordinate[index] * totalGrad[index] / sumSize;
    }
}
/*
extern "C"
#define STATES 6
#define SIZE 15
__global__ void getSeqValue(
const float* params,
const float* seq,
const int len,
float result
) {
const int tid = threadIdx.x;
const int dim = STATES * (STATES - 1) * SIZE + STATES;
__shared__ float sdata[dim];
__shared__ float res[BLOCK_DIM];
if (tid < dim) {
sdata[tid] = params[tid];
}
__syncthreads();
for (int i = 0; i < len; i++) {
const int offset = seq[i] * STATES * (STATES - 1);
if (tid >= offset && tid < offset + STATES * (STATES - 1)) {
const int row = (tid - offset) / STATES;
}
__syncthreads();
}
if (tid < (states - 1)) {
for (int s = 1; s < states - 1; s *= 2) {
if (tid % (2 * s) == 0) {
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
}
const float sum = sdata[0] + 1;
results[index] = res[tid] / sum;
}*/ |
21,057 | #include <stdio.h>
#include <stdlib.h>
#define L (512)
#define N (100)
// Fills two N x L host matrices, prints the first 10 rows of each, and
// allocates matching device buffers.
int main(){
    int mat_A[N][L];
    int mat_B[N][L];
    // Fixed: a row of mat_A/mat_B holds L ints, so the device pointers must
    // be pointers to arrays of dimension L (the old (*)[N] type did not
    // match the host layout).
    int (*d_A)[L]; //pointer to arrays of dimension L
    int (*d_B)[L]; //pointer to arrays of dimension L
    //allocate values to matrices
    for(int i = 0; i < N; i++) {
        for(int j = 0; j < L; j++) {
            mat_A[i][j] = 2*j;
            mat_B[i][j] = 2*j+1;
        }
    }
    //Display results
    printf("Result A: ");
    for(int i = 0; i < 10; i++){
        for(int j =0; j<L; j++){
            printf("%d, ", mat_A[i][j]);
        }
    }
    printf("\n");
    printf("Result B: ");
    for(int i = 0; i < 10; i++){
        for(int j =0; j<L; j++){
            printf("%d, ", mat_B[i][j]);
        }
    }
    //allocation
    cudaMalloc((void**)&d_A, (N*L)*sizeof(int));
    cudaMalloc((void**)&d_B, (N*L)*sizeof(int));
    // Fixed: release the device allocations before exiting.
    cudaFree(d_A);
    cudaFree(d_B);
    return 0;
}
|
21,058 | #include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <assert.h>
// Element-wise c[i] = a[i] + b[i].
// NOTE(review): no bounds check — main() launches exactly num_floats
// threads, so this is safe there, but any over-provisioned launch would
// write past the arrays.
__global__ void
sum_arrays_on_device(float *c, float *a, float *b)
{
    uint32_t i = threadIdx.x + blockIdx.x*blockDim.x;
    c[i] = a[i] + b[i];
}
// Fills host_data with num_floats random values in [0.0, 25.5] and copies
// the buffer to device_data; aborts via assert on copy failure.
// NOTE(review): srand() is re-seeded with the current time on every call,
// so back-to-back calls within the same second produce identical data.
void initialize_data(float *host_data,
                     float *device_data,
                     const uint32_t num_floats)
{
    cudaError_t status;
    time_t t;
    uint32_t size_bytes = sizeof(float)*num_floats;
    srand((uint32_t)time(&t));
    for (uint32_t i = 0;
         i < num_floats;
         ++i) {
        // rand() & 0xFF yields 0..255; divided by 10 -> 0.0 .. 25.5
        host_data[i] = (float)(rand() & 0xFF)/10.0f;
    }
    status = cudaMemcpy(device_data,
                        host_data,
                        size_bytes,
                        cudaMemcpyHostToDevice);
    assert(status == cudaSuccess);
}
// Adds two randomly initialized 32-element vectors on the device and tears
// everything down.  The result `c` is never copied back — this program only
// exercises the allocation/launch/cleanup path.
int main(void)
{
    cudaError_t status;
    float *a;
    float *b;
    float *c;
    constexpr uint32_t num_floats = 32;
    constexpr uint32_t size_bytes = sizeof(float)*num_floats;
    status = cudaMalloc(&a, size_bytes);
    assert(status == cudaSuccess);
    status = cudaMalloc(&b, size_bytes);
    assert(status == cudaSuccess);
    status = cudaMalloc(&c, size_bytes);
    assert(status == cudaSuccess);
    float a_host[num_floats];
    float b_host[num_floats];
    initialize_data(a_host, a, num_floats);
    initialize_data(b_host, b, num_floats);
    // 8 threads/block, 4 blocks: exactly num_floats threads in total, which
    // is what the unguarded kernel requires.
    dim3 block = num_floats/4;
    dim3 grid = (num_floats + (block.x - 1))/block.x;
    sum_arrays_on_device<<<grid, block>>>(c, a, b);
    // Synchronize so in-kernel faults surface here.
    status = cudaDeviceSynchronize();
    assert(status == cudaSuccess);
    status = cudaFree(a);
    assert(status == cudaSuccess);
    status = cudaFree(b);
    assert(status == cudaSuccess);
    status = cudaFree(c);
    assert(status == cudaSuccess);
    status = cudaDeviceReset();
    assert(status == cudaSuccess);
    return EXIT_SUCCESS;
}
|
21,059 |
/*****************************************************************************
Example :cuda-matrix-matrix-multiplication-mgpu.cu
Objective : Write CUDA program to compute Matrix-Matrix multiplication
to be executed on multiple GPUs.(using global memory)
Input : None
Output : Execution time in seconds , Gflops achieved
Created : Aug 2011
E-mail : RarchK
****************************************************************************/
#include<stdio.h>
#include<cuda.h>
#include<math.h>
#define SIZE 128
#define EPS 1.0e-12
#define GRIDSIZE 10
#define BLOCKSIZE 16
#define MAX_GPU 2
typedef struct
{
int hA;
int wA;
int wB;
double* hMatA;
double* hMatB;
double* hMatC;
double* dMatA;
double* dMatB;
double* dMatC;
cudaStream_t stream;
}TGPUPlan;
int hA, wA,wB;
double *hMatA,*hMatB,*hMatC,*dMatA,*dMatB,*dMatC;
void checkResult(double *InMatA, double *InMatB, double *outMatC, int m, int n , int k );
// C(r x c) = A(r x m) * B(m x c), row-major; one thread per output element.
// Improved: accumulate the dot product in a register and store once —
// the old code read-modify-wrote dres in global memory on every iteration
// of the inner loop (same result, far more memory traffic).
__global__ void mmmul(double* dm1,double* dm2,double *dres,int r,int m,int c)
{
    int tx = blockIdx.x*blockDim.x + threadIdx.x;  // output column
    int ty = blockIdx.y*blockDim.y + threadIdx.y;  // output row
    if(tx<c&&ty<r)
    {
        double sum = 0.0;
        for(int i=0;i<m;i++)
            sum += dm1[ty*m+i]*dm2[i*c+tx];
        dres[ty*c+tx] = sum;
    }
}
// Prints the program name, problem size, elapsed seconds and Gflops.
// flag==1 means the timing/gflops values are valid; otherwise "---"
// placeholders are printed.
void print_on_screen(char * program_name,float tsec,double gflops,int size,int flag)//flag=1 if gflops has been calculated else flag =0
{
    printf("\n---------------%s----------------\n",program_name);
    printf("\tSIZE\t TIME_SEC\t Gflops\n");
    if(flag==1)
        printf("\t%d\t%f\t%lf\t",size,tsec,gflops);
    else
        // Fixed: the placeholders are C strings, so they must be printed
        // with %s — passing "---" through %lf is undefined behavior.
        printf("\t%d\t%s\t%s\t",size,"---","---");
}
/*
* Check for safe return of all calls to the device
*/
// Checks the status returned by a CUDA runtime call: silently continues on
// cudaSuccess and prints the error string and exits on anything else.
void CUDA_SAFE_CALL(cudaError_t call)
{
    cudaError_t ret = call;
    //printf("RETURN FROM THE CUDA CALL:%d\t:",ret);
    switch(ret)
    {
        case cudaSuccess:
            //  printf("Success\n");
            break;
            /*  case cudaErrorInvalidValue:
            {
            printf("ERROR: InvalidValue:%i.\n",__LINE__);
            exit(-1);
            break;
            }
            case cudaErrorInvalidDevicePointer:
            {
            printf("ERROR:Invalid Device pointeri:%i.\n",__LINE__);
            exit(-1);
            break;
            }
            case cudaErrorInvalidMemcpyDirection:
            {
            printf("ERROR:Invalid memcpy direction:%i.\n",__LINE__);
            exit(-1);
            break;
            } */
        default:
        {
            // Any non-success status is fatal; __LINE__ here reports this
            // wrapper's line, not the call site.
            printf(" ERROR at line :%i.%d' ' %s\n",__LINE__,ret,cudaGetErrorString(ret));
            exit(-1);
            break;
        }
    }
}
/* Function to check cpu and gpu results */
/* Compares device results against host results element by element: tracks
   the largest relative error and reports failure if any exceeds EPS.
   NOTE(review): if an element of both arrays is 0 the division yields
   NaN/inf — fine for this random-data test but not general. */
void relError(double* dRes,double* hRes,int size)
{
    double relativeError=0.0,errorNorm=0.0;
    int flag=0;
    int i;
    for( i = 0; i < size; ++i) {
        // Normalize by the larger magnitude so the error is symmetric.
        if (fabs(hRes[i]) > fabs(dRes[i]))
            relativeError = fabs((hRes[i] - dRes[i]) / hRes[i]);
        else
            relativeError = fabs((dRes[i] - hRes[i]) / dRes[i]);
        if (relativeError > EPS && relativeError != 0.0e+00 )
        {
            // Keep only the worst offender for the report.
            if(errorNorm < relativeError)
            {
                errorNorm = relativeError;
                flag=1;
            }
        }
    }
    if( flag == 1)
    {
        printf(" \n Results verfication : Failed");
        printf(" \n Considered machine precision : %e", EPS);
        printf(" \n Relative Error  : %e\n", errorNorm);
    }
    else
        printf("\n Results verfication : Success\n");
}
// Splits an SIZE x SIZE matrix product row-wise across up to MAX_GPU
// devices, runs mmmul on each device's slice in its own stream, gathers the
// slices into host_C, and verifies against a serial CPU multiply.
int main(int argc,char** argv)
{
    int numGPU;
    int hA,wA,wB;
    double *host_A,*host_B,*host_C;
    int gpuBase,offset;
    int i,j;
    /* ----- MULTI DEVICE COUNT --------*/
    CUDA_SAFE_CALL(cudaGetDeviceCount(&numGPU));
    if(numGPU > MAX_GPU )
        numGPU=MAX_GPU;
    printf("CUDA CAPABLE DEVICE COUNT: %d\n",numGPU);
    hA=SIZE;
    wA=SIZE;
    wB=SIZE;
    /*---------FILLING HOST MATRICES---------*/
    host_A=(double*)malloc(hA*wA*sizeof(double));
    host_B=(double*)malloc(wA*wB*sizeof(double));
    host_C=(double*)malloc(hA*wB*sizeof(double));
    for(i =0;i < hA * wA;i++)
        host_A[i] = drand48();
    for(i =0;i < wA*wB;i++)
        host_B[i] = drand48();
    /*-------INITIATING THE DATA FOR EACH DEVICE ----*/
    TGPUPlan plan[numGPU];
    for(i =0;i < numGPU; i++)
    {
        plan[i].hA = hA / numGPU;
        plan[i].wA = wA;
        plan[i].wB = wB;
    }
    /*.........To handle odd size of vectors: spread remainder rows.........*/
    for(i = 0;i < hA % numGPU; i++)
        plan[i].hA++;
    /* Fixed: hMatA/hMatB used to be malloc'd here and then immediately
       overwritten below with aliases into host_A/host_B, leaking both
       allocations.  Only hMatC needs its own buffer. */
    for(i = 0; i<numGPU ; i++)
        plan[i].hMatC=(double*)malloc(plan[i].hA*plan[i].wB*sizeof(double));
    /*--------Division of input matrix for different GPU's-----*/
    gpuBase=0;
    for(i =0;i < numGPU ;i++)
    {
        plan[i].hMatA = host_A + gpuBase ;   /* this GPU's slice of A */
        plan[i].hMatB = host_B ;             /* B is replicated on every GPU */
        gpuBase += plan[i].hA * plan[i].wA ;
    }
    for( i=0; i<numGPU ;i++)
    {
        CUDA_SAFE_CALL(cudaSetDevice(i));
        CUDA_SAFE_CALL(cudaStreamCreate(&plan[i].stream));
    }
    /*-----------GPU Computation------------*/
    for( i=0; i<numGPU ;i++)
    {
        CUDA_SAFE_CALL(cudaSetDevice(i));
        CUDA_SAFE_CALL(cudaMalloc((void**)&plan[i].dMatA,plan[i].hA*plan[i].wA*sizeof(double)));
        CUDA_SAFE_CALL(cudaMalloc((void**)&plan[i].dMatB,plan[i].wA*plan[i].wB*sizeof(double)));
        CUDA_SAFE_CALL(cudaMalloc((void**)&plan[i].dMatC,plan[i].hA*plan[i].wB*sizeof(double)));
        CUDA_SAFE_CALL(cudaMemcpyAsync(plan[i].dMatA,plan[i].hMatA,plan[i].hA*plan[i].wA*sizeof(double),cudaMemcpyHostToDevice,plan[i].stream));
        CUDA_SAFE_CALL(cudaMemcpyAsync(plan[i].dMatB,plan[i].hMatB,plan[i].wA*plan[i].wB*sizeof(double),cudaMemcpyHostToDevice,plan[i].stream));
        dim3 dimBlock(BLOCKSIZE,BLOCKSIZE);
        int gridX=1,gridY=1;
        if( plan[i].wB >= BLOCKSIZE )
            gridX=plan[i].wB/BLOCKSIZE;
        if( plan[i].hA >= BLOCKSIZE )
            gridY=plan[i].hA/BLOCKSIZE;
        dim3 dimGrid(gridX,gridY);
        /* Fixed: the kernel must receive this device's own row count
           (plan[i].hA), not the full matrix height hA — with the full
           height every GPU computed and wrote hA rows into a dMatC buffer
           sized for only plan[i].hA rows (out-of-bounds writes). */
        mmmul<<<dimGrid,dimBlock,0,plan[i].stream>>>(plan[i].dMatA,plan[i].dMatB,plan[i].dMatC,plan[i].hA,wA,wB);
        CUDA_SAFE_CALL(cudaMemcpyAsync(plan[i].hMatC,plan[i].dMatC,plan[i].hA*plan[i].wB*sizeof(double),cudaMemcpyDeviceToHost,plan[i].stream));
    }
    /*--------- PROCESS RESULTS FROM GPU ----------*/
    offset=0;
    for(i=0; i<numGPU ; i++)
    {
        CUDA_SAFE_CALL(cudaSetDevice(i));
        /* wait for this device's copy-back before reading hMatC */
        cudaStreamSynchronize(plan[i].stream);
        for( j=0;j < plan[i].hA*plan[i].wB ; j++ )
            host_C[j+offset] = plan[i].hMatC[j];
        offset += plan[i].hA * plan[i].wB ;
        CUDA_SAFE_CALL(cudaFree(plan[i].dMatA));
        CUDA_SAFE_CALL(cudaFree(plan[i].dMatB));
        CUDA_SAFE_CALL(cudaFree(plan[i].dMatC));
        CUDA_SAFE_CALL(cudaStreamDestroy(plan[i].stream));
    }
    /* verify the multi-GPU result against a serial CPU multiplication */
    checkResult(host_A,host_B,host_C,hA,wB,wA);
    /* release host memory */
    for(i=0; i<numGPU ; i++)
        free(plan[i].hMatC);
    free(host_A);
    free(host_B);
    free(host_C);
}
/***********************************************************************************
Routine for verifiying the CPU+GPU results against the CPU results
************************************************************************************/
/* Verifies the GPU product against a serial triple-loop CPU multiplication
   of the row-major m x k matrix InMatA and k x n matrix InMatB, then
   reports the maximum relative error via relError(). */
void checkResult(double *InMatA, double *InMatB, double *outMatC, int m, int n , int k )
{
    int i;
    int j;
    int k1;
    double *tempOut;
    tempOut = (double*) malloc (m * n * sizeof(double));
    if (tempOut == 0){
        printf("\n Memory allocation Failed for Resultant Matrix");
        exit (0);
    }
    /* CPU Compuation Performs operation using CBLAS */
    //cblas_dgemm (CblasColMajor, CblasNoTrans, CblasNoTrans, m, n , k, alpha, InMatA, m , InMatB , k, beta, tempOut, m);
    /******************************************************************
    Serial computation
    uncomment the below section if want to do the CPU computation
    using i,j,k loop method. Method work only for square matrices.
    *******************************************************************/
    for (i = 0; i < m ; ++i) {
        for (j = 0; j < n; ++j) {
            double cprod = 0;
            /* row i of A (stride k) dot column j of B (stride n) */
            for (k1 = 0; k1 < k; ++k1)
                cprod += InMatA[k1 + k* i] * InMatB[j + n * k1];
            tempOut[j + n * i] = cprod;//alpha * cprod + beta * tempOut[j * n + i];
        }
    }
    printf("\n..............\n");
    relError(outMatC,tempOut,m*n);
    free(tempOut);
}
|
21,060 | #include "includes.h"
// Element-wise vector addition: c[i] = a[i] + b[i] for i in [0, n).
__global__ void vecAddKernel(float *a, float *b, float *c, int n)
{
    // Flat global thread index
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    // Guard against threads launched past the vector length
    if (idx < n)
        c[idx] = a[idx] + b[idx];
}
21,061 | #include <stdio.h>
#include <future>
#include <thread>
#include <chrono>
#include <iostream>
#include <iterator>
#include <cstring>
#define N 1000000
#define SIZE 100
__constant__ int factor = 0;
// Element-wise c[i] = factor * (a[i] + b[i]), where `factor` lives in
// __constant__ memory and is initialized to 0 here — so the result is all
// zeros unless the host overwrites it (e.g. via cudaMemcpyToSymbol).
// NOTE(review): no bounds check — the launch must cover exactly the array
// length or c is written out of range; confirm the intended launch config.
__global__
void vectorAdd(int *a, int *b, int *c) {
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    c[i] = factor*(a[i] + b[i]);
}
// Element-wise matrix addition c[i][j] = a[i][j] + b[i][j] via a 2D launch.
// Expects arrays-of-row-pointers (int**) that are valid device pointers.
// NOTE(review): no bounds checks — the 2D grid must match the matrix shape.
__global__
void matrixAdd(int **a,int **b, int**c) {
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    int j = blockIdx.y*blockDim.y + threadIdx.y;
    c[i][j] = a[i][j] + b[i][j];
}
#define PRINT(x) \
std::cout << #x " = " << x << std::endl
// Prints the given C string to stdout as "ptr = <contents>".
void func(const char* ptr) {
    std::cout << "ptr = " << ptr << std::endl;
}
// Minimal complex-number type with the operations the Julia-set iteration
// needs (multiply, add, squared magnitude); usable on host and device.
struct cuComplex {
    float r;  // real part
    float i;  // imaginary part
    __host__ __device__ cuComplex(float a, float b) : r(a), i(b) {}
    // |z|^2 — avoids a sqrt for the escape test
    __host__ __device__ float magnitude2(void) { return r*r + i*i; }
    __host__ __device__ cuComplex operator*(const cuComplex& a) {
        return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i);
    }
    __host__ __device__ cuComplex operator+(const cuComplex& a) {
        return cuComplex(r+a.r, i+a.i);
    }
};
/*
struct d_cuComplex {
float r;
float i;
__device__ d_cuComplex(float a, float b) : r(a), i(b) {}
__device__ float magnitude2(void) { return r*r + i*i; }
__device__ d_cuComplex operator*(const d_cuComplex& a) {
return d_cuComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ d_cuComplex operator+(const d_cuComplex& a) {
return d_cuComplex(r+a.r, i+a.i);
}
};*/
/*
int julia(int x, int y) {
const float scale = 1.5;
float jx = scale * (float)(SIZE/2 - x)/(SIZE/2);
float jy = scale * (float)(SIZE/2 - y)/(SIZE/2);
// cuComplex c(-0.4, 0.6);
cuComplex c(-0.8, 0.156);
cuComplex a(jx, jy);
int i = 0;
for (i=0; i<200; i++) {
a = a*a + c;
if (a.magnitude2() > 1000)
return 0;
}
return 1;
}
*/
// Escape-time membership test: returns 1 if image pixel (x, y) belongs to
// the Julia set for c = -0.8 + 0.156i (200 iterations, escape when
// |z|^2 > 1000), 0 otherwise.  Callable from host and device.
__host__ __device__ int julia(int x, int y) {
    const float scale = 1.5;
    // Map the pixel onto the complex plane, centered on the image midpoint.
    float jx = scale * (float)(SIZE/2 - x)/(SIZE/2);
    float jy = scale * (float)(SIZE/2 - y)/(SIZE/2);
    cuComplex c(-0.8, 0.156);
    cuComplex a(jx, jy);
    int i = 0;
    for (i=0; i<200; i++) {
        a = a*a + c;
        if (a.magnitude2() > 1000)
            return 0;   // escaped: not in the set
    }
    return 1;
}
// CPU reference renderer: fills the SIZE x SIZE character image with 'x'
// where the pixel is in the Julia set and ' ' elsewhere.
void kernel(char* ptr) {
    for (int row = 0; row < SIZE; ++row) {
        char* line = ptr + row * SIZE;
        for (int col = 0; col < SIZE; ++col) {
            line[col] = julia(col, row) == 1 ? 'x' : ' ';
        }
    }
}
// Prints the SIZE x SIZE image one row per line, copying each row into a
// NUL-terminated stack buffer so it can be emitted with a single printf.
void printImage(char* ptr) {
    char row[SIZE + 1];
    row[SIZE] = '\0';
    for (int r = 0; r < SIZE; ++r) {
        std::memcpy((void*)row, (void*)(ptr + SIZE * r), SIZE);
        printf("%s\n", row);
    }
    printf("\n");
}
// GPU renderer: launched with one block per pixel (grid = SIZE x SIZE,
// 1 thread per block); each block writes one character of the image.
__global__ void julia_kernel(char *image) {
    int x = blockIdx.x;
    int y = blockIdx.y;
    // Row-major offset; gridDim.x equals the image width.
    int offset = x+y * gridDim.x;
    int juliaValue = julia(x,y);
    image[offset] = juliaValue == 1 ? 'x' : ' ';
}
// Enumerates CUDA devices (dumping their properties), renders the Julia set
// on CPU and GPU, prints both ASCII images, and reports wall-clock time.
// NOTE(review): d_image is never cudaFree'd and the cudaMemcpy result is
// unchecked — harmless for a demo, but worth tightening.
int main(int argc, char** argv) {
    // start time
    auto startTime = std::chrono::high_resolution_clock::now();
    printf("Hello World\n");
    // get the number of devices
    int numDevices;
    cudaGetDeviceCount(&numDevices);
    PRINT(numDevices);
    cudaDeviceProp prop;
    // Dump the capability/limit fields of every device.
    for (auto i=0 ; i<numDevices; i++) {
        cudaGetDeviceProperties(&prop, i);
        PRINT(prop.name);
        PRINT(prop.totalGlobalMem);
        PRINT(prop.sharedMemPerBlock);
        PRINT(prop.regsPerBlock);
        PRINT(prop.warpSize);
        PRINT(prop.memPitch);
        PRINT(prop.maxThreadsPerBlock);
        PRINT(prop.maxThreadsDim[0]);
        PRINT(prop.maxThreadsDim[1]);
        PRINT(prop.maxThreadsDim[2]);
        PRINT(prop.maxGridSize[0]);
        PRINT(prop.maxGridSize[1]);
        PRINT(prop.maxGridSize[2]);
        PRINT(prop.totalConstMem);
        PRINT(prop.major);
        PRINT(prop.minor);
        PRINT(prop.clockRate);
        PRINT(prop.textureAlignment);
        PRINT(prop.deviceOverlap);
        PRINT(prop.multiProcessorCount);
        PRINT(prop.kernelExecTimeoutEnabled);
        PRINT(prop.integrated);
        PRINT(prop.canMapHostMemory);
        PRINT(prop.computeMode);
        PRINT(prop.maxTexture1D);
        PRINT(prop.maxTexture2D[0]);
        PRINT(prop.maxTexture2D[1]);
        PRINT(prop.maxTexture3D[0]);
        PRINT(prop.maxTexture3D[1]);
        PRINT(prop.maxTexture3D[2]);
        // PRINT(prop.maxTexture2DArray[0]);
        // PRINT(prop.maxTexture2DArray[1]);
        // PRINT(prop.maxTexture2DArray[2]);
        PRINT(prop.concurrentKernels);
    }
    // CPU render + print.
    char image[SIZE * SIZE];
    kernel(image);
    printImage(image);
    // GPU render (one block per pixel) + copy back + print.
    char h_image[SIZE * SIZE];
    char *d_image;
    cudaMalloc((void**)&d_image, SIZE*SIZE);
    dim3 grid(SIZE, SIZE);
    julia_kernel<<<grid, 1>>>(d_image);
    // Blocking memcpy doubles as synchronization with the kernel.
    cudaMemcpy(h_image, d_image, SIZE * SIZE, cudaMemcpyDeviceToHost);
    printImage(h_image);
    // stop time
    auto stopTime = std::chrono::high_resolution_clock::now();
    PRINT((stopTime - startTime).count());
    printf("Goodbye World\n");
}
|
21,062 |
/*
Reduction summation algorithm (with sequential addressing)
made by: Carrick McClain
Sources:
http://developer.download.nvidia.com/compute/cuda/1.1-Beta/x86_website/projects/reduction/doc/reduction.pdf
some guidance from https://stackoverflow.com
*/
#include <cuda.h>
#include <cuda_runtime.h>
#include <cstdlib>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <fstream>
#include <sstream>
#include <string.h>
#include <assert.h>
#define NUM_BLOCKS 2
#define BLOCK_WIDTH 8
#define BLOCK_SIZE (BLOCK_WIDTH * BLOCK_WIDTH)
#define NUM_FLOATS 100
using namespace std;
// Central CUDA error handler: prints the error string with its call site
// and, when abort is non-zero (the default), terminates the process.
inline void gpu_handle_error( cudaError_t err, const char* file, int line, int abort = 1 )
{
    if (err != cudaSuccess)
    {
        fprintf (stderr, "gpu error %s, %s, %d\n", cudaGetErrorString (err), file, line);
        if (abort)
            exit (EXIT_FAILURE);
    }
}
#define gpu_err_chk(e) {gpu_handle_error( e, __FILE__, __LINE__ );}
// Sequential-addressing reduction: each block sums 2*blockDim.x consecutive
// elements of X (first add happens during the load) and writes its partial
// sum to Y[blockIdx.x].  Requires blockDim.x to be a power of two and X to
// hold at least gridDim.x * 2 * blockDim.x elements.
__global__ void reduction_add (float* X, float* Y)
{
    __shared__ float XY[NUM_FLOATS];
    unsigned int tx = threadIdx.x;
    unsigned int i = blockIdx.x * (2 * blockDim.x) + threadIdx.x;
    // First add during load: fold in the second half of the block's span.
    XY[tx] = X[i] + X[i + blockDim.x];
    __syncthreads();
    // Fixed: the stride must be halved each step (>>= 1).  The old >>= 2
    // divided it by four, skipping stride levels and producing a wrong sum.
    for (unsigned int stride = blockDim.x/2; stride > 0; stride >>= 1)
    {
        if (tx < stride)
            XY[tx] += XY[tx + stride];
        __syncthreads();
    }
    if (tx == 0)
        Y[blockIdx.x] = XY[0];
}
// Reads up to NUM_FLOATS comma-separated floats from floats.csv, reduces
// them on the GPU, and prints the total of the per-block partial sums.
// NOTE(review): the launch reads 2*BLOCK_SIZE elements per block, which can
// exceed NUM_FLOATS with the current constants — the kernel/launch sizing
// deserves a follow-up fix of its own.
int main (int argc, char** argv)
{
    cudaError_t err;
    int idx = 0;
    float sum = 0;                  // fixed: accumulate floats, not ints
    char chars[256];                // fixed: was char[11] while getline read up to 256 bytes (overflow)
    float* h_input_data = (float*)malloc (NUM_FLOATS * sizeof(float));
    float* h_output_data = (float*)malloc (NUM_FLOATS * sizeof(float));
    float* d_input_data;
    float* d_output_data;
    ifstream infile;
    //get data from floats.csv
    infile.open("floats.csv", ifstream::in);
    if (infile.is_open())
    {
        // Cap at NUM_FLOATS so extra fields cannot overflow h_input_data.
        while (infile.good() && idx < NUM_FLOATS)
        {
            infile.getline(chars, sizeof(chars), ',');
            h_input_data[idx] = (float)(strtod(chars, NULL));
            idx++;
        }
        infile.close();
    }
    else cout << "Error opening file";
    // Fixed: sizeof(h_input_data) is the size of a pointer, so the old
    // assert could never be meaningful; check the parsed count instead.
    assert (idx == NUM_FLOATS);
    err = cudaMalloc ((void**) &d_input_data, NUM_FLOATS * sizeof(float));
    gpu_err_chk(err);
    err = cudaMalloc ((void**) &d_output_data, NUM_FLOATS * sizeof(float));
    gpu_err_chk(err);
    err = cudaMemcpy (d_input_data, h_input_data,
                      NUM_FLOATS * sizeof(float), cudaMemcpyHostToDevice);
    gpu_err_chk(err);
    dim3 dimGrid (NUM_BLOCKS);
    dim3 dimBlock (BLOCK_SIZE);
    reduction_add<<<dimGrid, dimBlock>>> (d_input_data, d_output_data);
    err = cudaGetLastError();
    gpu_err_chk(err);
    err = cudaMemcpy( h_output_data, d_output_data,
                      NUM_FLOATS * sizeof(float),
                      cudaMemcpyDeviceToHost );
    gpu_err_chk(err);
    // Fixed: `while (h_output_data != NULL)` never terminated (the pointer
    // never becomes NULL) and walked off the end of the buffer.  Only
    // NUM_BLOCKS partial sums exist — one per block.
    for (idx = 0; idx < NUM_BLOCKS; idx++)
        sum += h_output_data[idx];
    cout << "Sum of floats: " << sum;
    cudaFree (d_input_data);
    cudaFree (d_output_data);
    free (h_input_data);
    free (h_output_data);
    return 0;
}
21,063 | #include <cuda.h>
#include <stdio.h>
// Enumerates all CUDA devices and prints their capability/limit fields.
// Fixed: totalGlobalMem, sharedMemPerBlock, totalConstMem and
// textureAlignment are size_t fields of cudaDeviceProp — printing them with
// %d/%u is undefined behavior (and truncates on LP64); use %zu.
int main()
{
    int count;
    cudaDeviceProp prop;
    cudaGetDeviceCount(&count);
    printf("Count CUDA device = %i\n", count);
    for (int i = 0; i < count; i++) {
        cudaGetDeviceProperties(&prop, i);
        printf("Device %d\n", i);
        printf("Compute capability : %d.%d\n", prop.major, prop.minor);
        printf("Name : %s\n", prop.name);
        printf("Total Global Memory : %zu byte\n", prop.totalGlobalMem);
        printf("Shared memory per block : %zu byte\n", prop.sharedMemPerBlock);
        printf("Registers per block : %d\n", prop.regsPerBlock);
        printf("Warp size : %d\n", prop.warpSize);
        printf("Max threads per block : %d\n", prop.maxThreadsPerBlock);
        printf("Total constant memory : %zu byte\n", prop.totalConstMem);
        printf("Clock Rate : %d kHz\n", prop.clockRate);
        printf("Texture Alignment : %zu\n", prop.textureAlignment);
        printf("Device Overlap : %d\n", prop.deviceOverlap);
        printf("Multiprocessor Count : %d\n", prop.multiProcessorCount);
        printf("Max Threads Dim : %d %d %d\n", prop.maxThreadsDim[0],
               prop.maxThreadsDim[1],
               prop.maxThreadsDim[2]);
        printf("Max Grid Size : %d %d %d\n", prop.maxGridSize[0],
               prop.maxGridSize[1],
               prop.maxGridSize[2]);
        printf("Max threads per multiprocessor: %d\n", prop.maxThreadsPerMultiProcessor);
        // printf("%d", prop.maxBlocksPerMultiProcessor);
        printf("\n");
    }
    return 0;
}
|
21,064 | //STL
#include <iostream>
#include <vector>
#include <time.h>
#include <algorithm>
using std::cout; using std::endl; using namespace std;
unsigned i;
const unsigned N = 2048 * 4, bigN = 1000000;
unsigned gpuThr = 512;
unsigned gpuBl = N / gpuThr;
std::vector < float > inputVec( N );
void hostCalculateDCTPSNR( vector < float > &vec, float & vecMedian );
//=========================== gpu ===========================
__device__ float d_x[ N ], d_Xfp32[ N ], d_ix[ N ], d_rms[ N ];
__constant__ unsigned d_N[ 1 ];
__constant__ float d_median[ 1 ], d_max[ 1 ];
__device__ float d_inOut[ bigN ];
__device__ float d_inOutCopy[ bigN ];
// Copies d_inOut into d_inOutCopy element-wise; used to measure on-device
// copy fidelity/bandwidth.  The launch must cover exactly bigN threads.
__global__ void dummyCopy()
{
    unsigned ind = blockIdx.x * blockDim.x + threadIdx.x;
    d_inOutCopy[ ind ] = d_inOut[ ind ];
}
// Serially averages the per-sample squared errors in d_rms and prints the
// PSNR against the maximum signal value d_max[0].
// NOTE(review): the whole loop runs in every launched thread — intended to
// be launched as <<<1,1>>>; confirm at the call site.
__global__ void psnr()
{
    double acc = 0.0f;
    for ( unsigned i = 0; i < d_N[ 0 ]; i++ )
        acc += d_rms[ i ];
    acc /= float( d_N[ 0 ] );
    printf( "GPU PSNR: %f[dB]\n ", 10.0f * log10f( ( d_max[ 0 ] * d_max[ 0 ] ) / ( acc ) ) );
}
// Per-sample squared error between the median-restored original signal
// (d_x + median) and the DCT round-trip reconstruction d_ix; results feed
// the psnr() kernel.  The launch must cover exactly d_N[0] threads.
__global__ void rms()
{
    unsigned ind = blockIdx.x * blockDim.x + threadIdx.x;
    float x1 = d_x[ ind ] + d_median[ 0 ];
    float x2 = d_ix[ ind ];
    d_rms[ ind ] = ( x1 - x2 ) * ( x1 - x2 );
}
// Debug dump: prints the first and last three samples of the (median-
// restored) input d_x and of the reconstruction d_ix for eyeball checks.
// Intended for a single-thread launch.
__global__ void printKernel()
{
    printf( "======= GPU SIDE: =========\n" );
    unsigned resNo = 3;
    for ( unsigned i = 0; i < resNo; i++ )
        printf( "d_x[%i]: %4f\n", i, d_x[ i ] + d_median[ 0 ] );
    /*
    for ( unsigned i = 0; i < resNo; i++ )
    printf( "d_xNorm[%i]: %.4f\n", i, d_x[ i ] );
    for ( unsigned i = 0; i < resNo; i++ )
    printf( "d_Xfp32[%i]: %.4f\n", i, d_Xfp32[ i ] );
    */
    for ( unsigned i = 0; i < resNo; i++ )
        printf( "d_ix[%i]: %.4f\n", i, d_ix[ i ] );
    for ( unsigned i = d_N[ 0 ] - 1; i > d_N[ 0 ] - 4; i-- )
        printf( "d_x[%i]: %.4f\n", i, d_x[ i ] + d_median[ 0 ] );
    for ( unsigned i = d_N[ 0 ] - 1; i > d_N[ 0 ] - 4; i-- )
        printf( "d_ix[%i]: %.4f\n", i, d_ix[ i ] );
}
// Inverse DCT: each thread reconstructs one time-domain sample d_ix[ind]
// from the coefficients in d_Xfp32, adding the median back afterwards.
// Uses the fast (reduced-precision) __cosf intrinsic.
// NOTE(review): the inner loop bound uses the compile-time constant N while
// the rest of the kernel uses d_N[0]; these must agree — confirm.
// NOTE(review): accDC is accumulated but never used here (dead work).
__global__ void idctKernelFloat()
{
    unsigned ind = blockIdx.x * blockDim.x + threadIdx.x;
    float constVal = ( float( ind ) + 0.5f ) * 3.14159265f / float( d_N[ 0 ] );
    float sqrConst = sqrtf( 2.0f / float( d_N[ 0 ] ) );
    // DC coefficient carries the 1/sqrt(N) normalization.
    float tmpX = sqrtf( 1.0f / float( d_N[ 0 ] ) ) * d_Xfp32[ 0 ];
    float accDC = 0.0f, tmpx = 0.0f;
    for ( unsigned k = 1; k < N; k++ )
    {
        tmpx = d_Xfp32[ k ];
        tmpX += tmpx * sqrConst * __cosf( constVal * ( float( k ) ) );
        accDC += tmpx;
    }
    // Restore the median removed by dataMedianPreprocess().
    d_ix[ ind ] = tmpX + d_median[ 0 ];
}
// Forward DCT (DCT-II), one coefficient per thread, float accumulator:
//   d_Xfp32[ind] = sqrt(2/N) * sum_i x[i]*cos(pi*ind*(i+0.5)/N)
// The DC slot d_Xfp32[0] instead receives sum(x)/sqrt(N).
// Fix: originally *every* thread stored accDC into d_Xfp32[0], racing with
// thread 0's own tmpX store. All threads compute the same accDC (identical
// sequential summation), so the value happened to be right, but the store
// was redundant and racy; now only thread 0 writes the DC coefficient.
__global__ void dctKernelFloat()
{
    unsigned ind = blockIdx.x * blockDim.x + threadIdx.x;
    float constVal = float(ind) * 3.14159265f / float(d_N[0]);
    float sqrConst = sqrtf(2.0f / float(d_N[0]));
    float tmpX = 0.0f, accDC = 0.0f, tmpx = 0.0f;
    for (unsigned i = 0; i < N; i++)
    {
        tmpx = d_x[i];
        tmpX += sqrConst * tmpx * __cosf(constVal * (float(i) + 0.5f));
        accDC += tmpx;
    }
    if (ind == 0)
        d_Xfp32[0] = accDC / sqrtf(float(d_N[0]));
    else
        d_Xfp32[ind] = tmpX;
}
// Subtracts the precomputed median (d_median[0]) from every input sample so
// the data is roughly centred around zero before the DCT — float has its
// best mantissa precision near zero.
__global__ void dataMedianPreprocess()
{
    const unsigned tid = threadIdx.x + blockIdx.x * blockDim.x;
    d_x[tid] = d_x[tid] - d_median[0];
}
// Host driver: (1) measures PSNR of a host->GPU->host round trip and a
// GPU->GPU copy, then (2) runs a forward/inverse DCT on the GPU and on the
// host and reports both PSNRs.
// Fix: the original called cudaFree() on d_inOut, d_inOutCopy, d_x, d_ix,
// d_median, d_rms, d_max, d_Xfp32 and d_N. Those are statically allocated
// __device__/__constant__ symbols, not cudaMalloc'd pointers, so cudaFree()
// on them is invalid and only produced ignored errors; the calls are removed.
int main( int argc, char* argv[] )
{
    // --- raw copy fidelity checks -------------------------------------
    vector < float > h_vecIn;
    for ( i = 0; i < bigN; i++ )
        h_vecIn.push_back( rand() % 100 * 0.01f * i );
    cudaMemcpyToSymbol( d_inOut, &h_vecIn[ 0 ], sizeof( float ) * bigN );
    vector < float > h_vecOut( bigN, 0.0f );
    cudaMemcpyFromSymbol( &h_vecOut[ 0 ], d_inOut, sizeof( float ) * bigN );
    double acc = 0.0f; float x1 = 0.0f, x2 = 0.0f;
    for ( i = 0; i < bigN; i++ )
    {
        x1 = h_vecIn[ i ];
        x2 = h_vecOut[ i ];
        acc += ( x1 - x2 ) * ( x1 - x2 );
    }
    acc /= double( bigN );
    float maxEl = *std::max_element( h_vecIn.begin(), h_vecIn.end() );
    printf( "psnr raw HOST2GPU copy: %f[dB]\n", 10.0f * log10( maxEl * maxEl / acc ) );
    dummyCopy<<< bigN / 500, 500 >>>();
    cudaMemcpyFromSymbol( &h_vecOut[ 0 ], d_inOutCopy, sizeof( float ) * bigN );
    acc = 0.0f; x1 = 0.0f; x2 = 0.0f;
    for ( i = 0; i < bigN; i++ )
    {
        x1 = h_vecIn[ i ];
        x2 = h_vecOut[ i ];
        acc += ( x1 - x2 ) * ( x1 - x2 );
    }
    acc /= double( bigN );
    maxEl = *std::max_element( h_vecIn.begin(), h_vecIn.end() );
    printf( "psnr raw GPU2GPU copy: %f[dB]\n", 10.0f * log10( maxEl * maxEl / acc ) );
    // --- GPU DCT accuracy ---------------------------------------------
    for ( i = 0; i < ( unsigned )inputVec.size(); i++ ) inputVec[ i ] = rand() % 100 * 0.001f * i;
    inputVec[ 3 ] = 0.05f;
    vector < float > sortVec( inputVec ); sort( sortVec.begin(), sortVec.end() );
    float vecMedian = sortVec[ sortVec.size() / 2 ]; // median of the input
    cudaMemcpyToSymbol( d_x, &inputVec[ 0 ], sizeof( float ) * ( unsigned )inputVec.size() );
    cudaMemcpyToSymbol( d_N, &N, sizeof( unsigned ) );
    cudaMemcpyToSymbol( d_median, &vecMedian, sizeof( float ) );
    cudaMemcpyToSymbol( d_max, &sortVec[ sortVec.size() - 1 ], sizeof( float ) );
    dataMedianPreprocess<<< gpuBl, gpuThr >>>();
    clock_t t = clock();
    dctKernelFloat<<< gpuBl, gpuThr >>>();
    cudaDeviceSynchronize();
    cout << "CPU clocks GPU dct float accumulator: " << double( clock() - t ) << endl;
    t = clock();
    idctKernelFloat<<< gpuBl, gpuThr >>>();
    cudaDeviceSynchronize();
    cout << "CPU clocks GPU idct float accumulator: " << double( clock() - t ) << endl;
    printKernel<<< 1, 1 >>>();
    rms<<< gpuBl, gpuThr >>>();
    psnr<<< 1, 1 >>>();
    // --- host DCT accuracy (reference implementation) -----------------
    hostCalculateDCTPSNR( inputVec, vecMedian );
    cudaDeviceSynchronize();
    cudaDeviceReset();
    cout << endl << "PSNR - higher = better" << endl;
    return 0;
}
// Host reference: forward DCT-II and inverse DCT-III computed "from the
// definition" (O(N^2) each), timed with clock(), followed by the PSNR of the
// round trip. The median is subtracted before the transform (better float
// precision around zero) and added back afterwards, so `vec` is restored to
// its original contents before returning.
// NOTE(review): this function reuses the file-global loop index `i`.
void hostCalculateDCTPSNR( vector < float > &vec, float & vecMedian )
{
clock_t t;
unsigned vecSize = ( unsigned )vec.size();
// centre the signal around zero (mirrors dataMedianPreprocess on the GPU)
for ( i = 0; i < vecSize; i++ )
vec[ i ] -= vecMedian;
vector < float > vecDCT( vecSize );
vector < float > ix( vecSize );
t = clock();
// DC coefficient: sum(x)/sqrt(N)
float dc = 0.0f;
for ( i = 0; i < vecSize; i++ )
dc += vec[ i ];
dc /= sqrt( vecSize );
vecDCT[ 0 ] = dc;
// AC coefficients: X[k] = sqrt(2/N) * sum_i x[i]*cos(pi*k*(2i+1)/(2N))
float acDCT = 0.0f, cons = sqrt( 2.0f / vecSize );
float pi = 3.14159265f;
for ( unsigned k = 1; k < vecSize; k++ )
{
acDCT = 0.0f;
for ( i = 0; i < vecSize; i++ )
acDCT += vec[ i ] * cos( pi * k * ( 2 * i + 1 ) / ( 2 * vecSize ) );
vecDCT[ k ] = cons * acDCT;
}
cout << "CPU clocks HOST dct float accumulator: " << double( clock() - t ) << endl;
t = clock();
// inverse: x[i] = X[0]/sqrt(N) + sqrt(2/N) * sum_{k>=1} X[k]*cos(...)
float dcCons = ( 1.0f / sqrt( vecSize ) ) * vecDCT[ 0 ];
for ( i = 0; i < vecSize; i++ )
{
acDCT = 0.0f;
for ( unsigned k = 1; k < vecSize; k++ )
acDCT += vecDCT[ k ] * cos( pi * k * ( 2 * i + 1 ) / ( 2 * vecSize ) );
ix[ i ] = dcCons + cons * acDCT + vecMedian; //results median addition
}
cout << "CPU clocks HOST idct float accumulator: " << double( clock() - t ) << endl;
// undo the median shift so the caller sees the original data again
for ( i = 0; i < vecSize; i++ )
vec[ i ] += vecMedian;
cout << endl << "======= HOST SIDE: =========" << endl;
// print the first and last three samples of input and reconstruction
for ( i = 0; i < 3; i++ )
cout << "h_x[" << i << "]: " << vec[ i ] << endl;
for ( i = 0; i < 3; i++ )
cout << "h_ix[" << i << "]: " << ix[ i ] << endl;
for ( i = vecSize - 1; i > vecSize - 4; i-- )
cout << "h_x[" << i << "]: " << vec[ i ] << endl;
for ( i = vecSize - 1; i > vecSize - 4; i-- )
cout << "h_ix[" << i << "]: " << ix[ i ] << endl;
// mean squared error over the full vector, then PSNR vs the signal maximum
double mse = 0.0f;
for ( i = 0; i < vecSize; i++ )
mse += ( vec[ i ] - ix[ i ] ) * ( vec[ i ] - ix[ i ] );
mse /= vecSize;
double maxEl = *std::max_element( vec.begin(), vec.end() );
double psnr = 10.0f * log10( maxEl * maxEl / mse );
cout << "HOST PSNR: " << psnr << "[dB]" << endl << endl;
}
//P.S. PSNR( x1[], x2[] ) = +InfdB for identical inputs x1[] and x2[]; PSNR = 0dB for x1[] != x2[]; higher = better accuracy to true/real value
//P.P.S for range [-1; +1] float datatype has biggest mantissa precision
|
#include <cuda_runtime.h>
#include <thrust/functional.h>
#include <thrust/scan.h>
#include <iostream>
#include <limits>
// Host-side binary addition operator passed to thrust::inclusive_scan_by_key.
int add_(int a, int b)
{
    return b + a;
}
// Device-side associative combine operator for the segmented scan (plain sum).
__device__ int log_plus(int a, int b)
{
    return b + a;
}
// Intra-warp segmented inclusive scan (Hillis-Steele, log2(32) strided
// steps). ptr[] holds the values and hd[] the segment-head flags; both are
// updated in place and the calling thread's inclusive result is returned.
// An element stops accumulating once a head flag is seen to its left.
// NOTE(review): there is no __syncwarp between the strided read-modify-write
// steps, so this assumes implicit warp-synchronous execution (pre-Volta
// semantics); on Volta+ independent thread scheduling this needs explicit
// warp synchronization — confirm the target architecture.
__device__ int segscan_warp(int* ptr, bool* hd, int idx) {
const unsigned int lane = idx & 31; // lane index within the warp
if (lane >= 1) {
ptr[idx] = hd[idx] ? ptr[idx] : log_plus(ptr[idx - 1] , ptr[idx]);
hd[idx] = hd[idx - 1] | hd[idx]; }
if (lane >= 2) {
ptr[idx] = hd[idx] ? ptr[idx] : log_plus(ptr[idx - 2] , ptr[idx]);
hd[idx] = hd[idx - 2] | hd[idx]; }
if (lane >= 4) {
ptr[idx] = hd[idx] ? ptr[idx] : log_plus(ptr[idx - 4] , ptr[idx]);
hd[idx] = hd[idx - 4] | hd[idx]; }
if (lane >= 8) {
ptr[idx] = hd[idx] ? ptr[idx] : log_plus(ptr[idx - 8] , ptr[idx]);
hd[idx] = hd[idx - 8] | hd[idx]; }
if (lane >= 16) {
ptr[idx] = hd[idx] ? ptr[idx] : log_plus(ptr[idx - 16] , ptr[idx]);
hd[idx] = hd[idx - 16] | hd[idx];
}
return ptr[idx];
}
// Block-level segmented inclusive scan built from per-warp scans: scan each
// warp, collect warp totals/flags into the first 32 slots, scan those with
// one warp, then accumulate the per-warp prefixes back into each thread's
// value. ptr/hd are indexed directly by idx (= threadIdx.x), so they must
// point at this block's slice of the data. Note that the first 32 slots of
// ptr/hd are temporarily reused for the warp totals (Step 2c) before the
// final values are stored back in Step 4.
__device__ void segscan_block(int* ptr, bool* hd, int idx)
{
unsigned int warpid = idx >> 5;
unsigned int warp_first = warpid << 5;
unsigned int warp_last = warp_first + 31;
// Step 1a:
// Before overwriting the input head flags, record whether // this warp begins with an "open" segment.
bool warp_is_open = (hd[warp_first] == 0);
__syncthreads ();
// Step 1b:
// Intra-warp segmented scan in each warp.
int val = segscan_warp(ptr, hd, idx);
// Step 2a:
// Since ptr[] contains *inclusive* results, irrespective of Kind, // the last value is the correct partial result.
int warp_total = ptr[warp_last];
// Step 2b:
// warp_flag is the OR-reduction of the flags in a warp and is
// computed indirectly from the mindex values in hd[].
// will_accumulate indicates that a thread will only accumulate a
// partial result in Step 4 if there is no segment boundary to its left.
bool warp_flag = hd[warp_last]!=0 || !warp_is_open;
bool will_accumulate = warp_is_open && hd[idx]==0;
__syncthreads ();
// Step 2c: The last thread in each warp writes partial results
if( idx == warp_last ) {
ptr[warpid] = warp_total;
hd[warpid] = warp_flag;
}
__syncthreads ();
// Step 3: One warp scans the per-warp results
if( warpid == 0 ) segscan_warp(ptr, hd, idx);
__syncthreads ();
// Step 4: Accumulate results from
if( warpid != 0 && will_accumulate)
val = log_plus(ptr[warpid -1], val);
__syncthreads ();
ptr[idx] = val;
__syncthreads ();
}
// Segmented scan over rows of 1838 elements, one row per block with 1024
// threads: scans the first 1024 elements, stitches the chunk boundary at
// index 1024 by hand, then scans the remaining 814 elements.
// NOTE(review): the second segscan_block call is guarded by
// threadIdx.x+1024<size, so only 814 of the 1024 threads enter a function
// that executes __syncthreads() — a divergent-barrier hazard; confirm this
// behaves on the target hardware.
// NOTE(review): the stitch marks position 1024 as a segment head after
// folding in element 1023 — this assumes 1024 is not already the start of a
// key segment; verify against the key layout built in main().
__global__ void kernel_1(int* array, int size, bool* key)
{
int idx=threadIdx.x;
int stt=blockIdx.x; // row handled by this block
segscan_block(array+stt*1838, key+stt*1838, idx);
__syncthreads();
// fold the prefix of the first chunk into the first element of the second
if(threadIdx.x==0&&key[1024+stt*1838]==0)
{
key[1024+stt*1838]=1;
array[1024+stt*1838]+=array[1023+stt*1838];
}
__syncthreads();
if(threadIdx.x+1024<size)
{
segscan_block(array+1024+stt*1838, key+1024+stt*1838, idx);
}
}
// Test driver: builds n rows of 1838 elements partitioned into segments of
// width 10, computes the reference result with a Thrust host-side
// inclusive scan-by-key, runs the custom segmented-scan kernel, and prints
// any index where the two disagree.
// Fixes: removed the unused `block2` launch configuration; <limits> is now
// included explicitly for std::numeric_limits instead of relying on
// transitive includes; the managed allocations are freed before returning.
int main()
{
    int* data;
    bool* keys;
    int* vals;
    int* thrust_keys;
    int n = 10;               // number of independent rows
    int array_size = 1838 * n;
    int num_of_rule = 1838;   // elements per row
    printf("%f\n",-std::numeric_limits<float>::max());
    // Managed memory so both the Thrust host path and the kernel can touch
    // the same buffers.
    cudaMallocManaged(&thrust_keys, array_size*sizeof(int));
    cudaMallocManaged(&data, array_size*sizeof(int));
    cudaMallocManaged(&keys, array_size*sizeof(bool));
    cudaMallocManaged(&vals, array_size*sizeof(int));
    // data is all ones; every 10th element starts a new segment.
    for (int i = 0; i < num_of_rule; i++)
    {
        for (int j = 0; j < n; j++)
        {
            data[i+j*num_of_rule] = 1;
            thrust_keys[i+j*num_of_rule] = i/10;
            if (i%10 == 0) keys[i+j*num_of_rule] = 1;
            else keys[i+j*num_of_rule] = 0;
        }
    }
    // Reference: per-row inclusive segmented scan via Thrust.
    thrust::equal_to<int> binary_pred;
    for (int i = 0; i < n; i++)
        thrust::inclusive_scan_by_key(thrust_keys+i*num_of_rule, thrust_keys +i*num_of_rule+ 1838, data+i*num_of_rule, vals+i*num_of_rule, binary_pred, add_);
    dim3 grid(n);
    dim3 block(1024);
    kernel_1<<< grid, block >>>(data, 1838, keys);
    cudaDeviceSynchronize();
    // Report mismatches between the kernel output and the reference.
    for (int i = 0; i < 1838*n; i++) {
        if (vals[i] != data[i]) printf("%d ", i);
        if ((vals[i]-1)%10 != (i%1838)%10) printf("%d vs %d\n", vals[i], i);
    }
    printf("\n");
    cudaFree(thrust_keys);
    cudaFree(data);
    cudaFree(keys);
    cudaFree(vals);
    return 0;
}
|
21,066 | #include<stdio.h>
#include<stdlib.h>
// Element-wise vector add: pd[i] = md[i] + nd[i]. Single block, one thread
// per element; no bounds guard, so blockDim.x must equal the array length.
__global__ void arradd(int* md, int* nd, int* pd)
{
    const int tid = threadIdx.x;
    pd[tid] = md[tid] + nd[tid];
}
// Adds two 200-element integer vectors on the GPU and prints the sums.
int main()
{
    int size = 200 * sizeof(int);
    int m[200], n[200], p[200], *md, *nd, *pd;
    int i = 0;
    // initialise inputs: m[i] = n[i] = i, result cleared
    for (i = 0; i < 200; i++)
    {
        m[i] = i;
        n[i] = i;
        p[i] = 0;
    }
    // allocate device buffers and upload the inputs
    cudaMalloc(&md, size);
    cudaMemcpy(md, m, size, cudaMemcpyHostToDevice);
    cudaMalloc(&nd, size);
    cudaMemcpy(nd, n, size, cudaMemcpyHostToDevice);
    cudaMalloc(&pd, size);
    // one block of 200 threads: exactly one thread per element
    dim3 DimGrid(1, 1);
    dim3 DimBlock(200, 1);
    arradd<<< DimGrid, DimBlock >>>(md, nd, pd);
    // blocking copy back (also synchronizes with the kernel)
    cudaMemcpy(p, pd, size, cudaMemcpyDeviceToHost);
    for (i = 0; i < 200; i++)
    {
        printf("\t%d", p[i]);
    }
    cudaFree(md);
    cudaFree(nd);
    cudaFree(pd);
}
|
21,067 | #include "includes.h"
// ReLU forward pass: A[i] = max(Z[i], 0) over a Z_x_dim x Z_y_dim matrix
// stored flat. Bounds-guarded, so any grid covering the element count works.
__global__ void reluActivationForward(float* Z, float* A, int Z_x_dim, int Z_y_dim) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const int total = Z_x_dim * Z_y_dim;
    if (i >= total)
        return;
    A[i] = fmaxf(Z[i], 0.0f);
}
21,068 | #include "includes.h"
// c[i] = a[i] + b[i] for i in [0, n): one thread per element with a tail
// guard so the grid may over-cover n.
__global__ void VectorAdd(int *a, int *b, int *c, int n)
{
    const int tid = threadIdx.x + blockDim.x * blockIdx.x;
    if (tid >= n)
        return;
    c[tid] = a[tid] + b[tid];
}
21,069 | #include <iostream>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include "parallel.cuh"
using std::cout;
using std::flush;
using std::endl;
// Writes input[i] + 100 into output[i] for the first 100 elements; threads
// beyond index 99 do nothing.
__global__ void plus100Kernel(int *input, int* output)
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= 100)
        return;
    output[idx] = input[idx] + 100;
}
// Fills a 100-element host array with random values, adds 100 to each on the
// GPU (launch geometry given by n_block/n_thread, which must together cover
// 100 threads), and prints the results.
// Fix: the original leaked d_input, d_output and the host buffer `matrice`;
// all three are now released before returning.
void plus100(int n_block, int n_thread)
{
    int *d_input = 0;
    int *d_output = 0;
    cudaMalloc((void**)&d_input, 100 * sizeof(int));
    cudaMalloc((void**)&d_output, 100 * sizeof(int));
    srand(time(NULL));
    int* matrice = (int*)malloc(sizeof(int) * 100);
    for (int i = 0; i < 100; i++)
    {
        matrice[i] = rand() % 100;
    }
    // Copy the input to the device.
    cudaMemcpy(d_input, matrice, 100 * sizeof(int), cudaMemcpyHostToDevice);
    // Launch the kernel.
    plus100Kernel<<<n_block, n_thread>>>(d_input, d_output);
    // Wait for the kernel to finish, then copy back to the host.
    cudaDeviceSynchronize();
    cudaMemcpy(matrice, d_output, 100 * sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < 100; i++)
    {
        printf("%d\n", matrice[i]);
    }
    cudaFree(d_input);
    cudaFree(d_output);
    free(matrice);
}
21,070 | #include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/inner_product.h>
#include <thrust/reduce.h>
// Histogram of d_in: after the call, values[i] holds the i-th distinct input
// value (ascending) and counts[i] how often it occurs. Works on a sorted
// copy, so d_in itself is left untouched.
// NOTE(review): an empty d_in makes the end()-1 below underflow the range —
// confirm callers never pass an empty vector.
void count(const thrust::device_vector<int>& d_in, thrust::device_vector<int>& values, thrust::device_vector<int>& counts) {
thrust::device_vector<int> d_temp(d_in.begin(), d_in.end()); // copy of input array
thrust::device_vector<int> d_cnts(d_in.size(), 1); // array of 1's for reduce_by_key
// sort the input array copy in-place
thrust::sort(d_temp.begin(), d_temp.end());
// compute the size of counts/values: the inner_product counts positions where
// consecutive sorted elements differ, i.e. #distinct-1 (thrust::unique would
// also work but modifies the array in-place)
int num_unique = thrust::inner_product(d_temp.begin(), d_temp.end()-1, d_temp.begin()+1, 0, thrust::plus<int>(), thrust::not_equal_to<int>()) + 1;
// resize the corr. vectors
values.resize(num_unique);
counts.resize(num_unique);
// reduce_by_key to populate counts, values. Have their size so don't care about their end pointers in return value
thrust::reduce_by_key(d_temp.begin(), d_temp.end(), d_cnts.begin(), values.begin(), counts.begin(), thrust::equal_to<int>(), thrust::plus<int>());
}
21,071 | // includes, system
#include <stdio.h>
#include <assert.h>
// Here you can set the device ID that was assigned to you
#define MYDEVICE 0
// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char *msg);
// Part 3 of 5: implement the kernel
// Writes blockIdx.x + threadIdx.x into each element, which the host then
// verifies. No bounds guard: the launch must supply exactly one thread per
// element of d_a.
__global__ void myFirstKernel(int *d_a)
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    d_a[idx] = threadIdx.x + blockIdx.x;
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Allocates an 8x8 (blocks x threads) int array, fills it on the GPU with
// blockIdx.x + threadIdx.x, copies it back and asserts every element.
// Fix: cudaThreadSynchronize() has been deprecated for years; replaced with
// the equivalent cudaDeviceSynchronize().
int main( int argc, char** argv)
{
    cudaSetDevice(MYDEVICE);
    // pointer for host memory
    int *h_a;
    // pointer for device memory
    int *d_a;
    // define grid and block size
    int numBlocks = 8;
    int numThreadsPerBlock = 8;
    // Part 1 of 5: allocate host and device memory
    size_t memSize = numBlocks * numThreadsPerBlock * sizeof(int);
    h_a = (int *) malloc(memSize);
    cudaMalloc((void **) &d_a, memSize);
    // Part 2 of 5: configure and launch kernel
    dim3 dimGrid( numBlocks );
    dim3 dimBlock( numThreadsPerBlock );
    myFirstKernel<<< dimGrid , dimBlock >>>(d_a);
    // block until the device has completed
    cudaDeviceSynchronize();
    // check if kernel execution generated an error
    checkCUDAError("kernel execution");
    // Part 4 of 5: device to host copy
    cudaMemcpy( h_a, d_a, memSize ,cudaMemcpyDeviceToHost);
    // Check for any CUDA errors
    checkCUDAError("cudaMemcpy");
    // Part 5 of 5: verify the data returned to the host is correct
    for (int i = 0; i < numBlocks ; i++)
    {
        for (int j = 0; j < numThreadsPerBlock ; j++)
        {
            assert(h_a[i * numThreadsPerBlock + j] == i + j);
        }
    }
    // free device memory
    cudaFree(d_a);
    // free host memory
    free(h_a);
    // If the program makes it this far, then the results are correct and
    // there are no run-time errors. Good work!
    printf("Correct!\n");
    return 0;
}
// Aborts with a diagnostic (prefixed with msg) if the CUDA runtime has
// recorded an error. Note: cudaGetLastError() also clears the error state.
void checkCUDAError(const char *msg)
{
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
        exit(-1);
    }
}
21,072 | /******************************************************************************
*cr
*cr (C) Copyright 2010-2013 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#define BLOCK_SIZE 512
#define WARP_SIZE 32
#define NUM_WARPS (BLOCK_SIZE/WARP_SIZE)
// Maximum number of elements that can be inserted into a block queue
#define BQ_CAPACITY 2048
// Maximum number of elements that can be inserted into a warp queue
#define WQ_CAPACITY 128
/******************************************************************************
GPU kernels
*******************************************************************************/
// One BFS frontier expansion using a single global queue: every thread
// grid-strides over the current level's nodes, marks each unvisited
// neighbor via atomicAdd on nodeVisited (the returned old value doubles as
// the "already visited" test), and appends it to nextLevelNodes at a slot
// reserved with atomicAdd on *numNextLevelNodes. Graph is CSR:
// nodePtrs[node]..nodePtrs[node+1] indexes into nodeNeighbors.
__global__ void gpu_global_queuing_kernel(unsigned int *nodePtrs,
unsigned int *nodeNeighbors, unsigned int *nodeVisited,
unsigned int *currLevelNodes, unsigned int *nextLevelNodes,
unsigned int *numCurrLevelNodes, unsigned int *numNextLevelNodes) {
// INSERT KERNEL CODE HERE
unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
//large number of nodes is too many for our threads so each thread may have to do more than one
for (idx=idx; idx < *numCurrLevelNodes; idx += gridDim.x * blockDim.x){
unsigned int node = currLevelNodes[idx];
for(unsigned int nbrIdx = nodePtrs[node]; nbrIdx < nodePtrs[node + 1]; ++nbrIdx) {
//check if node was visited, if it wasn't, flag it as visited and update queue
unsigned int neighbor = nodeNeighbors[nbrIdx];
unsigned int visited = atomicAdd(&(nodeVisited[neighbor]), 1);
if(!visited){
//increment numNextLevelNodes and use old value as index for this node's place in the queue
unsigned int gq_idx = atomicAdd(numNextLevelNodes,1);
nextLevelNodes[gq_idx] = neighbor;
}
}
}
}
// BFS frontier expansion with a per-block shared-memory queue (capacity
// BQ_CAPACITY) that is flushed to the global queue at the end; on overflow a
// neighbor goes straight to the global queue instead.
// NOTE(review): on overflow, s_numNextLevelNodes is clamped back to
// BQ_CAPACITY with a plain store while other threads keep atomicAdd-ing it;
// the end state is consistent only because every overflowing writer stores
// the same clamp value — confirm this is the intended (coursework) scheme.
__global__ void gpu_block_queuing_kernel(unsigned int *nodePtrs,
unsigned int *nodeNeighbors, unsigned int *nodeVisited,
unsigned int *currLevelNodes, unsigned int *nextLevelNodes,
unsigned int *numCurrLevelNodes, unsigned int *numNextLevelNodes) {
//setup block's shared queue
__shared__ unsigned int s_nextLevelNodes[BQ_CAPACITY], s_numNextLevelNodes, s_start;
if (threadIdx.x == 0) s_numNextLevelNodes = 0; //init block's numNExtLevelNodes
__syncthreads();
// INSERT KERNEL CODE HERE
unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
//large number of nodes is too many for our threads so each thread may have to do more than one
for (idx=idx; idx < *numCurrLevelNodes; idx += gridDim.x * blockDim.x){
unsigned int node = currLevelNodes[idx];
for(unsigned int nbrIdx = nodePtrs[node]; nbrIdx < nodePtrs[node + 1]; ++nbrIdx) {
//check if node was visited, if it wasn't, flag it as visited and update queue
unsigned int neighbor = nodeNeighbors[nbrIdx];
unsigned int visited = atomicAdd(&(nodeVisited[neighbor]), 1);
if(!visited){
//increment numNextLevelNodes and use old value as index for this node's place in the queue
unsigned int bq_idx = atomicAdd(&s_numNextLevelNodes,1);
if (bq_idx < BQ_CAPACITY){//make sure there is room in block queue
s_nextLevelNodes[bq_idx] = neighbor;
}else{//if not, put right into global queue
s_numNextLevelNodes = BQ_CAPACITY;//s_numNextLevelNodes >= BQ_CAPACITY so reset to BQ_CAPACITY
unsigned int gq_idx = atomicAdd(numNextLevelNodes,1);
nextLevelNodes[gq_idx] = neighbor;
}
}
}
}
__syncthreads();//wait for entire block to finish
//update global numNextLevelNodes for other blocks to determine their start
if (threadIdx.x == 0){
s_start = atomicAdd(numNextLevelNodes, s_numNextLevelNodes);
}
__syncthreads();
// cooperative flush of the block queue into the global queue
for (unsigned int i = threadIdx.x; i < s_numNextLevelNodes; i += blockDim.x){
nextLevelNodes[i+s_start] = s_nextLevelNodes[i];
}
}
// BFS frontier expansion with a three-level queue hierarchy: 32 shared
// "warp" queues -> block queue (BQ_CAPACITY) -> global queue, with capacity
// overflow falling through one level at a time.
// NOTE(review): wqueue_idx = threadIdx.x % WARP_SIZE is the *lane* id, so
// the 32 queues are shared by same-numbered lanes of ALL warps in the block
// (not one queue per warp); the atomicAdd on w_numNextLevelNodes keeps that
// correct. The [WQ_CAPACITY][WARP_SIZE] layout makes the flush loop below
// read consecutive lanes of one row — coalesced/bank-friendly.
// Shared memory: BQ_CAPACITY + WQ_CAPACITY*WARP_SIZE + 65 uints (~24 KB).
__global__ void gpu_warp_queuing_kernel(unsigned int *nodePtrs,
unsigned int *nodeNeighbors, unsigned int *nodeVisited,
unsigned int *currLevelNodes, unsigned int *nextLevelNodes,
unsigned int *numCurrLevelNodes, unsigned int *numNextLevelNodes) {
// INSERT KERNEL CODE HERE
unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
//setup block's shared queue
__shared__ unsigned int b_nextLevelNodes[BQ_CAPACITY], b_numNextLevelNodes, b_start;
//setup warp queues
unsigned int wqueue_idx = threadIdx.x % WARP_SIZE;
__shared__ unsigned int w_nextLevelNodes[WQ_CAPACITY][WARP_SIZE];//allows for coalescing later
__shared__ unsigned int w_numNextLevelNodes[WARP_SIZE], w_start[WARP_SIZE];
//init block's numNextLevelNodes
if (threadIdx.x == 0) b_numNextLevelNodes = 0;
//init each warp queue's numNextLevelNodes
if (threadIdx.x < WARP_SIZE) {
w_numNextLevelNodes[threadIdx.x] = 0;
}
__syncthreads();
//large number of nodes is too many for our threads so each thread may have to do more than one
for (idx=idx; idx < *numCurrLevelNodes; idx += gridDim.x * blockDim.x){
unsigned int node = currLevelNodes[idx];
for(unsigned int nbrIdx = nodePtrs[node]; nbrIdx < nodePtrs[node + 1]; ++nbrIdx) {
//check if node was visited, if it wasn't, flag it as visited and update queue
unsigned int neighbor = nodeNeighbors[nbrIdx];
unsigned int visited = atomicAdd(&(nodeVisited[neighbor]), 1);
if(!visited){
//increment warp level numNextLevelNodes and use old value as index for this node's place in the queue
unsigned int queue_idx = atomicAdd(&(w_numNextLevelNodes[wqueue_idx]),1);
if(queue_idx < WQ_CAPACITY){//make sure there is room in this thread's warp queue
w_nextLevelNodes[queue_idx][wqueue_idx] = neighbor;
}else{//if not, fall back to block and global queues
//increment block level numNextLevelNodes and use old value as index for this node's place in the queue
w_numNextLevelNodes[wqueue_idx] = WQ_CAPACITY;//w_numNextLevelNodes[wqueue_idx] >= WQ_CAPACITY so reset to WQ_CAPACITY
unsigned int bq_idx = atomicAdd(&b_numNextLevelNodes,1);
if (bq_idx < BQ_CAPACITY){//make sure there is room in block queue
b_nextLevelNodes[bq_idx] = neighbor;
}else{//if not, put right into global queue
b_numNextLevelNodes = BQ_CAPACITY;//s_numNextLevelNodes >= BQ_CAPACITY so reset to BQ_CAPACITY
unsigned int gq_idx = atomicAdd(numNextLevelNodes,1);
nextLevelNodes[gq_idx] = neighbor;
}
}
}
}
}
__syncthreads();//wait for entire block to finish
//update block's numNextLevelNodes so other warps can determine their start
unsigned int offset = threadIdx.x/WARP_SIZE;
if (offset == 0){//only first thread in a warp
w_start[wqueue_idx] = atomicAdd(&b_numNextLevelNodes, w_numNextLevelNodes[wqueue_idx]);
}
__syncthreads();
//let each thread in the warp move elements from warp queue to block queue in coalesced fashion
for (unsigned int i = offset; i < w_numNextLevelNodes[wqueue_idx]; i += NUM_WARPS){
unsigned int bq_idx = w_start[wqueue_idx] + i;
if (bq_idx < BQ_CAPACITY){//make sure there is room in block queue
b_nextLevelNodes[bq_idx] = w_nextLevelNodes[i][wqueue_idx];
}else{//if not, put right into global queue
b_numNextLevelNodes = BQ_CAPACITY;//s_numNextLevelNodes >= BQ_CAPACITY so reset to BQ_CAPACITY
unsigned int gq_idx = atomicAdd(numNextLevelNodes,1);
nextLevelNodes[gq_idx] = w_nextLevelNodes[i][wqueue_idx];
}
}
__syncthreads();
// //update global numNextLevelNodes for other blocks to determine their start
if (threadIdx.x == 0){
b_start = atomicAdd(numNextLevelNodes, b_numNextLevelNodes);
}
__syncthreads();
// cooperative flush of the block queue into the global queue
for (unsigned int i = threadIdx.x; i < b_numNextLevelNodes; i += blockDim.x){
nextLevelNodes[i+b_start] = b_nextLevelNodes[i];
}
}
/******************************************************************************
Functions
*******************************************************************************/
// Sequential BFS frontier expansion (reference implementation): for every
// node in the current level, mark each not-yet-visited neighbor as visited
// and append it to nextLevelNodes, bumping *numNextLevelNodes. The graph is
// CSR: nodePtrs[node]..nodePtrs[node+1] indexes into nodeNeighbors.
void cpu_queuing(unsigned int *nodePtrs, unsigned int *nodeNeighbors,
                 unsigned int *nodeVisited, unsigned int *currLevelNodes,
                 unsigned int *nextLevelNodes, unsigned int *numCurrLevelNodes,
                 unsigned int *numNextLevelNodes) {
  for (unsigned int n = 0; n < *numCurrLevelNodes; ++n) {
    const unsigned int node = currLevelNodes[n];
    const unsigned int first = nodePtrs[node];
    const unsigned int last = nodePtrs[node + 1];
    for (unsigned int e = first; e < last; ++e) {
      const unsigned int nbr = nodeNeighbors[e];
      if (nodeVisited[nbr])
        continue;
      nodeVisited[nbr] = 1;
      nextLevelNodes[(*numNextLevelNodes)++] = nbr;
    }
  }
}
// Host launcher for the global-queue BFS kernel: 45 blocks x BLOCK_SIZE
// threads; the kernel's grid-stride loop handles any frontier size.
// Launch errors are not checked here.
void gpu_global_queuing(unsigned int *nodePtrs, unsigned int *nodeNeighbors,
unsigned int *nodeVisited, unsigned int *currLevelNodes,
unsigned int *nextLevelNodes, unsigned int *numCurrLevelNodes,
unsigned int *numNextLevelNodes) {
const unsigned int numBlocks = 45;
gpu_global_queuing_kernel <<< numBlocks , BLOCK_SIZE >>> (nodePtrs,
nodeNeighbors, nodeVisited, currLevelNodes, nextLevelNodes,
numCurrLevelNodes, numNextLevelNodes);
}
// Host launcher for the block-queue BFS kernel: 45 blocks x BLOCK_SIZE
// threads; same contract as gpu_global_queuing. Launch errors are not
// checked here.
void gpu_block_queuing(unsigned int *nodePtrs, unsigned int *nodeNeighbors,
unsigned int *nodeVisited, unsigned int *currLevelNodes,
unsigned int *nextLevelNodes, unsigned int *numCurrLevelNodes,
unsigned int *numNextLevelNodes) {
const unsigned int numBlocks = 45;
gpu_block_queuing_kernel <<< numBlocks , BLOCK_SIZE >>> (nodePtrs,
nodeNeighbors, nodeVisited, currLevelNodes, nextLevelNodes,
numCurrLevelNodes, numNextLevelNodes);
}
// Host launcher for the warp-queue BFS kernel: 45 blocks x BLOCK_SIZE
// threads; same contract as gpu_global_queuing. Launch errors are not
// checked here.
void gpu_warp_queuing(unsigned int *nodePtrs, unsigned int *nodeNeighbors,
unsigned int *nodeVisited, unsigned int *currLevelNodes,
unsigned int *nextLevelNodes, unsigned int *numCurrLevelNodes,
unsigned int *numNextLevelNodes) {
const unsigned int numBlocks = 45;
gpu_warp_queuing_kernel <<< numBlocks , BLOCK_SIZE >>> (nodePtrs,
nodeNeighbors, nodeVisited, currLevelNodes, nextLevelNodes,
numCurrLevelNodes, numNextLevelNodes);
}
|
21,073 | #include "includes.h"
// Naive matrix multiply C = A * B for square matrices of size *matrixSize.
// Each block handles a contiguous chunk of rows (chunk = n / gridDim.x) and
// threadIdx.x selects the column, so blockDim.x must equal *matrixSize and
// gridDim.x must divide it evenly.
// Fixes vs the original:
//  * the row loop stopped at `start + chunk - 1`, silently skipping the
//    last row of every chunk;
//  * the accumulator was declared int, truncating every float product.
__global__ void MatrixMulDevice( float *A, float *B, float *C, int *matrixSize)
{
    const int n = *matrixSize;
    const int chunk = n / gridDim.x;
    const int start = blockIdx.x * chunk;
    for (int i = start; i < start + chunk; i++) {
        float sum = 0.0f;
        for (int k = 0; k < n; k++) {
            sum += A[i * n + k] * B[k * n + threadIdx.x];
        }
        C[i * n + threadIdx.x] = sum;
    }
}
21,074 | // 程序功能:查看当前服务器上具有的显卡数目,并且分别获取他们的详细属性。
// 必要的CUDA 包含文件
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// 传统 C++ 流输入输出支持
#include <iostream>
using namespace std;
// Entry point: enumerates all CUDA devices on this machine and prints the
// key properties of each. (The console labels are intentionally left in
// Chinese — they are runtime output, not comments.)
int main()
{
// device properties structure filled in by the runtime
cudaDeviceProp deviceProp;
// number of CUDA devices found
int deviceCount;
// status returned by the runtime API calls (stored but never checked)
cudaError_t cudaError;
// query how many devices are present
cudaError = cudaGetDeviceCount(&deviceCount);
cout<<"We have "<<deviceCount<<" device(s)."<<endl;
// query the properties of every device in turn
for (int i = 0; i < deviceCount; i++)
{
// fetch the properties of device i
cudaError = cudaGetDeviceProperties(&deviceProp, i);
cout << "设备 " << i + 1 << " 的主要属性: " << endl;
cout << "设备显卡型号: " << deviceProp.name << endl;
cout << "设备全局内存总量(以MB为单位): " << deviceProp.totalGlobalMem / 1024 / 1024 << endl;
cout << "设备上一个线程块(Block)中可用的最大共享内存(以KB为单位): " << deviceProp.sharedMemPerBlock / 1024 << endl;
cout << "设备上一个线程块(Block)中可用的32位寄存器数量: " << deviceProp.regsPerBlock << endl;
cout << "设备上一个线程块(Block)可包含的最大线程数量: " << deviceProp.maxThreadsPerBlock << endl;
cout << "设备的计算功能集(Compute Capability)的版本号: " << deviceProp.major << "." << deviceProp.minor << endl;
cout << "设备上多处理器的数量: " << deviceProp.multiProcessorCount << endl;
}
return 0;
}
21,075 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated floating-point stress kernel (launched <<<1,1>>>): mutates
// `comp` through a fixed chain of transcendental operations controlled by
// the 24 arguments and prints the final value with full precision. The math
// has no meaning beyond exercising the FP pipeline — per the file header,
// the expressions must not be "fixed" or simplified.
__global__
void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float* var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23) {
for (int i=0; i < var_1; ++i) {
if (comp >= cosf((-1.2614E-37f + +1.0591E35f))) {
float tmp_1 = +1.4818E-37f;
float tmp_2 = (-1.7030E-5f + logf((-1.1908E36f + tanhf(-0.0f))));
comp = tmp_2 / tmp_1 - coshf((var_3 + floorf(var_4 / asinf((var_5 - var_6 + var_7)))));
comp += fmodf((+1.1795E-8f / (var_8 - (+1.6709E-36f - (-1.4408E1f + var_9)))), -0.0f - var_10);
for (int i=0; i < var_2; ++i) {
comp = (var_12 * var_13 - +1.3634E34f - +1.5654E34f);
var_11[i] = +1.7711E-37f;
float tmp_3 = (var_14 + var_15);
comp = tmp_3 + var_11[i] - var_16 / -1.9434E-13f + acosf(var_17 / (var_18 / (-0.0f + -1.7130E-12f + sinhf((var_19 - var_20 * var_21 - (var_22 / var_23))))));
}
}
}
printf("%.17g\n", comp);
}
// Allocates a 10-element float array (malloc'd — the caller owns it) with
// every slot initialised to v.
float* initPointer(float v) {
    float* buf = (float*)malloc(10 * sizeof(float));
    for (int k = 0; k < 10; ++k) {
        buf[k] = v;
    }
    return buf;
}
// Driver for the auto-generated kernel above: argv[1..24] supply the 24
// kernel arguments in order; argv[12] fills a 10-element array via
// initPointer.
// NOTE(review): argc is never validated — fewer than 24 CLI arguments
// dereferences null argv entries.
// NOTE(review): tmp_12 is plain host malloc memory passed to a __global__
// kernel; the device writes to var_11[] are invalid unless unified
// addressing makes the pointer reachable — likely a generator defect, left
// as-is per the "do not modify" header.
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float* tmp_12 = initPointer( atof(argv[12]) );
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
float tmp_24 = atof(argv[24]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24);
cudaDeviceSynchronize();
return 0;
}
|
21,076 | #include "includes.h"
// Bit-reversal permutation (the reordering step used by radix-2 FFTs):
// writes D[i] into Q at the bit-reversed index of i, where `bits` is the
// number of significant index bits.
// NOTE(review): N is not defined in this file (expected from includes.h);
// it must equal 2^bits and the launch must supply exactly N threads —
// confirm against the host code.
__global__ void kernel(int* D, int* Q, int bits){
// Find index
int i = blockIdx.x * blockDim.x + threadIdx.x;
// Initialize variables that will be shifted left and right
int shifted_right = i;
int shifted_left = shifted_right;
// Perform bit reversal permutation
for(int a = 1; a < bits; a++)
{
shifted_right >>= 1;
shifted_left <<= 1;
shifted_left |= shifted_right & 1;
}
shifted_left &= N - 1; // keep only the low `bits` bits
// Assign the values to the bit reversed positions
Q[shifted_left] = D[i];
}
21,077 |
#include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
// c_c[i] = c_a[i] + c_b[i] for i in [0, N); bounds-guarded so the grid may
// over-cover the ragged tail.
__global__ void vectorAddKernel(int N, int *c_a, int *c_b, int *c_c){
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= N)
        return;
    c_c[gid] = c_a[gid] + c_b[gid];
}
// Adds two N-element vectors on the GPU (N deliberately not a multiple of
// the block size, exercising the kernel's tail guard) and prints the first
// five results.
// Fix: the host buffers h_a/h_b/h_c were never freed; they are now released
// before returning. (The blocking cudaMemcpy already synchronizes with the
// kernel; the trailing cudaDeviceSynchronize is kept as a final barrier.)
int main(int argc, char **argv){
    int N = 4097;
    int threadsPerBlock = 32;
    int blocks = (N+threadsPerBlock-1)/threadsPerBlock; // ceil-div
    // ON HOST
    int *h_a = (int*) malloc(N*sizeof(int));
    int *h_b = (int*) malloc(N*sizeof(int));
    int *h_c = (int*) malloc(N*sizeof(int));
    int n;
    for(n=0;n<N;++n){
        h_a[n] = 1 + n;
        h_b[n] = 1 - n;
    }
    // ON DEVICE
    int *c_a, *c_b, *c_c;
    cudaMalloc(&c_a, N*sizeof(int));
    cudaMalloc(&c_b, N*sizeof(int));
    cudaMalloc(&c_c, N*sizeof(int));
    cudaMemcpy(c_a, h_a, N*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(c_b, h_b, N*sizeof(int), cudaMemcpyHostToDevice);
    // INITIATE KERNEL ON DEVICE
    vectorAddKernel <<< blocks, threadsPerBlock >>> (N, c_a, c_b, c_c);
    // COPY DATA FROM DEVICE TO HOST
    cudaMemcpy(h_c, c_c, N*sizeof(int), cudaMemcpyDeviceToHost);
    // PRINT ENTRIES
    for(n=0;n<5;++n){
        printf("c[%d] = %d\n", n, h_c[n]);
    }
    cudaDeviceSynchronize();
    cudaFree(c_a);
    cudaFree(c_b);
    cudaFree(c_c);
    free(h_a);
    free(h_b);
    free(h_c);
    return 0;
}
|
21,078 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "type.cuh"
#define UP 0
#define LEFT 1
#define UL 2
#define MAX(I, J) ((I) > (J) ? (I) : (J))
typedef ChromType Pattern;
void LcsString(int **b, ChromType *x, int i, int j, ChromType *l, int li);
void MatPrint(int **cmat, int m, int n);
int EqualPat(ChromType *x, ChromType *y);
/* longest common subsequence implementation from CLR page 317 */
/* Sushil J. Louis */
/* x is newString and m is its length
y is templateString and n is its length
*/
/* Computes the longest common subsequence of x (length m) and y (length n)
   via the CLR dynamic program. The LCS characters are written into
   `tmplate` (caller-provided, at least cmat[m][n] elements) and the LCS
   length is returned. O(m*n) time and space; all scratch matrices are
   freed before returning. */
double DoLcs(ChromType *x, ChromType *y, int m, int n,
ChromType *tmplate)
{
int i, j;
ChromType *tmp; /* = "hello"; */
int **cmat; /* DP matrix to track longest common subsequence */
int **bmat; /* DP matrix to reconstruct string */
int len;
/* allocate the (m+1) x (n+1) DP matrices */
cmat = (int * *) malloc(sizeof(int *) * (m+1));
for(i = 0; i < m + 1; i++){
cmat[i] = (int *) malloc(sizeof(int) * (n+1));
}
bmat = (int * *) malloc(sizeof(int *) * (m+1));
for(i = 0; i < m + 1; i++){
bmat[i] = (int *) malloc(sizeof(int) * (n+1));
}
/* initialize cmat for main loop computation */
for(i = 0; i < m + 1 ; i++){
cmat[i][0] = 0;
}
for(j = 0; j < n + 1 ; j++){
cmat[0][j] = 0;
}
/* main loop to calculate lcs: cmat holds lengths, bmat the direction
   (UL = match, UP/LEFT = inherited) used to reconstruct the string */
for(i = 1; i < m + 1; i++) {
for(j = 1; j < n + 1; j++) {
if(EqualPat(&x[i-1], &y[j-1])) {
cmat[i][j] = cmat[i - 1][j - 1] + 1;
bmat[i][j] = UL;
} else if (cmat[i - 1][j] >= cmat[i][j - 1]) {
cmat[i][j] = cmat[i - 1][j];
bmat[i][j] = UP;
} else {
cmat[i][j] = cmat[i][j - 1];
bmat[i][j] = LEFT;
}
}
}
/* tmp is temporary storage since we get the string in reverse */
tmp = (ChromType *) malloc (sizeof(ChromType) * (m + 1));
LcsString(bmat, x, m, n, tmp, 0);
len = cmat[m][n]; /* length is the last entry in the matrix */
/* reverse tmp to make the actual substring */
for(i = 0; i < len ; i++){
tmplate[i] = tmp[len - 1 - i];
}
/* release all scratch storage */
for(i = 0; i < m + 1; i++){
free(cmat[i]);
}
free(cmat);
for(i = 0; i < m + 1; i++){
free(bmat[i] );
}
free (bmat);
free(tmp);
return len;
}
/*
 * LcsString: walk the back-pointer matrix b from cell (i, j), recording
 * each matched character of x into l.  li is the slot for the current
 * match; because recursion happens before the store, the characters land
 * in reverse order (the caller reverses them).
 */
void LcsString(int **b, ChromType *x, int i, int j, ChromType *l, int li)
{
  if (i == 0 || j == 0)
    return;
  switch (b[i][j]) {
  case UL:
    /* x[i-1] belongs to the LCS: recurse first, then record it */
    LcsString(b, x, i - 1, j - 1, l, li + 1);
    l[li] = x[i - 1];
    break;
  case UP:
    LcsString(b, x, i - 1, j, l, li);
    break;
  default: /* LEFT */
    LcsString(b, x, i, j - 1, l, li);
    break;
  }
}
/* MatPrint: print the top-left m x n corner of cmat, one row per line. */
void MatPrint(int **cmat, int m, int n)
{
  int r, c;
  for (r = 0; r < m; r++) {
    for (c = 0; c < n; c++)
      printf("%d ", cmat[r][c]);
    printf("\n");
  }
}
/* EqualPat: return 1 when the two patterns pointed to are equal, else 0. */
int EqualPat(ChromType *x, ChromType *y)
{
  return (*x == *y) ? 1 : 0;
}
|
21,079 | // incrementArray.cu
#include <stdio.h>
#include <assert.h>
#include <cuda.h>
#include <math.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
/*
 * gpuAssert: report a CUDA runtime error with its file/line location on
 * stderr; when abort is true (the default) exit with the error code.
 * Intended to be invoked through the gpuErrchk() macro.
 */
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
   if (code == cudaSuccess)
      return;
   fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
   if (abort)
      exit(code);
}
/* incrementArrayOnHost: add 1.0f to each of the N elements of a (CPU reference). */
void incrementArrayOnHost(float *a, unsigned long N)
{
  for (unsigned long idx = 0; idx < N; ++idx)
    a[idx] += 1.f;
}
/*
 * incrementArrayOnDevice: add 1.0f to each element of a, one element per
 * thread over a 2D grid.  The flat element id is row-major across the
 * full grid width (gridDim.x * blockDim.x); out-of-range threads exit.
 */
__global__ void incrementArrayOnDevice(float *a, unsigned long N)
{
  const unsigned long col = blockIdx.x*blockDim.x + threadIdx.x;
  const unsigned long row = blockIdx.y*blockDim.y + threadIdx.y;
  const unsigned long flat = row*gridDim.x*blockDim.x + col;
  if (flat >= N)
    return;
  a[flat] += 1.f;
}
/*
 * Driver: increment an N-element array on host and device and verify the
 * two results agree element-for-element.
 *
 * Usage: prog N blockDimX blockDimY gridDimX gridDimY
 * The chosen grid/block must cover all N elements or the check fails.
 */
int main(int argc, char **argv)
{
  float *a, *b;
  unsigned long i, N;
  // (bug fix) argv[1..5] were read without validating argc; a missing
  // argument dereferenced past the argv array.
  if (argc < 6)
  {
    fprintf(stderr, "usage: %s N blockDimX blockDimY gridDimX gridDimY\n", argv[0]);
    return 1;
  }
  N = strtoul(argv[1], NULL, 10);
  size_t size = N*sizeof(float);
  // Managed memory is reachable from both host and device.
  gpuErrchk( cudaMallocManaged((void **) &a, size) );
  gpuErrchk( cudaMallocManaged((void **) &b, size) );
  for (i=0; i<N; i++)
  {
    a[i] = (float)i;
    b[i] = (float)i;
  }
  // do calculation on host
  incrementArrayOnHost(a, N);
  // do calculation on device:
  // Part 1 of 2. Compute execution configuration
  unsigned long blockDimX = strtoul(argv[2], NULL, 10);
  unsigned long blockDimY = strtoul(argv[3], NULL, 10);
  unsigned long gridDimX = strtoul(argv[4], NULL, 10);
  unsigned long gridDimY = strtoul(argv[5], NULL, 10);
  dim3 blockSize = dim3(blockDimX, blockDimY);
  dim3 gridSize = dim3(gridDimX, gridDimY);
  printf("blockDim: (%lu,%lu), gridDim: (%lu,%lu)\n", blockDimX, blockDimY, gridDimX, gridDimY);
  // Part 2 of 2. Call incrementArrayOnDevice kernel
  incrementArrayOnDevice <<< gridSize, blockSize >>> (b, N);
  gpuErrchk( cudaGetLastError() );      // catch bad launch configurations
  gpuErrchk( cudaDeviceSynchronize() ); // catch asynchronous execution errors
  // check results: exact equality is valid since both sides add 1.0f
  for (i=0; i<N; i++) assert(a[i] == b[i]);
  // cleanup
  cudaFree(a); cudaFree(b);
  return 0;
}
|
21,080 | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <time.h>
#define min(a, b) (((a) < (b)) ? (a) : (b))
#define max(a, b) (((a) > (b)) ? (a) : (b))
#define HANDLE_ERROR(err) (HandleError( err, __FILE__, __LINE__ ))
#define THREADS_PER_BLOCK 256
/* HandleError: abort with a file/line diagnostic when a CUDA call fails.
 * Used through the HANDLE_ERROR() macro. */
static void HandleError(cudaError_t err, const char *file, int line) {
    if (err == cudaSuccess)
        return;
    printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
    exit(EXIT_FAILURE);
}
/*
 * dot: per-block partial dot product of a and b (length N).
 * Each thread accumulates a grid-stride partial sum, the block reduces
 * those sums in shared memory, and thread 0 writes the block's result to
 * c[blockIdx.x]; the host finishes the reduction across blocks.
 * NOTE: the tree reduction assumes blockDim.x is a power of two (true
 * for the launches in this file: THREADS_PER_BLOCK == 256).
 */
__global__ void dot(int N, float *a, float *b, float *c) {
    __shared__ float cache[THREADS_PER_BLOCK]; // per-block reduction buffer
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int cacheIndex = threadIdx.x;
    float temp = 0;
    // grid-stride loop: each thread sums a strided subset of the products
    while (tid < N) {
        temp += a[tid] * b[tid];
        tid += blockDim.x * gridDim.x;
    }
    cache[cacheIndex] = temp; // publish this thread's partial sum
    __syncthreads();
    // pairwise tree reduction within the block
    int i = blockDim.x/2;
    while (i != 0) {
        if (cacheIndex < i)
            cache[cacheIndex] += cache[cacheIndex + i];
        __syncthreads();
        i /= 2;
    }
    if (cacheIndex == 0)
        c[blockIdx.x] = cache[0];
}
/*
 * cuda_dot: compute *result = dot(a, b) for N-element host vectors using
 * the dot() kernel; per-block partial sums are reduced on the host.
 */
void cuda_dot(int N, float *a, float *b, float *result) {
    int blocksPerGrid = min(32, (N+THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK);
    float *partial_c, c;
    float *dev_a, *dev_b, *dev_partial_c;
    partial_c =(float*) malloc (blocksPerGrid*sizeof(float));
    // allocate the memory on the GPU
    HANDLE_ERROR(cudaMalloc((void **) &dev_a, N * sizeof(float)));
    HANDLE_ERROR(cudaMalloc((void **) &dev_b, N * sizeof(float)));
    // (bug fix) removed cudaMalloc((void **) &c, sizeof(float)): 'c' is a
    // host float, not a device pointer; that call clobbered it and leaked
    // one device allocation per invocation.
    HANDLE_ERROR(cudaMalloc((void **) &dev_partial_c, blocksPerGrid*sizeof(float)));
    // copy the arrays 'a' and 'b' to the GPU
    HANDLE_ERROR(cudaMemcpy(dev_a, a, N * sizeof(float), cudaMemcpyHostToDevice));
    HANDLE_ERROR(cudaMemcpy(dev_b, b, N * sizeof(float), cudaMemcpyHostToDevice));
    dot<<<blocksPerGrid, THREADS_PER_BLOCK>>>(N, dev_a, dev_b, dev_partial_c);
    HANDLE_ERROR(cudaGetLastError()); // surface launch-configuration errors
    // copy the per-block partial sums back; this memcpy also synchronizes
    HANDLE_ERROR(cudaMemcpy(partial_c, dev_partial_c, blocksPerGrid*sizeof(float), cudaMemcpyDeviceToHost));
    c = 0;
    for (int i=0; i<blocksPerGrid; i++) {
        c += partial_c[i];
    }
    memcpy(result, &c, sizeof(float));
    // free the memory allocated on the GPU and the host scratch buffer
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_partial_c);
    free(partial_c); // (bug fix) was 'delete [] partial_c' on malloc'd memory (UB)
}
struct Tensor {
int shape[4];
float* vector;
int sz;
};
/*
 * get_tensor: load a Tensor from a binary file laid out as four int32
 * shape values (16 bytes) followed by the float payload.
 * Exits with a failure status on any I/O error.
 */
struct Tensor get_tensor(char* file_name) {
    FILE *f;
    if ((f = fopen(file_name, "rb")) == NULL) {
        printf("Error: file opening in %s", file_name);
        exit(EXIT_FAILURE); // (bug fix) was exit(0): failure must not report success
    }
    fseek(f, 0, SEEK_END);
    int file_sz = ftell(f);
    fseek(f, 0, SEEK_SET);
    int shape[4] = {-1, -1, -1, -1};
    float* vector = (float *) malloc(file_sz - 16); // 16 for shape
    // (bug fix) fread results were ignored; a short read left shape/vector
    // silently uninitialized.
    if (fread(shape, 4, 4, f) != 4 || fread(vector, file_sz - 16, 1, f) != 1) {
        printf("Error: short read in %s", file_name);
        fclose(f);
        exit(EXIT_FAILURE);
    }
    fclose(f); // (bug fix) the original leaked the FILE handle
    printf("Read file: %d bytes (%d, %d, %d, %d) shape from %s\n",
           file_sz, shape[0], shape[1], shape[2], shape[3], file_name);
    struct Tensor tensor;
    tensor.vector = vector;
    memcpy(tensor.shape, shape, 16);
    tensor.sz = file_sz;
    return tensor;
}
/*
 * write_tensor: serialize a Tensor as four int32 shape values followed by
 * (sz - 16) bytes of float payload.  Exits on open failure.
 */
void write_tensor(struct Tensor tensor, char* file_name) {
    FILE *fp = fopen(file_name, "wb");
    if (fp == NULL) { // (bug fix) the original dereferenced a NULL FILE* here
        printf("Error: file opening in %s", file_name);
        exit(EXIT_FAILURE);
    }
    fwrite(tensor.shape, 4, 4, fp);
    fwrite(tensor.vector, tensor.sz - 16, 1, fp);
    fclose(fp);
    printf("Write tensor: %s\n", file_name);
}
struct Padding {
int top;
int bottom;
int left;
int right;
};
/*
 * get_same_padding_in_tf: TensorFlow-style "same" padding for stride 1.
 * Total padding per axis is (kernel - 1), split with the smaller half on
 * the top/left side.  oh/ow are accepted but unused at stride 1.
 */
struct Padding get_same_padding_in_tf(int oh, int ow, int kh, int kw) {
    // strides: 1 (default)
    // padding: same (default)
    struct Padding pad;
    const int total_v = max(kh - 1, 0);
    const int total_h = max(kw - 1, 0);
    pad.top = total_v / 2;
    pad.bottom = total_v - pad.top;
    pad.left = total_h / 2;
    pad.right = total_h - pad.left;
    return pad;
}
/*
 * get_input_patch: copy the kh x kw x ic window of `input` centered at
 * (_n, _h, _w) into a freshly calloc'd buffer, honoring "same" padding:
 * rows/columns that fall outside the image stay zero (from calloc) and
 * the copied region is shrunk accordingly.  i0/i1/i2 are the strides (in
 * floats) of the N, H and W axes of input.  Caller frees the result.
 * NOTE(review): each side is clipped by at most pad.{top,bottom,left,right};
 * TODO confirm this holds for every (_h, _w) with large kernels near corners.
 */
float* get_input_patch(struct Tensor input, struct Padding pad,
                       int _n, int _h, int _w, int kh, int kw,
                       int i0, int i1, int i2) {
    // input.shape: (N=1, H, W, C=IC)
    // --> (kh, kw, ic)
    int h = input.shape[1];
    int w = input.shape[2];
    int ic = input.shape[3];
    int patch_sz = kh * kw * ic;
    float *patch = (float *) calloc(patch_sz, sizeof(float)); // zero == padding value
    int center_h = kh / 2;
    int center_w = kw / 2;
    // index of the window's top-left element in input (may point off-image)
    int input_base_idx = i0 * _n + i1 * (_h - center_h) + i2 * (_w - center_w);
    int patch_base_idx = 0;
    int size = kw * ic; // floats copied per kernel row
    int end_h = kh;     // number of kernel rows actually copied
    if (pad.top != 0 && _h - center_h < 0) {
        // window sticks out above the image: skip the padded rows
        input_base_idx += i1 * pad.top;
        patch_base_idx += kw * ic * pad.top;
        end_h -= pad.top;
    } else if (pad.bottom != 0 && _h - center_h + kh > h) {
        end_h -= pad.bottom; // window sticks out below: copy fewer rows
    }
    if (pad.left != 0 && _w - center_w < 0){
        // window sticks out to the left: shift start and shorten each row
        input_base_idx += i2 * pad.left;
        patch_base_idx += ic * pad.left;
        size -= ic * pad.left;
    } else if (pad.right != 0 && _w - center_w + kw > w) {
        size -= ic * pad.right; // window sticks out to the right
    }
    int input_start_idx, patch_start_idx;
    // copy the visible part of the window, one kernel row at a time
    for (int _kh = 0; _kh < end_h; _kh++) {
        input_start_idx = input_base_idx + i1 * _kh;
        patch_start_idx = patch_base_idx + kw * ic * _kh;
        memcpy(patch + patch_start_idx, input.vector + input_start_idx, size * sizeof(float));
    }
    return patch;
}
/*
 * einsum_hwi_hwoi_to_o: accumulate
 *   v_o[o] += sum over h, w, i of v_hwi[h,w,i] * v_hwoi[h,w,o,i].
 * The innermost contraction over i is delegated to cuda_dot.  v_o is
 * accumulated into, so the caller must zero it first.
 */
void einsum_hwi_hwoi_to_o(int* shape, float* v_hwi, float* v_hwoi, float* v_o) {
    // shape: h, w, o, i
    const int h = shape[0];
    const int w = shape[1];
    const int o = shape[2];
    const int i = shape[3];
    for (int _h = 0; _h < h; _h++) {
        const int hwi_row = (w * i) * _h;
        const int hwoi_row = (w * o * i) * _h;
        for (int _w = 0; _w < w; _w++) {
            const int hwi_idx = hwi_row + i * _w;
            const int hwoi_base = hwoi_row + (o * i) * _w;
            for (int _o = 0; _o < o; _o++) {
                float partial = 0;
                cuda_dot(i, v_hwi + hwi_idx, v_hwoi + hwoi_base + i * _o, &partial);
                v_o[_o] += partial;
            }
        }
    }
}
/*
 * conv2d: stride-1, "same"-padded 2D convolution.
 *   input.shape:  (N, H, W, C=IC)
 *   kernel.shape: (KH, KW, OC, IC=C)
 *   output.shape: (N, H, W, OC)
 * For every output position, extract a zero-padded input patch and
 * contract it against the kernel with einsum_hwi_hwoi_to_o (whose inner
 * dot products run on the GPU).  Caller frees out.vector.
 */
struct Tensor conv2d(struct Tensor input, struct Tensor kernel) {
    // input.shape: (N, H, W, C=IC)
    // kernel.shape: (KH, KW, OC, IC=C)
    // output.shape: (N, H, W, OC)
    int output_sz = input.shape[0] * input.shape[1] * input.shape[2] * kernel.shape[2] * 4; // bytes; 4 == sizeof(float)
    struct Tensor out;
    out.vector = (float *) malloc(output_sz);
    int shape[4] = {input.shape[0], input.shape[1], input.shape[2], kernel.shape[2]};
    memcpy(out.shape, shape, 16);
    out.sz = output_sz + 16; // serialized size includes the 16-byte shape header
    // strides (in floats) for the input tensor ...
    int i2 = input.shape[3];
    int i1 = input.shape[2] * i2;
    int i0 = input.shape[1] * i1;
    // ... and for the output tensor
    int o2 = kernel.shape[2];
    int o1 = input.shape[2] * o2;
    int o0 = input.shape[1] * o1;
    struct Padding pad = get_same_padding_in_tf(out.shape[1], out.shape[2], kernel.shape[0], kernel.shape[1]);
    float *patch_hwi; // kh * kw * ic
    float *v_o;       // per-position output-channel accumulator
    int start_odx;
    int oc = kernel.shape[2];
    for (int _n = 0; _n < input.shape[0]; _n++) { // N
        for (int _h = 0; _h < input.shape[1]; _h++) { // H
            for (int _w = 0; _w < input.shape[2]; _w++) { // W
                start_odx = o0 * _n + o1 * _h + o2 * _w;
                v_o = (float *) calloc(oc, sizeof(float));
                patch_hwi = get_input_patch(input, pad, _n, _h, _w, kernel.shape[0], kernel.shape[1], i0, i1, i2);
                einsum_hwi_hwoi_to_o(kernel.shape, patch_hwi, kernel.vector, v_o);
                memcpy(out.vector + start_odx, v_o, oc * sizeof(float));
                free(patch_hwi);
                free(v_o);
            }
        }
    }
    return out;
}
/*
 * Driver: conv2d(input, kernel) with the tensors loaded from the two
 * files named on the command line; writes the result to output_tensor.bin.
 */
int main (int argc, char* argv[]) {
    clock_t start, end;
    float elapsed_time;
    // (bug fix) argv[1]/argv[2] were used without checking argc.
    if (argc < 3) {
        printf("usage: %s input_tensor kernel_tensor\n", argv[0]);
        return 1;
    }
    struct Tensor tensor_in = get_tensor(argv[1]);
    struct Tensor tensor_ke = get_tensor(argv[2]);
    struct Tensor tensor_ot;
    start = clock();
    tensor_ot = conv2d(tensor_in, tensor_ke);
    end = clock();
    elapsed_time = (float) (end - start) / CLOCKS_PER_SEC;
    printf("Elapsed time: %f \n", elapsed_time);
    write_tensor(tensor_ot, "output_tensor.bin");
    free(tensor_in.vector);
    free(tensor_ke.vector);
    free(tensor_ot.vector);
    return 0;
}
|
21,081 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#define THREADS_PER_BLOCK 16 // Threads per block
#define SIZE 65 // Array size
/*
 * vectorAdd: c[i] = a[i] + b[i] for i in [0, n), one element per thread.
 * The bounds guard makes threads past the end of the arrays harmless, so
 * the launch may round the block count up.
 */
__global__ void vectorAdd(int *a, int *b, int *c, int n)
{
    // global element index from block and thread coordinates
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= n)
        return; // thread beyond the arrays: nothing to do
    c[idx] = a[idx] + b[idx];
}
/*
 * Driver: add two SIZE-element int vectors on the GPU and print the result.
 */
int main()
{
    // (bug fix) SIZE / THREADS_PER_BLOCK truncates: 65/16 == 4 blocks ==
    // 64 threads, so c[64] was never computed and was printed from
    // uninitialized memory.  Round the block count up instead.
    int block_size = (SIZE + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    int *a, *b, *c;       // Host arrays
    int *d_a, *d_b, *d_c; // Device arrays
    // Allocate the memory on the CPU
    a = (int *)malloc(SIZE * sizeof(int));
    b = (int *)malloc(SIZE * sizeof(int));
    c = (int *)malloc(SIZE * sizeof(int));
    // Allocate the memory on the GPU
    cudaMalloc(&d_a, SIZE * sizeof(int));
    cudaMalloc(&d_b, SIZE * sizeof(int));
    cudaMalloc(&d_c, SIZE * sizeof(int));
    for (int i = 0; i < SIZE; ++i)
    {
        a[i] = i;
        b[i] = i + 2;
    }
    // Copy Host array to Device array
    cudaMemcpy(d_a, a, SIZE * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, SIZE * sizeof(int), cudaMemcpyHostToDevice);
    // Make a call to GPU kernel
    vectorAdd <<< block_size, THREADS_PER_BLOCK >>>(d_a, d_b, d_c, SIZE);
    // Copy result back to Host array (this blocking copy also synchronizes)
    cudaMemcpy(c, d_c, SIZE * sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < SIZE; ++i)
        printf("c[%d] = %d\n", i, c[i]);
    // Free the Host array memory
    free(a);
    free(b);
    free(c);
    // Free the Device array memory
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
|
21,082 | /*
* MSU CUDA Course Examples and Exercises.
*
* Copyright (c) 2011 Dmitry Mikushin
*
* This software is provided 'as-is', without any express or implied warranty.
* In no event will the authors be held liable for any damages arising
* from the use of this software.
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it freely,
* without any restrictons.
*
* This sample demonstates parallel execution using process forking.
* Each process works on own private data.
*
*/
#include <cuda_runtime.h>
#include <errno.h>
#include <malloc.h>
#include <stdio.h>
#include <unistd.h>
// Perform some dummy 2D field processing on GPU and CPU,
// and compare results.
int pattern2d(int nx, int ny, float* in, float* out, int pid, int step);
int nticks = 10;
// The size of memory region.
int nx = 512, ny = 256;
size_t size = nx * ny * sizeof(float);
/*
 * Driver: fork into two processes, each running `nticks` iterations of
 * pattern2d on its own private copy of the data (fork duplicates the
 * already-initialized arrays).  NOTE(review): the fork happens BEFORE any
 * CUDA call on purpose -- a CUDA context created before fork() is not
 * usable in the child -- so each process creates its own context; with
 * two or more GPUs the parent uses device 0 and the child device 1.
 */
int main(int argc, char* argv[])
{
	// Allocate input & output arrays.
	float* input = (float*)malloc(size);
	float* output = (float*)malloc(size);
	// Generate input data array of the
	// specified size.  NOTE(review): malloc results are not checked.
	long np = nx * ny;
	float invdrandmax = 1.0 / RAND_MAX;
	for (long i = 0; i < np; i++)
		input[i] = rand() * invdrandmax;
	// Call fork to create another process.
	// Standard: "Memory mappings created in the parent
	// shall be retained in the child process."
	pid_t fork_status = fork();
	// From this point two processes are running the same code, if no errors.
	if (fork_status == -1)
	{
		fprintf(stderr, "Cannot fork process, errno = %d\n", errno);
		return errno;
	}
	// By fork return value we can determine the process role:
	// master or child (worker).
	int master = fork_status ? 1 : 0;
	int ndevices = 0;
	// First CUDA call in either process (deliberately after the fork).
	cudaError_t cuda_status = cudaGetDeviceCount(&ndevices);
	if (cuda_status != cudaSuccess)
	{
		fprintf(stderr, "Cannot get the cuda device count, status = %d\n",
			cuda_status);
		return cuda_status;
	}
	// Return if no cuda devices present.
	if (master)
		printf("%d CUDA device(s) found\n", ndevices);
	if (!ndevices) return 0;
	// Get the process ID.
	int pid = (int)getpid();
	// Use different devices, if more than one present.
	if (ndevices > 1)
	{
		int idevice = 1;
		if (master) idevice = 0;
		cuda_status = cudaSetDevice(idevice);
		if (cuda_status != cudaSuccess)
		{
			fprintf(stderr, "Cannot set CUDA device by process %d, status = %d\n",
				pid, cuda_status);
			return cuda_status;
		}
		printf("Process %d uses device #%d\n", pid, idevice);
	}
	// Perform some "iterations" on data array private to each process.
	for (int i = 0; i < nticks; i++)
	{
		// Execute function with CUDA kernel.
		int status = pattern2d(nx, ny, input, output, pid, i);
		if (status)
		{
			fprintf(stderr, "Pattern 2D failed by process %d, status = %d\n",
				pid, status);
			return status;
		}
	}
	free(input);
	free(output);
	return 0;
}
|
21,083 | #include "includes.h"
// filename: eeTanh.cu
// a simple CUDA kernel to square the elements of a matrix
extern "C" // ensure function name to be exactly "eeTanh"
{
}
/*
 * swap_matrix_col: exchange the N contiguous elements stored at
 * X[(C-1)*N .. (C-1)*N + N-1] (the last column, if X is a column-major
 * N x C matrix -- TODO confirm layout) with the N-element vector V.
 * One element per thread; extra threads exit at the guard.
 */
__global__ void swap_matrix_col(int N, int C, float *X, float *V)
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= N)
        return;
    const int col_idx = (C - 1) * N + row;
    const float saved = X[col_idx];
    X[col_idx] = V[row];
    V[row] = saved;
}
21,084 | /*
This is an good trick to see what is going on for debugging purposes. It is terribly BAD
to print from GPU kernels in anything you want to be performance oriented though. It is not a
performance oriented feature!
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
/*
 * kernel: every thread prints one line with its global 2D coordinates.
 * The interleaving of the output demonstrates how non-deterministic
 * thread scheduling is.  Device printf is for debugging only.
 */
__global__ void kernel()
{
    // global coordinates of this thread within the whole grid
    const int gx = threadIdx.x + blockIdx.x * blockDim.x;
    const int gy = threadIdx.y + blockIdx.y * blockDim.y;
    const int size = sizeof(int);
    printf("Hello, World! size=%d tidx=%d, tidy=%d\n", size, gx, gy);
}
/*
 * Driver: launch a 16x16 grid of 16x16 blocks of the printing kernel.
 * The cudaDeviceSynchronize() is required so device printf output is
 * flushed before the process exits.
 */
int main(int argc, char** argv)
{
    /*
      Keep this in mind. in Cuda 8 compute 2.0 was deprecated and it may be
      removed by now. CDER only currently (11/2018) supports Cuda 7 so the below
      will work and may or may not warn you about this.
    */
    printf("You compile this with 'nvcc -arch sm_20 hello.cu -o hello'\n");
    printf("You need -at least- arch of sm_20 to print from kernels\n");
    dim3 dimBlock( 16, 16, 1 );
    dim3 dimGrid( 16, 16, 1 );
    kernel<<<dimGrid,dimBlock>>>();
    cudaDeviceSynchronize(); /* you also -need- this here to print from the kernel */
    // (bug fix) the original returned 1, which signals failure to the
    // shell even on a successful run.
    return 0;
}
|
21,085 | #include "includes.h"
/*
 * colorInvalids_kernel: darken the first two channels (x, y) of
 * out_image wherever the corresponding in_image value is NaN or
 * infinite; finite samples are written back unchanged.
 * One pixel per thread over a 2D grid.
 */
__global__ void colorInvalids_kernel(uchar4 *out_image, const float *in_image, int width, int height) {
    const int x = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
    const int y = __mul24(blockIdx.y, blockDim.y) + threadIdx.y;
    if (x >= width || y >= height)
        return;
    const int idx = __mul24(y, width) + x;
    uchar4 px = out_image[idx];
    const float sample = in_image[idx];
    if (!isfinite(sample)) {
        // mark the invalid sample by halving two channels
        px.x *= 0.5f;
        px.y *= 0.5f;
    }
    out_image[idx] = px;
}
21,086 | #include "includes.h"
/*
 * matMult: res = a * b, where a is rows x k, b is k x cols and res is
 * rows x cols, all row-major.  One output element per thread; the guard
 * makes partial edge blocks safe.
 */
__global__ void matMult(int* a, int* b, int* res,unsigned int rows, unsigned int k, unsigned int cols){
    unsigned int r = blockIdx.y * blockDim.y + threadIdx.y;
    unsigned int c = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int sum = 0;
    if(r < rows && c < cols){
        for(unsigned int x = 0; x < k; x++){
            // (bug fix) the original summed a[..] + b[..]; a matrix
            // multiply accumulates the PRODUCT of the paired elements.
            sum += a[r*k + x] * b[x*cols + c];
        }
        res[r*cols + c] = sum;
    }
}
21,087 | #include <iostream>
#include <fstream>
#include <vector>
#include <sstream>
#include <cstdio>
#include <ctime>
#include <curand_mtgp32_kernel.h>
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
#define TILE_WIDTH 32
#define w (TILE_WIDTH + 3 - 1)
#define KERNEL_SIZE 3
/*
 * naiveFiltering: direct (non-tiled) n x n convolution over an
 * interleaved multi-channel image, one output pixel per thread, with the
 * result clamped to [0, 1].  Each thread reads the n x n window whose
 * top-left corner is (row, col) of pixelsDevice.
 * NOTE(review): reads reach (row+n-1, col+n-1), so this assumes
 * widthResult <= width - n + 1 and heightResult <= height - n + 1 --
 * TODO confirm the caller sizes the result that way.
 * NOTE(review): the channel loop is hard-coded to 3 iterations and
 * ignores the `channels` argument for its trip count.
 */
__global__ void naiveFiltering(float* pixelsDevice, float* kernelDevice, float* resultDevice, int width, int height,
                               int n, int widthResult, int heightResult, int channels) {
    int row = blockIdx.y*blockDim.y + threadIdx.y;
    int col = blockIdx.x*blockDim.x + threadIdx.x;
    float sum;
    int a, b;
    for(int i = 0; i < 3; i++) { // one pass per color channel
        if ((row < heightResult) && (col < widthResult)) {
            sum = 0;
            a = 0; // kernel row index
            for (int k = row; k < row + n; k++) {
                b = 0; // kernel column index
                for (int l = col; l < col + n; l++) {
                    sum += kernelDevice[a * n + b] * pixelsDevice[k * width * channels + l * channels + i];
                    b++;
                }
                a++;
            }
            // clamp to the normalized [0, 1] pixel range
            if (sum < 0)
                sum = 0;
            if (sum > 1)
                sum = 1;
            resultDevice[row * widthResult * channels + col * channels + i] = sum;
        }
    }
}
/*
 * tiling: shared-memory tiled n x n convolution over an interleaved
 * multi-channel image, clamping results to [0, 1].  Each block stages a
 * w x w (w = TILE_WIDTH + KERNEL_SIZE - 1) halo'd input tile into N_ds,
 * then each thread convolves from shared memory.  The tile is loaded in
 * two passes because a block has TILE_WIDTH^2 threads while the tile has
 * w^2 elements.
 * NOTE(review): N_ds is sized from KERNEL_SIZE (3), so this kernel is
 * only correct for n == 3 -- TODO confirm callers never pass larger n.
 */
__global__ void tiling(float* pixelsDevice, float* kernelDevice, float* resultDevice, int width, int height,
                       int n, int widthResult, int heightResult, int channels) {
    __shared__ float N_ds[w][w];
    for (int k = 0; k < channels; ++k) { // process one channel at a time
        // pass 1: first TILE_WIDTH^2 tile elements, one per thread
        int dest = threadIdx.y * TILE_WIDTH + threadIdx.x;
        int destY = dest / w;
        int destX = dest % w;
        int srcY = blockIdx.y * TILE_WIDTH + destY - (n/2);
        int srcX = blockIdx.x * TILE_WIDTH + destX - (n/2);
        int src = srcY*width*channels + srcX*channels + k;
        if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width) {
            N_ds[destY][destX] = pixelsDevice[src];
        } else {
            N_ds[destY][destX] = 0; // zero padding outside the image
        }
        // pass 2: remaining tile elements; threads past the tile skip
        dest = threadIdx.y * TILE_WIDTH + threadIdx.x + TILE_WIDTH * TILE_WIDTH;
        destY = dest / w;
        destX = dest % w;
        srcY = blockIdx.y * TILE_WIDTH + destY - (n/2);
        srcX = blockIdx.x * TILE_WIDTH + destX - (n/2);
        src = srcY*width*channels + srcX*channels + k;
        if (destY < w) {
            if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width) {
                N_ds[destY][destX] = pixelsDevice[src];
            } else {
                N_ds[destY][destX] = 0;
            }
        }
        __syncthreads(); // tile fully staged before any thread convolves
        float sum = 0;
        int y, x;
        for (y = 0; y < n; ++y) {
            for (x = 0; x < n; ++x) {
                sum += N_ds[threadIdx.y + y][threadIdx.x + x] * kernelDevice[y * n + x];
            }
        }
        // clamp to the normalized [0, 1] pixel range
        if (sum < 0)
            sum = 0;
        if (sum > 1)
            sum = 1;
        y = blockIdx.y * TILE_WIDTH + threadIdx.y;
        x = blockIdx.x * TILE_WIDTH + threadIdx.x;
        if (y < heightResult && x < widthResult)
            resultDevice[y*widthResult*channels + x*channels + k] = sum;
        __syncthreads(); // finish reads before the next channel reloads N_ds
    }
}
21,088 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <time.h>
#include <cstdlib>
using namespace std;
#define N 2048
#define Iteration 100
const int TILE = 32; const int SIDE = 8;
/*
 * matrixTransposeUnrolled: tiled N x N transpose, b = a^T.
 * Each 32x8 thread block moves one 32x32 (TILE x TILE) tile through
 * shared memory, every thread handling TILE/SIDE = 4 rows via the
 * unrolled loops.  The +1 column pad in `mat` staggers shared-memory
 * columns so the transposed reads avoid bank conflicts.
 */
__global__ void matrixTransposeUnrolled(const int *a, int *b)
{
    __shared__ int mat[TILE][TILE + 1]; // +1 pad: conflict-free column reads
    int x = blockIdx.x * TILE + threadIdx.x;
    int y = blockIdx.y * TILE + threadIdx.y;
    // stage 1: load a TILE x TILE tile of `a`, SIDE rows per iteration
    #pragma unroll TILE/SIDE
    for(int k = 0; k < TILE ; k += SIDE) {
        if(x < N && y + k < N)
            mat[threadIdx.y + k][threadIdx.x] = a[((y + k) * N) + x];
    }
    __syncthreads(); // tile fully loaded before any transposed read
    // stage 2: write the transposed tile; block coordinates are swapped
    x = blockIdx.y * TILE + threadIdx.x;
    y = blockIdx.x * TILE + threadIdx.y;
    #pragma unroll TILE/SIDE
    for(int k = 0; k < TILE; k += SIDE) {
        if(x < N && y + k < N)
            b[(y + k) * N + x] = mat[threadIdx.x][threadIdx.y + k];
    }
}
/*
 * Driver: transpose an N x N matrix Iteration times on the GPU, print a
 * 10x10 corner of the result and the total wall-clock time.
 */
int main(){
    int *a, *b;
    int *d_a, *d_b;
    int size = N*N*sizeof(int);
    clock_t start, end;
    // Alloc space for device copies of a, b
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    // Alloc space for host copies of a, b, and setup input values
    a = (int*)malloc(size);
    b = (int*)malloc(size);
    for(int i = 0; i < N; i++)
        for(int j = 0; j < N; j++)
            a[i*N+j] = j;
    dim3 grid(64, 64);
    dim3 block(32, 8);
    start = clock();
    // Copy inputs to device
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    //Launch kernel
    for(int i = 0; i < Iteration; i++)
        matrixTransposeUnrolled<<<grid, block>>>(d_a, d_b);
    // Copy result back to host; this blocking copy also drains the kernels
    cudaMemcpy(b, d_b, size, cudaMemcpyDeviceToHost);
    end = clock();
    for(int i = 0; i < 10; i++){
        for(int j = 0; j < 10; j++)
            cout<<b[i*N+j]<<" ";
        cout<<endl;
    }
    //Cleanup
    free(a); free(b);
    cudaFree(d_a); cudaFree(d_b);
    // (bug fix) clock() returns ticks, not milliseconds: convert with
    // CLOCKS_PER_SEC before printing; also fixed the "Totle" typo.
    cout << "Total Time : " << (double)(end - start) * 1000.0 / CLOCKS_PER_SEC << "ms" << endl;
    return 0;
}
21,089 | #include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#define N 5
/*
 * maximumElement: in-place tree reduction leaving max(a[0..N-1]) in o[0].
 * Must run in a SINGLE block (there is no cross-block synchronization)
 * with at least N/2 threads; the trailing a[N-1] check covers odd N.
 */
__global__ void maximumElement(int *a,int *o)
{
    int of;
    int id = threadIdx.x;
    for(of=N/2; of>0;of=of/2)
    {
        if(id<of)
        {
            if(a[id+of] > a[id])
            {
                a[id] = a[id+of];
            }
        }
        // (bug fix) without a barrier, threads raced between reduction
        // rounds and could read stale or half-updated values.
        __syncthreads();
    }
    if(a[0]<a[N-1])
    {
        a[0]=a[N-1];
    }
    o[0] = a[0];
}
/*
 * minimumElement: in-place tree reduction leaving min(a[0..N-1]) in o[0].
 * Must run in a SINGLE block (there is no cross-block synchronization)
 * with at least N/2 threads; the trailing a[N-1] check covers odd N.
 */
__global__ void minimumElement(int *a,int *o)
{
    int of;
    int id = threadIdx.x;
    for(of=N/2;of>0;of=of/2)
    {
        if(id<of)
        {
            if(a[id+of] < a[id])
            {
                a[id] = a[id+of];
            }
        }
        // (bug fix) barrier between reduction rounds; see maximumElement.
        __syncthreads();
    }
    if(a[0]>a[N-1])
    {
        a[0]=a[N-1];
    }
    o[0] = a[0];
}
/*
 * Driver: load a fixed 5-element vector, then reduce it to its maximum or
 * minimum on the GPU depending on the user's menu choice.
 */
int main()
{
    int *host,*device,*output_host,*output_device;
    int choice;
    int size=N*sizeof(int);
    host = (int*)malloc(size);
    output_host = (int*)malloc(size);
    cudaMalloc(&device,size);
    cudaMalloc(&output_device,size);
    int i;
    /*
    for(i=0 ; i<N ;i++)
    {
        host[i] = random() %N;
    }*/
    host[0]=7;
    host[1]=2;
    host[2]=6;
    host[3]=3;
    host[4]=1;
    printf("\n\n Vector => ");
    for(i=0 ; i<N ;i++)
    {
        printf("%d ",host[i]);
    }
    cudaMemcpy(device,host,size,cudaMemcpyHostToDevice);
    printf("\n\n1.Maximum Elemnt\n2.Minimum Elemnt\n\nEnter your choice :");
    scanf("%d",&choice);
    // (bug fix) the kernels reduce in-place with block-local barriers
    // only, so they must run in exactly ONE block: the original
    // <<<2, N/2>>> launched two blocks racing over the same array.
    if(choice==1)
    {
        maximumElement<<<1,N/2>>>(device,output_device);
    }
    else
    {
        minimumElement<<<1,N/2>>>(device,output_device);
    }
    cudaMemcpy(output_host,output_device,size,cudaMemcpyDeviceToHost);
    if(choice==1)
    {
        printf("\n\nMaximum elemnt => %d",output_host[0]);
    }
    else
    {
        printf("\n\nMinimum elemnt => %d",output_host[0]);
    }
    cudaFree(device);
    cudaFree(output_device);
    free(host);
    free(output_host);
    return 0;
}
|
21,090 | #include <iostream>
#include <cstdlib>
#include <iomanip>
#include <cstring>
#include <cuda_runtime.h>
#include <cstdio>
//C++ timers
#include <chrono>
#define BLOCK_SIZE 16
#define N_STREAMS 8
#define CUDA_WARN(XXX) \
do { if (XXX != cudaSuccess) std::cerr << "CUDA Error: " << \
cudaGetErrorString(XXX) << ", at line " << __LINE__ \
<< std::endl; cudaDeviceSynchronize(); } while (0)
typedef std::chrono::high_resolution_clock Clock;
typedef std::chrono::nanoseconds nanoseconds;
typedef struct {
//Dimensions of A
int M;
int N;
//Scalars
int alpha;
int beta;
//First dimension of A
int lda;
//Increments for X and Y
int incX;
int incY;
//Array A and vectors X, Y;
int *A, *X, *Y;
} dgemv_data;
/*
 * dgemm: serial CPU reference.  A = alpha * X * Y + beta * A for N x N
 * row-major int matrices (plain triple loop).
 */
void dgemm(int N, int alpha,
           int *X, int *Y,
           int beta, int *A) {
    for (int row = 0; row < N; ++row) {
        for (int col = 0; col < N; ++col) {
            int acc = 0;
            for (int k = 0; k < N; ++k)
                acc += X[row * N + k] * Y[k * N + col];
            A[row * N + col] = alpha * acc + beta * A[row * N + col];
        }
    }
}
/*
 * dgemm_cuda: naive CUDA DGEMM (no shared memory).
 * A = alpha * X * Y + beta * A for N x N row-major matrices; one output
 * element per thread, guarded so partial edge blocks are safe.
 */
__global__ void dgemm_cuda (int N, int alpha,
                            int *X, int *Y,
                            int beta, int *A) {
    const int row = blockDim.y * blockIdx.y + threadIdx.y;
    const int col = blockDim.x * blockIdx.x + threadIdx.x;
    if (row >= N || col >= N)
        return;
    int acc = 0;
    for (int k = 0; k < N; ++k)
        acc += X[row * N + k] * Y[k * N + col];
    A[row * N + col] = alpha * acc + beta * A[row * N + col];
}
// CUDA DGEMM WITH shared-memory tiling.  (The original header said
// "W/O"; this is the tiled variant.)  A = alpha * X * Y + beta * A for
// N x N row-major matrices, BLOCK_SIZE x BLOCK_SIZE tiles per block.
// NOTE(review): the `index >= N*N` guards only catch overflow past the
// whole array, not past a row boundary, so correctness for
// N % BLOCK_SIZE != 0 should be verified -- the driver uses N = M from
// the command line.
__global__ void dgemm_cuda_shared (int N, int alpha,
                                   int *X, int *Y,
                                   int beta, int *A) {
    // Create Shared Memory Arrays (one tile of X and one of Y per block)
    __shared__ int Xshared[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ int Yshared[BLOCK_SIZE][BLOCK_SIZE];
    //Setup i and j (row/column of the output element this thread produces)
    int i = blockIdx.y * BLOCK_SIZE + threadIdx.y;
    int j = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    int temp = 0;
    // march the tile pair across the k dimension
    for (int s = 0; s < gridDim.x; ++s) {
        int index = i * N + s * BLOCK_SIZE + threadIdx.x;
        // zero-fill out-of-range elements so they don't affect the sum
        if(index >= N*N)
            Xshared[threadIdx.y][threadIdx.x] = 0;
        else
            Xshared[threadIdx.y][threadIdx.x] = X[index];
        index = (s * BLOCK_SIZE + threadIdx.y) * N + j;
        if(index >= N*N)
            Yshared[threadIdx.y][threadIdx.x] = 0;
        else
            Yshared[threadIdx.y][threadIdx.x] = Y[index];
        __syncthreads(); // both tiles loaded before any thread reads them
        for (int k = 0; k < BLOCK_SIZE; ++k)
            temp += Xshared[threadIdx.y][k] * Yshared[k][threadIdx.x];
        __syncthreads(); // finish reads before the next iteration reloads
    }
    if(i < N && j < N) {
        A[i * N + j] = temp * alpha + beta * A[i * N + j];
    }
}
/*
 * createMatrix: heap-allocate an M x N row-major int matrix filled with
 * pseudo-random values in [0, 9].  Caller owns the buffer (delete[]).
 */
int* createMatrix(int M, int N) {
    const int count = M * N;
    int *mat = new int[count];
    int i;
    #pragma omp parallel for private(i)
    for (i = 0; i < count; i++) {
        mat[i] = rand() % 10;
    }
    return mat;
}
/*
 * createVector: heap-allocate a len-element int array of pseudo-random
 * values in [0, 9].  Caller owns the buffer (delete[]).
 */
int* createVector(int len) {
    int *vec = new int[len];
    int i;
    #pragma omp parallel for private(i)
    for (i = 0; i < len; i++) {
        vec[i] = rand() % 10;
    }
    return vec;
}
/*
 * generateRandomData: build a dgemv_data record with N x N random X and
 * Y matrices, random alpha/beta in [0, 9], and an UNINITIALIZED A buffer
 * (callers overwrite A).  lda/incX/incY are set but unused by dgemm.
 */
dgemv_data* generateRandomData(int M, int N) {
    dgemv_data *record = new dgemv_data();
    record->M = M;
    record->N = N;
    record->A = new int[M*N]; // deliberately left uninitialized
    record->alpha = rand() % 10;
    record->beta = rand() % 10;
    record->lda = M;   // not used by dgemm
    record->X = createMatrix(N, N);
    record->Y = createMatrix(N, N);
    record->incX = 1;  // not used by dgemm
    record->incY = 1;  // not used by dgemm
    return record;
}
/*
 * copyData: deep copy of a dgemv_data record; X, Y and A are each N*N
 * ints.  Note M is populated from data->N, matching the original code
 * (all matrices in this program are square).
 */
dgemv_data* copyData(dgemv_data* data) {
    dgemv_data* clone = new dgemv_data();
    const int elems = data->N * data->N;
    clone->M = data->N;
    clone->N = data->N;
    clone->alpha = data->alpha;
    clone->beta = data->beta;
    clone->lda = data->lda;
    clone->incX = data->incX;
    clone->incY = data->incY;
    clone->X = new int[elems];
    clone->Y = new int[elems];
    clone->A = new int[elems];
    memcpy(clone->X, data->X, elems * sizeof(int));
    memcpy(clone->Y, data->Y, elems * sizeof(int));
    memcpy(clone->A, data->A, elems * sizeof(int));
    return clone;
}
/*
 * compareMatrices: element-wise equality check of two int arrays of the
 * given length.  Prints the first mismatch and returns false; returns
 * true when all elements agree (including length == 0).
 */
bool compareMatrices(int *A, int *B, int length) {
    for (int idx = 0; idx < length; idx++) {
        if (abs(A[idx] - B[idx]) != 0) {
            std::cout <<"i: " << idx << " A[i]: " << A[idx] << " B[i]: " << B[idx] << std::endl;
            return false;
        }
    }
    return true;
}
/* freeDataStruct: release the three arrays owned by a dgemv_data record.
 * The struct object itself is NOT deleted (matches original behavior). */
void freeDataStruct(dgemv_data* data) {
    delete[] data->A;
    delete[] data->X;
    delete[] data->Y;
}
/*
 * testOutput: run the serial dgemm on test_data and report whether its
 * result matches data->A (presumably produced by a GPU variant).
 */
void testOutput(dgemv_data *data, dgemv_data* test_data) {
    dgemm(test_data->N, test_data->alpha,
          test_data->X, test_data->Y,
          test_data->beta, test_data->A);
    const bool matches = compareMatrices(data->A, test_data->A, data->N * data->N);
    if (matches) {
        std::cout << "Output: PASSED" << std::endl;
    } else {
        std::cout << "Output: FAILED" << std::endl;
    }
}
// void printTimeTaken(unsigned long ms) {
// std::cout << std::fixed;
// std::cout << std::setprecision(10)
// << "Time taken: "
// << ms
// << " ms or "
// << (unsigned long) ms/1000.0
// << " s\n"
// << std::endl;
// }
/* printTimeTakenFloat: print an elapsed time given in milliseconds, also
 * converted to seconds, using fixed 10-digit precision. */
void printTimeTakenFloat(float ms) {
    std::cout << std::fixed << std::setprecision(10)
              << "Time taken: " << ms
              << " ms or " << ms/1000.0
              << " s\n" << std::endl;
}
/*
 * Driver: run DGEMM (A = alpha*X*Y + beta*A) four ways -- serial CPU,
 * basic CUDA, shared-memory CUDA, and shared-memory CUDA timed with
 * events plus a streamed device-to-host copy -- timing each and
 * optionally validating against the serial reference.
 * Usage: ./dgemv [Testing Off/On(0/1)] [size N]
 * NOTE(review): the dgemv_data records allocated below are never freed
 * (freeDataStruct is unused); acceptable for a one-shot benchmark.
 */
int main(int argc, char **argv) {
  //Clock for C++
  if (argc != 3) {
    std::cout << "Invalid set of arguments.\n"
    << "Usage: ./dgemv [Testing Off/On(0/1)] [size N]"
    << std::endl;
    exit(-1);
  }
  //Get user arguments
  int M,N;
  bool test;
  //Square matrix
  if(argc == 3) {
    M = atoi(argv[2]);
    N = M;
  }
  test = (atoi(argv[1]) > 0) ? true : false;
  //Feed random seed
  srand(time(NULL));
  //Generate the data
  dgemv_data *unModifiedData = generateRandomData(M,N); //DO not run functions on this
  /************************************************************************/
  //Run DGEMVT Serial Version
  //Get data sets to run with and test with (Since the arrays are modified)
  dgemv_data *data = copyData(unModifiedData);
  dgemv_data *serialTestData = copyData(unModifiedData);
  std::cout << "-----------------------------------------------" << std::endl;
  std::cout << "Running Serial Version of DGEMM" << std::endl;
  auto start = Clock::now();
  // NOTE(review): the serial dgemm call is commented out, so this section
  // times nothing and, with testing on, its check compares an unmodified
  // A against the reference -- expect it to report FAILED.
  //dgemm(data->N, data->alpha, data->X,
  //     data->Y, data->beta, data->A);
  auto stop = Clock::now();
  unsigned long ns = (unsigned long) std::chrono::duration_cast<nanoseconds>(stop-start).count();
  printTimeTakenFloat(ns/1000000.0);
  //Test the output
  if(test) {
    testOutput(data, serialTestData);
  }
  //Congifure CUDA blocksize and dim grid/block (ceil-divide N by BLOCK_SIZE)
  size_t gridR = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
  size_t gridC = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;
  dim3 dimGrid(gridC,gridR);
  dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
  /************************************************************************/
  //Run DGEMVT CUDA Version
  //Get data sets to run with and test with (Since the arrays are modified)
  dgemv_data *cuData = copyData(unModifiedData);
  dgemv_data *cuTestData = copyData(unModifiedData);
  //int arrays for the GPU
  int *cuA, *cuX, *cuY;
  //Start time (host clock; includes allocation and both copies)
  start = Clock::now();
  //Allocate space and copy data into GPU allocated arrays
  CUDA_WARN(cudaMalloc(&cuA, N * N * sizeof(int)));
  CUDA_WARN(cudaMalloc(&cuX, N * N * sizeof(int)));
  CUDA_WARN(cudaMalloc(&cuY, N * N * sizeof(int)));
  CUDA_WARN(cudaMemcpy(cuA, cuData->A, N*N*sizeof(int), cudaMemcpyHostToDevice));
  CUDA_WARN(cudaMemcpy(cuX, cuData->X, N*N*sizeof(int), cudaMemcpyHostToDevice));
  CUDA_WARN(cudaMemcpy(cuY, cuData->Y, N*N*sizeof(int), cudaMemcpyHostToDevice));
  std::cout << "-----------------------------------------------" << std::endl;
  std::cout << "Running Basic CUDA Version of DGEMM" << std::endl;
  dgemm_cuda<<<dimGrid, dimBlock>>>
  (N,cuData->alpha, cuX,
   cuY, cuData->beta, cuA);
  //Check if there was an error
  CUDA_WARN(cudaGetLastError());
  // NOTE(review): cudaThreadSynchronize() is deprecated; the modern
  // equivalent is cudaDeviceSynchronize().
  CUDA_WARN(cudaThreadSynchronize());
  //Copy the cude result back
  CUDA_WARN(cudaMemcpy(cuData->A, cuA, N*N*sizeof(int), cudaMemcpyDeviceToHost));
  //Print the time taken
  stop = Clock::now();
  ns = (unsigned long) std::chrono::duration_cast<nanoseconds>(stop-start).count();
  printTimeTakenFloat(ns/1000000.0);
  //Test the output
  if(test) {
    testOutput(cuData, cuTestData);
  }
  //free variables
  CUDA_WARN(cudaFree(cuA));
  CUDA_WARN(cudaFree(cuX));
  CUDA_WARN(cudaFree(cuY));
  /************************************************************************/
  //Run DGEMVT CUDA Shared Memory Version
  //Get data sets to run with and test with (Since the arrays are modified)
  dgemv_data *cuDataShared = copyData(unModifiedData);
  dgemv_data *cuTestDataShared = copyData(unModifiedData);
  //int arrays for the GPU
  int *scuA, *scuX, *scuY;
  //Start time
  start = Clock::now();
  //Allocate space and copy data into GPU allocated arrays
  CUDA_WARN(cudaMalloc(&scuA, N * N * sizeof(int)));
  CUDA_WARN(cudaMalloc(&scuX, N * N * sizeof(int)));
  CUDA_WARN(cudaMalloc(&scuY, N * N * sizeof(int)));
  CUDA_WARN(cudaMemcpy(scuA, cuDataShared->A, N*N*sizeof(int), cudaMemcpyHostToDevice));
  CUDA_WARN(cudaMemcpy(scuX, cuDataShared->X, N*N*sizeof(int), cudaMemcpyHostToDevice));
  CUDA_WARN(cudaMemcpy(scuY, cuDataShared->Y, N*N*sizeof(int), cudaMemcpyHostToDevice));
  std::cout << "-----------------------------------------------" << std::endl;
  std::cout << "Running Shared Memory CUDA Version of DGEMM" << std::endl;
  dgemm_cuda_shared<<<dimGrid, dimBlock>>>
  (N,cuDataShared->alpha, scuX,
   scuY, cuDataShared->beta, scuA);
  //Check if there was an error
  CUDA_WARN(cudaGetLastError());
  CUDA_WARN(cudaThreadSynchronize());
  //Copy the cude result back
  CUDA_WARN(cudaMemcpy(cuDataShared->A, scuA, N*N*sizeof(int), cudaMemcpyDeviceToHost));
  //Print the time taken
  stop = Clock::now();
  ns = (unsigned long) std::chrono::duration_cast<nanoseconds>(stop-start).count();
  printTimeTakenFloat(ns/1000000.0);
  //Test the output
  if(test) {
    testOutput(cuDataShared, cuTestDataShared);
  }
  //free variables
  CUDA_WARN(cudaFree(scuA));
  CUDA_WARN(cudaFree(scuX));
  CUDA_WARN(cudaFree(scuY));
  /************************************************************************/
  //Run DGEMVT CUDA Shared Memory Version w/ Driver Events/Streams
  //Get data sets to run with and test with (Since the arrays are modified)
  dgemv_data *cuDataShared2 = copyData(unModifiedData);
  dgemv_data *cuTestDataShared2 = copyData(unModifiedData);
  //int arrays for the GPU
  int *scuA2, *scuX2, *scuY2;
  //Events and streams
  cudaStream_t stream[N_STREAMS];
  cudaEvent_t begin, end;
  cudaEventCreate(&begin);
  cudaEventCreate(&end);
  for(int i = 0; i < N_STREAMS; i++) cudaStreamCreate(&stream[i]);
  //Start the event timer (GPU-side timing, unlike the host clock above)
  cudaEventRecord(begin, 0);
  //Allocate space and copy data into GPU allocated arrays
  CUDA_WARN(cudaMalloc(&scuA2, N * N * sizeof(int)));
  CUDA_WARN(cudaMalloc(&scuX2, N * N * sizeof(int)));
  CUDA_WARN(cudaMalloc(&scuY2, N * N * sizeof(int)));
  //COpy Memory to the GPU on three different streams
  // NOTE(review): the host buffers come from new[] (pageable), so these
  // async copies cannot truly overlap; pinned memory (cudaMallocHost)
  // would be needed for real copy/compute overlap.
  CUDA_WARN(cudaMemcpyAsync(scuA2, cuDataShared2->A, N*N*sizeof(int), cudaMemcpyHostToDevice, stream[0]));
  CUDA_WARN(cudaMemcpyAsync(scuX2, cuDataShared2->X, N*N*sizeof(int), cudaMemcpyHostToDevice, stream[1]));
  CUDA_WARN(cudaMemcpyAsync(scuY2, cuDataShared2->Y, N*N*sizeof(int), cudaMemcpyHostToDevice, stream[2]));
  cudaDeviceSynchronize();
  std::cout << "-----------------------------------------------" << std::endl;
  std::cout << "Running CUDA Shared Memory Version of DGEMM" << std::endl;
  dgemm_cuda_shared<<<dimGrid, dimBlock>>>
  (N,cuDataShared2->alpha, scuX2,
   scuY2, cuDataShared2->beta, scuA2);
  //Check if there was an error
  CUDA_WARN(cudaGetLastError());
  CUDA_WARN(cudaThreadSynchronize());
  //Streams: copy the result back in N_STREAMS chunks, one per stream
  for (int i = 0; i < N_STREAMS; i++) {
    cudaMemcpyAsync(cuDataShared2->A+(i*N*N)/N_STREAMS, scuA2+(i*N*N)/N_STREAMS, sizeof(int)*(N*N)/N_STREAMS, cudaMemcpyDeviceToHost, stream[i]);
  }
  cudaDeviceSynchronize();
  //Stop the timer
  cudaEventRecord(end, 0);
  cudaEventSynchronize(end);
  //Print the time taken
  float ms;
  cudaEventElapsedTime(&ms, begin, end);
  printTimeTakenFloat(ms);
  //Test the output
  if(test) {
    testOutput(cuDataShared2, cuTestDataShared2);
  }
  //free variables
  CUDA_WARN(cudaFree(scuA2));
  CUDA_WARN(cudaFree(scuX2));
  CUDA_WARN(cudaFree(scuY2));
  return 0;
}
|
21,091 | #include <stdio.h>
#include <stdlib.h>
#include <curand_kernel.h> // CURAND lib header file
#define TRIALS_PER_THREAD 1000000
#define BLOCKS 16
#define THREADS 1024
#define PI 3.14159265358979 // known value of pi
// Initialize one CURAND state per thread.
// Each thread seeds its generator with its own global index (sequence 0,
// offset 0), so every thread draws from a different random stream.
__global__ void setup_kernel(curandState *states)
{
    const int gid = threadIdx.x + blockIdx.x * blockDim.x;
    curand_init(gid, 0, 0, &states[gid]);
}
// Monte Carlo sampling kernel: each thread draws TRIALS_PER_THREAD points
// in the unit square and counts how many land inside the quarter circle
// x^2 + y^2 <= 1. The per-thread hit count goes to estimate[tid]; the RNG
// state is written back so a later launch continues the same sequence.
__global__ void pi_mc(unsigned long *estimate, curandState *states)
{
    const unsigned int gid = threadIdx.x + blockDim.x * blockIdx.x;
    curandState rng = states[gid];
    unsigned long hits = 0;
    for (int trial = 0; trial < TRIALS_PER_THREAD; ++trial)
    {
        const double px = curand_uniform(&rng);
        const double py = curand_uniform(&rng);
        if (px * px + py * py <= 1.0f)
            ++hits;
    }
    states[gid] = rng;
    estimate[gid] = hits;
}
// Host driver: repeatedly launches the Monte Carlo kernel, accumulating
// samples across iterations so the pi estimate keeps refining, and prints
// the running estimate with the cumulative elapsed time.
int main(int argc, char *argv[])
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // NOTE(review): ~128 KB on the stack (16*1024 unsigned longs); fits the
    // usual default stack limit, but heap allocation would be safer.
    unsigned long host[BLOCKS * THREADS];
    unsigned long *dev;
    curandState *devStates;
    cudaMalloc((void **) &dev, BLOCKS * THREADS * sizeof(unsigned long));
    cudaMalloc( (void **)&devStates, BLOCKS*THREADS*sizeof(curandState) );
    // Running totals across all outer iterations.
    unsigned long long points_in_circle = 0;
    unsigned long long total_points = 0;
    // One RNG state per thread (seed = thread id, sequence 0).
    setup_kernel<<<BLOCKS, THREADS>>>(devStates);
    cudaEventRecord(start);
    printf(" time (ms) | total points | points in 1/4 circle | estimated pi | error \n");
    printf("------------------------------------------------------------------------------------------------------------\n");
    for (int j=1; j < 10000; j++)
    {
        pi_mc<<<BLOCKS, THREADS>>>(dev, devStates);
        // Blocking copy also synchronizes with the kernel on stream 0.
        cudaMemcpy(host, dev, BLOCKS * THREADS * sizeof(unsigned long), cudaMemcpyDeviceToHost);
        for(int i = 0; i < BLOCKS * THREADS; i++)
        {
            points_in_circle += host[i];
        }
        total_points += (unsigned long long)BLOCKS * (unsigned long long)THREADS * (unsigned long long)TRIALS_PER_THREAD;
        long double pi = 4 * (long double) points_in_circle / (long double)total_points;
        long double error = pi - (long double) PI;
        // stop is re-recorded each pass, so the printed time is cumulative
        // since the first launch.
        cudaEventRecord(stop);
        cudaEventSynchronize(stop);
        float milliseconds = 0;
        cudaEventElapsedTime(&milliseconds, start, stop);
        // Fixed format-specifier UB: unsigned long long needs %llu (was
        // %lld) and long double needs %Lf (was %lf).
        printf("%14.0f\t%16llu\t%16llu\t%20.14Lf\t%20.14Lf\n", milliseconds, total_points, points_in_circle, pi, error);
    }
    cudaFree(dev);
    cudaFree(devStates);
    return 0;
}
|
21,092 | #include <iostream>
#include <cstdio>
#include <cstdlib>
#include <time.h>
#include <cuda_runtime.h>
/*
* Lectura Archivo
*/
// Read an image description from a text file.
// Layout: first line "N S"; then P*P block-permutation indices; then N*N
// floats for each of the R, G and B channels.
// Outputs (all allocated here, caller frees): *R,*G,*B channel arrays,
// *ordenamiento permutation of P*P ints; *N image side, *S block side,
// *P = N/S blocks per side.
void Read(float** R, float** G, float** B,
int *N, int *S, int **ordenamiento, int* P, const char *filename) {
    FILE *fp;
    fp = fopen(filename, "r");
    // Fail loudly instead of crashing inside fscanf(NULL, ...).
    if (fp == NULL) {
        fprintf(stderr, "Read: cannot open '%s'\n", filename);
        exit(EXIT_FAILURE);
    }
    fscanf(fp, "%d %d\n", N, S);
    int imsize = (*N) * (*N);
    *P = (*N) / (*S);
    int P2 = (*P) * (*P);
    float* R1 = new float[imsize];
    float* G1 = new float[imsize];
    float* B1 = new float[imsize];
    int *orden_temp = new int[P2];
    for(int i = 0; i < P2; i++)
        fscanf(fp, "%d ", &(orden_temp[i]));
    for(int i = 0; i < imsize; i++)
        fscanf(fp, "%f ", &(R1[i]));
    for(int i = 0; i < imsize; i++)
        fscanf(fp, "%f ", &(G1[i]));
    for(int i = 0; i < imsize; i++)
        fscanf(fp, "%f ", &(B1[i]));
    fclose(fp);
    *R = R1; *G = G1; *B = B1;
    *ordenamiento = orden_temp;
}
/*
* Escritura Archivo
*/
// Write three float channels (R, G, B), each M*N values, to a text file:
// header line "M N", then one space-separated line per channel.
void Write(float* R, float* G, float* B,
int M, int N, const char *filename) {
    FILE *out = fopen(filename, "w");
    fprintf(out, "%d %d\n", M, N);
    const int total = M * N;
    float* channels[3] = {R, G, B};
    for (int c = 0; c < 3; c++) {
        for (int k = 0; k < total - 1; k++)
            fprintf(out, "%f ", channels[c][k]);
        fprintf(out, "%f\n", channels[c][total - 1]);
    }
    fclose(out);
}
/*
* Procesamiento Imagen CPU
*/
// CPU reference implementation of the tile shuffle: the N x N image is
// split into P x P tiles of S x S pixels; output tile t takes its pixels
// from input tile ordenamiento[t]. One pass over every output pixel.
void funcionCPU(float* R,float* G,float* B, float* Rout,float* Gout,float* Bout, int N, int S, int P, int* ordenamiento){
    for (int px = 0; px < N * N; px++){
        const int col = px % N;
        const int row = px / N;
        // Tile this output pixel belongs to.
        const int tileX = col / S;
        const int tileY = row / S;
        const int dstTile = tileX + tileY * P;
        // Tile the pixel data is taken from.
        const int srcTile = ordenamiento[dstTile];
        const int srcTileX = srcTile % P;
        const int srcTileY = srcTile / P;
        // Translate the coordinate by the whole-tile offset.
        const int srcCol = col + (srcTileX - tileX) * S;
        const int srcRow = row + (srcTileY - tileY) * S;
        const int src = srcCol + srcRow * N;
        Rout[px] = R[src];
        Gout[px] = G[src];
        Bout[px] = B[src];
    }
}
/*
* Procesamiento Imagen GPU
*/
// GPU version of the tile shuffle: one thread per output pixel.
// Launch with at least N*N threads; threads past the end exit early.
__global__ void kernelGPU(float* R,float* G,float* B, float* Rout,float* Gout,float* Bout, int N, int S, int P, int* ordenamiento){
    const int px = threadIdx.x + blockDim.x * blockIdx.x;
    if (px >= N * N) return;   // guard the grid tail
    const int col = px % N;
    const int row = px / N;
    const int tileX = col / S;
    const int tileY = row / S;
    const int dstTile = tileX + tileY * P;
    const int srcTile = ordenamiento[dstTile];
    const int srcTileX = srcTile % P;
    const int srcTileY = srcTile / P;
    const int srcCol = col + (srcTileX - tileX) * S;
    const int srcRow = row + (srcTileY - tileY) * S;
    const int src = srcCol + srcRow * N;
    Rout[px] = R[src];
    Gout[px] = G[src];
    Bout[px] = B[src];
}
/*
* Codigo Principal
*/
// Driver: reads each listed image, shuffles its tiles on the CPU and on the
// GPU, times both, and writes both results out for comparison.
int main(int argc, char **argv){
/*
* Initialization
*/
clock_t t1, t2;
cudaEvent_t ct1, ct2;
double ms;
float dt;
int N, S;
int P;
int *ordenamiento; // host copy of the tile permutation
int* ordenamiento_dev;
float *Rhost, *Ghost, *Bhost;
float *Rhostout, *Ghostout, *Bhostout;
float *Rdev, *Gdev, *Bdev;
float *Rdevout, *Gdevout, *Bdevout;
// For each image: {input file, CPU output file, GPU output file}.
char names[1][3][20] = {{"img100x100.txt\0", "img100x100CPU.txt\0", "img100x100GPU.txt\0"}};
for (int i=0; i<1; i++){
// Read allocates the channel and permutation arrays.
Read(&Rhost, &Ghost, &Bhost, &N, &S, &ordenamiento, &P, names[i][0]);
/*
* CPU part
*/
Rhostout = new float[N*N];
Ghostout = new float[N*N];
Bhostout = new float[N*N];
t1 = clock();
funcionCPU(Rhost,Ghost,Bhost, Rhostout,Ghostout,Bhostout, N, S, P, ordenamiento); // CPU reference run
t2 = clock();
ms = 1000.0 * (double)(t2 - t1) / CLOCKS_PER_SEC;
std::cout << "Tiempo CPU: " << ms << "[ms]" << std::endl;
Write(Rhostout, Ghostout, Bhostout, N, N, names[i][1]);
delete[] Rhostout; delete[] Ghostout; delete[] Bhostout;
/*
* GPU part
*/
// 1-D launch covering all N*N pixels.
int grid_size, block_size = 256;
grid_size = (int)ceil((float) N * N / block_size);
cudaMalloc((void**)&Rdev, N * N * sizeof(float));
cudaMalloc((void**)&Gdev, N * N * sizeof(float));
cudaMalloc((void**)&Bdev, N * N * sizeof(float));
cudaMalloc((void**)&ordenamiento_dev, (P*P) * sizeof(int));
cudaMemcpy(Rdev, Rhost, N * N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(Gdev, Ghost, N * N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(Bdev, Bhost, N * N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(ordenamiento_dev, ordenamiento, (P*P) * sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc((void**)&Rdevout, N * N * sizeof(float));
cudaMalloc((void**)&Gdevout, N * N * sizeof(float));
cudaMalloc((void**)&Bdevout, N * N * sizeof(float));
// Time only the kernel with CUDA events (copies are excluded).
cudaEventCreate(&ct1);
cudaEventCreate(&ct2);
cudaEventRecord(ct1);
kernelGPU<<<grid_size, block_size>>>(Rdev,Gdev,Bdev, Rdevout,Gdevout,Bdevout, N, S, P, ordenamiento_dev); // GPU run
cudaEventRecord(ct2);
cudaEventSynchronize(ct2);
cudaEventElapsedTime(&dt, ct1, ct2);
std::cout << "Tiempo GPU: " << dt << "[ms]" << std::endl;
Rhostout = new float[N*N];
Ghostout = new float[N*N];
Bhostout = new float[N*N];
cudaMemcpy(Rhostout, Rdevout, N * N * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(Ghostout, Gdevout, N * N * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(Bhostout, Bdevout, N * N * sizeof(float), cudaMemcpyDeviceToHost);
Write(Rhostout, Ghostout, Bhostout, N, N, names[i][2]);
cudaFree(Rdev); cudaFree(Gdev); cudaFree(Bdev);
cudaFree(Rdevout); cudaFree(Gdevout); cudaFree(Bdevout);
cudaFree(ordenamiento_dev);
delete[] Rhost; delete[] Ghost; delete[] Bhost;
delete[] Rhostout; delete[] Ghostout; delete[] Bhostout;
delete[] ordenamiento;
}
return 0;
}
21,093 | //
// Created by root on 2020/11/12.
//
#include "cuda_runtime.h"
#include "stdio.h"
__device__ int *m = NULL, *n = NULL;
__device__ volatile int *m_v = NULL, *n_v = NULL;
__device__ int A, B, A_v, B_v;
// Increment the four device-global counters through the shared pointers.
// NOTE(review): called concurrently by every thread with no atomics or
// fences -- the read-modify-write races appear to be the point of this
// experiment (volatile vs non-volatile access).
__device__ void write() {
// (earlier variant assigned constants instead of incrementing)
// m = 10;
// n = 20;
//
// m_v = 10;
// n_v = 20;
(*m)++;
(*n)++;
(*m_v)++;
(*n_v)++;
}
// Snapshot the current counter values into the device-global result
// variables. Racy for the same reason as write(): no synchronization with
// other threads' increments.
__device__ void read() {
A = *m;
B = *n;
A_v = *m_v;
B_v = *n_v;
}
// Experiment kernel: one "leader" thread wires the global pointers to the
// result variables and zeroes them, then ALL threads increment and read
// them without any synchronization.
// NOTE(review): there is no barrier between the leader's initialization and
// the write() calls of other blocks/threads -- non-leader threads may call
// write() while m/n/m_v/n_v are still NULL or before they are zeroed. This
// is undefined behavior and presumably intentional for the experiment;
// confirm before reusing this pattern.
__global__ void testKernel(int *count) {
// Leader condition: global x-index 0 and threadIdx.y == 0. With a grid
// whose gridDim.y > 1 this would match one thread per y-block row.
if ((blockIdx.x * blockDim.x + threadIdx.x) == 0 && threadIdx.y == 0) {
m = &A;
n = &B;
m_v = &A_v;
n_v = &B_v;
(*m) = 0;
(*n) = 0;
(*m_v) = 0;
(*n_v) = 0;
// printf("thread 0\n");
} else {
// printf("following thread\n");
}
write();
// __shared__ int t;
// t++;
//
// printf("t = %d\n", t);
// __threadfence();
// __syncthreads();
// Every thread stores its own 1-based linear id into *count; the final
// value is whichever store lands last (a benign-by-design race).
int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
int idx_y = threadIdx.y + blockIdx.y * blockDim.y;
*count = 1 + idx_x + idx_y * blockDim.x * gridDim.x;
printf("count = %d\n", *count);
read();
// printf("A = %d, B = %d, A_v = %d, B_v = %d\n", A, B, A_v, B_v);
}
int *m_h = NULL, *n_h = NULL;
volatile int *m_v_h = NULL, *n_v_h = NULL;
int A_h, B_h, A_v_h, B_v_h;
// Host-side mirror of the device write(): bump each counter once through
// the global pointers (single-threaded here, so no race).
void writeH() {
    ++(*m_h);
    ++(*n_h);
    ++(*m_v_h);
    ++(*n_v_h);
}
// Host-side mirror of the device read(): copy the current counter values
// into the result variables.
void readH() {
    A_h = *m_h;
    B_h = *n_h;
    A_v_h = *m_v_h;
    B_v_h = *n_v_h;
}
// Host reference run: on the first epoch wire the pointers to the result
// variables and zero them, then perform one write/read cycle and report.
void testHost(int epoch) {
    if (epoch == 0) {
        m_h = &A_h;
        n_h = &B_h;
        m_v_h = &A_v_h;
        n_v_h = &B_v_h;
        (*m_h) = 0;
        (*n_h) = 0;
        (*m_v_h) = 0;
        (*n_v_h) = 0;
    }
    writeH();
    readH();
    printf("A_h = %d, B_h = %d, A_v_h = %d, B_v_h = %d\n", A_h, B_h, A_v_h, B_v_h);
}
// Launch the experiment kernel on a small 2-D grid, then read back the
// result variables from device global memory and print them.
int main() {
    int x = 10, y = 2;
    dim3 block(2, 2);
    dim3 grid((x + block.x - 1) / block.x, (y + block.y - 1) / block.y);
    int count = 0;
    int *countD;
    cudaMalloc(&countD, sizeof(int));
    cudaMemcpy(countD, &count, sizeof(count), cudaMemcpyHostToDevice);
    testKernel<<<grid, block>>>(countD);
    // cudaError_t err = cudaDeviceSynchronize();
    // printf("result:%s\n", cudaGetErrorString(err));
    // Blocking copy also synchronizes with the kernel on the default stream.
    cudaMemcpy(&count, countD, sizeof(count), cudaMemcpyDeviceToHost);
    // printf("count = %d\n", count);
    // FIX: A/B/A_v/B_v are __device__ symbols; their address cannot be taken
    // in host code, so cudaMemcpy(&A_h, &A, ...) was invalid. Copy through
    // the symbol API instead.
    cudaMemcpyFromSymbol(&A_h, A, sizeof(A_h));
    cudaMemcpyFromSymbol(&B_h, B, sizeof(B_h));
    cudaMemcpyFromSymbol(&A_v_h, A_v, sizeof(A_v_h));
    cudaMemcpyFromSymbol(&B_v_h, B_v, sizeof(B_v_h));
    printf("===============================\n");
    printf("A_h = %d, B_h = %d, A_v_h = %d, B_v_h = %d\n", A_h, B_h, A_v_h, B_v_h);
    // for (int i = 0; i < x * y; i++) {
    //     testHost(i);
    // }
    cudaFree(countD);   // was leaked before
    return 0;
}
21,094 | #include <stdio.h>
// Empty placeholder kernel; never launched. Its presence makes nvcc treat
// this translation unit as containing device code.
__global__ void kernel( void ){
}
// Print the number of CUDA-capable devices visible to the runtime.
int main(void) {
    int count = 0;
    // cudaGetDeviceCount returns an error (and leaves count untouched) when
    // no driver/device is available -- check it instead of printing garbage.
    cudaError_t err = cudaGetDeviceCount(&count);
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaGetDeviceCount failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    printf( "%d\n",count );
    return 0;
}
|
21,095 | /*
* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
extern "C" {
#define MPEG_LUMA_MIN (16)
#define MPEG_CHROMA_MIN (16)
#define MPEG_LUMA_MAX (235)
#define MPEG_CHROMA_MAX (240)
#define JPEG_LUMA_MIN (0)
#define JPEG_CHROMA_MIN (1)
#define JPEG_LUMA_MAX (255)
#define JPEG_CHROMA_MAX (255)
__device__ int mpeg_min[] = {MPEG_LUMA_MIN, MPEG_CHROMA_MIN};
__device__ int mpeg_max[] = {MPEG_LUMA_MAX, MPEG_CHROMA_MAX};
__device__ int jpeg_min[] = {JPEG_LUMA_MIN, JPEG_CHROMA_MIN};
__device__ int jpeg_max[] = {JPEG_LUMA_MAX, JPEG_CHROMA_MAX};
// Clamp val into the inclusive range [min, max].
__device__ int clamp(int val, int min, int max)
{
    return (val < min) ? min : ((val > max) ? max : val);
}
// Convert one plane sample from MPEG (limited/studio) range to JPEG (full)
// range using integer fixed-point arithmetic.
// comp_id: 0 = luma, nonzero = chroma. One thread per pixel.
// NOTE(review): there is no x/y bounds check, so the launch grid must
// exactly cover the plane (pitch x height) -- confirm at the call site.
__global__ void to_jpeg_cuda(const unsigned char* src, unsigned char* dst,
int pitch, int comp_id)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int src_, dst_;
// 8 bit -> 15 bit for better precision
src_ = static_cast<int>(src[x + y * pitch]) << 7;
// Conversion (fixed-point scale + offset; input clamped to avoid overflow)
dst_ = comp_id ? (min(src_, 30775) * 4663 - 9289992) >> 12 // chroma
: (min(src_, 30189) * 19077 - 39057361) >> 14; // luma
// Dither replacement
dst_ = dst_ + 64;
// Back to 8 bit, clamped into the JPEG-legal range for this component
dst_ = clamp(dst_ >> 7, jpeg_min[comp_id], jpeg_max[comp_id]);
dst[x + y * pitch] = static_cast<unsigned char>(dst_);
}
// Convert one plane sample from JPEG (full) range to MPEG (limited/studio)
// range using integer fixed-point arithmetic.
// comp_id: 0 = luma, nonzero = chroma. One thread per pixel.
// NOTE(review): no x/y bounds check -- the launch grid must exactly cover
// the plane (pitch x height); confirm at the call site.
__global__ void to_mpeg_cuda(const unsigned char* src, unsigned char* dst,
int pitch, int comp_id)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int src_, dst_;
// 8 bit -> 15 bit for better precision
src_ = static_cast<int>(src[x + y * pitch]) << 7;
// Conversion (fixed-point scale + offset)
dst_ = comp_id ? (src_ * 1799 + 4081085) >> 11 // chroma
: (src_ * 14071 + 33561947) >> 14; // luma
// Dither replacement
dst_ = dst_ + 64;
// Back to 8 bit, clamped into the MPEG-legal range for this component
dst_ = clamp(dst_ >> 7, mpeg_min[comp_id], mpeg_max[comp_id]);
dst[x + y * pitch] = static_cast<unsigned char>(dst_);
}
}
|
21,096 | #include<stdlib.h>
#include<iostream>
#include<cmath>
#include<fstream>
#include<chrono>
//shared host/device constants
int gridWidth,gridHeight,gridDepth,blockWidth,blockHeight,blockDepth,gridWidthBlocks,gridHeightBlocks,gridDepthBlocks,gridArea;
__constant__ int gridWidth_d,gridHeight_d,gridDepth_d,blockWidth_d,blockHeight_d,blockDepth_d,gridWidthBlocks_d,gridHeightBlocks_d,gridDepthBlocks_d;
//host only constants
int timeSteps;
// Flatten (x, y, z) into the 1-D index of the row-major grid array, using
// the grid dimensions held in device constant memory.
__device__ int arrayPos(const int &x,const int &y,const int &z){
    return x + gridWidth_d * (y + gridHeight_d * z);
}
// One step of 3-D "Game of Life" with the 6-neighbour rule: a live cell
// survives with 2 or 3 live neighbours, a dead cell is born with exactly 3.
// Cells on the outer shell of the grid are never updated.
// grid is read, grid1 is written; the caller swaps the pointers per step.
__global__ void solver(bool *grid,bool *grid1){
    const int x = blockIdx.x * blockWidth_d + threadIdx.x;
    const int y = blockIdx.y * blockHeight_d + threadIdx.y;
    const int z = blockIdx.z * blockDepth_d + threadIdx.z;
    const bool interior = x > 0 && x < gridWidth_d - 1
                       && y > 0 && y < gridHeight_d - 1
                       && z > 0 && z < gridDepth_d - 1;
    if (!interior) return;
    const int neighbors =
          grid[arrayPos(x + 1, y, z)] + grid[arrayPos(x - 1, y, z)]
        + grid[arrayPos(x, y + 1, z)] + grid[arrayPos(x, y - 1, z)]
        + grid[arrayPos(x, y, z + 1)] + grid[arrayPos(x, y, z - 1)];
    const int self = arrayPos(x, y, z);
    if (grid[self]) {
        grid1[self] = (neighbors == 2 || neighbors == 3);
    } else {
        grid1[self] = (neighbors == 3);
    }
}
//helper function to read grid from a text file
void readTextRepr(const std::string& filename,bool *array){
std::ifstream file(filename);
std::string str;
int index=0;
while(std::getline(file,str)){
if(str!="---"){
for(int i=0;i<str.length();i++){
//stop reading if file is greater than arrayLength
if(index<gridArea){
if(str[i]!='\n'){
if(str[i]=='#'){
array[index]=true;
}else{
array[index]=false;
}
index++;
}
}
}
}
}
//fill in excess space with falses if file is too short
if(index<gridArea){
for(int i=index;i<gridArea;i++){
array[index]=false;
}
}
}
//helper function to write grid to a text file
void writeTextRepr(const std::string& filename,bool *array){
std::ofstream file(filename);
for(int i=0;i<gridArea;i++){
if(array[i]){
file<<'#';
}else{
file<<' ';
}
if((i+1)%gridWidth==0){
file<<'\n';
}
if((i+1)%(gridWidth*gridHeight)==0){
file<<"---\n";
}
}
}
// Driver: parse options, configure the launch geometry, run the 3-D life
// simulation for timeSteps iterations and write the result.
// Usage: ./game [-i infile] [-o outfile] [-t steps] [-g w h d] [-b w h d]
int main(int argc, const char * argv[]){
    // start wall-clock timing of the whole run
    auto startTime = std::chrono::high_resolution_clock::now();
    // defaults, overridable from the command line
    std::string inFile = "in.txt";
    std::string outFile = "out.txt";
    timeSteps = 1;
    gridWidth = 16;
    gridHeight = 16;
    gridDepth = 16;
    // Benchmarked block sizes (3D and 2D kernels, grids from 16^3 up to
    // 2048x2048x128): 16x16x1 with the 3D kernel was consistently fastest.
    blockWidth = 16;
    blockHeight = 16;
    blockDepth = 1;
    // Argument parsing: optionLen = argv slots the current option consumes.
    // (Removed a leftover debug printf of the loop index here.)
    int optionLen = 0;
    for(int i=1;i<argc;i+=optionLen){
        if(strcmp(argv[i],"-i")==0){
            optionLen = 2;
            if(i+optionLen<=argc){
                inFile = argv[i+1];
            }else{
                printf("Error: Missing arguments for -i\n");
                return 1;
            }
        }else if(strcmp(argv[i],"-o")==0){
            optionLen = 2;
            if(i+optionLen<=argc){
                outFile = argv[i+1];
            }else{
                printf("Error: Missing arguments for -o\n");
                return 1;
            }
        }else if(strcmp(argv[i],"-t")==0){
            optionLen = 2;
            if(i+optionLen<=argc){
                timeSteps = strtol(argv[i+1],NULL,10);
            }else{
                printf("Error: Missing arguments for -t\n");
                return 1;
            }
        }else if(strcmp(argv[i],"-g")==0){
            optionLen = 4;
            if(i+optionLen<=argc){
                gridWidth = strtol(argv[i+1],NULL,10);
                gridHeight = strtol(argv[i+2],NULL,10);
                gridDepth = strtol(argv[i+3],NULL,10);
            }else{
                printf("Error: Missing arguments for -g\n");
                return 1;
            }
        }else if(strcmp(argv[i],"-b")==0){
            optionLen = 4;
            if(i+optionLen<=argc){
                blockWidth = strtol(argv[i+1],NULL,10);
                blockHeight = strtol(argv[i+2],NULL,10);
                blockDepth = strtol(argv[i+3],NULL,10);
            }else{
                printf("Error: Missing arguments for -b\n");
                return 1;
            }
        }else{
            printf("Error: Parameters must be of form:\n");
            printf("./game [-i infile] [-o outfile] [-t timesteps] [-g griddimensions] [-b blockdimensions]\n");
            return 1;
        }
    }
    // derived launch geometry (ceil-div so partial blocks cover the edges)
    gridWidthBlocks = std::ceil((float)gridWidth/(float)blockWidth);
    gridHeightBlocks = std::ceil((float)gridHeight/(float)blockHeight);
    gridDepthBlocks = std::ceil((float)gridDepth/(float)blockDepth);
    gridArea = gridWidth*gridHeight*gridDepth;
    std::cout << "In file = " << inFile << "\n";
    std::cout << "Out file = " << outFile << "\n";
    printf("Time steps = %d\n",timeSteps);
    printf("Grid dimensions = %dx%dx%d\n",gridWidth,gridHeight,gridDepth);
    printf("Block dimensions = %dx%dx%d\n",blockWidth,blockHeight,blockDepth);
    printf("Grid in blocks = %dx%dx%d\n",gridWidthBlocks,gridHeightBlocks,gridDepthBlocks);
    printf("...");
    // publish run parameters to device constant memory
    cudaMemcpyToSymbol(gridWidth_d,&gridWidth,sizeof(int),0,cudaMemcpyHostToDevice);
    cudaMemcpyToSymbol(gridHeight_d,&gridHeight,sizeof(int),0,cudaMemcpyHostToDevice);
    cudaMemcpyToSymbol(gridDepth_d,&gridDepth,sizeof(int),0,cudaMemcpyHostToDevice);
    cudaMemcpyToSymbol(blockWidth_d,&blockWidth,sizeof(int),0,cudaMemcpyHostToDevice);
    cudaMemcpyToSymbol(blockHeight_d,&blockHeight,sizeof(int),0,cudaMemcpyHostToDevice);
    cudaMemcpyToSymbol(blockDepth_d,&blockDepth,sizeof(int),0,cudaMemcpyHostToDevice);
    cudaMemcpyToSymbol(gridWidthBlocks_d,&gridWidthBlocks,sizeof(int),0,cudaMemcpyHostToDevice);
    cudaMemcpyToSymbol(gridHeightBlocks_d,&gridHeightBlocks,sizeof(int),0,cudaMemcpyHostToDevice);
    cudaMemcpyToSymbol(gridDepthBlocks_d,&gridDepthBlocks,sizeof(int),0,cudaMemcpyHostToDevice);
    dim3 numBlocks(gridWidthBlocks,gridHeightBlocks,gridDepthBlocks);
    dim3 blockSize(blockWidth,blockHeight,blockDepth);
    size_t gridSize = gridWidth*gridHeight*gridDepth;
    // device + host grid buffers
    bool *grid_h;
    bool *grid_d,*grid1_d;
    grid_h = (bool *)calloc(gridSize,sizeof(bool));
    cudaMalloc((void **)&grid_d, gridSize*sizeof(bool));
    cudaMalloc((void **)&grid1_d, gridSize*sizeof(bool));
    // BUG FIX: solver never writes the border cells of grid1_d, so after the
    // first pointer swap the active grid's border was uninitialized device
    // memory. Zero the second buffer so borders stay dead, matching grid_h.
    cudaMemset(grid1_d, 0, gridSize*sizeof(bool));
    // load the initial state
    readTextRepr(inFile,grid_h);
    // only the source grid is uploaded; the other buffer is kernel output
    cudaMemcpy(grid_d,grid_h,gridSize*sizeof(bool),cudaMemcpyHostToDevice);
    for(int i=0;i<timeSteps;i++){
        solver<<<numBlocks,blockSize>>>(grid_d,grid1_d);
        cudaDeviceSynchronize();
        std::swap(grid_d,grid1_d);   // the result becomes the next input
        cudaError_t error = cudaGetLastError();
        if(error != cudaSuccess){
            std::cout << cudaGetErrorString(error) << std::endl;
        }
    }
    // after the final swap grid_d holds the latest state
    cudaMemcpy(grid_h,grid_d,gridSize*sizeof(bool),cudaMemcpyDeviceToHost);
    writeTextRepr(outFile,grid_h);
    free(grid_h);
    cudaFree(grid_d);
    cudaFree(grid1_d);
    // report total wall-clock time
    auto endTime = std::chrono::high_resolution_clock::now();
    auto timePassed = std::chrono::duration_cast<std::chrono::milliseconds>(endTime-startTime).count();
    printf("Ran in %ld ms\n",(long)timePassed);   // cast: count() is long long on some platforms
    return 0;
}
/* Block size X: 32 */
// Vertical update step of a flux-corrected-transport (FCT) advection scheme
// (judging by the names, from an ocean-model tracer solver -- confirm).
// Launch: one block per node column (blockIdx.x selects the node), 32
// threads striding over the column's vertical levels.
// For each active level L of this node:
//   del_ttf_advvert[L] += -ttf[L]*hnode[L] + fct_LO[L]*hnode_new[L]
//                         + (fct_adf_v[L] - fct_adf_v[L+1]) * dt / area[L]
// Reading fct_adf_v[L+1] is safe because the loop stops at nLevels-2.
__global__ void fct_ale_c_vertical(const int maxLevels, const int * __restrict__ nLevels, double * __restrict__ del_ttf_advvert, const double * __restrict__ ttf, const double * __restrict__ hnode, const double * __restrict__ fct_LO, const double * __restrict__ hnode_new, const double * __restrict__ fct_adf_v, const double dt, const double * __restrict__ area)
{
// Base offset of this node's column in the level-major arrays.
const int node = (blockIdx.x * maxLevels);
const int maxNodeLevel = nLevels[blockIdx.x] - 1;
// 32-thread stride loop over the vertical levels of this column.
for ( int level = threadIdx.x; level < maxNodeLevel; level += 32 )
{
double temp = 0;
temp = del_ttf_advvert[node + level] - (ttf[node + level] * hnode[node + level]);
temp += fct_LO[node + level] * hnode_new[node + level];
temp += (fct_adf_v[node + level] - fct_adf_v[node + level + 1]) * (dt / area[node + level]);
del_ttf_advvert[node + level] = temp;
}
}
21,098 | #include <chrono>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iostream>
const int BLOCK_SIZE = 32;
const int THREADS_PER_BLOCK = 512;
// Set bit k in the bitset of candidate e. Each candidate's bitset is
// blockCount 32-bit words long; bit k lives in word k / BLOCK_SIZE.
void BranchCPU(const ssize_t e, uint32_t* set, const int blockCount, const int k) {
    uint32_t* word = set + e * blockCount + k / BLOCK_SIZE;
    *word |= ((uint32_t)1) << ((uint32_t)(k % BLOCK_SIZE));
}
// Prune candidate assignment e against the 3-CNF formula, considering only
// variables 0..k (variables above k are still undecided).
// For each clause, disjunctRes ends as: 1 = some decided literal is true,
// -1 = all seen literals undecided, 0 = clause already falsified. A value
// of 0 clears flags[e], marking the candidate dead.
// set     : candidate bitsets, blockCount 32-bit words each (bit v = var v)
// flags   : flags[e] != 0 while candidate e is still viable
// exprVar : m clauses x 3 literals, variable index of each literal
// exprNeg : matching negation flag per literal
// n is unused here (kept for signature parity with the GPU version).
void BoundCPU(const ssize_t e, const uint32_t* set, int* flags,
const int blockCount, const int k, const int n, const int m, const int* exprVar, const int* exprNeg) {
for (int i = 0; i < m; ++i) {
int disjunctRes = 0;
for (int j = 0; j < 3; ++j) {
int index = exprVar[i * 3 + j];
if (index > k) {
// literal on an undecided variable: clause cannot be falsified yet
disjunctRes = -1;
} else {
// extract the assigned bit and apply the literal's negation
int elem = (set[e * blockCount + index / BLOCK_SIZE] & (((uint32_t)1) << ((uint32_t)(index % BLOCK_SIZE)))) ? 1 : 0;
elem ^= exprNeg[i * 3 + j];
if (elem == 1) {
disjunctRes = 1;
break;
}
}
}
if (disjunctRes == 0) {
flags[e] = 0;
break;
}
}
}
// GPU analogue of BranchCPU: thread e sets bit k of candidate e's bitset.
// Threads past q (the grid tail) do nothing.
__global__ void BranchGPU(uint32_t* set, const int blockCount, const int k, const ssize_t q) {
    const ssize_t e = blockIdx.x * blockDim.x + threadIdx.x;
    if (e < q) {
        set[e * blockCount + k / BLOCK_SIZE] |= ((uint32_t)1) << ((uint32_t)(k % BLOCK_SIZE));
    }
}
// GPU analogue of BoundCPU: one thread per candidate e (threads past q
// exit early). Same pruning logic: a clause whose decided literals are all
// false with no undecided literal left falsifies the candidate, clearing
// flags[e].
__global__ void BoundGPU(const uint32_t* set, int* flags,
const int blockCount, const int k, const int n, const int m, const int* exprVar, const int* exprNeg, const ssize_t q) {
ssize_t e = blockIdx.x * blockDim.x + threadIdx.x;
if (e >= q) {
return;
}
for (int i = 0; i < m; ++i) {
int disjunctRes = 0;
for (int j = 0; j < 3; ++j) {
int index = exprVar[i * 3 + j];
if (index > k) {
// literal on an undecided variable: clause not falsifiable yet
disjunctRes = -1;
} else {
// assigned bit XOR negation flag = literal truth value
int elem = (set[e * blockCount + index / BLOCK_SIZE] & (((uint32_t)1) << ((uint32_t)(index % BLOCK_SIZE)))) ? 1 : 0;
elem ^= exprNeg[i * 3 + j];
if (elem == 1) {
disjunctRes = 1;
break;
}
}
}
if (disjunctRes == 0) {
flags[e] = 0;
break;
}
}
}
// Branch-and-bound 3-SAT solver: candidates are partial assignments stored
// as bitsets. Each step k doubles the candidate pool (variable k = 0 in the
// first half, = 1 in the second), prunes falsified candidates, and compacts
// the survivors. Work moves to the GPU once the pool exceeds 1000.
int main(int argc, char* argv[]) {
std::chrono::high_resolution_clock::time_point totalStart = std::chrono::high_resolution_clock::now();
if (argc != 3) {
std::cerr << "Usage: " << argv[0] << " input_file output_file" << std::endl;
return 0;
}
std::ifstream fin(argv[1]);
std::ofstream fout(argv[2]);
// n variables, m clauses of exactly 3 literals each.
int n, m;
fin >> n >> m;
int* exprVar = (int*)malloc(3 * m * sizeof(*exprVar));
int* exprNeg = (int*)malloc(3 * m * sizeof(*exprNeg));
int* cudaExprVar = nullptr;
int* cudaExprNeg = nullptr;
// Read clauses as (variable, negation) pairs; variables are 1-based in
// the file and converted to 0-based here.
for (int i = 0; i < m; ++i) {
fin >> exprVar[3 * i]
>> exprNeg[3 * i]
>> exprVar[3 * i + 1]
>> exprNeg[3 * i + 1]
>> exprVar[3 * i + 2]
>> exprNeg[3 * i + 2];
--exprVar[3 * i];
--exprVar[3 * i + 1];
--exprVar[3 * i + 2];
}
// q = number of live candidates; starts with the single empty assignment.
ssize_t q = 1;
const int blockCount = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;
uint32_t* set = (uint32_t*)calloc(q * blockCount, sizeof(*set));
int* flags = (int*)malloc(q * sizeof(*flags));
flags[0] = 1;
for (int k = 0; k < n; ++k) {
std::cout << "Step " << k + 1 << ", q = " << q << std::endl;
if (q > 1000) {
// Lazily upload the formula the first time the GPU path is taken.
if (cudaExprVar == nullptr) {
cudaMalloc(&cudaExprVar, 3 * m * sizeof(*exprVar));
cudaMalloc(&cudaExprNeg, 3 * m * sizeof(*exprNeg));
cudaMemcpy(cudaExprVar, exprVar, 3 * m * sizeof(*exprVar), cudaMemcpyHostToDevice);
cudaMemcpy(cudaExprNeg, exprNeg, 3 * m * sizeof(*exprNeg), cudaMemcpyHostToDevice);
}
// Duplicate the pool on the device: first half keeps var k = 0,
// second half (via BranchGPU) gets var k = 1.
uint32_t* cudaSet;
int* cudaFlags;
cudaMalloc(&cudaSet, 2 * q * blockCount * sizeof(*set));
cudaMalloc(&cudaFlags, 2 * q * sizeof(*flags));
cudaMemcpy(cudaSet, set, q * blockCount * sizeof(*set), cudaMemcpyHostToDevice);
cudaMemcpy(cudaSet + q * blockCount, set, q * blockCount * sizeof(*set), cudaMemcpyHostToDevice);
cudaMemcpy(cudaFlags, flags, q * sizeof(*flags), cudaMemcpyHostToDevice);
cudaMemcpy(cudaFlags + q, flags, q * sizeof(*flags), cudaMemcpyHostToDevice);
ssize_t qBlock = (q + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
// NOTE(review): BranchGPU runs on the FIRST q candidates here; the
// net effect is still one half with bit k set -- verify intent.
BranchGPU<<<qBlock, THREADS_PER_BLOCK>>>(cudaSet, blockCount, k, q);
cudaDeviceSynchronize();
q *= 2;
qBlock = (q + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
BoundGPU<<<qBlock, THREADS_PER_BLOCK>>>(cudaSet, cudaFlags, blockCount, k, n, m, cudaExprVar, cudaExprNeg, q);
cudaDeviceSynchronize();
// Bring the doubled, pruned pool back to the host.
set = (uint32_t*)realloc(set, q * blockCount * sizeof(*set));
flags = (int*)realloc(flags, q * sizeof(*flags));
cudaMemcpy(set, cudaSet, q * blockCount * sizeof(*set), cudaMemcpyDeviceToHost);
cudaMemcpy(flags, cudaFlags, q * sizeof(*flags), cudaMemcpyDeviceToHost);
cudaFree(cudaSet);
cudaFree(cudaFlags);
} else {
// CPU path: same duplicate / branch / bound sequence in host memory.
set = (uint32_t*)realloc(set, 2 * q * blockCount * sizeof(*set));
flags = (int*)realloc(flags, 2 * q * sizeof(*flags));
memcpy(set + q * blockCount, set, q * blockCount * sizeof(*set));
memcpy(flags + q, flags, q * sizeof(*flags));
for (ssize_t e = 0; e < q; ++e) {
BranchCPU(e, set, blockCount, k);
}
q *= 2;
for (ssize_t e = 0; e < q; ++e) {
BoundCPU(e, set, flags, blockCount, k, n, m, exprVar, exprNeg);
}
}
// Compaction: two-pointer sweep moving surviving candidates from the
// tail into pruned slots; q shrinks to the live count.
for (ssize_t i = 0, j = q - 1;;) {
while (i < q && flags[i] != 0) {
++i;
}
while (j >= 0 && flags[j] == 0) {
--j;
}
if (i >= j) {
q = i;
break;
}
memcpy(set + i * blockCount, set + j * blockCount, blockCount * sizeof(*set));
std::swap(flags[i], flags[j]);
}
if (q == 0) {
break;  // every candidate falsified: unsatisfiable
}
}
if (cudaExprVar != nullptr) {
cudaFree(cudaExprVar);
cudaFree(cudaExprNeg);
}
if (q == 0) {
fout << "No solution" << std::endl;
} else {
// Report the first surviving assignment (candidate 0's bitset).
for (int i = 0; i < n; ++i) {
fout << "x_" << i + 1 << " = " <<
((set[i / BLOCK_SIZE] & (((uint32_t)1) << ((uint32_t)(i % BLOCK_SIZE)))) ? 1 : 0) << std::endl;
}
}
free(exprVar);
free(exprNeg);
free(set);
free(flags);
std::chrono::high_resolution_clock::time_point totalEnd = std::chrono::high_resolution_clock::now();
double totalTime = std::chrono::duration_cast<std::chrono::duration<double>>(totalEnd - totalStart).count();
std::cout << "Total time: " << totalTime << std::endl;
return 0;
}
|
21,099 | #include <stdio.h>
#include <cuda.h>
// Notice that this file needs to have a .cu extension for the NVCC compiler
// to understand what it is supposed to do. NVCC can compile C and C++, by
// emulating a C++ compiler. However, any code that contains GPU kernels
// must reside in a CUDA unit with .cu extension.
//-----------------------------------------------------------------------------
// TheKernel: basic kernel containing a print statement.
//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// TheKernel: minimal device entry point that prints a greeting from the GPU.
//-----------------------------------------------------------------------------
__global__ void TheKernel()
{
    printf("This is the kernel saying hello world, from the GPU.\n");
}
//-----------------------------------------------------------------------------
// main
//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// main: print from the host, launch the hello-world kernel, and wait for it.
//-----------------------------------------------------------------------------
int main()
{
    printf("This is the C layer saying hello world, from the host.\n");
    // Launch the kernel (1 block, 1 thread) on the default stream.
    TheKernel<<<1, 1>>>();
    // A launch returns immediately; check for configuration errors now.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
    // Block until the kernel has run (and its printf buffer is flushed);
    // exiting without synchronizing can lose the device-side output.
    err = cudaDeviceSynchronize();
    if (err != cudaSuccess)
        fprintf(stderr, "kernel execution failed: %s\n", cudaGetErrorString(err));
    return 0;
}
|
21,100 | //Creating 4 streams, each assigns a local thread index to the array
//
#include <stdio.h>
#include <stdlib.h>
#define N 16
#define NCHUNK 2
//__device__ int *data;
// Print data[t1 + global_thread_index] together with the local thread
// index; t1 is the chunk offset assigned to this stream's launch.
__global__
void thread_multi(int t1,int *data)
{
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    // (removed an unused local copy of threadIdx.x)
    printf(" %d %d\n",data[t1+i],threadIdx.x);
}
// Create NCHUNK streams; each launches one kernel that prints its own
// 16-element chunk of a 32-element device array.
int main()
{
    int i=0;
    int *data;                       // device buffer
    cudaStream_t stream[NCHUNK];
    const int total = N * NCHUNK;    // 16 * 2 = 32 elements
    // Host-side initialization: data[i] = i.
    int *h_data=(int *)malloc(sizeof(int)*total);
    for(i=0;i<total;i++)
        h_data[i]=i;
    cudaMalloc(&data,sizeof(int)*total);
    cudaMemcpy(data,h_data,sizeof(int)*total,cudaMemcpyHostToDevice);
    // One stream per chunk.
    for (i = 0; i < NCHUNK;i++) {
        cudaStreamCreate(&stream[i]);
    }
    // Each stream's kernel handles the chunk starting at i*16.
    for(i=0;i<NCHUNK;i++) {
        thread_multi<<<1,16,0,stream[i]>>>(i*16,data);
    }
    // Wait for all chunks before tearing the streams down.
    for(i=0;i<NCHUNK;i++) {
        cudaStreamSynchronize(stream[i]);
    }
    for (i=0; i < NCHUNK; i++) {
        cudaStreamDestroy(stream[i]);
    }
    cudaFree(data);
    free(h_data);   // fix: host buffer was previously leaked
    printf("\nDone\n");
    return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.