serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
20,801 | #include <iostream>
#include <math.h>
#include <stdio.h>
// Element-wise vector add: y[i] = x[i] + y[i], grid-stride loop.
// Bug fix: the original derived index/stride from threadIdx/blockDim only,
// so every block walked the same elements; launched with 2 blocks (as in
// main) each y[i] was read and written concurrently by two blocks — a data
// race that can produce y = 2*x + y. Fold blockIdx into the index and
// gridDim into the stride.
__global__ void add(int n, float *x, float *y) {
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = gridDim.x * blockDim.x;
  for (int i = index; i < n; i += stride)
    y[i] = x[i] + y[i];
}
// Initialize the two host arrays: every x[i] becomes 1.0f, every y[i] 2.0f.
void FillWithData(int n, float* x, float* y) {
  int idx = 0;
  while (idx < n) {
    x[idx] = 1.0f;
    y[idx] = 2.0f;
    ++idx;
  }
}
// Host driver: fill two 1M-element vectors, add them on the GPU, and print
// every N/10-th result as a spot check.
// Improvements: size_t byte count, host-allocation check, and a
// cudaGetLastError() after the launch (kernel launches fail silently).
int main(void) {
  int N = 1 << 20;
  size_t size = (size_t)N * sizeof(float);
  float *x = (float*)malloc(size);
  float *y = (float*)malloc(size);
  if (x == NULL || y == NULL) {
    fprintf(stderr, "host allocation failed\n");
    return 1;
  }
  FillWithData(N, x, y);
  float *d_x = NULL, *d_y = NULL;
  cudaMalloc(&d_x, size);
  cudaMalloc(&d_y, size);
  cudaMemcpy(d_x, x, size, cudaMemcpyHostToDevice);
  cudaMemcpy(d_y, y, size, cudaMemcpyHostToDevice);
  add<<<2, 256>>>(N, d_x, d_y);
  // Catch launch-configuration errors; the blocking memcpy below surfaces
  // any asynchronous execution error.
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess)
    fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
  cudaMemcpy(x, d_x, size, cudaMemcpyDeviceToHost);
  cudaMemcpy(y, d_y, size, cudaMemcpyDeviceToHost);
  int sample_rate = N / 10;
  for (int i = 0; i < N; i = i + sample_rate) {
    printf("Value y[%d] = %f\n" , i, y[i]);
  }
  // Free memory
  free(x); free(y);
  cudaFree(d_x); cudaFree(d_y);
  return 0;
}
|
20,802 | #include <stdio.h>
// Query and print basic properties of the currently active CUDA device:
// SM count, compute capability (major.minor), and warp size.
int main()
{
/*
* Device ID is required first to query the device.
*/
int deviceId;
cudaGetDevice(&deviceId);
cudaDeviceProp props;
cudaGetDeviceProperties(&props, deviceId);
/*
* `props` now contains several properties about the current device.
*/
int computeCapabilityMajor = props.major;
int computeCapabilityMinor = props.minor;
int multiProcessorCount = props.multiProcessorCount;
int warpSize = props.warpSize;
printf("Device ID: %d\nNumber of SMs: %d\nCompute Capability Major: %d\nCompute Capability Minor: %d\nWarp Size: %d\n", deviceId, multiProcessorCount, computeCapabilityMajor, computeCapabilityMinor, warpSize);
}
|
20,803 | #include "includes.h"
// Accumulate per-cell sums for a scale-downsampled average of a 3-channel
// interleaved (RGB-like) image. For each non-black source pixel (ih, iw):
//   nz[...]      counts contributing pixels,
//   average[...] sums each channel (host presumably divides by nz later).
// Atomics are required because scale x scale source pixels map to the same
// destination cell.
// NOTE(review): TILE_DIM and BLOCK_ROWS come from includes.h; the kernel
// has no bounds checks, so the launch must cover the image exactly.
__global__ void get_average(unsigned char * img, int * nz, int * average, int scale)
{
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
//int h = width /2;
// Each thread handles TILE_DIM / BLOCK_ROWS rows of its tile column.
for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS)
{
int iw = x;
int ih = y + j;
// "non-black": any of the three interleaved channels is non-zero
if (img[3*(ih*width + iw)] + img[3*(ih*width + iw)+1] + img[3*(ih*width + iw)+2] > 0)
{
//nz[ih/3 * width + iw/3] += 1;
//average[3*(ih/3*width + iw/3)] += (int)img[3*(ih*width + iw)];
//average[3*(ih/3*width + iw/3)+1] += (int)img[3*(ih*width + iw)+1];
//average[3*(ih/3*width + iw/3)+2] += (int)img[3*(ih*width + iw)+2];
atomicAdd(&(nz[ih/scale * width + iw/scale]), 1);
atomicAdd(&(average[3*(ih/scale*width + iw/scale)]), (int)img[3*(ih*width + iw)]);
atomicAdd(&(average[3*(ih/scale*width + iw/scale)+1]), (int)img[3*(ih*width + iw)+1]);
atomicAdd(&(average[3*(ih/scale*width + iw/scale)+2]), (int)img[3*(ih*width + iw)+2]);
}
}
}
20,804 | #include <iostream>
#include <iomanip>
#include <time.h>
#include <cuda_runtime_api.h>
#include <fstream>
using namespace std;
using std::ifstream;
#define BLOCK_SIZE 16
// max 40
// 32
// 25
// 20
// 16
// 10
// 8
// 4
// min 2
// Device multiplication function called by Mul()
// Compute C = A * B
// wA is the width of A
// wB is the width of B
float A[1600][1600],B[1600][1600];
// Tiled shared-memory matrix multiply: each BLOCK_SIZE x BLOCK_SIZE thread
// block computes one tile of C, streaming matching tiles of A and B through
// shared memory.
// NOTE(review): assumes blockDim == (BLOCK_SIZE, BLOCK_SIZE) and that wA and
// wB are exact multiples of BLOCK_SIZE — there are no bounds checks.
__global__ void Muld(float* A, float* B, int wA, int wB, float* C)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// The element of the block sub-matrix that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B required to compute the block
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep) {
// Shared memory for the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Shared memory for the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from global memory to shared memory;
// each thread loads one element of each matrix
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
for (int k = 0; k < BLOCK_SIZE; ++k)
Csub += As[ty][k] * Bs[k][tx];
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to global memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
// Host multiplication function
// Compute C = A * B
// hA is the height of A
// wA is the width of A
// wB is the width of B
// Host multiplication function: compute C = A * B on the device.
// hA is the height of A; wA is the width of A (== height of B); wB is the
// width of B. All dimensions are assumed to be multiples of BLOCK_SIZE.
// Improvements: size_t byte counts (int can overflow for large matrices)
// and an explicit check of the launch status (the original ignored it).
void Mul(const float* A, const float* B, int hA, int wA, int wB,float* C)
{
size_t size;
// Load A and B to the device
float* Ad;
size = (size_t)hA * wA * sizeof(float);
cudaMalloc((void**)&Ad, size);
cudaMemcpy(Ad, A, size, cudaMemcpyHostToDevice);
float* Bd;
size = (size_t)wA * wB * sizeof(float);
cudaMalloc((void**)&Bd, size);
cudaMemcpy(Bd, B, size, cudaMemcpyHostToDevice);
// Allocate C on the device
float* Cd;
size = (size_t)hA * wB * sizeof(float);
cudaMalloc((void**)&Cd, size);
// Compute the execution configuration assuming
// the matrix dimensions are multiples of BLOCK_SIZE
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(wB / dimBlock.x, hA / dimBlock.y);
// Launch the device computation
Muld<<<dimGrid, dimBlock>>>(Ad, Bd, wA, wB, Cd);
// Surface launch-configuration errors instead of silently dropping them
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
cout << "Muld launch failed: " << cudaGetErrorString(err) << endl;
// Read C from the device (this blocking memcpy synchronizes with the kernel)
cudaMemcpy(C, Cd, size, cudaMemcpyDeviceToHost);
// Free device memory
cudaFree(Ad);
cudaFree(Bd);
cudaFree(Cd);
}
// Read whitespace-separated numbers from the file named "1" into the global
// 1600x1600 matrices: the first 1600*1600 values fill A row by row, the
// following values fill B. Uses the read-ahead (read, test eof, consume)
// pattern so the last value is not processed twice.
// NOTE(review): `num` is an int, so fractional input is truncated before
// being stored into the float matrices; `tmp` is written once and never
// read.
void readFile(){
int i=0,j=0,tmp=0;
ifstream f;
int num;
bool t = true;  // true while still filling A; flips to B afterwards
f.open("1");
f >> num;
tmp += 1;
while(!f.eof())
{
// wrap to the next row after 1600 columns
if(j == 1600){
j=0;
i++;
//cout << endl;
//getchar();
}
// after 1600 rows, switch from A to B and restart the indices
if(i == 1600){
t = false;
i = 0;
j = 0;
}
if(t){
A[i][j] = num;
}
else{
B[i][j] = num;
}
f >> num;
j++;
}
f.close();
}
// Load the two 1600x1600 matrices, multiply them on the GPU, and print the
// block size together with the elapsed wall-clock time.
// Bug fix: the original printed seconds and |nanosecond delta| as separate
// fields, which is wrong whenever te.tv_nsec < ts.tv_nsec (e.g. 2.9s ->
// 3.1s printed as "1.800000000"). Compute a single floating-point duration.
int main(int argc , char* argv[]){
timespec ts, te;
float *C = (float*)malloc(sizeof(float) * 1600 * 1600);
readFile();
clock_gettime(CLOCK_REALTIME, &ts);
Mul(*A,*B,1600,1600,1600,C);
clock_gettime(CLOCK_REALTIME, &te);
double elapsed = (double)(te.tv_sec - ts.tv_sec)
               + (double)(te.tv_nsec - ts.tv_nsec) / 1e9;
cout << BLOCK_SIZE << " " << elapsed << endl;
free(C);
return 0;
}
|
20,805 | #include <cuda.h>
#include <stdio.h>
// Multiply each element of `a` (one element per thread) by the scalar
// stored at *b.
__global__
void g_scalar_mult(float* a, float* b)
{
    const float factor = *b;
    a[threadIdx.x] = a[threadIdx.x] * factor;
}
// Return a newly malloc'd array of `size` floats equal to scaler * vect.
// The caller owns (and must free) the returned buffer.
// Bug fixes vs. the original:
//  - the device buffers were hard-coded to 4 floats regardless of `size`,
//    so any larger vector wrote out of bounds;
//  - the scalar upload copied size*sizeof(float) bytes from a single float
//    on the caller's stack (out-of-bounds host read). Copy exactly one.
float* scalar_mult(const float scaler,
                   const float* vect,
                   unsigned int size)
{
    float* cuda_vect;
    float* cuda_scal;
    float* answer = (float*)malloc(size * sizeof(float));
    cudaMalloc((void**)&cuda_vect, size * sizeof(float));
    cudaMalloc((void**)&cuda_scal, sizeof(float));
    cudaMemcpy(cuda_vect,
               vect,
               size * sizeof(float),
               cudaMemcpyHostToDevice);
    cudaMemcpy(cuda_scal,
               &scaler,
               sizeof(float),
               cudaMemcpyHostToDevice);
    // One thread per element; assumes size <= max threads per block.
    g_scalar_mult<<<1, size>>>(cuda_vect, cuda_scal);
    // blocking copy: also synchronizes with the kernel
    cudaMemcpy(answer,
               cuda_vect,
               size * sizeof(float),
               cudaMemcpyDeviceToHost);
    cudaFree(cuda_vect);
    cudaFree(cuda_scal);
    return answer;
}
// Multiply {1,2,3,4} by 10 on the GPU and print the result.
// Fixes: free the heap buffer returned by scalar_mult (leaked before) and
// return an explicit status.
int main()
{
    float a[] = {1,2,3,4};
    float scal = 10;
    float* answer = scalar_mult(scal, a, 4);
    for (int i = 0; i < 4; i++)
        printf("%f\n", answer[i]);
    printf("\n");
    free(answer);
    return 0;
}
|
20,806 | #include "includes.h"
// Zero the first `len` words of `ptr`, one word per thread.
__global__ void swan_fast_fill_word( uint *ptr, int len ) {
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if ( idx >= len )
        return;  // guard the ragged tail of the grid
    ptr[idx] = 0;
}
20,807 | #include "includes.h"
// Grid-stride element-wise add: c[i] = a[i] + b[i] for i in [0, N).
// NOTE(review): N is a constant/macro supplied by includes.h, not a
// parameter — confirm it matches the allocated array length at the caller.
__global__ void sum(int *a, int *b, int *c) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < N) {
c[i] = a[i] + b[i];
i += gridDim.x * blockDim.x;  // stride = total threads in the grid
}
}
20,808 | /***************************************************************************//**
* \file L.cu
* \author Christopher Minar (minarc@oregonstate.edu)
* \brief kernels to calculate the diffusion terms
*/
#include "L.h"
namespace kernels
{
/*
* calculates explicit diffusion terms in the middle of the domain
* param L explicit diffusion terms
* param u u velocities
* param dx distance between nodes in the x direction (measured between node sides, where u velocites are stored)
* param dy distance between nodes in the y direction (measured between node top/bot, where v velocites are stored)
* param nx number of cells in x direction
* param ny number of cells in y direction
* param nu viscosity
*/
// Interior-node diffusion for the u velocities: centred second differences
// on a non-uniform staggered grid; boundary-adjacent nodes are handled by
// Lbcx. One thread per u node, linear index i over an (nx-1) x ny layout.
// NOTE(review): the north term divides by dy[J] while the analogous term in
// Lmidy uses dy[J+1] — confirm against the staggered-grid derivation.
__global__
void Lmidx(double *L, double *u, double *dx, double *dy, int nx, int ny, double nu)
{
if (threadIdx.x + blockDim.x * blockIdx.x >= (nx-1)*ny)
return;
int i = threadIdx.x + blockDim.x * blockIdx.x,
I = i % (nx-1),
J = i / (nx-1);
// skip edge nodes; Lbcx owns them
if (I == 0 || I == nx-2 || J == 0 || J == ny-1)
return;
L[i] = nu*(
(u[i+1] -u[i]) / (dx[I+1]*(dx[I+1]+dx[I])*0.5)//east
+(u[i-1] -u[i]) / (dx[I] *(dx[I+1]+dx[I])*0.5)//west
+(u[i+(nx-1)]-u[i]) / (dy[J] *(dy[J+1]+dy[J])*0.5)//north
+(u[i-(nx-1)]-u[i]) / (dy[J] *(dy[J-1]+dy[J])*0.5)//south
);
}
/*
* calculates explicit diffusion terms at the edge of the domain
* param L explicit diffusion terms
* param u u velocities
* param dx distance between nodes in the x direction (measured between node sides, where u velocites are stored)
* param dy distance between nodes in the y direction (measured between node top/bot, where v velocites are stored)
* param ym yminus boundary velocities
* param yp yplus boundary velocities
* param xm xminus boundary velocities
* param xp xplus boundary velocities
* param nx number of cells in x direction
* param ny number of cells in y direction
* param nu viscosity
*/
// Edge-node diffusion for the u velocities. Interior-facing neighbours use
// the same centred differences as Lmidx; boundary-facing terms substitute
// the stored boundary velocities (xm/xp directly for east/west, and a
// ghost-value form 2*(b - u)/dy^2 for north/south).
__global__
void Lbcx(double *L, double *u, double *dx, double *dy, double *ym, double *yp, double *xm, double *xp, int nx, int ny, double nu)
{
if (threadIdx.x + blockDim.x * blockIdx.x >= (nx-1)*ny)
return;
int i = threadIdx.x + blockDim.x * blockIdx.x,
I = i % (nx-1),
J = i / (nx-1);
// only edge nodes; Lmidx owns the interior
if (I != 0 && I != nx-2 && J != 0 && J != ny-1)
return;
double temp = 0;
//East
if(I != nx-2)
temp += nu * (u[i+1] -u[i]) / (dx[I+1]*(dx[I+1]+dx[I])*0.5);
//East Boundary
else
temp += nu * (xp[J] -u[i]) / (dx[I+1]*(dx[I+1]+dx[I])*0.5);
//West
if(I != 0)
temp += nu * (u[i-1] -u[i]) / (dx[I] *(dx[I+1]+dx[I])*0.5);
//West Boundary
else
temp += nu * (xm[J] -u[i]) / (dx[I] *(dx[I+1]+dx[I])*0.5);
//North
if(J != ny-1)
temp += nu * (u[i+(nx-1)]-u[i]) / (dy[J] *(dy[J+1]+dy[J])*0.5);
//North Boundary
else
temp += nu * (2*yp[I] -2*u[i]) / (dy[J] * dy[J]);
//South
if(J != 0)
temp += nu * (u[i-(nx-1)]-u[i]) / (dy[J] *(dy[J-1]+dy[J])*0.5);
//South Boundary
else
temp += nu * (2*ym[I] -2*u[i]) / (dy[J] * dy[J]);
L[i] = temp;
}
/*
* calculates explicit diffusion terms in the middle of the domain
* param L explicit diffusion terms
* param u v velocities
* param dx distance between nodes in the x direction (measured between node sides, where u velocites are stored)
* param dy distance between nodes in the y direction (measured between node top/bot, where v velocites are stored)
* param nx number of cells in x direction
* param ny number of cells in y direction
* param nu viscosity
*/
// Interior-node diffusion for the v velocities. The v values live after the
// (nx-1)*ny u values in the flattened arrays, hence iv = ip + (nx-1)*ny.
// One thread per v node on an nx x (ny-1) layout; edges handled by Lbcy.
__global__
void Lmidy(double *L, double *u, double *dx, double *dy, int nx, int ny, double nu)
{
if (threadIdx.x + blockDim.x * blockIdx.x >= nx*(ny-1))
return;
int ip = threadIdx.x + blockDim.x * blockIdx.x,
I = ip % nx,
J = ip / nx,
iv = ip + (nx-1)*ny;
// skip edge nodes; Lbcy owns them
if (I == 0 || I == nx-1 || J == 0 || J == ny-2)
return;
L[iv] = nu* (
(u[iv+1] -u[iv]) / (dx[I] *(dx[I]+dx[I+1])*0.5)//east
+(u[iv-1] -u[iv]) / (dx[I] *(dx[I]+dx[I-1])*0.5)//west
+(u[iv+nx] -u[iv]) / (dy[J+1]*(dy[J]+dy[J+1])*0.5)//north
+(u[iv-nx] -u[iv]) / (dy[J] *(dy[J]+dy[J+1])*0.5)//south
);
}
/*
* calculates explicit diffusion terms at the edge of the domain
* param L explicit diffusion terms
* param u v velocities
* param dx distance between nodes in the x direction (measured between node sides, where u velocites are stored)
* param dy distance between nodes in the y direction (measured between node top/bot, where v velocites are stored)
* param ym yminus boundary velocities
* param yp yplus boundary velocities
* param xm xminus boundary velocities
* param xp xplus boundary velocities
* param nx number of cells in x direction
* param ny number of cells in y direction
* param nu viscosity
*/
// Edge-node diffusion for the v velocities; mirror of Lbcx with the roles
// of x and y swapped (ghost-value form for east/west, direct boundary
// velocity for north/south).
// NOTE(review): xp/xm are indexed at ny+J and yp/ym at nx-1+I — apparently
// the v entries are stored after the u entries in the boundary arrays;
// confirm that layout at the call site.
__global__
void Lbcy(double *L, double *u, double *dx, double *dy, double *ym, double *yp, double *xm, double *xp, int nx, int ny, double nu)
{
if (threadIdx.x + blockDim.x * blockIdx.x >= nx*(ny-1))
return;
int ip = threadIdx.x + blockDim.x * blockIdx.x,
I = ip % nx,
J = ip / nx,
iv = ip + (nx-1)*ny;
// only edge nodes; Lmidy owns the interior
if (I != 0 && I != nx-1 && J != 0 && J != ny-2)
return;
double temp = 0;
//East
if(I != nx-1)
temp += nu * (u[iv+1] -u[iv]) / (dx[I] *(dx[I]+dx[I+1])*0.5);
//East Boundary
else
temp += nu * (2*xp[ny+J] -2*u[iv]) / (dx[I] * dx[I]);
//West
if(I != 0)
temp += nu * (u[iv-1] -u[iv]) / (dx[I] *(dx[I]+dx[I-1])*0.5);
//West Boundary
else
temp += nu * (2*xm[ny+J] -2*u[iv]) / (dx[I] *dx[I]);
//North
if(J != ny-2)
temp += nu * (u[iv+nx] -u[iv]) / (dy[J+1]*(dy[J]+dy[J+1])*0.5);
//North Boundary
else
temp += nu * (yp[nx-1+I] -u[iv]) / (dy[J+1]*(dy[J]+dy[J+1])*0.5);
//South
if(J != 0)
temp += nu * (u[iv-nx] -u[iv]) / (dy[J] *(dy[J]+dy[J+1])*0.5);
//South Boundary
else
temp += nu * (ym[nx-1+I] -u[iv]) / (dy[J] *(dy[J]+dy[J+1])*0.5);
L[iv] = temp;
}
}
|
20,809 | #include <cuda.h>
#include <cuda_runtime.h>
#define BLOCKSIZE 1024
// Logistic sigmoid in single precision.
// Fix: float literals — the original `1.0/(1+expf(-x))` promoted the
// division to double on every call.
__device__ float sigmoid(float x) {
    return 1.0f/(1.0f+expf(-x));
}
// Sigmoid-approximated GELU forward: ret[i] = x * sigmoid(1.702 * x).
// Fixes: 1.702f float literal (the original double literal promoted the
// multiply), and a 64-bit cast in the index so blockIdx.x * blockDim.x
// cannot overflow 32 bits for very large `size`.
__global__ void gelu_fwd_cuda(float* input, float* ret,
                              int64_t size) {
    int64_t idx = threadIdx.x + (int64_t)blockIdx.x*blockDim.x;
    if(idx < size) {
        ret[idx] = input[idx]*sigmoid(1.702f*input[idx]);
    }
}
// Sigmoid-approximated GELU backward:
//   d/dx [x*s(kx)] = s(kx) + k*x*s(kx)*(1 - s(kx)),  k = 1.702
// Fixes: float literals (1.702f, 1.0f) to keep the math in single
// precision, and a 64-bit cast in the index computation.
__global__ void gelu_bwd_cuda(float* grad_out, float* input,
                              float* ret, int64_t size) {
    int64_t idx = threadIdx.x + (int64_t)blockIdx.x*blockDim.x;
    if(idx < size) {
        float tmp = sigmoid(1.702f*input[idx]);
        ret[idx] = grad_out[idx]*(tmp + 1.702f*input[idx]*tmp*(1.0f-tmp));
    }
}
// Host launcher for the GELU forward kernel: ceil-divide `size` by
// BLOCKSIZE to cover every element.
__host__ void gelu_fwd_interface(float* input, float* ret, int64_t size) {
    const int64_t grid = (size + BLOCKSIZE - 1) / BLOCKSIZE;
    gelu_fwd_cuda<<<grid, BLOCKSIZE>>>(input, ret, size);
}
// Host launcher for the GELU backward kernel; same ceil-divide grid
// computation as the forward launcher.
__host__ void gelu_bwd_interface(float* grad_out, float* input, float* ret,
                                 int64_t size) {
    const int64_t grid = (size + BLOCKSIZE - 1) / BLOCKSIZE;
    gelu_bwd_cuda<<<grid, BLOCKSIZE>>>(grad_out, input, ret, size);
}
|
20,810 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand_kernel.h>
#include <math_constants.h>
extern "C"
{
// UNFINISHED SKELETON: intended to draw truncated-normal samples into
// vals[0..n) with per-element mu/sigma/lo/hi (lengths given separately so
// parameters can be broadcast) and up to `maxtries` rejection attempts.
// Currently only computes a flat thread index and returns; `idx` is unused
// until the RNG setup and sampling are implemented.
__global__ void
rtruncnorm_kernel(float *vals, int n,
float *mu, float *sigma,
float *lo, float *hi,
int mu_len, int sigma_len,
int lo_len, int hi_len,
int maxtries)
{
// Usual block/thread indexing...
int myblock = blockIdx.x + blockIdx.y * gridDim.x;
int blocksize = blockDim.x * blockDim.y * blockDim.z;
int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
int idx = myblock * blocksize + subthread;
// Setup the RNG:
// Sample:
return;
}
} // END extern "C"
|
20,811 |
#include "device.cuh"
// Fill d_A[0..999] with the values 0..999.
// Single-thread kernel: intended to be launched as <<<1,1>>>.
__global__
void fill_array(double *d_A){
    for (int idx = 0; idx < 1000; ++idx)
        d_A[idx] = idx;
}
// Fill a device array of 1000 thrust::complex<double> with 0..999 (each
// value converts to a purely real complex number).
// Single-thread kernel: intended to be launched as <<<1,1>>>.
__global__
void fill_c_array(thrust::complex<double> *d_A){
    for (int idx = 0; idx < 1000; ++idx)
        d_A[idx] = idx;
}
thrust::device_vector<thrust::complex<double>> d_vec_A;
// Allocate 1000 doubles on the device, fill them with 0..999, and hand the
// raw device pointer back through *array_ptr. Ownership transfers to the
// caller, who must eventually cudaFree it.
void get_cuda_array_ptr(double **array_ptr){
    double *dev = NULL;
    cudaMalloc((void**)&dev, 1000 * sizeof(double));
    fill_array<<<1,1>>>(dev);
    *array_ptr = dev;
}
// Allocate and fill 1000 complex doubles on the device, returning the
// device pointer reinterpreted as std::complex<double>* (thrust::complex
// and std::complex are layout-compatible here). Caller owns the buffer.
void get_cuda_c_array_ptr(std::complex<double> **array_ptr){
    thrust::complex<double> *dev = NULL;
    cudaMalloc((void**)&dev, 1000 * sizeof(thrust::complex<double>));
    fill_c_array<<<1,1>>>(dev);
    *array_ptr = reinterpret_cast<std::complex<double>*>(dev);
}
// Same as get_cuda_c_array_ptr but backed by the file-scope
// thrust::device_vector d_vec_A instead of a raw cudaMalloc.
// NOTE(review): the returned pointer aliases d_vec_A's storage — it is
// invalidated by any later resize of d_vec_A and must NOT be cudaFree'd.
void get_cuda_thrust_vector_ptr(std::complex<double> **array_ptr){
d_vec_A.resize(1000);
fill_c_array<<<1,1>>>(thrust::raw_pointer_cast(&d_vec_A[0]));
*array_ptr = reinterpret_cast<std::complex<double>*>( thrust::raw_pointer_cast(&d_vec_A[0]) );
}
// Copy the first 1000 doubles from device buffer `array_ptr` back to the
// host and print the first 10 as a sanity check.
// Fix: the original leaked h_A on every call; release it with delete[].
void use_cuda_array_and_check(double *array_ptr){
    double *h_A = new double [1000];
    cudaMemcpy(h_A, array_ptr, sizeof(double) * 1000, cudaMemcpyDeviceToHost);
    for (int i=0; i<10; i++){
        std::cout << h_A[i] << std::endl;
    }
    delete [] h_A;
}
// Copy the first 1000 complex doubles from the device back to the host and
// print the first 10 as a sanity check.
// Fix: the original leaked h_A on every call; release it with delete[].
void use_cuda_c_array_and_check(std::complex<double> *array_ptr){
    std::complex<double> *h_A = new std::complex<double> [1000];
    cudaMemcpy(h_A, array_ptr, sizeof(std::complex<double>) * 1000, cudaMemcpyDeviceToHost);
    for (int i=0; i<10; i++){
        std::cout << h_A[i] << std::endl;
    }
    delete [] h_A;
}
|
20,812 | #include <stdio.h>
#include <math.h>
#include <malloc.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
//#define M 12
// Least-squares polynomial fit via the normal equations and Gaussian
// elimination.
//   x, y : arrays of M sample points
//   n    : polynomial degree (n+1 coefficients are fitted)
//   M    : number of samples
// Returns a malloc'd array p of n+1 coefficients, HIGHEST power first
// (p[0]*x^n + ... + p[n]); the caller frees it.
// Fixes vs. the original: removed the unreachable free(p) after the
// return; renamed a loop variable that shadowed the result pointer p;
// translated the mojibake (corrupted-encoding) comments into English.
double* polyfit(double* x, double* y, int n, int M)
{
	int m = n + 1;                      // number of unknown coefficients
	// a: (n+1)x(n+1) normal matrix, b: right-hand side, atemp: power sums
	double **a = (double **)malloc(sizeof(double*)*m);
	for (int i = 0; i < m; i++)
	{
		a[i] = (double*)malloc(m * sizeof(double));
	}
	double *p = (double*)malloc(m * sizeof(double));
	double *b = (double*)malloc(m * sizeof(double));
	double *atemp = (double*)malloc(2 * m * sizeof(double));
	for (int i = 0; i < m; i++)
	{
		b[i] = 0;
		atemp[2 * i] = 0;
		atemp[2 * i + 1] = 0;
	}
	// Accumulate the power sums atemp[k] = sum_i x_i^k (k = 1..2n) and the
	// right-hand side b[h] = sum_i x_i^h * y_i.
	for (int i = 0; i < M; i++)
	{
		for (int k = 1; k <= n * 2; k++)
		{
			atemp[k] += pow(x[i], k);
		}
		for (int h = 0; h < n + 1; h++)
		{
			b[h] += pow(x[i], h)*y[i];
		}
	}
	atemp[0] = M;                       // sum of x^0 over all samples
	// Build the Hankel-structured normal matrix: a[i][g] = atemp[i + g].
	for (int i = 0; i < m; i++)
	{
		int k = i;
		for (int g = 0; g < m; g++)
		{
			a[i][g] = atemp[k++];
		}
	}
	// Solve a * c = b by Gaussian elimination with row swaps on zero pivots.
	for (int i = 0; i < n; i++)
	{
		if (a[i][i] == 0)
		{
			// find a lower row with a non-zero entry in this column and swap
			double temp = a[i][i];
			int idx = i;
			while (temp == 0)
			{
				temp = a[idx + 1][i];
				idx = idx + 1;
			}
			double change, change_b;
			for (int g = 0; g < m; g++)
			{
				change = a[i][g];
				a[i][g] = a[idx][g];
				a[idx][g] = change;
			}
			change_b = b[i];
			b[i] = b[idx];
			b[idx] = change_b;
		}
		// normalize the pivot row so the pivot is exactly 1
		if (a[i][i] != 1)
		{
			double temp = a[i][i];
			for (int g = 0; g < m; g++)
			{
				a[i][g] = a[i][g] / temp;
			}
			b[i] = b[i] / temp;
		}
		// eliminate the column entries below the pivot
		for (int k = i + 1; k < m; k++)
		{
			if (a[k][i] != 0)
			{
				double temp = -a[k][i];
				for (int col = 0; col < m; col++)
				{
					a[k][col] = a[k][col] + a[i][col] * temp;
				}
				b[k] = b[k] + b[i] * temp;
			}
		}
	}
	// normalize the last pivot
	if (a[n][n] != 1)
	{
		double temp = a[n][n];
		a[n][n] = 1;
		b[n] = b[n] / temp;
	}
	// back substitution: clear the entries above each pivot
	for (int i = n; i > 0; i--)
	{
		for (int g = 0; g < i; g++)
		{
			double temp = -a[g][i];
			b[g] = b[g] + b[i] * temp;
		}
	}
	// b now holds coefficients by ascending power; reverse so the
	// highest-degree coefficient comes first.
	for (int i = 0; i < m; i++)
	{
		p[i] = b[n - i];
	}
	free(b);
	free(atemp);
	for (int i = 0; i < m; i++)
		free(a[i]);
	free(a);
	return p;
}
20,813 | #include "includes.h"
// Gravitational acceleration at n target points due to n_field field
// masses. Positions come split into head/tail parts (xh + xt) for extra
// precision; softening combines the global eps2 with a per-target eps.
// Grid-stride loop over targets, O(n_field) inner loop per target.
// Fix: use sqrtf — the original double-precision sqrt forced a
// float->double->float round trip in the hot inner loop.
__global__ void dev_get_gravity_at_point( float eps2, float *eps, float *xh, float *yh, float *zh, float *xt, float *yt, float *zt, float *ax, float *ay, float *az, int n, float *field_m, float *fxh, float *fyh, float *fzh, float *fxt, float *fyt, float *fzt, int n_field) {
    float dx, dy, dz, r2, tmp, dr2, eps2_total;
    for (int tid=threadIdx.x + blockIdx.x*blockDim.x; tid < n; tid += blockDim.x*gridDim.x){
        // Plummer-style softening: global eps2 plus per-particle eps^2
        eps2_total = eps2 + eps[tid]*eps[tid];
        ax[tid] = 0;
        ay[tid] = 0;
        az[tid] = 0;
        for (int i=0; i < n_field; i++){
            dx = (fxh[i] - xh[tid]) + (fxt[i] - xt[tid]);
            dy = (fyh[i] - yh[tid]) + (fyt[i] - yt[tid]);
            dz = (fzh[i] - zh[tid]) + (fzt[i] - zt[tid]);
            dr2 = dx*dx + dy*dy + dz*dz;
            if (dr2 > 0) {  // skip zero separation (self-interaction)
                r2 = eps2_total + dr2;
                // m / r^3 factor for the softened inverse-square law
                tmp = field_m[i] / (r2 * sqrtf(r2));
                ax[tid] += tmp * dx;
                ay[tid] += tmp * dy;
                az[tid] += tmp * dz;
            }
        }
    }
}
20,814 | #include <stdio.h>
#include <stdlib.h>
// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row * M.width + col)
// Row-major matrix: element (row, col) lives at elements[row * width + col].
typedef struct
{
int width;        // number of columns
int height;       // number of rows
float *elements;  // row-major data, length width * height
} Matrix;
// Thread block size
#define BLOCK_SIZE 16
// Forward declaration of the matrix multiplication kernel
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
// Matrix multiplication - Host code: C = A * B on the device.
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE.
// Bug fixes: the original sampled cudaGetLastError() BEFORE the cudaMalloc
// of C (checking a stale status), and re-tested that same stale `error`
// after the final memcpy. Each check now follows the call it verifies.
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
    cudaSetDevice(0);
    cudaDeviceSynchronize();
    size_t available, total;
    cudaMemGetInfo(&available, &total);
    // printf("Mem total: %ld Bytes\nMem available: %ld Bytes\n", available, total);
    // Load A to device memory
    Matrix d_A;
    d_A.width = A.width;
    d_A.height = A.height;
    size_t size = A.width * A.height * sizeof(float);
    cudaMalloc(&d_A.elements, size);
    cudaError_t error = cudaGetLastError();
    if (error != cudaSuccess)
    {
        fprintf(stderr, "ERROR: allocation A %s\n", cudaGetErrorString(error));
        exit(-1);
    }
    cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
    // Load B to device memory
    Matrix d_B;
    d_B.width = B.width;
    d_B.height = B.height;
    size = B.width * B.height * sizeof(float);
    cudaMalloc(&d_B.elements, size);
    error = cudaGetLastError();
    if (error != cudaSuccess)
    {
        fprintf(stderr, "ERROR: allocation B %s\n", cudaGetErrorString(error));
        exit(-1);
    }
    cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);
    // Allocate C in device memory (error sampled AFTER the malloc)
    Matrix d_C;
    d_C.width = C.width;
    d_C.height = C.height;
    size = C.width * C.height * sizeof(float);
    cudaMalloc(&d_C.elements, size);
    error = cudaGetLastError();
    if (error != cudaSuccess)
    {
        fprintf(stderr, "ERROR: allocation C %s\n", cudaGetErrorString(error));
        exit(-1);
    }
    // Invoke kernel
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
    MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
    cudaDeviceSynchronize();
    error = cudaGetLastError();
    if (error != cudaSuccess)
    {
        fprintf(stderr, "ERROR: calculation error %s\n", cudaGetErrorString(error));
        exit(-1);
    }
    // Read C from device memory; cudaMemcpy returns its own status
    error = cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);
    if (error != cudaSuccess)
    {
        fprintf(stderr, "ERROR: copying C %s\n", cudaGetErrorString(error));
        exit(-1);
    }
    // Free device memory
    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);
}
// Matrix multiplication kernel called by MatMul()
// Matrix multiplication kernel called by MatMul(): each thread computes one
// element of C by accumulating A's row dotted with B's column.
// Robustness fix: added a bounds guard so the kernel is also safe when the
// grid overshoots C's dimensions (the documented contract only promises
// multiples of BLOCK_SIZE).
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= C.height || col >= C.width)
        return;
    float Cvalue = 0;
    for (int e = 0; e < A.width; ++e)
        Cvalue += A.elements[row * A.width + e] * B.elements[e * B.width + col];
    C.elements[row * C.width + col] = Cvalue;
}
// Small pseudo-random integer in [0, 10], derived from rand().
int myrand()
{
    const int bucket = RAND_MAX / 10;
    return rand() / bucket;
}
// Build two random BLOCK_SIZE x BLOCK_SIZE matrices, multiply them on the
// GPU via MatMul, and print A, B, and C.
// Bug fix: row-major indexing is row * WIDTH + col; the original indexed
// with row * height + col, which only worked because the matrices happen
// to be square. Also frees the host buffers before exit.
int main()
{ // A x B
    srand(0);
    Matrix A, B, C;
    A.height = 1 * BLOCK_SIZE;
    A.width = 1 * BLOCK_SIZE; // hB = wA
    B.height = A.width;
    B.width = 1 * BLOCK_SIZE;
    C.height = A.height; // hC = hA
    C.width = B.width;   // wC = wB
    A.elements = (float *)malloc(A.height * A.width * sizeof(float));
    B.elements = (float *)malloc(B.height * B.width * sizeof(float));
    C.elements = (float *)malloc(C.height * C.width * sizeof(float));
    printf("Content of A: \n");
    for (int i = 0; i < A.height; i++)
    {
        for (int j = 0; j < A.width; j++)
        {
            A.elements[i * A.width + j] = myrand();
            printf("%2d", (int)A.elements[i * A.width + j]);
        }
        printf("\n");
    }
    printf("\n\nContent of B: \n");
    for (int i = 0; i < B.height; i++)
    {
        for (int j = 0; j < B.width; j++)
        {
            B.elements[i * B.width + j] = myrand();
            printf("%2d", (int)B.elements[i * B.width + j]);
        }
        printf("\n");
    }
    MatMul(A, B, C);
    printf("\n\nContent of C: \n");
    for (int i = 0; i < C.height; i++)
    {
        for (int j = 0; j < C.width; j++)
        {
            printf("%4d", (int)C.elements[i * C.width + j]);
        }
        printf("\n");
    }
    free(A.elements);
    free(B.elements);
    free(C.elements);
    return 0;
}
|
extern "C"
// Element-wise vector add with one thread per element: sum[i] = a[i] + b[i].
__global__ void add(int n, float *a, float *b, float *sum)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n)
        return;  // guard the ragged tail of the grid
    sum[i] = a[i] + b[i];
}
|
20,816 | #define NTHREADS 16
// Rescale each column of x (one column per thread, columns stored with
// stride pitch_x) linearly onto [0, knot_max].
// NOTE(review): if a column is constant, delta == 0 and the final division
// yields NaN/Inf — callers must guarantee at least two distinct values.
__global__ void scale(float knot_max, int nx, int nsamples,
float * x, int pitch_x)
{
int
col_idx = blockDim.x * blockIdx.x + threadIdx.x;
if(col_idx >= nx) return;
float
min, max,
* col = x + col_idx * pitch_x;
// find the min and the max
min = max = col[0];
for(int i = 1; i < nsamples; i++) {
if(col[i] < min) min = col[i];
if(col[i] > max) max = col[i];
}
float delta = max - min;
for(int i = 0; i < nsamples; i++)
col[i] = (knot_max * (col[i] - min)) / delta;
}
// Guarded division: numer/denom, but 0 when denom is 0, and exactly 1 when
// numer == denom (non-zero) to avoid rounding noise at knot boundaries.
__device__ float do_fraction(float numer, float denom) {
    if (denom == 0.f)
        return 0.f;
    if (numer == denom)
        return 1.f;
    return numer / denom;
}
// bins must be initialized to zero before calling get_bin_scores
// B-spline soft-binning (Cox–de Boor recursion): for each sample of each
// column (one column per thread), write order-`order` spline basis weights
// into that sample's nbins-wide slice of `bins`.
// `bins` must be zero-initialized before calling (only basis entries are
// written here).
// Fix: removed a dead store to `rd` at the top of the inner loop — its
// value was never read before being recomputed in the term2 branch.
__global__ void get_bin_scores(int nbins, int order,
                               int nknots, float * knots, int nsamples,
                               int nx, float * x, int pitch_x,
                               float * bins, int pitch_bins)
{
    int col_x = blockDim.x * blockIdx.x + threadIdx.x;
    if(col_x >= nx)
        return;
    float
        ld, rd, z,
        term1, term2,
        * in_col = x + col_x * pitch_x,
        * bin_col = bins + col_x * pitch_bins;
    int i0;
    for(int k = 0; k < nsamples; k++, bin_col += nbins) {
        z = in_col[k];
        // index of the order-1 basis function that is 1 at z
        i0 = (int)floorf(z) + order - 1;
        if(i0 >= nbins)
            i0 = nbins - 1;
        bin_col[i0] = 1.f;
        // raise the spline order one step at a time
        for(int i = 2; i <= order; i++) {
            for(int j = i0 - i + 1; j <= i0; j++) {
                // left term: contribution of basis j at the lower order
                if((j < 0) || (j >= nbins) || (j >= nknots) || (j + i - 1 < 0) || (j > nknots))
                    term1 = 0.f;
                else {
                    ld = do_fraction(z - knots[j],
                                     knots[j + i - 1] - knots[j]);
                    term1 = ld * bin_col[j];
                }
                // right term: contribution of basis j+1 at the lower order
                if((j + 1 < 0) || (j + 1 >= nbins) || (j + 1 >= nknots) || (j + i < 0) || (j + i >= nknots))
                    term2 = 0.f;
                else {
                    rd = do_fraction(knots[j + i] - z,
                                     knots[j + i] - knots[j + 1]);
                    term2 = rd * bin_col[j + 1];
                }
                bin_col[j] = term1 + term2;
            }
        }
    }
}
// Shannon entropy (base 2) per column from soft bin scores: one thread per
// column; prob of bin i = sum of that bin's scores over all samples / n.
// Fix: the normalization divided by (double)nsamples, forcing a double
// division in an otherwise single-precision kernel; use a float cast.
__global__ void get_entropy(int nbins, int nsamples, int nx,
                            float * bin_scores, int pitch_bin_scores, float * entropies)
{
    int col_x = blockDim.x * blockIdx.x + threadIdx.x;
    if(col_x >= nx)
        return;
    float
        * in_col = bin_scores + col_x * pitch_bin_scores,
        entropy = 0.f, prob, logp;
    for(int i = 0; i < nbins; i++) {
        prob = 0.f;
        for(int j = 0; j < nsamples; j++)
            prob += in_col[j * nbins + i];
        prob /= (float) nsamples;
        // define 0 * log(0) = 0
        if(prob <= 0.f)
            logp = 0.f;
        else
            logp = __log2f(prob);
        entropy += prob * logp;
    }
    entropies[col_x] = -entropy;  // accumulated sum is -H
}
// Mutual information for every (x-column, y-column) pair from precomputed
// soft bin scores: MI = H(x) + H(y) - H(x,y). One thread per pair on a 2D
// grid; the joint histogram is formed on the fly from the outer product of
// the per-sample bin scores. `mi` accumulates sum p*log2(p) = -H(x,y), so
// adding the marginal entropies below yields the mutual information.
__global__ void get_mi(int nbins, int nsamples,
int nx, float * x_bin_scores, int pitch_x_bin_scores,
float * entropies_x,
int ny, float * y_bin_scores, int pitch_y_bin_scores,
float * entropies_y,
float * mis, int pitch_mis)
{
int
col_x = blockDim.x * blockIdx.x + threadIdx.x,
col_y = blockDim.y * blockIdx.y + threadIdx.y;
if((col_x >= nx) || (col_y >= ny))
return;
float
prob, logp, mi = 0.f,
* x_bins = x_bin_scores + col_x * pitch_x_bin_scores,
* y_bins = y_bin_scores + col_y * pitch_y_bin_scores;
// calculate joint entropy
for(int i = 0; i < nbins; i++) {
for(int j = 0; j < nbins; j++) {
prob = 0.f;
for(int k = 0; k < nsamples; k++)
prob += x_bins[k * nbins + i] * y_bins[k * nbins + j];
prob /= (float)nsamples;
if(prob <= 0.f)
logp = 0.f;
else
logp = __log2f(prob);
mi += prob * logp;
}
}
// calculate mi from entropies
mi += entropies_x[col_x] + entropies_y[col_y];
(mis + col_y * pitch_mis)[col_x] = mi;
}
|
20,817 | #include <cuda.h>
#include <stdio.h>
#include <iostream>
#include <string>
using namespace std;
// Enumerate every CUDA device and print its architecture, per-SM CUDA core
// count, memory sizes, and launch limits.
// Bug fixes vs. the original:
//  - prop.maxThreadsDim and prop.maxGridSize are 3-element arrays; the
//    original read index [3] (out-of-bounds, printed garbage);
//  - Pascal minor==0 set the PER-SM core count to SM*64, but cudaCores is
//    multiplied by SM again when printed; it should be 64;
//  - compute capability 7.x first assigned 384 cores and then
//    unconditionally overwrote it (and printed "Unknown" for 7.2);
//  - prop.memPitch is a size_t and was printed with %d.
int main() {
    int driver_version = 0, runtime_version = 0;
    cudaDriverGetVersion(&driver_version);
    cudaRuntimeGetVersion(&runtime_version);
    printf("Driver Version: %d\n Runtime Version: %d\n", \
        driver_version, runtime_version);
    int nDevices;
    cudaGetDeviceCount(&nDevices);
    for (int i = 0; i < nDevices; i++) {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, i);
        int cudaCores = 0;  // FP32 cores per SM for this architecture
        int SM = prop.multiProcessorCount;
        int major = prop.major;
        int minor = prop.minor;
        string arch = "";
        switch (major) {
        case 1:
            arch = "TESLA";
            cudaCores = 8;
            break;
        case 2:
            arch = "FERMI";
            if (minor == 0)
                cudaCores = 32;
            else
                cudaCores = 48;
            break;
        case 3:
            arch = "KEPLER";
            cudaCores = 192;
            break;
        case 5:
            arch = "MAXWELL";
            cudaCores = 128;
            break;
        case 6:
            arch = "PASCAL";
            if ((minor == 1) || (minor == 2)) cudaCores = 128;
            else if (minor == 0) cudaCores = 64;  // GP100: 64 cores per SM
            else printf("Unknown device type\n");
            break;
        case 7:
            // Volta (7.0/7.2) and Turing (7.5) both have 64 FP32 cores/SM
            if ((minor == 0) || (minor == 2)) {
                arch = "VOLTA";
                cudaCores = 64;
            }
            else if (minor == 5) {
                arch = "TURING";
                cudaCores = 64;
            }
            else printf("Unknown device type\n");
            break;
        case 8:
            arch = "AMPERE";
            if (minor == 0) cudaCores = 64;
            else printf("Unknown device type\n");
            break;
        default:
            // unknown architecture
            cudaCores = 0;
            printf("!!!!!dispositivo desconocido!!!!!\n");
        }
        printf("Device Number: %d\n", i);
        printf(" Device name: %s\n", prop.name);
        cout << " Architecture: " << arch << endl;
        printf(" Compute Capability: %d.%d\n", major, minor);
        printf(" MultiProccessors: %d\n", SM);
        printf(" CUDA Cores (%dx%d): %d\n", cudaCores, SM, cudaCores*SM);
        printf(" GlobalMemory (total): %zu MiB\n", prop.totalGlobalMem/(1024*1024));
        printf(" ConstMemory (total): %zu KiB\n", prop.totalConstMem/1024);
        printf(" sharedMemPerMultiprocessor: %zu\n", prop.sharedMemPerMultiprocessor);
        printf(" regsPerMultiprocessor: %d\n", prop.regsPerMultiprocessor);
        printf(" maxThreadsPerMultiProcessor: %d\n", prop.maxThreadsPerMultiProcessor);
        printf(" sharedMemPerBlock: %zu\n", prop.sharedMemPerBlock);
        printf(" regsPerBlock: %d\n", prop.regsPerBlock);
        printf(" maxThreadsPerBlock: %d\n", prop.maxThreadsPerBlock);
        printf(" maxThreadsDim:\n");
        printf(" x = %d\n", prop.maxThreadsDim[0]);
        printf(" y = %d\n", prop.maxThreadsDim[1]);
        printf(" z = %d\n", prop.maxThreadsDim[2]);
        printf(" maxGridSize:\n");
        printf(" x = %d\n", prop.maxGridSize[0]);
        printf(" y = %d\n", prop.maxGridSize[1]);
        printf(" z = %d\n", prop.maxGridSize[2]);
        printf(" warpSize: %d\n", prop.warpSize);
        printf(" memPitch: %zu\n", prop.memPitch);
        printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
        printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
        printf(" Peak Memory Bandwidth (GB/s): %f\n\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
    }
}
|
20,818 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define SIZE 50000
// Print the first n elements of arr, space-separated, on one line.
void printArr( int arr[], int n )
{
    for ( int idx = 0; idx < n; ++idx )
        printf( "%d ", arr[idx] );
}
__device__ int d_size;
// One Lomuto partition step per active segment: thread z partitions
// arr[arr_l[z] .. arr_h[z]] around its last element, then pushes the
// resulting left/right sub-segments back into arr_l/arr_h via atomicAdd on
// the global worklist counter d_size.
// NOTE(review): every thread in every block writes d_size = 0 before the
// __syncthreads(); that barrier is per-block only, so with multiple blocks
// a late-starting block can reset the counter while another block is
// already pushing segments — confirm this is safe for the launch sizes
// used by quickSortIterative.
__global__ void partition (int *arr, int *arr_l, int *arr_h, int n)
{
int z = blockIdx.x*blockDim.x+threadIdx.x;
d_size = 0;
__syncthreads();
if (z<n)
{
int h = arr_h[z];
int l = arr_l[z];
int x = arr[h];          // pivot = last element of the segment
int i = (l - 1);
int temp;
// Lomuto partition: move everything <= pivot to the front
for (int j = l; j <= h- 1; j++)
{
if (arr[j] <= x)
{
i++;
temp = arr[i];
arr[i] = arr[j];
arr[j] = temp;
}
}
// place the pivot in its final position p
temp = arr[i+1];
arr[i+1] = arr[h];
arr[h] = temp;
int p = (i + 1);
// enqueue the left sub-segment if it has more than one element
if (p-1 > l)
{
int ind = atomicAdd(&d_size, 1);
arr_l[ind] = l;
arr_h[ind] = p-1;
}
// enqueue the right sub-segment if it has more than one element
if ( p+1 < h )
{
int ind = atomicAdd(&d_size, 1);
arr_l[ind] = p+1;
arr_h[ind] = h;
}
}
}
// Iterative GPU quicksort driver. l and h are INCLUSIVE indices into arr.
// Keeps per-segment [low, high] bounds in d_l/d_h and launches one
// partition pass per round until no segments remain (d_size == 0).
// Fixes vs. the original:
//  - the three device buffers were never freed (leak per call);
//  - the device->host copy of the whole array ran every round; the result
//    is identical when copied once after the loop.
void quickSortIterative (int arr[], int l, int h)
{
    int lstack[ h - l + 1 ], hstack[ h - l + 1];
    int top = -1, *d_d, *d_l, *d_h;
    lstack[ ++top ] = l;
    hstack[ top ] = h;
    cudaMalloc(&d_d, (h-l+1)*sizeof(int));
    cudaMemcpy(d_d, arr,(h-l+1)*sizeof(int),cudaMemcpyHostToDevice);
    cudaMalloc(&d_l, (h-l+1)*sizeof(int));
    cudaMemcpy(d_l, lstack,(h-l+1)*sizeof(int),cudaMemcpyHostToDevice);
    cudaMalloc(&d_h, (h-l+1)*sizeof(int));
    cudaMemcpy(d_h, hstack,(h-l+1)*sizeof(int),cudaMemcpyHostToDevice);
    int n_t = 1;  // threads per block
    int n_b = 1;  // blocks
    int n_i = 1;  // active segments this round
    while ( n_i > 0 )
    {
        partition<<<n_b,n_t>>>( d_d, d_l, d_h, n_i);
        // d_size reports how many sub-segments the round produced
        int answer;
        cudaMemcpyFromSymbol(&answer, d_size, sizeof(int), 0, cudaMemcpyDeviceToHost);
        if (answer < 1024)
        {
            n_t = answer;
        }
        else
        {
            n_t = 1024;
            n_b = answer/n_t + (answer%n_t==0?0:1);
        }
        n_i = answer;
    }
    cudaMemcpy(arr, d_d,(h-l+1)*sizeof(int),cudaMemcpyDeviceToHost);
    cudaFree(d_d);
    cudaFree(d_l);
    cudaFree(d_h);
}
// Read an input array from argv[1], sort it on the GPU, and check it
// against the expected output in argv[2].
// Bug fixes vs. the original:
//  - quickSortIterative takes INCLUSIVE bounds, but was called with
//    h = numElements, making the kernel read/write arr[numElements] (one
//    past the end of both the host and device buffers);
//  - with the corrected range the sorted value for position i is arr[i],
//    so the off-by-one comparison against arr[i+1] is gone;
//  - the heap arrays are now released before exit.
int main(int argc, char **argv) {
    int *arr;
    int numElements;
    FILE *inp1 = fopen(argv[1], "r");
    fscanf(inp1, "%d", &numElements);
    printf("\nInput length = %d\n",numElements);
    arr= new int[numElements];
    for(int i = 0; i < numElements; ++i){
        fscanf(inp1, "%d", &arr[i]);
    }
    int start_s=clock();
    quickSortIterative( arr, 0, numElements - 1);
    int stop_s=clock();
    FILE *op = fopen(argv[2], "r");
    fscanf(op, "%d", &numElements);
    int *output = new int[numElements];
    for(int i = 0; i < numElements; ++i){
        fscanf(op, "%d", &output[i]);
    }
    int flag=0;
    printf("\n");
    for(int i=0;i<numElements;i++){
        if(output[i]!=arr[i]){
            printf("\nSolution wrong Expecting : %d but got : %d\n",output[i],arr[i]);
            flag=1;
        }
    }
    if(flag==0){
        printf("\nSolution is Correct !!!\n");
        printf("\nTime : %f s \n",(stop_s-start_s)/double(CLOCKS_PER_SEC));
    }
    fclose(op);
    fclose(inp1);
    delete[] arr;
    delete[] output;
    return 0;
}
|
20,819 | #include <stdio.h>
// Demo kernel: prints the values 0..9, distributing them over the threads of
// one block by (value % 3). A shared counter is advanced with atomicInc by
// *every* looping thread; each increment yields a distinct pre-increment
// value, and a thread prints only the values whose remainder matches its
// index. NOTE(review): because non-matching threads also consume counter
// values, some values may never be printed and the counter can pass 10 —
// this is an experiment in racy counting, not a correct enumeration.
__global__ void onetoten() {
// Block-shared counter; every thread writes 0 before the barrier.
__shared__ unsigned int n;
n = 0;
__syncthreads();
while (n < 10) {
// atomicInc returns the old value and wraps to 0 past 100.
int oldn = atomicInc(&n, 100);
if (oldn % 3 == threadIdx.x) {
printf("%d: %d\n", threadIdx.x, oldn);
}
}
}
// Identical duplicate of onetoten() above (same atomicInc-based racy
// enumeration); kept as a separate name for side-by-side experiments.
__global__ void onetoten4() {
__shared__ unsigned int n;
n = 0;
__syncthreads();
while (n < 10) {
int oldn = atomicInc(&n, 100);
if (oldn % 3 == threadIdx.x) {
printf("%d: %d\n", threadIdx.x, oldn);
}
}
}
// Global device counter used by onetoten3; volatile so every access goes to
// memory rather than a cached register.
__device__ volatile int n;
// Demo kernel: the thread whose index equals n % 3 prints n and advances it.
// NOTE(review): ++n on a volatile is a non-atomic read-modify-write and
// there is no fence; this relies on only one thread matching the condition
// at a time and is not safe as a general pattern.
__global__ void onetoten3() {
n = 0;
__syncthreads();
while (n < 10) {
if (n % 3 == threadIdx.x) {
printf("%d: %d\n", threadIdx.x, n);
++n;
}
}
}
// Demo kernel: same scheme as onetoten3 but with a block-shared volatile
// counter instead of a device-global one. The volatile qualifier forces
// re-reads of n in the loop condition; the increment itself is still a
// non-atomic read-modify-write (see NOTE on onetoten3).
__global__ void onetoten2() {
volatile __shared__ int n;
n = 0;
__syncthreads();
while (n < 10) {
if (n % 3 == threadIdx.x) {
printf("%d: %d\n", threadIdx.x, n);
++n;
}
}
}
// Demo kernel: same scheme again, but with a __syncthreads() at the bottom
// of the loop so all threads advance in lockstep; the barrier also acts as
// a memory fence, making the non-volatile shared n visible to every thread
// before the condition is re-evaluated. The loop exit condition is uniform
// across the block, so the barrier inside the loop is reached by all
// threads on every iteration.
__global__ void onetoten1() {
__shared__ int n;
n = 0;
__syncthreads();
while (n < 10) {
if (n % 3 == threadIdx.x) {
printf("%d: %d\n", threadIdx.x, n);
++n;
}
__syncthreads();
}
}
// Race-free variant: no shared state at all. Every thread walks the whole
// range 0..9 and prints only the values assigned to it by (value % 3);
// output order depends only on printf interleaving.
__global__ void onetoten0() {
    for (int value = 0; value < 10; ++value) {
        const bool mine = (value % 3 == threadIdx.x);
        if (mine) {
            printf("%d: %d\n", threadIdx.x, value);
        }
    }
}
// Launches the atomicInc variant with a single block of 3 threads and waits
// for completion so the device-side printf output is flushed before exit.
int main() {
onetoten<<<1, 3>>>();
cudaDeviceSynchronize();
return 0;
}
|
20,820 | //
// Created by songzeceng on 2020/11/26.
//
#include "stdio.h"
#include "cuda_runtime.h"
#define N 64
#define TPB 32
// Maps index i in [0, n) onto a float in [0, 1], endpoints included.
__device__ float scale(int i, int n) {
    const float span = (float)(n - 1);
    return i / span;
}
// Absolute distance between two scalar positions, |x2 - x1|.
// BUGFIX: use single-precision sqrtf instead of double-precision sqrt,
// avoiding a float -> double -> float round trip in device code.
__device__ float distance(float x1, float x2) {
    return sqrtf((x2 - x1) * (x2 - x1));
}
// One thread per output element: d_out[i] = |scale(i, len) - ref|.
// Launch with at least `len` total threads.
__global__ void distanceKernel(float *d_out, float ref, int len) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= len)  // BUGFIX: guard the grid tail when len % blockDim.x != 0
        return;
    float x = scale(i, len);
    d_out[i] = distance(x, ref);
}
// Fills an N-element array with each point's distance to `ref` on the GPU
// and prints the result.
int main() {
    float ref = 0.5f;  // reference point in [0, 1]
    float *d_out;
    float *h_out = (float *) malloc(N * sizeof(float ));
    if (h_out == NULL) {  // BUGFIX: malloc was unchecked
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    cudaMalloc(&d_out, N * sizeof(float ));
    // Ceil-divide so the whole range is covered even if N % TPB != 0
    // (the kernel guards the tail).
    distanceKernel<<<(N + TPB - 1) / TPB, TPB>>>(d_out, ref, N);
    cudaError_t err = cudaGetLastError();  // catch bad launch configurations
    if (err != cudaSuccess) {
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    // The blocking cudaMemcpy synchronizes with the kernel before copying.
    cudaMemcpy(h_out, d_out, N * sizeof(float ), cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; i++) {
        printf("%.2f\t", h_out[i]);
    }
    printf("\n");
    free(h_out);
    cudaFree(d_out);
    return 0;
}
|
20,821 | #include "includes.h"
// Element-wise vector add, c[i] = a[i] + b[i], one element per *block*.
// Relies on the vector length N being defined in includes.h — TODO confirm.
__global__ void add(int *a, int *b, int *c)
{
//blockIdx is the value of the block index for whichever block is running the code
int tid = blockIdx.x;//handle the data at this index
//blockIdx has 2 dimensions; x and y. We only need one dimension
// Guard: the grid may be launched with more than N blocks.
if(tid < N)
c[tid] = a[tid] + b[tid];
}
20,822 | #include "includes.h"
// Box-mean filter over a (2*FILTER_RADIUS+1)^2 neighbourhood using a shared
// memory tile. Expects a BLOCK_WIDTH x BLOCK_HEIGHT thread block: border
// threads only load the apron, interior threads compute an output pixel.
// (Despite the name, this averages rather than applying a Sobel operator.)
__global__ void sobelFilterShared3(unsigned char* g_DataIn, unsigned char * g_DataOut, unsigned int width, unsigned int height){
    // BUGFIX: the tile must hold *unsigned* char — with a plain (signed on
    // nvcc) char, pixel values >= 128 sign-extend when cast to int below and
    // corrupt the sum.
    __shared__ unsigned char sharedMem[BLOCK_HEIGHT*BLOCK_WIDTH];
    int x = blockIdx.x * TILE_WIDTH + threadIdx.x - FILTER_RADIUS;
    int y = blockIdx.y * TILE_HEIGHT + threadIdx.y - FILTER_RADIUS;
    // Clamp coordinates so the apron never reads outside the image.
    x = max(FILTER_RADIUS, x);
    x = min(x, width - FILTER_RADIUS - 1);
    y = max(FILTER_RADIUS, y);
    y = min(y, height - FILTER_RADIUS - 1);
    unsigned int index = y * width + x;
    // BUGFIX: a tile row is blockDim.x elements wide, not blockDim.y — the
    // original index only happened to work for square blocks (and the read
    // below already strides by blockDim.x).
    unsigned int sharedIndex = threadIdx.y * blockDim.x + threadIdx.x;
    sharedMem[sharedIndex] = g_DataIn[index];
    __syncthreads();
    // Only interior threads (full neighbourhood inside the tile) write out.
    if( threadIdx.x >= FILTER_RADIUS && threadIdx.x < BLOCK_WIDTH - FILTER_RADIUS
        && threadIdx.y >= FILTER_RADIUS && threadIdx.y < BLOCK_HEIGHT - FILTER_RADIUS)
    {
        int sum = 0;
        for(int dy = -FILTER_RADIUS; dy <= FILTER_RADIUS; ++dy)
            for(int dx = -FILTER_RADIUS; dx <= FILTER_RADIUS; ++dx)
            {
                int pixelValue = (int)(sharedMem[sharedIndex + (dy * blockDim.x + dx)]);
                sum += pixelValue;
            }
        g_DataOut[index] = (unsigned char)(sum / FILTER_AREA);
    }
}
20,823 | /*
29/12/2019
hmhuan-1612858
nnkhai-1612909
*/
#include <stdio.h>
#include <stdint.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/sort.h>
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
cudaGetErrorString(error)); \
exit(1); \
} \
}
// Small helper that times GPU work in milliseconds using a pair of CUDA
// events recorded on the default stream.
struct GpuTimer
{
cudaEvent_t start; // event recorded by Start()
cudaEvent_t stop; // event recorded by Stop()
// Create both events.
GpuTimer()
{
cudaEventCreate(&start);
cudaEventCreate(&stop);
}
// Destroy both events.
~GpuTimer()
{
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
// Record the start event and block the host until it has completed.
void Start()
{
cudaEventRecord(start, 0);
cudaEventSynchronize(start);
}
// Record the stop event (asynchronously).
void Stop()
{
cudaEventRecord(stop, 0);
}
// Wait for the stop event, then return the start->stop time in ms.
float Elapsed()
{
float elapsed;
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
return elapsed;
}
};
// CPU reference: least-significant-digit radix sort over nBits-wide digits.
// Runs one stable counting sort per digit of the 32-bit keys; `out`
// receives the fully sorted copy of `in` (which is left untouched).
// nBits is expected to divide 32 evenly.
void sortByHost(const uint32_t * in, int n,
        uint32_t * out,
        int nBits)
{
    const int nBins = 1 << nBits;  // digit radix
    int * count = (int *)malloc(nBins * sizeof(int));
    int * prefix = (int *)malloc(nBins * sizeof(int));

    // Working copy so the input stays untouched; keep the pointer for free().
    uint32_t * cur = (uint32_t *)malloc(n * sizeof(uint32_t));
    memcpy(cur, in, n * sizeof(uint32_t));
    uint32_t * curOwned = cur;
    uint32_t * next = out;

    for (int shift = 0; shift < (int)(sizeof(uint32_t) * 8); shift += nBits)
    {
        // Histogram of the current digit.
        memset(count, 0, nBins * sizeof(int));
        for (int i = 0; i < n; i++)
            count[(cur[i] >> shift) & (nBins - 1)]++;

        // Exclusive prefix sum -> first output slot for each digit value.
        prefix[0] = 0;
        for (int d = 1; d < nBins; d++)
            prefix[d] = prefix[d - 1] + count[d - 1];

        // Stable scatter into the other buffer.
        for (int i = 0; i < n; i++)
        {
            int d = (cur[i] >> shift) & (nBins - 1);
            next[prefix[d]++] = cur[i];
        }

        // Ping-pong the buffers for the next digit.
        uint32_t * t = cur;
        cur = next;
        next = t;
    }

    // `cur` holds the final ordering; make sure it lands in `out`.
    memcpy(out, cur, n * sizeof(uint32_t));

    free(count);
    free(prefix);
    free(curOwned);
}
// Host-side simulation of the blocked GPU radix sort: identical to
// sortByHost except that the histogram is laid out per "block" of
// blockSizes[0] consecutive elements, bin-major —
// hist[bin * numBlocks + block] — mirroring the layout the device kernels
// use, so the scatter order matches the GPU version exactly.
void sortRadixBase04(const uint32_t * in, int n, uint32_t * out, int nBits, int * blockSizes)
{
dim3 blkSize1(blockSizes[0]); // block size for histogram kernel
dim3 blkSize2(blockSizes[1]); // block size for scan kernel
dim3 gridSize((n - 1) / blkSize1.x + 1); // grid size for histogram kernel
// 2^nBits bins per simulated block.
int nBins = 1 << nBits; // 2^nBits
int * hist = (int *)malloc(nBins * gridSize.x * sizeof(int));
int *histScan = (int * )malloc(nBins * gridSize.x * sizeof(int));
uint32_t * src = (uint32_t *)malloc(n * sizeof(uint32_t));
memcpy(src, in, n * sizeof(uint32_t));
uint32_t * originalSrc = src; // Use originalSrc to free memory later
uint32_t * dst = out;
int nHist = nBins * gridSize.x;
for (int bit = 0; bit < sizeof(uint32_t) * 8; bit += nBits)
{
// Histogram of the current digit, bucketed by (bin, owning block).
memset(hist, 0, nHist * sizeof(int));
for (int i = 0; i < n; i++)
{
int bin = (src[i] >> bit) & (nBins - 1);
hist[bin * gridSize.x + i / blkSize1.x]++;
}
// Exclusive scan over the whole bin-major histogram.
histScan[0] = 0;
for (int i = 1; i < nHist; i++)
histScan[i] = histScan[i - 1] + hist[i - 1];
// Stable scatter: slot = scanned offset of (this element's bin, block).
for (int i = 0; i < n ; i++)
{
int bin = i / blkSize1.x + ((src[i] >> bit) & (nBins - 1)) * gridSize.x;
dst[histScan[bin]] = src[i];
histScan[bin]++;
}
// Ping-pong src/dst for the next digit.
uint32_t * temp = src;
src = dst;
dst = temp;
}
// Ensure the final ordering ends up in "out".
memcpy(out, src, n * sizeof(uint32_t));
// Free memories
free(hist);
free(histScan);
free(originalSrc);
}
// histogram kernel
// Per-block histogram of the nBits-wide digit at `bit`.
// Requires nBins * sizeof(int) bytes of dynamic shared memory.
// Output layout is bin-major: hist[bin * gridDim.x + blockIdx.x].
// The global hist must be zeroed by the caller (the final write uses +=).
__global__ void computeHistKernel(uint32_t * in, int n, int * hist, int nBins, int bit)
{
// Each block computes its local hist using atomic on SMEM
extern __shared__ int s_bin[];
int i = blockIdx.x * blockDim.x + threadIdx.x;
// Each thread zeroes (and later flushes) ceil(nBins / blockDim.x) bins.
int delta = (nBins - 1) / blockDim.x + 1;
for (int j = 0; j < delta; j++)
{
int id = threadIdx.x + j * blockDim.x;
if (id < nBins)
s_bin[id] = 0;
}
__syncthreads();
if (i < n)
{
int bin = (in[i] >> bit) & (nBins - 1);
atomicAdd(&s_bin[bin], 1);
}
__syncthreads();
// Flush the block-local counts; each (bin, block) slot is written by
// exactly one block, so no global atomics are needed.
for (int j = 0; j < delta; j++)
{
int id = threadIdx.x + j * blockDim.x;
if (id < nBins)
hist[id * gridDim.x + blockIdx.x] += s_bin[id];
}
}
// Block-level exclusive scan. Each thread loads in[i-1] (0 for i == 0),
// stored at the *reversed* shared index, so the accumulate-toward-index-0
// loop below produces prefix sums in original order. After the loop,
// s_data[0] holds the block's total of the shifted input, which is written
// to blkSums[blockIdx.x] for the second scan level.
// Needs blockDim.x * sizeof(int) dynamic shared memory.
// NOTE(review): the `mode` parameter is unused.
__global__ void scanBlkKernel(int * in, int n, int * out, int * blkSums, int mode = 1)
{
extern __shared__ int s_data[];
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i > 0 && i < n)
s_data[blockDim.x - 1 - threadIdx.x] = in[i - 1];
else
s_data[blockDim.x - 1 - threadIdx.x] = 0;
__syncthreads();
// Kogge-Stone style accumulation on the reversed array.
for (int stride = 1; stride < blockDim.x; stride *= 2)
{
int val = 0;
if (threadIdx.x < blockDim.x - stride)
val = s_data[threadIdx.x + stride];
__syncthreads();
s_data[threadIdx.x] += val;
__syncthreads();
}
if (i < n)
out[i] = s_data[blockDim.x - 1 - threadIdx.x];
if (blkSums != NULL)
blkSums[blockIdx.x] = s_data[0];
}
// Second level of the two-level scan: adds the cumulative total of all
// preceding blocks to each element. blkSums must already hold an
// *inclusive* scan of the per-block totals; block 0 needs no adjustment.
__global__ void addBlkSums(int * in, int n, int* blkSums)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (blockIdx.x == 0 || idx >= n)
        return;
    in[idx] += blkSums[blockIdx.x - 1];
}
// Final radix-pass kernel: scatters each block's elements to their global
// positions for the digit at `bit`. Three phases:
//  1) locally sort the block by that digit using nBits rounds of a 1-bit
//     split sort (stable), so equal digits stay in input order;
//  2) record each digit value's first position inside the block
//     (startIndex), giving every element its rank within its digit;
//  3) write each element to out[histScan[bin * gridDim.x + blockIdx.x] +
//     rank], where histScan is the globally scanned bin-major histogram.
// Dynamic shared memory required: (6 * blockDim.x + nBins) ints.
// Out-of-range threads take part in the local sort with the sentinel digit
// nBins-1 so they sort to the end and never write to out.
__global__ void Scatter(uint32_t * in, int n, int nBits, int bit, int nBins, int *histScan, uint32_t * out)
{
// Carve the dynamic shared buffer into six blockDim.x-sized arrays plus
// one nBins-sized array.
extern __shared__ int s_data[];
int * s_in = s_data;
int * s_hist = (int *)&s_in[blockDim.x];
int *dst = (int *)&s_hist[blockDim.x];
int *dst_ori = (int *)&dst[blockDim.x];
int *startIndex = (int *)&dst_ori[blockDim.x]; // allocated with nBins entries
int * scan = (int *)&startIndex[nBins];
int * hist = (int *)&scan[blockDim.x];
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < n)
{
s_in[threadIdx.x] = in[id];
s_hist[threadIdx.x] = (s_in[threadIdx.x] >> bit) & (nBins - 1);
}
else
s_hist[threadIdx.x] = nBins - 1;
scan[threadIdx.x] = 0;
__syncthreads();
// Phase 1: stable local sort, one bit of the digit per round.
for (int b = 0; b < nBits; b++)
{
// Each thread's current digit/key, and this round's bit.
int _hist = s_hist[threadIdx.x];
int _in = s_in[threadIdx.x];
int _bin = (_hist >> b) & 1;
hist[threadIdx.x] = _bin;
// Shift by one so the inclusive scan below acts as an exclusive scan.
if (threadIdx.x < blockDim.x - 1)
scan[threadIdx.x + 1] = _bin;
__syncthreads();
int _last_hist = hist[blockDim.x - 1];
// Inclusive Kogge-Stone scan of the shifted bit flags.
for (int stride = 1; stride < blockDim.x; stride *= 2)
{
int val = 0;
if (threadIdx.x >= stride)
val = scan[threadIdx.x - stride];
__syncthreads();
scan[threadIdx.x] += val;
__syncthreads();
}
__syncthreads();
// Split: zeros keep order at the front, ones go after nZeros.
int scan_ = scan[threadIdx.x];
int nZeros = blockDim.x - scan[blockDim.x - 1] - _last_hist;//hist[blockDim.x - 1];
int rank = 0;
if (_bin == 0)
rank = threadIdx.x - scan_;//scan[threadIdx.x];
else
rank = nZeros + scan_;//scan[threadIdx.x];
dst[rank] = _hist;//s_hist[threadIdx.x];
dst_ori[rank] = _in;//s_in[threadIdx.x];
__syncthreads();
// Copy back for the next round.
s_hist[threadIdx.x] = dst[threadIdx.x];
s_in[threadIdx.x] = dst_ori[threadIdx.x];
}
int _hist = s_hist[threadIdx.x];
int _in = s_in[threadIdx.x];
__syncthreads();
// Phase 2: first occurrence of each digit value marks its start index.
if (threadIdx.x == 0)
startIndex[_hist] = 0;
else
{
if (_hist != s_hist[threadIdx.x - 1])
startIndex[_hist] = threadIdx.x;
}
__syncthreads();
// Phase 3: global scatter using the scanned histogram plus local rank.
if (id < n)
{
int preRank = threadIdx.x - startIndex[_hist];
int bin = ((_in >> bit) & (nBins - 1));
int scan = histScan[bin * gridDim.x + blockIdx.x];
out[scan + preRank] = _in;
}
}
// Full GPU radix sort: for each nBits-wide digit, (1) per-block histogram,
// (2) two-level exclusive scan of the bin-major histogram (block scans on
// the device, block totals combined on the host), (3) Scatter kernel moves
// elements to their final positions. Buffers d_src/d_dst ping-pong between
// passes; the result is copied from d_src back to `out`.
void sortRadixBase04_device(const uint32_t * in, int n, uint32_t * out, int nBits, int * blockSizes)
{
int nBins = 1 << nBits;
dim3 blkSize1(blockSizes[0]); // block size for histogram kernel
dim3 blkSize2(blockSizes[1]); // block size for scan kernel
dim3 gridSize1((n - 1) / blkSize1.x + 1); // grid size for histogram kernel
dim3 gridSize2((nBins * gridSize1.x - 1) / blkSize2.x + 1);
int * blkSums = (int *)malloc(gridSize2.x * sizeof(int));
uint32_t * d_src, *d_dst;
int *d_scan, *d_blkSums;
CHECK(cudaMalloc(&d_src, n * sizeof(uint32_t)));
CHECK(cudaMalloc(&d_dst, n * sizeof(uint32_t)));
CHECK(cudaMalloc(&d_scan, nBins * gridSize1.x * sizeof(int)));
CHECK(cudaMalloc(&d_blkSums, gridSize2.x * sizeof(int)));
CHECK(cudaMemcpy(d_src, in, n * sizeof(uint32_t), cudaMemcpyHostToDevice));
size_t sMemSize2 = blkSize2.x * sizeof(int);
for (int bit = 0; bit < sizeof(uint32_t) * 8; bit += nBits)
{
// Histogram of the current digit (hist zeroed because the kernel +=s).
CHECK(cudaMemset(d_scan, 0, nBins * gridSize1.x * sizeof(int)));
computeHistKernel<<<gridSize1, blkSize1, nBins * sizeof(int)>>>(d_src, n, d_scan, nBins, bit);
cudaDeviceSynchronize();
CHECK(cudaGetLastError());
// Level-1 scan: per-block exclusive scan, block totals into d_blkSums.
scanBlkKernel<<<gridSize2, blkSize2, sMemSize2>>>(d_scan, nBins * gridSize1.x, d_scan, d_blkSums);
cudaDeviceSynchronize();
CHECK(cudaGetLastError());
// Level-2 scan of the block totals is cheap enough on the host.
CHECK(cudaMemcpy(blkSums, d_blkSums, gridSize2.x * sizeof(int), cudaMemcpyDeviceToHost));
for (int i = 1; i < gridSize2.x; i++)
blkSums[i] += blkSums[i - 1];
CHECK(cudaMemcpy(d_blkSums, blkSums, gridSize2.x * sizeof(int), cudaMemcpyHostToDevice));
addBlkSums<<<gridSize2, blkSize2>>>(d_scan, nBins * gridSize1.x, d_blkSums);
cudaDeviceSynchronize();
CHECK(cudaGetLastError());
// Scatter pass; see Scatter() for the shared-memory layout.
Scatter<<<gridSize1, blkSize1, (blkSize1.x * 6 + nBins) * sizeof(int)>>>(d_src, n, nBits, bit, nBins, d_scan, d_dst);
cudaDeviceSynchronize();
CHECK(cudaGetLastError());
// Ping-pong src/dst for the next digit.
uint32_t * temp = d_src;
d_src = d_dst;
d_dst = temp;
}
// After the final swap d_src holds the sorted keys.
CHECK(cudaMemcpy(out, d_src, n * sizeof(uint32_t), cudaMemcpyDeviceToHost));
// Free memories
free(blkSums);
CHECK(cudaFree(d_src));
CHECK(cudaFree(d_dst));
CHECK(cudaFree(d_scan));
CHECK(cudaFree(d_blkSums));
}
// Reference GPU sort via Thrust: copy to a device_vector, sort on the
// device, copy the sorted keys back into `out`.
void sortByDevice_thrust(const uint32_t * in, int n, uint32_t * out)
{
// device_vector construction performs the host-to-device copy.
thrust::device_vector<uint32_t> dv_out(in, in + n);
thrust::sort(dv_out.begin(), dv_out.end());
thrust::copy(dv_out.begin(), dv_out.end(), out);
}
// Shared timer used by sort() for all implementations.
GpuTimer timer;
// Dispatches one sorting implementation selected by `useDevice` and returns
// its wall time in milliseconds:
//   0 - host radix sort, 1 - host simulation of the blocked GPU sort,
//   2 - full GPU radix sort, anything else - Thrust.
// The elapsed time is printed here except for level 2, whose caller
// averages over several runs and prints its own summary.
float sort(const uint32_t * in, int n,
        uint32_t * out,
        int nBits,
        int useDevice=0, int * blockSizes=NULL)
{
    timer.Start();
    switch (useDevice)
    {
    case 0:
        printf("\nRadix sort by host\n");
        sortByHost(in, n, out, nBits);
        break;
    case 1:
        printf("\nRadix sort by host level 1\n");
        sortRadixBase04(in, n, out, nBits, blockSizes); //use default 8
        break;
    case 2:
        sortRadixBase04_device(in, n, out, nBits, blockSizes);
        break;
    default:
        printf("\nSort by thrust\n");
        sortByDevice_thrust(in, n, out);
        break;
    }
    timer.Stop();
    float elapsed = timer.Elapsed();
    if (useDevice != 2)
        printf("Time: %.3f ms\n", elapsed);
    return elapsed;
}
// Prints the capabilities of CUDA device 0: name, compute capability,
// SM/warp counts, and the global/shared memory sizes.
void printDeviceInfo()
{
cudaDeviceProp devProv;
CHECK(cudaGetDeviceProperties(&devProv, 0));
printf("**********GPU info**********\n");
printf("Name: %s\n", devProv.name);
printf("Compute capability: %d.%d\n", devProv.major, devProv.minor);
printf("Num SMs: %d\n", devProv.multiProcessorCount);
printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor);
printf("Max num warps per SM: %d\n", devProv.maxThreadsPerMultiProcessor / devProv.warpSize);
printf("GMEM: %zu byte\n", devProv.totalGlobalMem);
printf("SMEM per SM: %zu byte\n", devProv.sharedMemPerMultiprocessor);
printf("SMEM per block: %zu byte\n", devProv.sharedMemPerBlock);
printf("****************************\n");
}
// Compares two length-n arrays element by element; reports the first
// mismatch (index and both values) and stops, or prints a success message
// when they are identical.
void checkCorrectness(uint32_t * out, uint32_t * correctOut, int n)
{
    int i = 0;
    while (i < n && out[i] == correctOut[i])
        i++;
    if (i < n)
    {
        printf("%d with %d != %d\n", i, out[i], correctOut[i]);
        printf("INCORRECT :(\n");
        return;
    }
    printf("CORRECT :)\n");
}
// Prints the n elements of `a`, space-separated, followed by a newline.
void printArray(uint32_t * a, int n)
{
    for (int idx = 0; idx < n; ++idx)
        printf("%i ", a[idx]);
    printf("\n");
}
// Benchmark driver: generates (1<<24)+1 random keys, sorts them with the
// host reference, the blocked host simulation, the full GPU sort (16 timed
// runs, averaged), and Thrust, cross-checking each result.
// argv[1] (optional): nBits; argv[2]/argv[3] (optional, together):
// histogram and scan block sizes.
int main(int argc, char ** argv)
{
// PRINT OUT DEVICE INFO
printDeviceInfo();
// SET UP INPUT SIZE
int nBits = 8;
// +1 keeps n from being a multiple of typical block sizes, exercising
// the tail-handling paths.
int n = (1 << 24) + 1;
if (argc > 1)
nBits = atoi(argv[1]);
printf("\nInput size: %d\n", n);
printf("nBits: %d\n", nBits);
// ALLOCATE MEMORIES
size_t bytes = n * sizeof(uint32_t);
uint32_t * in = (uint32_t *)malloc(bytes);
uint32_t * out_0 = (uint32_t *)malloc(bytes); // base 4 Host result
uint32_t * out_1 = (uint32_t *)malloc(bytes); // base 4 Device result
uint32_t * out_thrust = (uint32_t *)malloc(bytes); // result by Thrust
uint32_t * correctOut = (uint32_t *)malloc(bytes); // Host result
// SET UP INPUT DATA
for (int i = 0; i < n; i++)
in[i] = rand();
// DETERMINE BLOCK SIZES
int blockSizes[2] = {512, 512}; // One for histogram, one for scan
if (argc == 4)
{
blockSizes[0] = atoi(argv[2]);
blockSizes[1] = atoi(argv[3]);
}
printf("\nHist block size: %d, scan block size: %d\n", blockSizes[0], blockSizes[1]);
// SORT BY HOST (reference), then the blocked host simulation.
sort(in, n, correctOut, nBits);
sort(in, n, out_0, nBits, 1, blockSizes);
checkCorrectness(out_0, correctOut, n);
// GPU sort, averaged over `loop` timed runs.
float avg_time = 0;
int loop = 16;
printf("\nRadix sort by device level 2\n");
for (int i = 0; i < loop; i++)
{
float time = sort(in, n, out_1, nBits, 2, blockSizes);
avg_time += time;
}
printf("Avg Time: %.3f ms\n", avg_time / loop);
checkCorrectness(out_1, correctOut, n);
// Thrust run, checked against the GPU result.
sort(in, n, out_thrust, nBits, 3, blockSizes);
checkCorrectness(out_thrust, out_1, n);
// FREE MEMORIES
free(in);
free(out_0);
free(out_thrust);
free(out_1);
free(correctOut);
return EXIT_SUCCESS;
}
20,824 | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <math.h>
#include <float.h>
#include <iostream>
#include <vector>
#include <unordered_map>
#include <string>
#include <algorithm>
/***all macros**/
#define E_INIT 5 // in joules
#define E_ELEC 50e-9 //in nj = 1e-9j
#define E_AMP 100e-12 // in pj = 1e-12j
#define ALPHA 0.7
#define BETA 0.3
#define DELTA 0.0
#define MAX_NODE 100000000
#define DEBUG 1
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/**all type declaration***/
using namespace std;
//all fn declarations here
void calc_hx(int* offset,int* edges,float* hx,float* cord_x,float* cord_y,int N,int E,int sink);
void update_energy(int* parent,float* res_energy,int* Nt,int* Nr,float* cord_x,float* cord_y,int N,int sink);
void check_failure(float* res_energy,int N, int* flag);
void computeTime(float& time,cudaEvent_t start, cudaEvent_t stop);
void check_remove_cycle(int* nodes,int* size,int* rev_offset,int* rev_edges,
int* parent,int* parent_old,float* Cx,float* Hx, int N,int E,
float* res_energy,int* Nt,int* Nr);
/**** device Code *******/
// __device__ volatile int Cx[MAX_NODE];
__device__ volatile int PQ[MAX_NODE];
//K in parallel
// One thread per priority queue (K queues packed into the global PQ array,
// each owning a slice of ceil(N/K) slots): pops the minimum-Cx node from
// its binary min-heap, sifts the heap back down, removes the node from the
// open list, and appends it to the shared expandNodes list via atomicAdd.
__global__ void extractMin(int* PQ_size, int* expandNodes,int* expandNodes_size,float* Cx,int* openList,int N,int K){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<K && PQ_size[id]>0){
//extract min from PQ
int front = id* ( (N+K-1)/K );
int node = PQ[front];
// Move the last element to the root, then sift it down.
PQ[front]=PQ[front+PQ_size[id]-1];
PQ_size[id]-=1;
int pqIndex = 0;
while(2*pqIndex+1 < PQ_size[id]){
// Only a left child exists.
if(2*pqIndex+2 >= PQ_size[id]){
if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]]){
int swap = PQ[front + 2*pqIndex+1];
PQ[front + 2*pqIndex+1] = PQ[front +pqIndex];
PQ[front + pqIndex] = swap;
pqIndex = 2*pqIndex+1;
}
else
break;
}
else{
// Both children exist: swap with the smaller violating child.
if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]] && Cx[PQ[front+2*pqIndex+1]] <= Cx[PQ[front+2*pqIndex+2]] ){
int swap = PQ[front + 2*pqIndex+1];
PQ[front + 2*pqIndex+1] = PQ[front +pqIndex];
PQ[front + pqIndex] = swap;
pqIndex = 2*pqIndex+1;
}
else if(Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+2]] && Cx[PQ[front+2*pqIndex+2]] <= Cx[PQ[front+2*pqIndex+1]] ){
int swap = PQ[front + 2*pqIndex+2];
PQ[front + 2*pqIndex+2] = PQ[front +pqIndex];
PQ[front + pqIndex] = swap;
pqIndex = 2*pqIndex+2;
}
else{
break;
}
}
}
//removed from openList
openList[node] = -1;
//added to expand next
int len = atomicAdd(expandNodes_size,1);
expandNodes[len]=node;
}
}
//for K in parallel
// Expansion step of the parallel A*: one thread per node extracted this
// round. Relaxes every outgoing edge; the edge cost is an "expense" weight
// derived from the target's residual energy and delivery ratio. Cx updates
// are serialized per child through a spin lock (atomicCAS on lock[child]);
// children not yet in any open list are flagged in nVFlag for insertion.
// NOTE(review): Nr[node]/Nt[node] is *integer* division (both int arrays),
// which truncates the delivery ratio to 0 or 1 — probably intended as a
// float ratio; confirm against the weight formula used on the host.
// NOTE(review): the __syncthreads() inside the divergent spin-lock loop is
// reached by different threads a different number of times — this is
// undefined behavior per the programming guide and deserves a redesign.
__global__ void A_star_expand(int* off,int* edge,float* Hx,int* parent,volatile float* Cx,
int* expandNodes,int* expandNodes_size, int* lock,int* openList,
int N,int E, int K,int* nVFlag,int* PQ_size,
float* res_energy,int* Nt,int* Nr){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id< *expandNodes_size ){
int node = expandNodes[id];
// Outgoing edge range in the CSR arrays; the last node's range ends at E.
int start = off[node];
int end = E;
if(node!=N-1)
end = off[node+1];
while(start < end){
int child = edge[start];
// Negative entries mark deleted edges.
if(child<0){
start++;
continue;
}
// Spin until this thread owns the per-child lock.
bool leaveLoop = false;
while(leaveLoop==false){
if(atomicCAS(&lock[child],0,1)==0){
// Critical section: relax the edge node -> child.
float exp_weight = 1 / ( ALPHA*(res_energy[node]/E_INIT) + BETA*(Nr[node]/Nt[node]) + DELTA*0) ;
if( Cx[child] > (Cx[node] - Hx[node])+ exp_weight + Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ exp_weight + Hx[child];
__threadfence();
parent[child] = node;
if(openList[child]==-1){
nVFlag[child]=1;
//add only once
}
}
// Release the lock.
leaveLoop = true;
atomicCAS(&lock[child],1,0);
}
__syncthreads();
}
start++;
}
}//end
}
//K in parallel -- O(N)
// Repairs the K per-thread heaps after Cx values changed during expansion:
// each of the K threads scans its heap slice and bubbles up any child that
// became smaller than its parent. O(N) per thread in the worst case.
// NOTE(review): cost/costLeft/costRight are declared int and truncate the
// float Cx values — ties introduced by truncation may skip a needed fix-up;
// confirm whether this is intentional.
__global__ void keepHeapPQ(int* PQ_size,float* Cx,int N,int K){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < K && PQ_size[id] > 0){
int front = id*( (N+K-1)/K );
int size = PQ_size[id];
for(int i=front;i<front+size;i++){
// Node i has two children inside the heap.
if(2*i+2 < front+size){
int cost = Cx[PQ[i]];
int costLeft = Cx[PQ[2*i+1]];
int costRight = Cx[PQ[2*i+2]];
if( cost > costLeft || cost > costRight ){
int index ;
if(costLeft <= costRight)
index = 2*i+1;
else
index = 2*i+2;
// Bubble the smaller child up toward the root.
while(index > front){
if( Cx[PQ[(index-1)/2]] > Cx[PQ[index]] ){
int swap = PQ[index];
PQ[index] = PQ[(index-1)/2];
PQ[(index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
}
// Node i has only a left child.
else if(2*i+1 < front+size){
if(Cx[PQ[i]] > Cx[PQ[2*i+1]]){
int index = 2*i+1;
while(index > front){
if( Cx[PQ[(index-1)/2]] > Cx[PQ[index]] ){
int swap = PQ[index];
PQ[index] = PQ[(index-1)/2];
PQ[(index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
}
}
}
}
//N threads
// Stream compaction: turns the 0/1 flag array into a dense list of vertex
// ids. Every flagged vertex claims a slot in nextV through the shared
// atomic counter nvSize; the resulting order is nondeterministic.
__global__ void setNV(int* nextFlag,int* nextV,int* nvSize,int N){
    int v = blockIdx.x*blockDim.x+threadIdx.x;
    if (v >= N || nextFlag[v] != 1)
        return;
    nextV[atomicAdd(nvSize, 1)] = v;
}
//for K in parallel
// One thread per priority queue: thread id inserts vertices id, id+K,
// id+2K, ... from the compacted nextV list into its own heap slice,
// skipping vertices already present in some open list. Each insert appends
// at the end of the slice and bubbles up to restore the min-heap ordering,
// then records the owning queue in openList.
__global__ void insertPQ(int* PQS,int* nextV,int* nVsize,float* Cx,int K,int N,int* openList){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < K){
int front = id*( (N+K-1)/K );
int i = id;
while(i<*nVsize){
// Skip vertices that are already queued somewhere.
if(openList[nextV[i]]!=-1){
i+=K;
continue;
}
PQ[front+PQS[id]]= nextV[i];
PQS[id]+=1;
// Record which queue now owns this vertex.
openList[nextV[i]] = id;
if(PQS[id]>1){
// Bubble the new element up within this slice.
int index = PQS[id]-1;
while(index>0){
if(Cx[PQ[front+ (index-1)/2]] > Cx[PQ[front+index]]){
int swap = PQ[front+index];
PQ[front+index]=PQ[front+ (index-1)/2];
PQ[front+ (index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
i += K;
}
}
}
// Repair pass after edge deletions / cost changes: one thread per affected
// node. For each child of `node`, under the per-child spin lock, either
// (a) relax the edge if it now offers a better cost, or (b) if `node` was
// the child's parent and that path is now invalid or stale, recompute the
// child's best parent from scratch over its incoming (reverse CSR) edges,
// skipping candidates whose old ancestor chain contains the child (cycle
// guard). Touched children are flagged in addFlag for further propagation.
// NOTE(review): same concerns as A_star_expand — integer division in the
// Nr/Nt delivery ratio and a __syncthreads() inside divergent control flow.
__global__ void delete_propagate(int* nodes, int* size, int* off, int* edge,float* Hx,
int N,int E,volatile float* Cx,int* lock, int* parent,int* parent_old,int* addFlag,
int* rev_offset,int* rev_edges,float* res_energy,int* Nt,int* Nr){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < *size){
int node = nodes[id];
// Outgoing edge range of `node` in the CSR arrays.
int start = off[node];
int end = E;
if(node!=N-1)
end = off[node+1];
float exp_weight = 1 / ( ALPHA*(res_energy[node]/E_INIT) + BETA*(Nr[node]/Nt[node]) + DELTA*0) ;
while(start < end ){
int child = edge[start];
// Negative entries mark deleted edges.
if(child<0){
start++;
continue;
}
// Spin lock on the child, as in A_star_expand.
bool leaveLoop = false;
while(!leaveLoop){
if(atomicExch(&lock[child],1)==0){
// Case (a): the path through `node` improved.
if(Cx[node]!=FLT_MAX && Cx[child]!=FLT_MAX && Cx[child] > (Cx[node] - Hx[node])+ exp_weight + Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ exp_weight+ Hx[child];
__threadfence();
parent[child] = node;
addFlag[child]=1;
}
else
// Case (b): the recorded parent path is invalid or stale.
if( (Cx[node]==FLT_MAX && parent[child]==node ) || ( parent[child]==node && (Cx[child] < Cx[node] - Hx[node]+ exp_weight+ Hx[child]) ) ){
// Rebuild the child's best cost from its incoming edges.
int rstart = rev_offset[child];
int rend = E;
if(child!=N-1)
rend = rev_offset[child+1];
//there is always one parent that is node.
Cx[child] = FLT_MAX;
parent[child]=-1;
while(rstart < rend){
int p = rev_edges[rstart];
if(p<0 || p == child){
rstart++;
continue;
}
int weight = 1 / ( ALPHA*(res_energy[p]/E_INIT) + BETA*(Nr[p]/Nt[p]) + DELTA*0) ;
bool flag_cycle = false;
// Reject p if child appears in p's old ancestor chain.
int ancestor = parent_old[p];
while(ancestor > 0){
if(ancestor==child){
flag_cycle = true;
break;
}
ancestor = parent_old[ancestor];
}
if(!flag_cycle && Cx[p]!=FLT_MAX && Cx[child] > (Cx[p]-Hx[p])+weight+Hx[child] ){
Cx[child] = (Cx[p]-Hx[p] )+weight+Hx[child];
parent[child] = p;
}
rstart++;
}
addFlag[child]=1;
}
leaveLoop = true;
atomicExch(&lock[child],0);
}
__syncthreads();
}
start++;
}
}
}
//single thread
// Single-thread seeding kernel: flags every child whose recorded parent is
// `node` but whose stored cost no longer matches the cost of the path
// through `node`, marking them for the repair/propagation pass.
// NOTE(review): Nr[node]/Nt[node] is integer division (truncates the
// ratio), and the != comparison is an exact float equality test — both are
// fragile and should match however the costs were originally computed.
__global__ void add_nodes(int node,int* offset,int* edges,int* parent,float* Hx,volatile float* Cx,
int N,int E,int* addFlag,float* res_energy,int* Nt,int* Nr){
// Outgoing edge range of `node`; the last node's range ends at E.
int start = offset[node];
int end = E;
if(node!=N-1)
end = offset[node+1];
float exp_weight = 1 / ( ALPHA*(res_energy[node]/E_INIT) + BETA*(Nr[node]/Nt[node]) + DELTA*0) ;
while(start < end){
int child = edges[start];
if( parent[child]==node && Cx[child] != (Cx[node] - Hx[node])+ exp_weight + Hx[child] ){
addFlag[child]=1;
}
start++;
}
}
int main(){
srand(42);
//the K PQ
int K ;
scanf("%d",&K);
FILE* fgraph = fopen("graph_op.txt","r");
FILE* fgraph_rev = fopen("graph.txt","r");
int N,E;
fscanf(fgraph,"%d %d\n",&N,&E);
fscanf(fgraph_rev,"%d %d\n",&N,&E);
//base station
int startNode = N-1;
int* H_offset = (int*)malloc(sizeof(int)*N);
int* H_edges = (int*)malloc(sizeof(int)*E);
//rev graph
int* H_rev_offset = (int*)malloc(sizeof(int)*N);
int* H_rev_edges = (int*)malloc(sizeof(int)*E);
//weight is hop count =1
float* H_hx = (float*)malloc(sizeof(float)*N);
float* H_cx = (float*)malloc(sizeof(float)*N);
int* H_parent = (int*)malloc(sizeof(int)*N);
int* H_parent_old = (int*)malloc(sizeof(int)*N);
int* H_openList = (int*)malloc(sizeof(int)*N);
int* H_PQ_size = (int*)malloc(sizeof(int)*K);
//to compute distance
float* H_cord_x = (float*)malloc(sizeof(float)*N);
float* H_cord_y = (float*)malloc(sizeof(float)*N);
float* H_res_energy = (float*)malloc(sizeof(float)*N);
int* H_packet_recv = (int*)malloc(sizeof(int)*N);
int* H_packet_sent = (int*)malloc(sizeof(int)*N);
memset(H_PQ_size,0,sizeof(int)*K);
memset(H_openList,-1,sizeof(int)*N);
memset(H_parent,-1,sizeof(int)*N);
for(int i=0;i<N;i++){
H_cx[i] = FLT_MAX;
H_res_energy[i] = E_INIT;
}
for(int i=0;i<E;i++){
fscanf(fgraph,"%d",&H_edges[i]);
fscanf(fgraph_rev,"%d",&H_rev_edges[i]);
}
for(int i=0;i<N;i++){
fscanf(fgraph,"%d",&H_offset[i]);
fscanf(fgraph_rev,"%d",&H_rev_offset[i]);
}
fclose(fgraph_rev);
fclose(fgraph);
FILE* f_cord = fopen("Cord.txt","r");
for(int i=0;i<N;i++){
float x,y;
fscanf(f_cord,"%f %f\n",&x,&y);
H_cord_x[i]= x;
H_cord_y[i] = y;
}
fclose(f_cord);
calc_hx(H_rev_offset,H_rev_edges,H_hx,H_cord_x,H_cord_y,N,E,startNode);
printf("[INFO] completed taking input\n");
//init Host var
int* H_flagEnd = (int*)malloc(sizeof(int));
int* H_flagfound = (int*)malloc(sizeof(int));
int* H_a0 = (int*)malloc(sizeof(int));
int* H_nV_size = (int*)malloc(sizeof(int));
int* H_nV = (int*)malloc(sizeof(int)*N);
//required coz if many tries to add same in diff threads high low lower
int* H_nVFlag = (int*)malloc(sizeof(int)*N);
memset(H_nVFlag,-1,sizeof(int)*N);
*H_flagEnd = 0;
*H_flagfound = 0;
*H_a0 = 0;
int* H_end_A_star = (int*)malloc(sizeof(int));
*H_end_A_star = 0;
//device var
//graph struture
float run_time = 0;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
int* D_offset;
int* D_edges ;
int* D_rev_offset;
int* D_rev_edges;
float* D_hx;
float* D_Cx;
int* D_parent;
int* D_parent_old;
//Priority queue size
int* D_PQ_size;
//flag if in openList(contains which PQ)
int* D_openList;
//lock for nodes
int* D_lock;
//next nodes flag
int* D_nVFlag;
//next nodes array to insert PQ
int* D_nV;
int* D_nV_size;
//nodes to be expanded ( extracted from PQ )
int* D_expandNodes;
int* D_expandNodes_size;
//flag to end while loop and found the destination
int* D_flagEnd;
int* D_flagfound;
float* D_cord_x;
float* D_cord_y;
float* D_res_energy;
int* D_packet_recv;
int* D_packet_sent;
gpuErrchk ( cudaMalloc(&D_offset,sizeof(int)*N) );
gpuErrchk ( cudaMalloc(&D_edges,sizeof(int)*E) );
gpuErrchk ( cudaMalloc(&D_rev_offset,sizeof(int)*N) );
gpuErrchk ( cudaMalloc(&D_rev_edges,sizeof(int)*E) );
gpuErrchk ( cudaMalloc(&D_hx,sizeof(float)*N) );
gpuErrchk ( cudaMalloc(&D_Cx,sizeof(float)*N) );
gpuErrchk ( cudaMalloc(&D_parent,sizeof(int)*N) );
gpuErrchk ( cudaMalloc(&D_parent_old,sizeof(int)*N) );
gpuErrchk ( cudaMalloc(&D_PQ_size,sizeof(int)*K) );
gpuErrchk ( cudaMalloc(&D_openList,sizeof(int)*N) );
gpuErrchk ( cudaMalloc(&D_lock,sizeof(int)*N) );
//for next set of vertices to add in PQ
gpuErrchk ( cudaMalloc(&D_nV,sizeof(int)*N) );
gpuErrchk ( cudaMalloc(&D_nV_size,sizeof(int)) );
gpuErrchk ( cudaMalloc(&D_nVFlag,sizeof(int)*N) );
//next nodes to expand
gpuErrchk ( cudaMalloc(&D_expandNodes,sizeof(int)*K) ); //changed to K
gpuErrchk ( cudaMalloc(&D_expandNodes_size,sizeof(int)) );
//flag to end search
gpuErrchk( cudaMalloc(&D_flagEnd,sizeof(int)) );
gpuErrchk( cudaMalloc(&D_flagfound,sizeof(int)) );
//energy and cords
gpuErrchk ( cudaMalloc(&D_res_energy,sizeof(float)*N) );
gpuErrchk ( cudaMalloc(&D_cord_x,sizeof(float)*N) );
gpuErrchk ( cudaMalloc(&D_cord_y,sizeof(float)*N) );
gpuErrchk( cudaMalloc(&D_packet_recv,sizeof(int)*N) );
gpuErrchk( cudaMalloc(&D_packet_sent,sizeof(int)*N) );
//copy
gpuErrchk ( cudaMemcpy(D_offset,H_offset,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_edges,H_edges,sizeof(int)*E,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_rev_offset,H_rev_offset,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_rev_edges,H_rev_edges,sizeof(int)*E,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_hx,H_hx,sizeof(float)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_Cx,H_cx,sizeof(float)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemset(D_lock,0,sizeof(int)*N) );
gpuErrchk ( cudaMemcpy(D_res_energy,H_res_energy,sizeof(float)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_cord_x,H_cord_x,sizeof(float)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_cord_y,H_cord_y,sizeof(float)*N,cudaMemcpyHostToDevice) );
//1 TO EVADE DIVIDE BY 0 ERROR
gpuErrchk ( cudaMemset(D_packet_recv,1,sizeof(int)*N) );
gpuErrchk ( cudaMemset(D_packet_sent,1,sizeof(int)*N) );
int count_round = 0;
int numThreads = 512;
int numBlocks = (K+numThreads-1)/numThreads;
int N_numBlocks = (N+numThreads-1)/numThreads;
//A* algo
H_cx[startNode]=H_hx[startNode];
// H_PQ[0]=startNode;
H_PQ_size[0]=1;
H_openList[startNode]=0;
//reset
gpuErrchk ( cudaMemcpy(D_Cx,H_cx,sizeof(float)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpyToSymbol(PQ,&startNode, sizeof(int), 0, cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_parent,H_parent,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_openList,H_openList,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_PQ_size,H_PQ_size,sizeof(int)*K,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_nV_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_expandNodes_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) );
int flag_PQ_not_empty = 0;
for(int i=0;i<K;i++){
if(H_PQ_size[i]>0)
flag_PQ_not_empty=1;
}
//DO A* initailly on whole graph
while(flag_PQ_not_empty == 1){
//extract min
cudaEventRecord(start);
extractMin<<<numBlocks,numThreads>>>(D_PQ_size, D_expandNodes,D_expandNodes_size,D_Cx,D_openList,N,K);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
computeTime(run_time,start,stop);
cudaEventRecord(start);
A_star_expand<<<numBlocks,numThreads>>>(D_offset,D_edges,D_hx,D_parent,D_Cx,
D_expandNodes,D_expandNodes_size, D_lock,D_openList,
N,E,K,D_nVFlag,D_PQ_size,
D_res_energy,D_packet_sent,D_packet_recv);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
computeTime(run_time,start,stop);
cudaEventRecord(start);
keepHeapPQ<<<numBlocks,numThreads>>>(D_PQ_size,D_Cx,N,K);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
computeTime(run_time,start,stop);
//gen from flag D_nV
//for N in parallel
cudaEventRecord(start);
setNV<<<N_numBlocks,numThreads>>>(D_nVFlag,D_nV,D_nV_size,N);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
computeTime(run_time,start,stop);
cudaEventRecord(start);
insertPQ<<<numBlocks,numThreads>>>(D_PQ_size,D_nV,D_nV_size,D_Cx,K,N,D_openList);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
computeTime(run_time,start,stop);
//cpy flagend and flagEmpty
gpuErrchk( cudaMemcpy(H_PQ_size,D_PQ_size, sizeof(int)*K,cudaMemcpyDeviceToHost) );
//reset nVFlag
gpuErrchk( cudaMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,cudaMemcpyHostToDevice) );
//reset next insert array
gpuErrchk( cudaMemcpy(D_nV_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(D_expandNodes_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) );
flag_PQ_not_empty = 0;
for(int i=0;i<K;i++){
if(H_PQ_size[i]>0)
flag_PQ_not_empty=1;
}
}
gpuErrchk( cudaMemcpy(H_parent,D_parent, sizeof(int)*N,cudaMemcpyDeviceToHost) );
//broadcasted sol
// for(int i=0;i<N;i++){
// int p = i;
// printf("PATH: %d ",i);
// while(H_parent[p]!=-1){
// p = H_parent[p];
// printf("%d ",p);
// if(p==startNode)
// break;
// }
// printf("\n");
// }
// update energy
update_energy(H_parent,H_res_energy,H_packet_sent,H_packet_recv,H_cord_x,H_cord_y,N,startNode);
gpuErrchk ( cudaMemcpy(D_packet_recv,H_packet_recv,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_packet_sent,H_packet_sent,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_res_energy,H_res_energy,sizeof(float)*N,cudaMemcpyHostToDevice) );
//check for end
check_failure(H_res_energy,N,H_end_A_star);
count_round++;
while(*H_end_A_star==0){
//reset flag
gpuErrchk( cudaMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,cudaMemcpyHostToDevice) );
cudaEventRecord(start);
add_nodes<<<1,1>>>(startNode,D_offset,D_edges,D_parent,D_hx,D_Cx,
N,E,D_nVFlag,D_res_energy,D_packet_sent,D_packet_recv);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
computeTime(run_time,start,stop);
//make size =0
gpuErrchk( cudaMemcpy(D_nV_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) );
//gen from flag D_nV
cudaEventRecord(start);
setNV<<<N_numBlocks,numThreads>>>(D_nVFlag,D_nV,D_nV_size,N);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
computeTime(run_time,start,stop);
//copy back
gpuErrchk( cudaMemcpy(H_nV_size,D_nV_size, sizeof(int),cudaMemcpyDeviceToHost) );
//reset nV flags
gpuErrchk( cudaMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,cudaMemcpyHostToDevice) );
while(*H_nV_size > 0){
numBlocks = (*H_nV_size+numThreads-1)/numThreads;
//old parent to check cycle and remove locking on parent
gpuErrchk( cudaMemcpy(D_parent_old,D_parent,sizeof(int)*N,cudaMemcpyDeviceToDevice) );
cudaEventRecord(start);
delete_propagate<<<numBlocks,numThreads>>>(D_nV,D_nV_size,D_offset,D_edges,D_hx,
N,E,D_Cx,D_lock,D_parent,D_parent_old,D_nVFlag,
D_rev_offset,D_rev_edges,D_res_energy,D_packet_sent,D_packet_recv);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
computeTime(run_time,start,stop);
//reset size=0
gpuErrchk( cudaMemcpy(D_nV_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) );
//gen from flag D_nV
cudaEventRecord(start);
setNV<<<N_numBlocks,numThreads>>>(D_nVFlag,D_nV,D_nV_size,N);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
computeTime(run_time,start,stop);
//copy back
gpuErrchk( cudaMemcpy(H_nV_size,D_nV_size, sizeof(int),cudaMemcpyDeviceToHost) );
//reset nV flags
gpuErrchk( cudaMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,cudaMemcpyHostToDevice) );
//check cycle
gpuErrchk( cudaMemcpy(H_parent,D_parent,sizeof(int)*N,cudaMemcpyDeviceToHost) );
gpuErrchk( cudaMemcpy(H_parent_old,D_parent_old,sizeof(int)*N,cudaMemcpyDeviceToHost) );
gpuErrchk( cudaMemcpy(H_cx,D_Cx,sizeof(int)*N,cudaMemcpyDeviceToHost) );
gpuErrchk( cudaMemcpy(H_nV,D_nV,sizeof(int)*N,cudaMemcpyDeviceToHost) );
//remove cycles
check_remove_cycle(H_nV,H_nV_size,H_rev_offset,H_rev_edges,
H_parent,H_parent_old,H_cx,H_hx,N,E,
H_res_energy,H_packet_sent,H_packet_recv);
gpuErrchk( cudaMemcpy(D_Cx,H_cx,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(D_parent,H_parent,sizeof(int)*N,cudaMemcpyHostToDevice) );
}
gpuErrchk( cudaMemcpy(H_parent,D_parent, sizeof(int)*N,cudaMemcpyDeviceToHost) );
// for(int i=0;i<N;i++){
// int p = i;
// printf("PATH: %d ",i);
// while(H_parent[p]!=-1){
// p = H_parent[p];
// printf("%d ",p);
// if(p==startNode)
// break;
// }
// printf("\n");
// }
// update energy
update_energy(H_parent,H_res_energy,H_packet_sent,H_packet_recv,H_cord_x,H_cord_y,N,startNode);
gpuErrchk ( cudaMemcpy(D_packet_recv,H_packet_recv,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_packet_sent,H_packet_sent,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_res_energy,H_res_energy,sizeof(float)*N,cudaMemcpyHostToDevice) );
//check for end
check_failure(H_res_energy,N,H_end_A_star);
count_round++;
// printf("round %d\n",count_round);
}
printf("rounds: %d\n",count_round);
printf("time: %f\n",run_time);
return 0;
}
// Computes the A* heuristic hx[i] for every node: an estimated hop count
// to the sink, i.e. (euclidean distance to sink) / (average length of
// node i's outgoing edges).
//   offset/edges : CSR adjacency (offset has N entries, edges has E)
//   cord_x/cord_y: node coordinates
void calc_hx(int* offset,int* edges,float* hx,float* cord_x,float* cord_y,int N,int E,int sink){
    float dx,dy;
    dx = cord_x[sink];
    dy = cord_y[sink];
    for(int i=0;i<N;i++){
        float x,y;
        x = cord_x[i];
        y = cord_y[i];
        int start = offset[i];
        // FIX: the last node's edge list ends at E (the number of edges),
        // not N (the number of nodes). The sibling check_remove_cycle()
        // already uses E for the same CSR walk; using N truncated or
        // overran the adjacency list whenever E != N.
        int end = E;
        if(i!=N-1)
            end = offset[i+1];
        float sum = 0;
        int count = 0;
        while(start < end){
            int child = edges[start];
            sum+= sqrtf( (x-cord_x[child])*(x-cord_x[child]) + (y-cord_y[child])*(y-cord_y[child]) );
            start++;
            count++;
        }
        float dist = sqrtf( (x-dx)*(x-dx) + (y-dy)*(y-dy) );
        // hop-count estimate; guard sum > 0 so an isolated node (no edges)
        // no longer produces 0/0 = NaN.
        if(dist!=0 && sum>0){
            hx[i] = (dist * count )/sum ;
        }
        else
            hx[i] = 0;
    }
}
// Charges radio energy for a random batch of packets routed to the sink.
// Each packet starts at a random node and follows parent[] until a root
// (parent == -1). Per hop: the receiver pays k*E_ELEC and has Nr bumped;
// the transmitter pays k*(E_ELEC + E_AMP*d^2) and has Nt bumped.
// NOTE(review): the original comment said "256 bit packet length" while
// k is 2048 — confirm which is intended. `sink` is accepted but unused
// (kept for interface compatibility; a path-validity check was disabled).
void update_energy(int* parent,float* res_energy,int* Nt,int* Nr,float* cord_x,float* cord_y,int N,int sink){
    const int k = 2048;
    int rounds = rand()%N;                  // number of packets this call
    for(int j=0;j<rounds;j++){
        int cur = rand()%N;                 // random source node
        while(parent[cur]!=-1){
            int nxt = parent[cur];
            // receiver side
            Nr[nxt] += 1;
            res_energy[nxt] -= k*E_ELEC;
            // transmitter side: cost grows with squared hop distance
            Nt[cur] += 1;
            float ddx = cord_x[cur]-cord_x[nxt];
            float ddy = cord_y[cur]-cord_y[nxt];
            res_energy[cur] -= k*(E_ELEC + E_AMP*(ddx*ddx + ddy*ddy));
            cur = nxt;
        }
    }
}
// Scans residual energies; the first node whose energy dropped below zero
// marks the network as failed (*flag = 1) and is reported on stdout.
// *flag is left untouched when every node has non-negative energy.
void check_failure(float* res_energy,int N, int* flag){
    for(int i=0;i<N;i++){
        if(!(res_energy[i]<0))
            continue;
        *flag =1;
        printf("dead: %d\n",i);
        break;
    }
}
// Accumulates into `time` the elapsed milliseconds between two CUDA
// events that the caller has already recorded and synchronized on.
void computeTime(float& time,cudaEvent_t start, cudaEvent_t stop){
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    time += ms;
}
// Host-side cycle repair after delete-propagation: for every node in
// `nodes` (count *size), walk its parent chain; if the chain loops back
// to the node, pick a replacement parent from the node's reverse edges
// that (a) is not the cycle-forming parent, (b) does not itself descend
// from the node, minimising the resulting Cx. Writes Cx[node] and
// parent[node] (possibly -1 / FLT_MAX if no valid parent exists).
// NOTE(review): `int weight = 1 / (...)` truncates a floating-point
// expression to int, and `Nr[p]/Nt[p]` is integer division (zero whenever
// Nr < Nt, division by zero when Nt[p]==0) — confirm float math was not
// intended here.
void check_remove_cycle(int* nodes,int* size,int* rev_offset,int* rev_edges,
    int* parent,int* parent_old,float* Cx,float* Hx, int N,int E,
    float* res_energy,int* Nt,int* Nr){
    for(int i=0;i<*size;i++){
        int node = nodes[i];
        bool cycle = false;
        // visited[] stops the walk if we enter a cycle not containing `node`
        vector<bool>visited(N,false);
        int ancestor = parent[node];
        while(ancestor > 0){
            if(ancestor==node){
                cycle = true;
                break;
            }
            if(visited[ancestor]){
                break;
            }
            visited[ancestor]=true;
            ancestor = parent[ancestor];
        }
        if(cycle){
            int p_cycle = parent[node];   // the parent edge that closed the cycle
            int start = rev_offset[node];
            int end = E;                  // last node's reverse list ends at E
            if(node!=N-1)
                end = rev_offset[node+1];
            // write into parent[], read ancestry from parent_old[]
            float cost = FLT_MAX;
            int opt_parent = -1;
            // scan candidate parents p of `node` via the reverse edge list
            while(start< end){
                int p = rev_edges[start];
                // skip deleted edges (p<0), the cycle-forming parent, and self
                if(p<0 || p == p_cycle || p==node){
                    start++;
                    continue;
                }
                int weight = 1 / ( ALPHA*(res_energy[p]/E_INIT) + BETA*(Nr[p]/Nt[p]) + DELTA*0) ;
                int flag_cycle = false;
                // reject p if `node` is an ancestor of p (would re-create a cycle)
                int ancestor = parent_old[p];
                while(ancestor>0){
                    if(ancestor==node){
                        flag_cycle = true;
                        break;
                    }
                    ancestor = parent_old[ancestor];
                }
                // g(p) + edge weight + h(node); keep the cheapest valid parent.
                // No locking needed: each node appears once, one node per thread.
                if(!flag_cycle && Cx[p]!=FLT_MAX && cost > (Cx[p]-Hx[p])+weight+Hx[node] ){
                    cost = (Cx[p]-Hx[p] )+weight+Hx[node];
                    opt_parent = p;
                }
                start++;
            }
            // opt_parent may remain -1, orphaning the node with Cx = FLT_MAX
            Cx[node]=cost;
            parent[node]=opt_parent;
        }
    }
}
20,825 | #include <cstdlib>
#include <cstdio>
#include <cuda.h>
using namespace std;
/*
__global__ void mykernel(void) {
}
int main(void) {
mykernel<<<1,1>>>();
printf("CPU Hello World!\n");
return 0;
}
*/
#define N 10000000
// Element-wise sum on the host: out[i] = a[i] + b[i] for i in [0, n).
void vector_add(float *out, float *a, float *b, int n) {
    int i = 0;
    while (i < n) {
        out[i] = a[i] + b[i];
        ++i;
    }
}
// Allocates three N-element host buffers, fills the inputs, runs the CPU
// vector addition, and releases everything.
int main(){
    float *a, *b, *out;

    // Allocate memory (~40 MB each at N = 10M)
    a   = (float*)malloc(sizeof(float) * N);
    b   = (float*)malloc(sizeof(float) * N);
    out = (float*)malloc(sizeof(float) * N);
    if (a == NULL || b == NULL || out == NULL) {
        // FIX: bail out cleanly instead of dereferencing a NULL pointer
        // below if any allocation failed.
        free(a); free(b); free(out);
        return 1;
    }

    // Initialize arrays: every out[i] should become 3.0f
    for(int i = 0; i < N; i++){
        a[i] = 1.0f; b[i] = 2.0f;
    }

    // Main function
    vector_add(out, a, b, N);

    // FIX: the original leaked all three buffers.
    free(a);
    free(b);
    free(out);
    return 0;
}
|
20,826 | /***************************************************************************//**
* \file intermediateVelocity.cu
* \author Christopher Minar (minarc@oregonstate.edu)
* \brief kernels to generate the right hand side for the initial velocity solve
*/
#include "intermediateVelocity.h"
/**
* \namespace kernels
* \brief Contains all the custom-written CUDA kernels.
*/
namespace kernels
{
// Applies a convective outflow boundary condition to the u component on
// the east (x = max) boundary:
//   xp[J] <- (1-beta)*xp[J] + beta*u[interior neighbour], beta = Uinf*dt/dx.
// Launched 1D with at least ny threads; surplus threads exit early.
__global__
void updateBoundaryX(double *u, double *xp, double *dx, double dt, double Uinf, int nx, int ny)
{
    if (threadIdx.x + (blockDim.x * blockIdx.x) >= ny)
        return;
    int J = threadIdx.x + (blockDim.x * blockIdx.x),
        I = nx-1,               // last u column
        i = J*(nx-1) + I;       // flat index of the boundary u node
    double beta = Uinf * dt / dx[I];
    xp[J] = xp[J]*(1-beta) + beta*u[i-1];
}

// Same convective condition for the v component on the east boundary.
// v values are stored after the (nx-1)*ny u entries in the packed
// velocity array, and the v half of xp starts at offset ny.
__global__
void updateBoundaryY(double *u, double *xp, double *dx, double dt, double Vinf, int nx, int ny)
{
    if (threadIdx.x + (blockDim.x * blockIdx.x) >= ny - 1)
        return;
    int J = threadIdx.x + (blockDim.x * blockIdx.x),
        I = nx,
        i = J*nx + I,
        numU = (nx-1)*ny;       // size of the u block in the packed array
    double beta = Vinf * dt / dx[I-1];
    xp[J+ny] = xp[J+ny]*(1-beta) + beta*u[i + numU-1];
}

// Overwrites RHS entries at immersed-boundary hybrid u nodes with the
// interpolation forcing term; untagged nodes keep their RHS. The
// boolean-multiply form is a branchless select.
__global__
void updateRHS1forIBX(int *hybridTagsUV, int *ghostTagsUV, double *rhs, double *distance_from_intersection_to_node, double *distance_between_nodes_at_IB, double *uv, int nx, int ny)
{
    if (threadIdx.x + (blockDim.x * blockIdx.x) >= (nx-1)*ny)
        return;
    int i = threadIdx.x + (blockDim.x * blockIdx.x);
    // if not outtag & if not in tag: keep rhs; if hybrid-tagged: interpolated forcing
    rhs[i] = (hybridTagsUV[i]==-1) * (ghostTagsUV[i]<=0) * (rhs[i]) + (hybridTagsUV[i]!=-1) * distance_between_nodes_at_IB[i]/(distance_from_intersection_to_node[i]+distance_between_nodes_at_IB[i]) * uv[i];
}

// v-component analogue of updateRHS1forIBX; indices are offset past the
// (nx-1)*ny u entries. Original author's note: dx and dy must be equal
// and uniform at the boundary for the interior forcing to work.
__global__
void updateRHS1forIBY(int *hybridTagsUV, int *ghostTagsUV, double *rhs, double *distance_from_intersection_to_node, double *distance_between_nodes_at_IB, double *uv, int nx, int ny)
{
    if (threadIdx.x + (blockDim.x * blockIdx.x) >= nx*(ny-1))
        return;
    int i = threadIdx.x + (blockDim.x * blockIdx.x) + (nx-1)*ny;
    rhs[i] = (hybridTagsUV[i]==-1) * (ghostTagsUV[i]<=0) * (rhs[i]) + (hybridTagsUV[i]!=-1) * distance_between_nodes_at_IB[i]/(distance_from_intersection_to_node[i]+distance_between_nodes_at_IB[i]) * uv[i];
}
} // end of namespace kernels
|
20,827 | #include "math.h"
#include <iostream>
const int ARRAY_SIZE = 1000;
using namespace std;
// One thread per element: add `val` to aArray[i], guarding the grid tail.
__global__ void increment(double *aArray, double val, unsigned int sz) {
    unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= sz)
        return;
    aArray[i] += val;
}
// Increments every element of an ARRAY_SIZE-element array on the GPU and
// verifies the result on the host.
int main(int argc, char **argv) {
    double *hA;
    double *dA;
    hA = (double *)malloc(ARRAY_SIZE * sizeof(double));
    cudaMalloc(&dA, ARRAY_SIZE * sizeof(double));

    for (int i = 0; i < ARRAY_SIZE; i++)
        hA[i] = 1. * i;

    double inc_val = 2.0;
    cudaMemcpy(dA, hA, sizeof(double) * ARRAY_SIZE, cudaMemcpyHostToDevice);

    // FIX: derive the grid from ARRAY_SIZE instead of hard-coding <<<2,512>>>;
    // the old launch silently under-covers the array if ARRAY_SIZE grows
    // past 1024 (the kernel's bounds check hides the error).
    int threads = 512;
    int blocks = (ARRAY_SIZE + threads - 1) / threads;
    increment<<<blocks, threads>>>(dA, inc_val, ARRAY_SIZE);

    // blocking copy also synchronizes with the kernel
    cudaMemcpy(hA, dA, sizeof(double) * ARRAY_SIZE, cudaMemcpyDeviceToHost);

    // verify: every element should have grown by inc_val
    double error = 0.;
    for (int i = 0; i < ARRAY_SIZE; i++)
        error += fabs(hA[i] - (i + inc_val));
    cout << "Test: " << (error < 1.E-9 ? "Passed" : "Failed") << endl;

    cudaFree(dA);
    free(hA);
    return 0;
}
|
20,828 | #include "includes.h"
// Blocked Floyd–Warshall-style relaxation for pivot k: each thread updates
// a 2x2 set of cells of the n x n distance matrix d_D via
//   d[i][j] = min(d[i][j], d[i][k] + d[k][j]).
// NOTE(review): i/j are never bounds-checked, so n must exactly cover
// 2 * blockDim * gridDim in each dimension — confirm with the launcher.
// NOTE(review): each thread reads and writes only its own s_d slots, so
// the shared array is private staging, not inter-thread communication;
// that is why no __syncthreads() appears.
__global__ void calc(float *d_D, int n, int k){ //kernel (4 cells for every thread)
    __shared__ float s_d[4*3*256]; //Shared table within a block
    int i = blockIdx.x * blockDim.x + threadIdx.x; //Calculation of i and j
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    int b_index = 4 * 3 * (threadIdx.x + blockDim.x*threadIdx.y); //Calculation of initial index of thread in the shared table within the block
    int istep = blockDim.x*gridDim.x, jstep = blockDim.y*gridDim.y;
    int l, m , v=0;
    for (l = 0; l<2; l++){
        for (m = 0; m<2; m++){ //Stage (cell, row->pivot, pivot->column) triples for each of the 4 cells
            s_d[b_index + 3 * v] = d_D[(i+l*istep)+(j+m*jstep)*n];
            s_d[b_index + (3 * v + 1)] = d_D[(i + l*istep) + k*n];
            s_d[b_index + (3 * v + 2)] = d_D[k + (j + m*jstep)*n];
            v++;
        }
    }
    for (v = 0; v<4; v++){ //Relax: keep the cheaper of the direct path and the path through pivot k
        if (s_d[b_index + 3 * v] > s_d[b_index + (3 * v + 1)] + s_d[b_index + (3 * v + 2)]) s_d[b_index + 3 * v] = s_d[b_index + (3 * v + 1)] + s_d[b_index + (3 * v + 2)];
    }
    v = 0;
    for (l = 0; l<2; l++){ //Write the updated cells back to global memory
        for (m = 0; m<2; m++){
            d_D[(i+l*istep)+(j+m*jstep)*n] = s_d[b_index + 3 * v];
            v++;
        }
    }
}
20,829 | // ###
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Summer Semester 2015, September 7 - October 6
// ###
// ###
// ### Thomas Moellenhoff, Robert Maier, Caner Hazirbas
// ###
// ###
// ###
// ### THIS FILE IS SUPPOSED TO REMAIN UNCHANGED
// ###
// ###
#include <cuda_runtime.h>
#include <iostream>
using namespace std;
// cuda error checking
#define CUDA_CHECK cuda_check(__FILE__,__LINE__)
// Aborts with a readable message if any prior CUDA call left an error
// pending; used via the CUDA_CHECK macro after every CUDA call.
void cuda_check(string file, int line)
{
    cudaError_t err = cudaGetLastError();
    if (err == cudaSuccess)
        return;
    cout << endl << file << ", line " << line << ": " << cudaGetErrorString(err) << " (" << err << ")" << endl;
    exit(1);
}
// Device-side helper: returns the sum of two floats.
__device__ float add (float a, float b)
{
    float sum = a + b;
    return sum;
}
// One thread per element: c[i] = a[i] + b[i], guarding the grid tail.
__global__ void add_arrays (float *a, float *b, float *c, int n)
{
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    if (i >= n)
        return;
    c[i] = add(a[i], b[i]);
}
// Adds two 20-element arrays on the CPU and on the GPU and prints both
// results for comparison.
int main(int argc, char **argv)
{
    // alloc and init input arrays on host (CPU)
    int n = 20;
    float *a = new float[n];
    float *b = new float[n];
    float *c = new float[n];
    for(int i=0; i<n; i++)
    {
        a[i] = i;
        b[i] = (i%5)+1;
        c[i] = 0;
    }

    // CPU reference computation
    for(int i=0; i<n; i++) c[i] = a[i] + b[i];

    // print result
    cout << "CPU:"<<endl;
    for(int i=0; i<n; i++) cout << i << ": " << a[i] << " + " << b[i] << " = " << c[i] << endl;
    cout << endl;

    // reset c before the GPU run
    for(int i=0; i<n; i++) c[i] = 0;

    // GPU computation
    float *d_a = NULL;
    float *d_b = NULL;
    float *d_c = NULL;
    size_t nbytes = n * sizeof(float);
    cudaMalloc(&d_a, nbytes); CUDA_CHECK;
    cudaMalloc(&d_b, nbytes); CUDA_CHECK;
    cudaMalloc(&d_c, nbytes); CUDA_CHECK;

    // move inputs from host to device memory
    cudaMemcpy(d_a, a, nbytes, cudaMemcpyHostToDevice); CUDA_CHECK;
    cudaMemcpy(d_b, b, nbytes, cudaMemcpyHostToDevice); CUDA_CHECK;

    // launch configuration: ceil(n / block.x) blocks
    dim3 block = dim3(4, 1, 1);
    dim3 grid = dim3((n + block.x - 1) / block.x, 1, 1);

    // dispatch the kernel
    add_arrays <<<grid, block>>> (d_a, d_b, d_c, n);
    // FIX: a kernel launch returns no status; check it explicitly so a bad
    // launch configuration is not silently ignored (the file's own rule is
    // "use CUDA_CHECK after each CUDA call").
    CUDA_CHECK;

    // copy result back to host memory (blocking copy also synchronizes)
    cudaMemcpy(c, d_c, nbytes, cudaMemcpyDeviceToHost); CUDA_CHECK;

    // free the device memory
    cudaFree(d_a); CUDA_CHECK;
    cudaFree(d_b); CUDA_CHECK;
    cudaFree(d_c); CUDA_CHECK;

    // print result
    cout << "GPU:"<<endl;
    for(int i=0; i<n; i++) cout << i << ": " << a[i] << " + " << b[i] << " = " << c[i] << endl;
    cout << endl;

    // free CPU arrays
    delete[] a;
    delete[] b;
    delete[] c;
}
|
20,830 | #include <cuComplex.h>
#include <cuda.h>
#include <cuda_runtime.h>
// Strips the cyclic prefix from a stream of OFDM symbols.
//   in : n samples = consecutive symbols of length `symlen`, each starting
//        with a `cplen`-sample cyclic prefix
//   out: per symbol, the (symlen - cplen) payload samples, packed densely
// One thread per input sample; prefix samples are dropped.
__global__ void
remove_cp(cuFloatComplex* in, cuFloatComplex* out, int symlen, int cplen, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        int sym_idx = i / symlen;    // which symbol this sample belongs to
        int samp_idx = i % symlen;   // position within the symbol
        if (samp_idx >= cplen) {
            // FIX: payload sample samp_idx maps to output offset
            // (samp_idx - cplen) and copies in[i] itself. The original
            // `out[... + samp_idx] = in[i + cplen]` wrote past each
            // symbol's output slot and read past the end of `in` for the
            // final symbol.
            out[sym_idx * (symlen - cplen) + (samp_idx - cplen)] = in[i];
        }
    }
}
// Host wrapper: launches remove_cp on `stream` with the caller-chosen
// grid/block sizes and no dynamic shared memory. Asynchronous — the
// caller owns synchronization on `stream`.
void exec_remove_cp(cuFloatComplex* in,
                    cuFloatComplex* out,
                    int symlen,
                    int cplen,
                    int n,
                    int grid_size,
                    int block_size,
                    cudaStream_t stream)
{
    remove_cp<<<grid_size, block_size, 0, stream>>>(in, out, symlen, cplen, n);
}
// Asks the occupancy API for a launch configuration that maximizes the
// potential occupancy of remove_cp (no dynamic shared memory, no block
// size limit).
void get_block_and_grid_remove_cp(int* minGrid, int* minBlock)
{
    cudaOccupancyMaxPotentialBlockSize(minGrid, minBlock, remove_cp, 0, 0);
}
// Writes array[i] = i * 0.5f for each thread in a single block.
__global__ void fillKernel(float* array) {
    // FIX: 0.5f keeps the multiply in single precision; the original
    // double literal 0.5 promoted every operation to double.
    array[threadIdx.x] = threadIdx.x * 0.5f;
}

// Fills the first `count` elements of a device array using one block of
// `count` threads. NOTE(review): the single-block launch fails silently
// for count > the device's max threads per block (typically 1024) —
// confirm callers respect that limit.
void fillGpuArray(float* array, int count) {
    fillKernel<<<1, count>>>(array);
}
|
20,832 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Demo kernel: each of the 512 threads writes its index into shared
// memory, then the array is shifted left by one slot and each thread
// prints the difference between its new and old value.
__global__ void aKernel()
{
    int idx = threadIdx.x;
    int r1, r2, res_diff;
    __shared__ int arr[512];

    arr[idx] = idx;
    printf("A: Thread %5d, value %5d\n", idx, arr[idx]);
    __syncthreads();

    r1 = arr[idx];
    // FIX: the original put __syncthreads() inside `if (idx < 511)`, a
    // divergent branch that thread 511 never reaches — undefined
    // behaviour. Read the neighbour before a barrier that ALL threads
    // execute, then write after it.
    int temp = (idx < 511) ? arr[idx + 1] : arr[idx];
    __syncthreads();
    if (idx < 511) {
        arr[idx] = temp;
    }
    r2 = arr[idx];    // each thread re-reads only its own slot
    res_diff = r2 - r1;
    printf("B: Thread %5d, value %5d, diff=%5d\n", idx, arr[idx], res_diff);
}
int main()
{
    // Launch one 512-thread block.
    aKernel<<<1, 512>>> ();
    // FIX: without a synchronization the process can exit before the
    // kernel runs and its printf buffer is flushed.
    cudaDeviceSynchronize();
    return 0;
}
|
20,833 | #include <stdio.h>
// Deliberately empty kernel: the body (reading `input` and printing) is
// commented out, so the launch only exercises the CUDA runtime.
__global__ void dumbkernel(bool *input){
//    if( input[threadIdx.x] ){
//        printf("we made it to dumbkernel\n");
//    }
}
#define SZ 25
// Allocates a small device buffer and launches the (empty) kernel.
// NOTE(review): the buffer holds SZ==25 bools but 32 threads are
// launched; harmless while the kernel body stays commented out, but the
// commented code would read out of bounds if re-enabled.
int main(){
    bool *devDummy;
    cudaMalloc( (void**) &devDummy, sizeof(bool) * SZ);
    dumbkernel<<<1, 32>>>(devDummy);
    // FIX: wait for the kernel and release the device buffer — the
    // original leaked it and never synchronized.
    cudaDeviceSynchronize();
    cudaFree(devDummy);
    return 0;
}
20,834 | #include "includes.h"
/************************* CudaMat ******************************************
* Copyright (C) 2008-2009 by Rainer Heintzmann *
* heintzmann@gmail.com *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; Version 2 of the License. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, write to the *
* Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
***************************************************************************
* Compile with:
* Windows:
system('"c:\Program Files (x86)\Microsoft Visual Studio 9.0\VC\bin\vcvars32.bat"')
system('nvcc -c cudaArith.cu -ccbin "c:\Program Files (x86)\Microsoft Visual Studio 9.0\VC\bin')
Window 64 bit:
system('nvcc -c cudaArith.cu -ccbin "c:\Program Files (x86)\Microsoft Visual Studio 10.0\VC\bin" -I"c:\Program Files (x86)\Microsoft Visual Studio 10.0\VC\include" ')
Linux:
* File sudo vi /usr/local/cuda/bin/nvcc.profile
* needs the flag -fPIC in the include line
system('nvcc -c cudaArith.cu -v -I/usr/local/cuda/include/')
*/
// To suppress the unused variable argument for ARM targets
#pragma diag_suppress 177
#ifndef NAN // should be part of math.h
#define NAN (0.0/0.0)
#endif
#define ACCU_ARRTYPE double // Type of the tempory arrays for reduce operations
#define IMUL(a, b) __mul24(a, b)
//#define BLOCKSIZE 512
//#define BLOCKSIZE 512
// below is blocksize for temporary array for reduce operations. Has to be a power of 2 in size
#ifndef CUIMAGE_REDUCE_THREADS // this can be defined at compile time via the flag NVCCFLAG='-D CUIMAGE_REDUCE_THREADS=512'
#define CUIMAGE_REDUCE_THREADS 512
#endif
// (prop.maxThreadsPerBlock)
// #define CUIMAGE_REDUCE_THREADS 512
// #define CUIMAGE_REDUCE_THREADS 128
//#define CUIMAGE_REDUCE_BLOCKS 64
#define NBLOCKS(N,blockSize) (N/blockSize+(N%blockSize==0?0:1))
#define NBLOCKSL(N,blockSize) 1
// min((N/blockSize+(N%blockSize==0?0:1)),prop.maxGridSize[0])
// In-place cyclic shift of a 3D array a (size sx*sy*sz) along chains with
// step (dx,dy,dz), starting from the ux*uy*uz unit cell; each value is
// carried one position along its chain (plus 0.1, apparently for
// debugging) and written into c. Work is split evenly: each thread owns
// `steps = N/Processes` consecutive writes and hands off at chain ends
// via the nextProcessor label.
// NOTE(review): Processes = blockDim.x*gridDim.x ignores gridDim.y even
// though `id` uses blockIdx.y — confirm the launch uses a 1-row grid.
// NOTE(review): __syncthreads() appears inside data-dependent branches;
// confirm all threads of a block take the same path, otherwise this is
// undefined behaviour. 0.1 is a double literal in float arithmetic.
__global__ void rotate(float*a,float b, float * c, size_t sx,size_t sy,size_t sz, size_t dx, size_t dy, size_t dz, size_t ux, size_t uy, size_t uz)
{
    // id of this processor
    size_t id=((blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x);
    size_t Processes=blockDim.x * gridDim.x;
    size_t chains=ux*uy*uz; // total number of independent chains
    size_t N=sx*sy*sz; // total size of array, has to be chains*length_of_chain
    size_t length=N/chains; // chain length
    size_t steps=N/Processes; // this is how many steps each processor has to do
    size_t step,nl,nx,ny,nz,x,y,z,i,idd;
    float swp, nswp;
    //if (id != 0) return;
    //for (id=0;id<Processes;id++)
    {
        step=steps*id; // my starting step as the id times the number of steps
        nl=step%length; // current position in chain length
        nx=(step/length)%ux; // current position in unit cell x
        ny=(step/(length*ux))%uy; // current position in unit cell y
        nz=(step/(length*ux*uy))%uz; // current position in unit cell z
        i=0;
        //if (step/steps != 4 && step/steps != 5) return;
        while(nz<uz)
        {
            while(ny<uy)
            {
                while (nx<ux)
                {
                    x=(nx+nl*dx)%sx; // advance by the offset steps along the chain
                    y=(ny+nl*dy)%sy;
                    z=(nz+nl*dz)%sz;
                    idd=x+sx*y+sx*sy*z;
                    if (i < steps) {
                        swp=a[idd]; // preload the value to carry forward
                        // a[idd]=a[idd]+0.1;
                        __syncthreads();
                    }
                    while (nl<length-1)
                    {
                        if (i > steps-1)
                            goto nextProcessor; // return;
                        if (step >= N) // this thread has reached the end of the total data to process
                            goto nextProcessor; // return;
                        step++;
                        x = (x+dx)%sx; // new position
                        y = (y+dy)%sy;
                        z = (z+dz)%sz;
                        idd=x+sx*y+sx*sy*z;
                        if (i < steps-1) {
                            nswp=a[idd]; // read the next value before overwriting it
                            __syncthreads();
                            //a[idd]=a[idd]+0.1;
                        }
                        c[idd]=swp+0.1; // c[idd]+ny+0.1; // c[idd]+i; // swp+0.1; // c[idd]+(step/steps);
                        i++; // counts number of writes
                        if (i > steps-1)
                            goto nextProcessor; // return;
                        nl++;
                        if (i < steps) {
                            swp=nswp; // carried value advances one slot
                        }
                    }
                    nx++; nl=0;
                    //if (nx < ux) {
                    x = (x+dx)%sx; // new position
                    y = (y+dy)%sy;
                    z = (z+dz)%sz;
                    idd=x+sx*y+sx*sy*z;
                    c[idd]=swp+0.1; // no need to save this value as this is the end of the line
                    //}
                    i++;
                    if (i > steps-1)
                        goto nextProcessor; // return;
                    // if (nx <ux) x=(x+1)%sx;
                }
                ny++;
                // if (ny <uy) y=(y+1)%sy;
                nx=0;x=0;
            }
            nz++;
            // if (nz <uz) z=(z+1)%sz;
            ny=0;y=0;
        }
nextProcessor:
        nz=0;
    }
    return;
}
20,835 | #include "AntSimple.cuh"
#include <stdio.h>
namespace SIMPLE
{
// Device-side ant for ant-colony TSP.
// NOTE(review): isVisited/goodnessNumerators come from device new[] and
// are never deleted (leak per Ant), and `distance` is not initialized
// here — Reset() must run before the first Venture(); confirm callers.
__device__
Ant::Ant(int initialLocation, int matrixDim, curandState_t randState) :
    visitedIndex(0),
    isVisited(new bool[matrixDim]),
    position(initialLocation),
    goodnessNumerators(new double[matrixDim]),
    m_randomState(randState)
{
}

// Builds a full tour: pick probabilistic next hops until every city is
// visited, then close the loop back to route[0] and add the return leg.
__device__
void Ant::Venture(int* route, const double* distanceMatrix, const double* pheromoneMatrix, int matrixDim, double alpha, double beta)
{
    while (visitedIndex < matrixDim)
    {
        int nextHop = SelectNextHop(distanceMatrix, pheromoneMatrix, matrixDim, alpha, beta);
        GoTo(nextHop, route, distanceMatrix, matrixDim);
    }
    route[matrixDim] = route[0];
    distance += distanceMatrix[route[matrixDim - 1] * matrixDim + route[0]];
    //printf("Distance Traveled: %f\n", distance);
}

// Roulette-wheel selection over unvisited cities weighted by
// pheromone^alpha * (1/distance)^beta. Returns -1 if the cumulative
// probability never reaches the random draw.
// NOTE(review): Venture() passes -1 straight to GoTo(), which would index
// out of bounds — confirm the wheel always terminates in practice.
__device__
int Ant::SelectNextHop(const double* distance_matrix, const double* pheromoneMatrix, int matrixDim, double alpha, double beta)
{
    // First pass: per-city numerators and the normalization denominator.
    double denominator = 0;
    for (int i = 0; i < matrixDim; ++i)
    {
        if (isVisited[i]) { continue; }
        int possiblePosition = i;
        double goodnessNumerator = pow(pheromoneMatrix[position * matrixDim + possiblePosition], alpha) * pow(1.0 / distance_matrix[position * matrixDim + possiblePosition], beta);
        goodnessNumerators[possiblePosition] = goodnessNumerator;
        denominator += goodnessNumerator;
    }
    //New
    /*
    for (int i = 0; i < matrixDim; ++i)
    {
        if (isVisited[i]) { continue; }
        goodnessNumerators[i] /= denominator;
    }
    double random = curand_uniform_double(&m_randomState);
    for (int i = 0; i < matrixDim; ++i)
    {
        if (isVisited[i]) { continue; }
        random -= goodnessNumerators[i];
        if (random <= 0) { return i; }
    }
    return -1;
    */
    // Second pass: walk the cumulative distribution until it covers the draw.
    double sum = 0;
    double random = curand_uniform_double(&m_randomState);
    //printf("Random is %f\n", random);
    for (int i = 0; i < matrixDim; ++i)
    {
        if (isVisited[i]) { continue; }
        int possiblePosition = i;
        double numerator = goodnessNumerators[possiblePosition];
        double probability = numerator / denominator;
        if (random <= sum + probability)
        {
            return possiblePosition;
        }
        sum += probability;
    }
    return -1;
}

// Appends `next` to the route, marks it visited, and accumulates the
// tour distance for the hop position -> next.
__device__
void Ant::GoTo(int next, int* route, const double* distanceMatrix, int matrixDim)
{
    route[visitedIndex++] = next;
    isVisited[next] = true;
    distance += distanceMatrix[position * matrixDim + next];
    position = next;
}

// Re-arms the ant for a new tour starting at initialLocation.
__device__
void Ant::Reset(int* route, int initialLocation, int matrixDim)
{
    visitedIndex = 0;
    distance = 0;
    position = initialLocation;
    for (int i = 0; i < matrixDim; ++i) { isVisited[i] = false; }
    isVisited[position] = true;
    route[visitedIndex++] = initialLocation;
}
}
|
20,836 | #include "includes.h"
//function declaration
unsigned int getmax(unsigned int *, unsigned int);
//unsigned int getmaxSeq(unsigned int *, unsigned int);
// Block-level maximum reduction: each block writes the maximum of its
// slice of `num` into num[blockIdx.x] (the host finishes the reduction).
// Uses one shared slot per warp (up to 32 warps / 1024 threads per block).
__global__ void getmaxcu(unsigned int* num, int size, int threadCount)
{
    // Per-warp running maxima; unsigned so comparisons match the input
    // type and atomicMax applies directly.
    __shared__ unsigned int localBiggest[32];
    if (threadIdx.x==0) {
        for (int i = 0; i < 32; i++) {
            localBiggest[i] = 0;
        }
    }
    __syncthreads();

    int current = blockIdx.x *blockDim.x + threadIdx.x; // global element index
    int warp = threadIdx.x / 32;                        // this thread's warp within the block
    // FIX: the original compared and stored non-atomically, so the 32
    // threads of a warp (and warps sharing a slot) raced and could lose
    // the true maximum; atomicMax makes the update race-free.
    if (current < size) {
        atomicMax(&localBiggest[warp], num[current]);
    }
    // FIX: barrier so thread 0 sees every warp's final maximum before
    // reducing (the original relied on the racy writes having settled).
    __syncthreads();

    // Thread 0 reduces the per-warp maxima and publishes the block result.
    if (threadIdx.x==0) {
        unsigned int biggest = localBiggest[0];
        for (int i = 1; i < 32; i++) {
            if (biggest < localBiggest[i]) {
                biggest = localBiggest[i];
            }
        }
        num[blockIdx.x] = biggest;
    }
}
20,837 |
#include <cuda_runtime.h>
#include <cuda.h>
#include <curand.h>
#include <cuda_runtime_api.h>
#include <device_functions.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include<sys/time.h>
#include <sstream>
#include <iostream>
#include <fstream>
#include <iostream>
#include <stdio.h>
// printf() is only supported
// for devices of compute capability 2.0 and higher
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 200)
#define printf(f, ...) ((void)(f, __VA_ARGS__),0)
#endif
#define CURAND_CALL(x) do { if((x)!=CURAND_STATUS_SUCCESS) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
/// The number of points to generate within 0,1
#define NUM_BOIDS 1000
#define NUM_FRAMES 150
// Euclidean length of the 3-vector (v1, v2, v3).
__device__ float vectorMag_kernal(float _vector1, float _vector2, float _vector3)
{
    float sumSq = _vector1*_vector1 + _vector2*_vector2 + _vector3*_vector3;
    return sqrtf(sumSq);
}
// Steers boid `idx` toward a target velocity: writes into _sourceX/_sourceZ[idx]
// the normalized (target - current) difference scaled by a small steering force.
// Callers may alias _source with _current or _target; the differences are
// read into locals before any write, preserving that behaviour.
__device__ void steerBoid_kernal(float * _targetX, float * _targetZ, float * _currentX, float * _currentZ, float * _sourceX, float *_sourceZ)
{
    uint idx = blockIdx.x * blockDim.x + threadIdx.x;
    float steeringForce = 0.02;
    // FIX: the original declared two float[NUM_BOIDS] arrays in every
    // thread's local memory while only element [idx] was ever touched;
    // plain scalars compute the same values without the local-memory cost.
    float diffX = _targetX[idx] - _currentX[idx];
    float diffZ = _targetZ[idx] - _currentZ[idx];
    float mag = vectorMag_kernal(diffX, 0, diffZ);
    _sourceX[idx] = ( (diffX/mag)*steeringForce);
    _sourceZ[idx] = ( (diffZ/mag)*steeringForce);
}
// Bounces boids off the walls of the [-2, 2] x/z box: when a boid has
// crossed a wall while still moving outward, steer its velocity toward
// the mirrored velocity via steerBoid_kernal (which overwrites
// _velx/_velz[idx] in place — note the aliased source/current arguments).
// NOTE(review): desiredVelX/Z are per-thread local arrays of NUM_BOIDS
// floats indexed by the *global* thread id; only element [idx] is used,
// so _noBoids must not exceed NUM_BOIDS — confirm at the launch site.
__global__ void avoidBoundaries_kernal(float * _posx, float * _posz, float * _velx, float * _velz, int _noBoids)
{
    uint idx = blockIdx.x * blockDim.x + threadIdx.x;
    float desiredVelX[NUM_BOIDS];
    float desiredVelZ[NUM_BOIDS];
    float * desiredVelX_ptr = &desiredVelX[0];
    float * desiredVelZ_ptr = &desiredVelZ[0];
    if(idx<_noBoids)
    {
        // +z wall, still moving outward: mirror z velocity
        if(_posz[idx] >= 2 && _velz[idx] >0)
        {
            desiredVelX[idx] = _velx[idx];
            desiredVelZ[idx] = -_velz[idx];
            steerBoid_kernal(desiredVelX_ptr, desiredVelZ_ptr, _velx, _velz, _velx, _velz);
            // _velz[idx] = -_velz[idx];
        }
        // -z wall
        else if(_posz[idx] <= -2 && _velz[idx] <0)
        {
            desiredVelX[idx] = _velx[idx];
            desiredVelZ[idx] = -_velz[idx];
            steerBoid_kernal(desiredVelX_ptr, desiredVelZ_ptr, _velx, _velz, _velx, _velz);
            //_velz[idx] = -_velz[idx];
        }
        // +x wall: mirror x velocity
        else if(_posx[idx] >= 2 && _velx[idx] >0)
        {
            desiredVelX[idx] = -_velx[idx];
            desiredVelZ[idx] = _velz[idx];
            steerBoid_kernal(desiredVelX_ptr, desiredVelZ_ptr, _velx, _velz, _velx, _velz);
            //_velx[idx] = -_velx[idx];
        }
        // -x wall
        else if(_posx[idx] <= -2 && _velx[idx] <0)
        {
            desiredVelX[idx] = -_velx[idx];
            desiredVelZ[idx] = _velz[idx];
            steerBoid_kernal(desiredVelX_ptr, desiredVelZ_ptr, _velx, _velz, _velx, _velz);
            //_velx[idx] = -_velx[idx];
        }
    }
}
// Euler step: advance each boid's position by its velocity.
__global__ void updatePos_kernal(float * _posx, float * _posz, float * _velx, float * _velz, int _noBoids)
{
    uint idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx >= _noBoids)
        return;
    _posx[idx] += _velx[idx];
    _posz[idx] += _velz[idx];
}
// 2D euclidean distance between two boids in the x-z plane.
__device__ float distance_kernal(float _posx, float _posz, float _otherPosx, float _otherPosz)
{
    float dx = _posx - _otherPosx;
    float dz = _posz - _otherPosz;
    return sqrtf(dx*dx + dz*dz);
}
// Clamps each boid's speed to _limit while preserving its direction.
__global__ void limitVel_kernal(float _limit, float * _posx, float * _posz, float * _velx, float * _velz, const int _noBoids)
{
    uint idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx < _noBoids)
    {
        // FIX: the original kept a float[NUM_BOIDS] array in every
        // thread's local memory while using only element [idx]; a scalar
        // computes the same value without the local-memory footprint.
        float mag = sqrtf((_velx[idx]*_velx[idx]) + (_velz[idx]*_velz[idx]));
        if( mag > _limit)
        {
            _velx[idx] = (_velx[idx]/mag)*_limit;
            _velz[idx] = (_velz[idx]/mag)*_limit;
        }
    }
}
// Alignment rule: steer each boid toward the average velocity of its
// neighbours within radius 0.15. Expects a 2D launch where idx = boid
// and idy = candidate neighbour; accumulation uses atomics, then the
// idy == 0 threads finalize (average, normalize, steer via
// steerBoid_kernal, which writes _alignmentVector in place).
// NOTE(review): numberOfNeighbours is __shared__ (per block) while the
// alignment vectors are global and summed across all y-blocks; the reset
// and the __syncthreads() also only cover one block — this is only
// coherent if the whole idy range fits in a single block. Confirm the
// launch configuration. tmpX/tmpZ below are declared but never used.
__device__ void alignment_kernal(float * _alignmentVectorX, float * _alignmentVectorZ, float * _posx, float * _posz, float * _velx, float * _velz, int _numBoids)
{
    int const noBoids = _numBoids;
    __shared__ unsigned int numberOfNeighbours[NUM_BOIDS];
    float tmpX[NUM_BOIDS];
    float tmpZ[NUM_BOIDS];
    float mag[NUM_BOIDS];
    // current boid whos neighbours were looking for
    uint idx = blockIdx.x * blockDim.x + threadIdx.x;
    // neighbours of current boid
    uint idy = blockIdx.y * blockDim.y + threadIdx.y;
    if(idx < noBoids && idy < noBoids)
    {
        // reset values
        numberOfNeighbours[idx] = 0;
        // wait for threads to sync
        __syncthreads();
        if(idx != idy)
        {
            if(distance_kernal(_posx[idx], _posz[idx], _posx[idy], _posz[idy]) < 0.15)
            {
                // accumulate neighbour velocity and count atomically
                atomicAdd(&(_alignmentVectorX[idx]), _velx[idy]);
                atomicAdd(&(_alignmentVectorZ[idx]), _velz[idy]);
                atomicAdd(&numberOfNeighbours[idx], 1);
            }
        }
    }
    // wait for threads to sync
    __syncthreads();
    //limit to 1D: one finalizer thread per boid
    if(idy == 0 && idx< noBoids)
    {
        //avoid dividing by zero
        if(numberOfNeighbours[idx] > 0)
        {
            //find average velocity of the neighbourhood
            _alignmentVectorX[idx] = _alignmentVectorX[idx]/numberOfNeighbours[idx];
            _alignmentVectorZ[idx] = _alignmentVectorZ[idx]/numberOfNeighbours[idx];
            // normalize
            mag[idx] = norm3d(_alignmentVectorX[idx], 0.0f, _alignmentVectorZ[idx]);
            if(mag[idx] > 0)
            {
                _alignmentVectorX[idx] = (_alignmentVectorX[idx] / mag[idx]);
                _alignmentVectorZ[idx] = (_alignmentVectorZ[idx] / mag[idx]);
            }
            //steer toward the averaged direction (writes the vector in place)
            steerBoid_kernal(_alignmentVectorX, _alignmentVectorZ, _velx, _velz, _alignmentVectorX, _alignmentVectorZ);
        }
    }
}
// Separation rule: steer each boid away from neighbours closer than 0.1.
// Same 2D-launch pattern as alignment_kernal (idx = boid, idy = candidate
// neighbour): accumulate position differences atomically, then the
// idy == 0 threads average, negate (point away), normalize and steer.
// NOTE(review): as in alignment_kernal, the __shared__ counters and
// difference buffers are per block while the separation vectors are
// global — only coherent if the whole idy range fits in one block.
// tmpX/tmpZ below are declared but never used.
__device__ void seperation_kernal(float * _seperationVectorX, float * _seperationVectorZ, float * _posx, float * _posz, float * _velx, float * _velz, int _numBoids)
{
    int const noBoids = _numBoids;
    __shared__ float _diffVectorX[NUM_BOIDS];
    __shared__ float _diffVectorZ[NUM_BOIDS];
    __shared__ unsigned int numberOfNeighbours[NUM_BOIDS];
    float tmpX[NUM_BOIDS];
    float tmpZ[NUM_BOIDS];
    float mag[NUM_BOIDS];
    // current boid whos neighbours were looking for
    uint idx = blockIdx.x * blockDim.x + threadIdx.x;
    // neighbours of current boid
    uint idy = blockIdx.y * blockDim.y + threadIdx.y;
    if(idx < noBoids && idy < noBoids)
    {
        // reset values
        numberOfNeighbours[idx] = 0;
        _diffVectorX[idx] = 0;
        _diffVectorZ[idx] = 0;
        // wait for threads to sync
        __syncthreads();
        if(idx != idy)
        {
            if(distance_kernal(_posx[idx], _posz[idx], _posx[idy], _posz[idy]) < 0.1)
            {
                // accumulate the offset from this boid toward the neighbour
                atomicAdd(&(_diffVectorX[idx]), (_posx[idy]-_posx[idx]));
                atomicAdd(&(_diffVectorZ[idx]), (_posz[idy]-_posz[idx]));
                // normalise (make atomic)
                //_diffVectorX[idx] = _diffVectorX[idx] / norm3d(_diffVectorX[idx], 0.0f, _diffVectorZ[idx]);
                //_diffVectorZ[idx] = _diffVectorZ[idx] / norm3d(_diffVectorX[idx], 0.0f, _diffVectorZ[idx]);
                // add neighbours position to current boids part of the seperation vector
                atomicAdd(&(_seperationVectorX[idx]), _diffVectorX[idx]);
                atomicAdd(&(_seperationVectorZ[idx]), _diffVectorZ[idx]);
                atomicAdd(&numberOfNeighbours[idx], 1);
            }
        }
    }
    // wait for threads to sync
    __syncthreads();
    //limit to 1D: one finalizer thread per boid
    if(idy == 0 && idx< noBoids)
    {
        //avoid dividing by zero
        if(numberOfNeighbours[idx] > 0)
        {
            // tmpX[idx] = _seperationVectorX[idx]/numberOfNeighbours[idx];
            //tmpZ[idx] = _seperationVectorZ[idx]/numberOfNeighbours[idx];
            //find average offset, then negate so the force points away
            _seperationVectorX[idx] = _seperationVectorX[idx]/numberOfNeighbours[idx];
            _seperationVectorZ[idx] = _seperationVectorZ[idx]/numberOfNeighbours[idx];
            _seperationVectorX[idx] = ( _seperationVectorX[idx] * -1);
            _seperationVectorZ[idx] = ( _seperationVectorZ[idx] * -1);
            mag[idx] = norm3d(_seperationVectorX[idx], 0.0f, _seperationVectorZ[idx]);
            if(mag[idx]>0)
            {
                _seperationVectorX[idx] = (_seperationVectorX[idx] / mag[idx]);
                _seperationVectorZ[idx] = (_seperationVectorZ[idx] / mag[idx]);
            }
            steerBoid_kernal(_seperationVectorX, _seperationVectorZ, _velx, _velz, _seperationVectorX, _seperationVectorZ);
        }
    }
}
__device__ void cohesion_kernal(float * _cohesionVectorX, float * _cohesionVectorZ, float * _posx, float * _posz, float * _velx, float * _velz, int _numBoids)
{
    // Cohesion rule: steer each boid toward the average position of
    // neighbours closer than 0.2. Thread layout is 2D over (boid, boid):
    // idx = boid being updated, idy = candidate neighbour.
    //
    // Fixed vs. original: the per-thread float mag[NUM_BOIDS] array (only
    // element idx ever touched) is now a scalar -- it wasted local memory.
    int const noBoids = _numBoids;
    __shared__ unsigned int numberOfNeighbours[NUM_BOIDS];
    // current boid whose neighbours we are looking for
    uint idx = blockIdx.x * blockDim.x + threadIdx.x;
    // neighbours of current boid
    uint idy = blockIdx.y * blockDim.y + threadIdx.y;
    if(idx < noBoids && idy < noBoids)
    {
        // reset values (every idy thread writes the same 0 -- benign race)
        numberOfNeighbours[idx] = 0;
        _cohesionVectorX[idx] = 0;
        _cohesionVectorZ[idx] = 0;
        // NOTE(review): __syncthreads() inside a divergent guard is undefined
        // if part of the block fails the guard -- confirm block shape.
        __syncthreads();
        if(idx != idy)
        {
            if(distance_kernal(_posx[idx], _posz[idx], _posx[idy], _posz[idy]) < 0.2)
            {
                // add neighbour's position to this boid's part of the cohesion vector
                atomicAdd(&(_cohesionVectorX[idx]), _posx[idy]);
                atomicAdd(&(_cohesionVectorZ[idx]), _posz[idy]);
                atomicAdd(&numberOfNeighbours[idx], 1);
            }
        }
    }
    // wait for all accumulation to finish
    __syncthreads();
    // limit the reduction to one thread per boid (the idy == 0 lane)
    if(idy == 0 && idx < noBoids)
    {
        // avoid dividing by zero
        if(numberOfNeighbours[idx] > 0)
        {
            // average neighbour position ...
            float tmpX = _cohesionVectorX[idx]/numberOfNeighbours[idx];
            float tmpZ = _cohesionVectorZ[idx]/numberOfNeighbours[idx];
            _cohesionVectorX[idx] = tmpX;
            _cohesionVectorZ[idx] = tmpZ;
            // ... then take the offset from this boid to that average
            _cohesionVectorX[idx] = ( _cohesionVectorX[idx] - _posx[idx]);
            _cohesionVectorZ[idx] = ( _cohesionVectorZ[idx] - _posz[idx]);
            // normalise (guard against a zero-length vector)
            float mag = norm3d(_cohesionVectorX[idx], 0.0f, _cohesionVectorZ[idx]);
            if(mag > 0)
            {
                _cohesionVectorX[idx] = (_cohesionVectorX[idx] / mag);
                _cohesionVectorZ[idx] = (_cohesionVectorZ[idx] / mag);
            }
            steerBoid_kernal(_cohesionVectorX, _cohesionVectorZ, _velx, _velz, _cohesionVectorX, _cohesionVectorZ);
        }
    }
}
__global__ void flock_kernal(float * _cohesionVectorX, float * _cohesionVectorZ,float * _seperationVectorX, float * _seperationVectorZ, float * _alignmentVectorX, float * _alignmentVectorZ, float * _posx, float * _posz, float * _velx, float * _velz, int _numBoids)
{
    // Top-level flocking kernel: computes the three steering vectors
    // (cohesion, separation, alignment) and adds their sum to each boid's
    // velocity. Launched on a 2D (boid x boid) grid; only the idy == 0 lane
    // applies the final velocity update.
    //
    // Fixed vs. original: removed the unused per-thread float mag[NUM_BOIDS]
    // local array, which wasted local memory in every thread.
    uint idx = blockIdx.x * blockDim.x + threadIdx.x;
    uint idy = blockIdx.y * blockDim.y + threadIdx.y;
    if( idx <_numBoids)
    {
        // each device helper does its own accumulation + reduction
        cohesion_kernal(_cohesionVectorX, _cohesionVectorZ, _posx, _posz, _velx, _velz, _numBoids);
        seperation_kernal(_seperationVectorX, _seperationVectorZ, _posx, _posz, _velx, _velz, _numBoids);
        alignment_kernal(_alignmentVectorX, _alignmentVectorZ, _posx, _posz, _velx, _velz, _numBoids);
        // wait for threads to sync (don't apply the vectors until calculated)
        // NOTE(review): __syncthreads() inside this divergent guard is
        // undefined if part of the block fails idx < _numBoids -- confirm.
        __syncthreads();
        if(idy == 0)
        {
            _velx[idx]+= _cohesionVectorX[idx] + _seperationVectorX[idx] + _alignmentVectorX[idx];
            _velz[idx]+= _cohesionVectorZ[idx] + _seperationVectorZ[idx] + _alignmentVectorZ[idx];
        }
    }
}
void dumpGeo(uint _frameNumber, const thrust::device_vector <float> &_posX, const thrust::device_vector <float> &_posZ)
{
    // Write one Houdini .geo frame (ASCII PGEOMETRY V5) with the boid
    // positions as a particle system. Frame files are 1-based
    // (geo/flock_gpu.001.geo for _frameNumber == 0).
    //
    // Fixed vs. original: the device vectors are now taken by const reference.
    // Passing thrust::device_vector by value copied both whole GPU buffers on
    // every call for no benefit (each _posX[i] read below is a device-to-host
    // transfer either way).
    char fname[150];
    std::sprintf(fname,"geo/flock_gpu.%03d.geo",++_frameNumber);
    // buffer the whole frame in a stringstream, then dump it in one write
    std::stringstream ss;
    std::ofstream file;
    file.open(fname);
    if (!file.is_open())
    {
        std::cerr << "failed to Open file "<<fname<<'\n';
        exit(EXIT_FAILURE);
    }
    // write header see here http://www.sidefx.com/docs/houdini15.0/io/formats/geo
    ss << "PGEOMETRY V5\n";
    ss << "NPoints " << NUM_BOIDS << " NPrims 1\n";
    ss << "NPointGroups 0 NPrimGroups 1\n";
    // this is hard coded but could be flexible we have 1 attrib which is Colour
    ss << "NPointAttrib 1 NVertexAttrib 0 NPrimAttrib 2 NAttrib 0\n";
    // now write out our point attrib this case Cd for diffuse colour
    ss <<"PointAttrib \n";
    // default the colour to white
    ss <<"Cd 3 float 1 1 1\n";
    // now we write out the particle data in the format
    // x y z 1 (attrib so in this case colour)
    for(unsigned int i=0; i<NUM_BOIDS; ++i)
    {
        ss<<_posX[i]<<" "<<0<<" "<<_posZ[i] << " 1 ";
        ss<<"("<<std::abs(1)<<" "<<std::abs(1)<<" "<<std::abs(1)<<")\n";
    }
    // now write out the index values
    ss<<"PrimitiveAttrib\n";
    ss<<"generator 1 index 1 location1\n";
    ss<<"dopobject 1 index 1 /obj/AutoDopNetwork:1\n";
    ss<<"Part "<<NUM_BOIDS<<" ";
    for(size_t i=0; i<NUM_BOIDS; ++i)
    {
        ss<<i<<" ";
    }
    ss<<" [0 0]\n";
    ss<<"box_object1 unordered\n";
    ss<<"1 1\n";
    ss<<"beginExtra\n";
    ss<<"endExtra\n";
    // dump string stream to disk;
    file<<ss.rdbuf();
    file.close();
}
/**
* Fill an array with random floats using the CURAND function.
* \param devData The chunk of memory you want to fill with floats within the range (0,1]
* \param n The size of the chunk of data
* \author Richard Southern
*/
// Fill `devData` (a device pointer) with `n` uniform random floats in (0,1]
// using CURAND's default pseudo-random generator. Returns EXIT_SUCCESS;
// CURAND errors are handled by the CURAND_CALL macro (defined elsewhere in
// this file -- presumably it aborts/reports on failure; confirm).
int randFloats(float *&devData, const size_t n) {
    // The generator, used for random numbers
    curandGenerator_t gen;
    // Create pseudo-random number generator
    CURAND_CALL(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT));
    // Set seed to be the current time (note that calls close together will have same seed!)
    CURAND_CALL(curandSetPseudoRandomGeneratorSeed(gen, time(NULL)));
    // Generate n floats on device
    CURAND_CALL(curandGenerateUniform(gen, devData, n));
    // Cleanup
    CURAND_CALL(curandDestroyGenerator(gen));
    return EXIT_SUCCESS;
}
int main()
{
    // Boids flocking simulation driver: allocates per-axis position/velocity
    // and steering-vector buffers on the device, seeds positions and
    // velocities with CURAND, then runs NUM_FRAMES simulation steps, writing
    // a .geo file per frame.
    //
    // Fixed vs. original: deprecated cudaThreadSynchronize() replaced with
    // cudaDeviceSynchronize(); the avoidBoundaries/updatePos launches now use
    // the nThreads variable instead of a hard-coded 1024; explicit return 0.
    // stores point pos
    thrust::device_vector<float> m_dBoidsPosX;
    thrust::device_vector<float> m_dBoidsPosZ;
    // stores point velocity
    thrust::device_vector<float> m_dBoidsVelX;
    thrust::device_vector<float> m_dBoidsVelZ;
    // stores flocking vectors
    thrust::device_vector<float> m_dCohesionX;
    thrust::device_vector<float> m_dCohesionZ;
    thrust::device_vector<float> m_dSeperationX;
    thrust::device_vector<float> m_dSeperationZ;
    thrust::device_vector<float> m_dAlignmentX;
    thrust::device_vector<float> m_dAlignmentZ;
    // size everything for NUM_BOIDS boids
    m_dBoidsPosX.resize(NUM_BOIDS);
    m_dBoidsPosZ.resize(NUM_BOIDS);
    m_dBoidsVelX.resize(NUM_BOIDS);
    m_dBoidsVelZ.resize(NUM_BOIDS);
    m_dCohesionX.resize(NUM_BOIDS);
    m_dCohesionZ.resize(NUM_BOIDS);
    m_dSeperationX.resize(NUM_BOIDS);
    m_dSeperationZ.resize(NUM_BOIDS);
    m_dAlignmentX.resize(NUM_BOIDS);
    m_dAlignmentZ.resize(NUM_BOIDS);
    // one CURAND fill provides posX | posZ | velX | velZ in four slices
    thrust::device_vector <float> tmp_PosPnts(NUM_BOIDS*4);
    float * tmp_PosPnts_ptr = thrust::raw_pointer_cast(&tmp_PosPnts[0]);
    randFloats(tmp_PosPnts_ptr, NUM_BOIDS*4);
    // give random start positions
    m_dBoidsPosX.assign(tmp_PosPnts.begin(), tmp_PosPnts.begin() + NUM_BOIDS);
    m_dBoidsPosZ.assign(tmp_PosPnts.begin() + NUM_BOIDS, tmp_PosPnts.begin() + 2*NUM_BOIDS);
    // give random start vel
    m_dBoidsVelX.assign(tmp_PosPnts.begin() + 2*NUM_BOIDS, tmp_PosPnts.begin() + 3*NUM_BOIDS);
    m_dBoidsVelZ.assign(tmp_PosPnts.begin() + 3*NUM_BOIDS, tmp_PosPnts.begin() + 4*NUM_BOIDS);
    // raw device pointers for the kernels
    float * m_dBoidsPosX_ptr= thrust::raw_pointer_cast(&m_dBoidsPosX[0]);
    float * m_dBoidsPosZ_ptr= thrust::raw_pointer_cast(&m_dBoidsPosZ[0]);
    float * m_dBoidsVelX_ptr= thrust::raw_pointer_cast(&m_dBoidsVelX[0]);
    float * m_dBoidsVelZ_ptr= thrust::raw_pointer_cast(&m_dBoidsVelZ[0]);
    float * m_dCohesionX_ptr= thrust::raw_pointer_cast(&m_dCohesionX[0]);
    float * m_dCohesionZ_ptr= thrust::raw_pointer_cast(&m_dCohesionZ[0]);
    float * m_dSeperationX_ptr= thrust::raw_pointer_cast(&m_dSeperationX[0]);
    float * m_dSeperationZ_ptr= thrust::raw_pointer_cast(&m_dSeperationZ[0]);
    float * m_dAlignmentX_ptr= thrust::raw_pointer_cast(&m_dAlignmentX[0]);
    float * m_dAlignmentZ_ptr= thrust::raw_pointer_cast(&m_dAlignmentZ[0]);
    //UPDATE-----------------------------------------------------------------------------
    // 1D launch configuration for the per-boid kernels
    unsigned int nThreads = 1024;
    unsigned int nBlocks = NUM_BOIDS/ nThreads + 1;
    // 2D (boid x boid) launch configuration for the neighbour search
    unsigned int blockN = NUM_BOIDS / 32 + 1;
    dim3 block2(32, 32); // block of (X,Y) threads
    dim3 grid2(blockN, 1); // grid of blockN blocks
    for(int i = 0; i<NUM_FRAMES; i++)
    {
        // reset steering vectors each frame
        thrust::fill(m_dCohesionX.begin(), m_dCohesionX.begin() + NUM_BOIDS, 0);
        thrust::fill(m_dCohesionZ.begin(), m_dCohesionZ.begin() + NUM_BOIDS, 0);
        thrust::fill(m_dSeperationX.begin(), m_dSeperationX.begin() + NUM_BOIDS, 0);
        thrust::fill(m_dSeperationZ.begin(), m_dSeperationZ.begin() + NUM_BOIDS, 0);
        thrust::fill(m_dAlignmentX.begin(), m_dAlignmentX.begin() + NUM_BOIDS, 0);
        thrust::fill(m_dAlignmentZ.begin(), m_dAlignmentZ.begin() + NUM_BOIDS, 0);
        flock_kernal<<<grid2,block2>>>(m_dCohesionX_ptr, m_dCohesionZ_ptr, m_dSeperationX_ptr, m_dSeperationZ_ptr, m_dAlignmentX_ptr, m_dAlignmentZ_ptr, m_dBoidsPosX_ptr, m_dBoidsPosZ_ptr, m_dBoidsVelX_ptr, m_dBoidsVelZ_ptr, NUM_BOIDS);
        cudaDeviceSynchronize();
        limitVel_kernal<<<nBlocks,nThreads>>>(0.02, m_dBoidsPosX_ptr, m_dBoidsPosZ_ptr, m_dBoidsVelX_ptr, m_dBoidsVelZ_ptr, NUM_BOIDS);
        cudaDeviceSynchronize();
        avoidBoundaries_kernal<<<nBlocks,nThreads>>>(m_dBoidsPosX_ptr, m_dBoidsPosZ_ptr, m_dBoidsVelX_ptr, m_dBoidsVelZ_ptr, NUM_BOIDS);
        cudaDeviceSynchronize();
        updatePos_kernal<<<nBlocks,nThreads>>>(m_dBoidsPosX_ptr, m_dBoidsPosZ_ptr, m_dBoidsVelX_ptr, m_dBoidsVelZ_ptr, NUM_BOIDS);
        cudaDeviceSynchronize();
        dumpGeo(i, m_dBoidsPosX, m_dBoidsPosZ);
    }
    return 0;
}
|
20,838 | #define t_max 1
#define t 1
/*
(T[0][0][0][1][0]=((((T[0][0][0][0][0]*((c[0][0][0][0][1]*T[0][0][0][0][0])+c[0][0][0][0][2]))+c[0][0][0][0][3])+((c[0][0][0][0][4]*T[-1][0][0][0][0])+(c[0][0][0][0][5]*T[1][0][0][0][0])))+(((c[0][0][0][0][6]*T[0][-1][0][0][0])+(c[0][0][0][0][7]*T[0][1][0][0][0]))+((c[0][0][0][0][8]*T[0][0][-1][0][0])+(c[0][0][0][0][9]*T[0][0][1][0][0])))))
*/
/*
 * One stencil time step for the hyperthermia heat simulation: reads the
 * field at time t from T_0_0 and writes time t+1 into T_0_1, evaluating the
 * 7-point stencil with coefficient fields c_1_0..c_9_0 (the exact expression
 * is the comment above this kernel). With the file-level #defines t == 1 and
 * t_max == 1 the time loop is commented out and all index expressions below
 * are constant in t. Neighbouring _idxN offsets are derived incrementally
 * from _idx0; the commented /* _idxN = ... *​/ lines give the closed forms.
 * NOTE(review): the T_0_1_out parameter is never used in this body.
 */
__global__ void hyperthermia(float * * T_0_1_out, float * T_0_0, float * T_0_1, float * c_1_0, float * c_2_0, float * c_3_0, float * c_4_0, float * c_5_0, float * c_6_0, float * c_7_0, float * c_8_0, float * c_9_0, int x_max, int y_max, int z_max, int tbx, int tby, int tbz, int c)
{
/*
const float * const u__c_1[16] = { c_1_0 } ;
const float * const u__c_2[16] = { c_2_0 } ;
const float * const u__c_3[16] = { c_3_0 } ;
const float * const u__c_4[16] = { c_4_0 } ;
const float * const u__c_5[16] = { c_5_0 } ;
const float * const u__c_6[16] = { c_6_0 } ;
const float * const u__c_7[16] = { c_7_0 } ;
const float * const u__c_8[16] = { c_8_0 } ;
const float * const u__c_9[16] = { c_9_0 } ;
float * const u__T_0[16] = { T_0_0, T_0_1 } ;
*/
// flattened array offsets of the centre point and its six neighbours
int _idx0;
int _idx1;
int _idx2;
int _idx3;
int _idx4;
int _idx5;
int _idx6;
int _idx7;
int chunk_idx_x;
int chunk_idx_x_max;
int chunk_idx_y;
int chunk_idx_y_max;
int chunk_idx_z;
int chunk_idx_z_max;
int idx_1_2;
int size_1_1;
int size_1_2;
//int t;
int thd_idx_x;
int thd_idx_y;
int thd_idx_z;
int thdblks_idx_x;
int thdblks_idx_x_max;
int thdblks_idx_y;
int thdblks_idx_y_max;
int thdblks_idx_z;
int thdblks_idx_z_max;
int tmp;
/*
Initializations
*/
// y and z block indices are packed into blockIdx.y; unpack them here
size_1_1=(y_max/blockDim.y);
size_1_2=(z_max/blockDim.z);
idx_1_2=(blockIdx.y/size_1_2);
tmp=(blockIdx.y-(idx_1_2*size_1_2));
// each thread handles a chunk of c consecutive points along x
chunk_idx_x=(c*(threadIdx.x+(blockDim.x*blockIdx.x)));
chunk_idx_x_max=(chunk_idx_x+c);
chunk_idx_y=(threadIdx.y+(tmp*blockDim.y));
chunk_idx_y_max=(chunk_idx_y+1);
chunk_idx_z=(threadIdx.z+(idx_1_2*blockDim.z));
chunk_idx_z_max=(chunk_idx_z+1);
thdblks_idx_x=(tbx*(threadIdx.x+(blockDim.x*blockIdx.x)));
thdblks_idx_x_max=(thdblks_idx_x+tbx);
thdblks_idx_y=(tby*(threadIdx.y+(tmp*blockDim.y)));
thdblks_idx_y_max=(thdblks_idx_y+tby);
thdblks_idx_z=(tbz*(threadIdx.z+(idx_1_2*blockDim.z)));
thdblks_idx_z_max=(thdblks_idx_z+tbz);
/*
Implementation
*/
/*
for t = 1..t_max by 1 parallel 1 <level 0> schedule { ... }
*/
//for (t=1; t<=t_max; t+=1)
{
/* Index bounds calculations for iterators in thdblks[t=t, s=(tbx, tby, tbz)][0] */
/* Index bounds calculations for iterators in chunk[t=t, s=(c, 1, 1)][0] */
/*
for POINT thd[t=t, s=(1, 1, 1)][0] of size [1, 1, 1] in chunk[t=t, s=(:, :, :)][0] parallel 1 <level 2> schedule default { ... }
*/
{
/* Index bounds calculations for iterators in thd[t=t, s=(1, 1, 1)][0] */
thd_idx_z=chunk_idx_z;
thd_idx_y=chunk_idx_y;
// walk this thread's chunk of c points along x
for (thd_idx_x=chunk_idx_x; thd_idx_x<(chunk_idx_x_max-0); thd_idx_x+=1)
{
/* Index bounds calculations for iterators in thd[t=t, s=(1, 1, 1)][0] */
/*
u[t=(t+1), s=thd[t=?, s=?][0]][0]=stencil(u[t=t, s=thd[t=?, s=?][0]][0])
*/
/* _idx0 = ((((((((((((thd_idx_z+1)*x_max)+((2*t)*thd_idx_z))+(2*t))*y_max)+((((((2*t)*thd_idx_z)+thd_idx_y)+(2*t))+1)*x_max))+((4*(t*t))*thd_idx_z))+((2*t)*thd_idx_y))+thd_idx_x)+(4*(t*t)))+(2*t))+1) */
_idx0=((((((((((((thd_idx_z+1)*x_max)+((2*t)*thd_idx_z))+(2*t))*y_max)+((((((2*t)*thd_idx_z)+thd_idx_y)+(2*t))+1)*x_max))+((4*(t*t))*thd_idx_z))+((2*t)*thd_idx_y))+thd_idx_x)+(4*(t*t)))+(2*t))+1);
/* _idx1 = ((((thd_idx_z*x_max)*y_max)+(thd_idx_y*x_max))+thd_idx_x) */
_idx1=(((((((_idx0+(((( - x_max)-((2*t)*thd_idx_z))-(2*t))*y_max))+(((((-2*t)*thd_idx_z)-(2*t))-1)*x_max))-((4*(t*t))*thd_idx_z))-((2*t)*thd_idx_y))-(4*(t*t)))-(2*t))-1);
/* _idx2 = (((((((((((thd_idx_z+1)*x_max)+((2*t)*thd_idx_z))+(2*t))*y_max)+((((((2*t)*thd_idx_z)+thd_idx_y)+(2*t))+1)*x_max))+((4*(t*t))*thd_idx_z))+((2*t)*thd_idx_y))+thd_idx_x)+(4*(t*t)))+(2*t)) */
_idx2=(_idx0-1);
/* _idx3 = ((((((((((((thd_idx_z+1)*x_max)+((2*t)*thd_idx_z))+(2*t))*y_max)+((((((2*t)*thd_idx_z)+thd_idx_y)+(2*t))+1)*x_max))+((4*(t*t))*thd_idx_z))+((2*t)*thd_idx_y))+thd_idx_x)+(4*(t*t)))+(2*t))+2) */
_idx3=(_idx2+2);
/* _idx4 = (((((((((((thd_idx_z+1)*x_max)+((2*t)*thd_idx_z))+(2*t))*y_max)+(((((2*t)*thd_idx_z)+thd_idx_y)+(2*t))*x_max))+((4*(t*t))*thd_idx_z))+((2*t)*thd_idx_y))+thd_idx_x)+(4*(t*t)))+1) */
_idx4=((_idx0-x_max)-(2*t));
/* _idx5 = ((((((((((((thd_idx_z+1)*x_max)+((2*t)*thd_idx_z))+(2*t))*y_max)+((((((2*t)*thd_idx_z)+thd_idx_y)+(2*t))+2)*x_max))+((4*(t*t))*thd_idx_z))+((2*t)*thd_idx_y))+thd_idx_x)+(4*(t*t)))+(4*t))+1) */
_idx5=((_idx0+x_max)+(2*t));
/* _idx6 = (((((((((thd_idx_z*x_max)+((2*t)*thd_idx_z))*y_max)+(((((2*t)*thd_idx_z)+thd_idx_y)+1)*x_max))+((4*(t*t))*thd_idx_z))+((2*t)*thd_idx_y))+thd_idx_x)+(2*t))+1) */
_idx6=((((_idx2+((( - x_max)-(2*t))*y_max))-((2*t)*x_max))-(4*(t*t)))+1);
/* _idx7 = ((((((((((((thd_idx_z+2)*x_max)+((2*t)*thd_idx_z))+(4*t))*y_max)+((((((2*t)*thd_idx_z)+thd_idx_y)+(4*t))+1)*x_max))+((4*(t*t))*thd_idx_z))+((2*t)*thd_idx_y))+thd_idx_x)+(8*(t*t)))+(2*t))+1) */
_idx7=(((_idx0+((x_max+(2*t))*y_max))+((2*t)*x_max))+(4*(t*t)));
// u__T_0[t][_idx0]=((((u__T_0[(t-1)][_idx0]*((u__c_1[(t-1)][_idx1]*u__T_0[(t-1)][_idx0])+u__c_2[(t-1)][_idx1]))+u__c_3[(t-1)][_idx1])+((u__c_4[(t-1)][_idx1]*u__T_0[(t-1)][_idx2])+(u__c_5[(t-1)][_idx1]*u__T_0[(t-1)][_idx3])))+(((u__c_6[(t-1)][_idx1]*u__T_0[(t-1)][_idx4])+(u__c_7[(t-1)][_idx1]*u__T_0[(t-1)][_idx5]))+((u__c_8[(t-1)][_idx1]*u__T_0[(t-1)][_idx6])+(u__c_9[(t-1)][_idx1]*u__T_0[(t-1)][_idx7]))));
// the stencil itself: centre point, quadratic self term, and 6 neighbours
T_0_1[_idx0]=((((T_0_0[_idx0]*((c_1_0[_idx1]*T_0_0[_idx0])+c_2_0[_idx1]))+c_3_0[_idx1])+((c_4_0[_idx1]*T_0_0[_idx2])+(c_5_0[_idx1]*T_0_0[_idx3])))+(((c_6_0[_idx1]*T_0_0[_idx4])+(c_7_0[_idx1]*T_0_0[_idx5]))+((c_8_0[_idx1]*T_0_0[_idx6])+(c_9_0[_idx1]*T_0_0[_idx7]))));
}
}
}
}
/*
 * Initialisation kernel for the hyperthermia stencil: writes 0.1 into the
 * centre point and its six neighbours in the temperature field for time
 * step t-1 (== T_0_0, since t is #defined to 1), fills the nine coefficient
 * fields at the interior index with the constants 0.2..1.0, and writes 1.1
 * into the t (== T_0_1) field at the x+1 neighbour. The thread/chunk index
 * arithmetic mirrors the hyperthermia kernel above; the commented
 * /* _idxN = ... *​/ lines give the closed form each incremental expression
 * was derived from.
 */
__global__ void initialize(float * T_0_0, float * T_0_1, float * c_1_0, float * c_2_0, float * c_3_0, float * c_4_0, float * c_5_0, float * c_6_0, float * c_7_0, float * c_8_0, float * c_9_0, int x_max, int y_max, int z_max, int tbx, int tby, int tbz, int c)
{
// timestep-indexed views (only index 0, and 0/1 for T, are populated)
float * const u__c_1[16] = { c_1_0 } ;
float * const u__c_2[16] = { c_2_0 } ;
float * const u__c_3[16] = { c_3_0 } ;
float * const u__c_4[16] = { c_4_0 } ;
float * const u__c_5[16] = { c_5_0 } ;
float * const u__c_6[16] = { c_6_0 } ;
float * const u__c_7[16] = { c_7_0 } ;
float * const u__c_8[16] = { c_8_0 } ;
float * const u__c_9[16] = { c_9_0 } ;
float * const u__T_0[16] = { T_0_0, T_0_1 } ;
int _idx0;
int _idx1;
int _idx2;
int _idx3;
int _idx4;
int _idx5;
int _idx6;
int _idx7;
int chunk_idx_x;
int chunk_idx_x_max;
int chunk_idx_y;
int chunk_idx_y_max;
int chunk_idx_z;
int chunk_idx_z_max;
int idx_1_2;
int size_1_1;
int size_1_2;
//int t;
int thd_idx_x;
int thd_idx_y;
int thd_idx_z;
int thdblks_idx_x;
int thdblks_idx_x_max;
int thdblks_idx_y;
int thdblks_idx_y_max;
int thdblks_idx_z;
int thdblks_idx_z_max;
int tmp;
/*
Initializations
*/
// y and z block indices are packed into blockIdx.y; unpack them here
size_1_1=(y_max/blockDim.y);
size_1_2=(z_max/blockDim.z);
idx_1_2=(blockIdx.y/size_1_2);
tmp=(blockIdx.y-(idx_1_2*size_1_2));
// each thread handles a chunk of c consecutive points along x
chunk_idx_x=(c*(threadIdx.x+(blockDim.x*blockIdx.x)));
chunk_idx_x_max=(chunk_idx_x+c);
chunk_idx_y=(threadIdx.y+(tmp*blockDim.y));
chunk_idx_y_max=(chunk_idx_y+1);
chunk_idx_z=(threadIdx.z+(idx_1_2*blockDim.z));
chunk_idx_z_max=(chunk_idx_z+1);
thdblks_idx_x=(tbx*(threadIdx.x+(blockDim.x*blockIdx.x)));
thdblks_idx_x_max=(thdblks_idx_x+tbx);
thdblks_idx_y=(tby*(threadIdx.y+(tmp*blockDim.y)));
thdblks_idx_y_max=(thdblks_idx_y+tby);
thdblks_idx_z=(tbz*(threadIdx.z+(idx_1_2*blockDim.z)));
thdblks_idx_z_max=(thdblks_idx_z+tbz);
/*
Implementation
*/
/*
for t = 1..t_max by 1 parallel 1 <level 0> schedule { ... }
*/
//for (t=1; t<=t_max; t+=1)
{
/* Index bounds calculations for iterators in thdblks[t=t, s=(tbx, tby, tbz)][0] */
/* Index bounds calculations for iterators in chunk[t=t, s=(c, 1, 1)][0] */
/*
for POINT thd[t=t, s=(1, 1, 1)][0] of size [1, 1, 1] in chunk[t=t, s=(:, :, :)][0] parallel 1 <level 2> schedule default { ... }
*/
{
/* Index bounds calculations for iterators in thd[t=t, s=(1, 1, 1)][0] */
thd_idx_z=chunk_idx_z;
thd_idx_y=chunk_idx_y;
// walk this thread's chunk of c points along x
for (thd_idx_x=chunk_idx_x; thd_idx_x<(chunk_idx_x_max-0); thd_idx_x+=1)
{
/* Index bounds calculations for iterators in thd[t=t, s=(1, 1, 1)][0] */
/*
u[t=(t+1), s=thd[t=?, s=?][0]][0]=stencil(u[t=t, s=thd[t=?, s=?][0]][0])
*/
/* _idx0 = (((((((((((thd_idx_z+1)*x_max)+((2*t)*thd_idx_z))+(2*t))*y_max)+((((((2*t)*thd_idx_z)+thd_idx_y)+(2*t))+1)*x_max))+((4*(t*t))*thd_idx_z))+((2*t)*thd_idx_y))+thd_idx_x)+(4*(t*t)))+(2*t)) */
_idx0=(((((((((((thd_idx_z+1)*x_max)+((2*t)*thd_idx_z))+(2*t))*y_max)+((((((2*t)*thd_idx_z)+thd_idx_y)+(2*t))+1)*x_max))+((4*(t*t))*thd_idx_z))+((2*t)*thd_idx_y))+thd_idx_x)+(4*(t*t)))+(2*t));
u__T_0[(t-1)][_idx0]=0.1;
/* _idx1 = (((((((((((thd_idx_z+1)*x_max)+((2*t)*thd_idx_z))+(2*t))*y_max)+(((((2*t)*thd_idx_z)+thd_idx_y)+(2*t))*x_max))+((4*(t*t))*thd_idx_z))+((2*t)*thd_idx_y))+thd_idx_x)+(4*(t*t)))+1) */
_idx1=(((_idx0-x_max)-(2*t))+1);
u__T_0[(t-1)][_idx1]=0.1;
/* _idx2 = (((((((((thd_idx_z*x_max)+((2*t)*thd_idx_z))*y_max)+(((((2*t)*thd_idx_z)+thd_idx_y)+1)*x_max))+((4*(t*t))*thd_idx_z))+((2*t)*thd_idx_y))+thd_idx_x)+(2*t))+1) */
_idx2=((((_idx0+((( - x_max)-(2*t))*y_max))-((2*t)*x_max))-(4*(t*t)))+1);
u__T_0[(t-1)][_idx2]=0.1;
/* _idx3 = ((((((((((((thd_idx_z+1)*x_max)+((2*t)*thd_idx_z))+(2*t))*y_max)+((((((2*t)*thd_idx_z)+thd_idx_y)+(2*t))+1)*x_max))+((4*(t*t))*thd_idx_z))+((2*t)*thd_idx_y))+thd_idx_x)+(4*(t*t)))+(2*t))+1) */
_idx3=(_idx0+1);
u__T_0[(t-1)][_idx3]=0.1;
/* _idx4 = ((((thd_idx_z*x_max)*y_max)+(thd_idx_y*x_max))+thd_idx_x) */
_idx4=((((((_idx2-(((2*t)*thd_idx_z)*y_max))+((((-2*t)*thd_idx_z)-1)*x_max))-((4*(t*t))*thd_idx_z))-((2*t)*thd_idx_y))-(2*t))-1);
// coefficient fields are indexed on the unpadded interior grid
u__c_1[(t-1)][_idx4]=0.2;
u__c_2[(t-1)][_idx4]=0.30000000000000004;
u__c_3[(t-1)][_idx4]=0.4;
u__c_4[(t-1)][_idx4]=0.5;
u__c_5[(t-1)][_idx4]=0.6000000000000001;
u__c_6[(t-1)][_idx4]=0.7000000000000001;
u__c_7[(t-1)][_idx4]=0.8;
u__c_8[(t-1)][_idx4]=0.9;
u__c_9[(t-1)][_idx4]=1.0;
/* _idx5 = ((((((((((((thd_idx_z+2)*x_max)+((2*t)*thd_idx_z))+(4*t))*y_max)+((((((2*t)*thd_idx_z)+thd_idx_y)+(4*t))+1)*x_max))+((4*(t*t))*thd_idx_z))+((2*t)*thd_idx_y))+thd_idx_x)+(8*(t*t)))+(2*t))+1) */
_idx5=(((_idx3+((x_max+(2*t))*y_max))+((2*t)*x_max))+(4*(t*t)));
u__T_0[(t-1)][_idx5]=0.1;
/* _idx6 = ((((((((((((thd_idx_z+1)*x_max)+((2*t)*thd_idx_z))+(2*t))*y_max)+((((((2*t)*thd_idx_z)+thd_idx_y)+(2*t))+2)*x_max))+((4*(t*t))*thd_idx_z))+((2*t)*thd_idx_y))+thd_idx_x)+(4*(t*t)))+(4*t))+1) */
_idx6=((_idx3+x_max)+(2*t));
u__T_0[(t-1)][_idx6]=0.1;
/* _idx7 = ((((((((((((thd_idx_z+1)*x_max)+((2*t)*thd_idx_z))+(2*t))*y_max)+((((((2*t)*thd_idx_z)+thd_idx_y)+(2*t))+1)*x_max))+((4*(t*t))*thd_idx_z))+((2*t)*thd_idx_y))+thd_idx_x)+(4*(t*t)))+(2*t))+2) */
_idx7=(_idx0+2);
u__T_0[(t-1)][_idx7]=0.1;
u__T_0[t][_idx3]=1.1;
}
}
}
}
|
20,839 | /*
* main.cu
*
* Created on: Nov 14, 2019
* Author: cuda-s01
*/
#include <stdio.h>
#include <time.h>
// Edge length (in threads) of one shared-memory tile / thread block.
// NOTE(review): 2 gives 4-thread blocks, far below a warp (32) -- confirm
// this tiny tile size is intentional (e.g. for step-by-step debugging).
const int TILE_WIDTH = 2;
// Matrices are MATRIX_SIZE x MATRIX_SIZE floats, stored row-major.
const int MATRIX_SIZE = 800;
__global__ void matrixMultiplicationKernel(float* M, float* N, float* P, int Width) {
    // Tiled matrix multiply: P = M * N for Width x Width row-major matrices.
    // Each block computes one TILE_WIDTH x TILE_WIDTH tile of P, staging
    // tiles of M and N through shared memory.
    __shared__ float tileM[TILE_WIDTH][TILE_WIDTH];
    __shared__ float tileN[TILE_WIDTH][TILE_WIDTH];

    int row = blockIdx.y * blockDim.y + threadIdx.y;  // P row this thread owns
    int col = blockIdx.x * blockDim.x + threadIdx.x;  // P column this thread owns

    tileM[threadIdx.y][threadIdx.x] = 0.0;
    tileN[threadIdx.y][threadIdx.x] = 0.0;

    float acc = 0;
    int numTiles = (Width - 1) / TILE_WIDTH + 1;
    for (int tile = 0; tile < numTiles; tile++)
    {
        // Stage one tile of M and one of N, zero-padding out-of-range entries.
        if (tile * TILE_WIDTH + threadIdx.x < Width && row < Width)
            tileM[threadIdx.y][threadIdx.x] = M[row * Width + tile * TILE_WIDTH + threadIdx.x];
        else
            tileM[threadIdx.y][threadIdx.x] = 0.0;
        if (tile * TILE_WIDTH + threadIdx.y < Width && col < Width)
            tileN[threadIdx.y][threadIdx.x] = N[(tile * TILE_WIDTH + threadIdx.y) * Width + col];
        else
            tileN[threadIdx.y][threadIdx.x] = 0.0;
        __syncthreads();  // tiles fully staged before any thread reads them

        for (int e = 0; e < TILE_WIDTH; e++)
            acc += tileM[threadIdx.y][e] * tileN[e][threadIdx.x];
        __syncthreads();  // finish reading before the next iteration restages
    }
    if (row < Width && col < Width)
    {
        P[row * Width + col] = acc;
    }
}
// Single-threaded reference multiply: P = M * N, all Width x Width row-major.
// Fixed vs. original: the old version copied M, N and a zero matrix into
// three `float X[size][size]` stack VLAs -- ~7.3 MB of stack for size == 800,
// which overflows typical stack limits. The product is now accumulated
// directly from M and N into P with no temporaries; results are identical
// (same summation order per element).
void multiply(float* M, float* N, float* P, int size) {
    for (int i = 0; i < size; ++i) {
        for (int j = 0; j < size; ++j) {
            float acc = 0;  // P[i][j] accumulator
            for (int k = 0; k < size; ++k) {
                acc += M[size * i + k] * N[size * k + j];
            }
            P[size * i + j] = acc;
        }
    }
}
// Host wrapper: launches the tiled multiply kernel for Width x Width matrices.
// Fixed vs. original: grid size was `Width/TILE_WIDTH + 1`, which launches a
// whole extra (all-idle) row/column of blocks whenever Width is a multiple of
// TILE_WIDTH. Proper ceiling division still covers every element (the kernel
// bounds-checks the tail) without the wasted blocks.
void matrixMultiplication(float *M, float *N, float *P, int Width){
    int th = TILE_WIDTH;
    int bl = (Width + TILE_WIDTH - 1) / TILE_WIDTH;  // ceil(Width / TILE_WIDTH)
    dim3 threadsPerBlock(th,th,1);
    dim3 blocksPerGrid(bl,bl,1);
    printf("Kernel started: (%d,%d,1) grid, (%d,%d,1) blocks.\n", bl,bl, th,th);
    matrixMultiplicationKernel<<<blocksPerGrid,threadsPerBlock>>>(M, N, P, Width);
}
// Dump a Width x Width row-major matrix to stdout, one row per line,
// followed by a blank separator line.
void PrintMatrix(float* M, int Width)
{
    for (int row = 0; row < Width; row++)
    {
        for (int col = 0; col < Width; col++)
        {
            printf("%f ", M[row * Width + col]);
        }
        printf("\n");
    }
    printf("\n");
}
int main(void)
{
    // Driver: multiplies two random MATRIX_SIZE x MATRIX_SIZE matrices on the
    // GPU (managed memory), verifies against an on-the-fly CPU check, then
    // against the single-threaded multiply(), logging timings to log.txt.
    //
    // Fixed vs. original: (1) the GPU timing took end=clock() right after the
    // asynchronous kernel launch, so it measured launch latency only -- a
    // cudaDeviceSynchronize() now precedes the end timestamp; (2) matrix R
    // was allocated but never freed.
    printf("Starting the program:\n");
    cudaError_t err = cudaSuccess;
    int matrix_size = MATRIX_SIZE;
    int num_of_elements = matrix_size * matrix_size;
    size_t size = num_of_elements * sizeof(float);
    FILE *fp;
    fp=fopen("log.txt", "a");
    fprintf(fp, "Multiplicating matrix %d x %d.\n", matrix_size, matrix_size);
    //==========================Shared Memory============================================
    //allocate matrixes on the device:
    printf("Allocating matrices on the device...\n");
    float *M;
    err = cudaMallocManaged((void**)&M, size);
    if(err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate M matrix!\n");
        exit(EXIT_FAILURE);
    } else printf("M Allocation successful.\n");
    float *N;
    err = cudaMallocManaged((void**)&N, size);
    if(err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate N matrix!\n");
        exit(EXIT_FAILURE);
    } else printf("N Allocation successful.\n");
    float *P;
    err = cudaMallocManaged((void**)&P, size);
    if(err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate P matrix!\n");
        exit(EXIT_FAILURE);
    } else printf("P Allocation successful.\n");
    float *R;
    err = cudaMallocManaged((void**)&R, size);
    if(err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate R matrix!\n");
        exit(EXIT_FAILURE);
    } else printf("R Allocation successful.\n");
    //initialisation with values in [0,1]:
    for(int i=0; i<num_of_elements; i++)
    {
        M[i] = rand()/(float)RAND_MAX;
        N[i] = rand()/(float)RAND_MAX;
    }
    printf("Initialisation finished.\n");
    //calculations (sync before stopping the clock so we time the kernel,
    //not just its asynchronous launch):
    clock_t start=clock();
    matrixMultiplication(M, N, P, matrix_size);
    err = cudaDeviceSynchronize();
    clock_t end=clock();
    double time_elapsed=((double) (end - start)) / CLOCKS_PER_SEC;
    if(err == cudaSuccess)
        err = cudaGetLastError();   // catch launch-config errors too
    if(err != cudaSuccess)
    {
        fprintf(stderr, "Failed to launch kernel. Error: %s.\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    } else fprintf(fp, "Kernel operations successful. Time elapsed: %lf s.\n", time_elapsed);
    //==========================TEST===============================================
    PrintMatrix(M, matrix_size);
    PrintMatrix(N, matrix_size);
    PrintMatrix(P, matrix_size);
    for(int i = 0; i < matrix_size; i++)
    {
        for(int j = 0; j < matrix_size; j++)
        {
            float tmp = 0;
            for(int k = 0; k < matrix_size; k++)
                tmp += M[i*matrix_size + k] * N[k*matrix_size + j];
            if(fabs(tmp - P[i*matrix_size + j]) > 1e-3)
            {
                fprintf(stderr, "Verification test failed!\nElement at index (%d, %d) should be %f, but is %f. \n",
                        i,j,tmp,P[i*matrix_size + j]);
                exit(EXIT_FAILURE);
            }
        }
    }
    fprintf(fp, "Verification test PASSED, multi-threaded calculations are correct.\n");
    //======================== Single-threaded calculations =====================
    start=clock();
    multiply(M, N, R, MATRIX_SIZE);
    end=clock();
    time_elapsed=end-start;
    //============================ SINGLE-THREADED TEST ==========================
    int isCorrect=1;
    for(int i=0;i<matrix_size;i++) {
        for (int j=0;j<matrix_size;j++) {
            if(fabs(P[i*matrix_size + j]-R[i*matrix_size+j]) > 1e-3) {
                isCorrect=0; }
        }
    }
    printf("%d", isCorrect);
    if (isCorrect==1) {
        fprintf(fp, "Comparision test PASSED, single-threaded calculations are correct.\n");
    } else {
        fprintf(fp, "Comparision test failed, matrices are not identical");
    }
    fprintf(fp, "Time elapsed for single-threaded calculations: %lf s.\n\n", time_elapsed/CLOCKS_PER_SEC);
    fclose(fp);
    PrintMatrix(R, matrix_size);
    printf("Freeing memory...\n");
    // Free device global memory
    err = cudaFree(M);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device matrix M (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaFree(N);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device matrix N (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaFree(P);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device matrix P (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaFree(R);  // was leaked in the original
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device matrix R (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    printf("Memory freed successfully.\n");
    return 0;
}
|
20,840 | extern "C" {
// Set every element of y[0..len) to `elem`; one thread per element.
__global__ void fill_u8(unsigned char *y, unsigned char elem, unsigned int len) {
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= len) return;  // grid tail guard
    y[gid] = elem;
}
// Set every element of y[0..len) to `elem`; one thread per element.
__global__ void fill_u32(unsigned int *y, unsigned int elem, unsigned int len) {
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= len) return;  // grid tail guard
    y[gid] = elem;
}
// Convert bytes to floats in [0,1] by dividing by 255; one thread per element.
__global__ void u8_to_f32(const unsigned char* x, float* y, unsigned int len) {
    const float scale = 1.0f / 255.0f;
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= len) return;  // grid tail guard
    y[gid] = scale * x[gid];
}
// Scatter class labels x[i] in [0, nclasses) into one-hot rows of y.
// Only the hot entry is written -- y is presumably zero-filled beforehand
// (e.g. via fill kernels above); confirm at the call site.
__global__ void u8_to_one_hot_f32(const unsigned char* x, unsigned int nclasses, float* y, unsigned int len) {
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= len) return;  // grid tail guard
    y[gid * nclasses + x[gid]] = 1.0f;
}
// Tile the c-element vector x across y[0..len): y[i] = x[i mod c].
__global__ void broadcast(const float* x, float* y, unsigned int c, unsigned int len) {
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= len) return;  // grid tail guard
    y[gid] = x[gid % c];
}
// Gradient of broadcast: fold each dy[i] back into dx[i mod c].
// atomicAdd is required because len/c threads target each dx slot.
__global__ void broadcast_backward(float* dx, const float* dy, unsigned int c, unsigned int len) {
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= len) return;  // grid tail guard
    atomicAdd(&dx[gid % c], dy[gid]);
}
// Elementwise sum: y = x1 + x2 over len floats; one thread per element.
__global__ void add(const float* x1, const float* x2, float* y, unsigned int len) {
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= len) return;  // grid tail guard
    y[gid] = x1[gid] + x2[gid];
}
// Per-sample softmax cross-entropy, one thread per sample (row of nclasses).
// Uses the max-subtraction trick for numerical stability; the final store is
// y[i] = (log(sum_j exp(x_j - m)) - (x_i - m)) * t_i, i.e. -t_i*log(softmax_i)
// elementwise (caller presumably sums over classes afterwards -- confirm).
__global__ void cross_entropy_forward(unsigned int batch_size, unsigned int nclasses, const float* x, const float* t, float* y) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < batch_size) {
        // compute max value of slice (for the stable-softmax shift)
        float m = x[tid*nclasses];
        for(int i = 1; i < nclasses; ++i) {
            m = fmaxf(x[tid*nclasses+i], m);
        }
        // subtract max (stage shifted logits in y)
        for(int i = 0; i < nclasses; ++i) {
            y[tid*nclasses+i] = x[tid*nclasses+i]-m;
        }
        // sum of exponentials of the shifted logits
        float s = 0.0f;
        for(int i = 0; i < nclasses; ++i) {
            s += expf(y[tid*nclasses+i]);
        }
        // compute ln(s)
        float ln_s = logf(s);
        // y = (ln_s - y) * t, overwriting the staged shifted logits
        for(int i = 0; i < nclasses; ++i) {
            y[tid*nclasses+i] = (ln_s - y[tid*nclasses+i]) * t[tid*nclasses+i];
        }
    }
}
// Gradient of softmax cross-entropy: dx = dy[0] * (x - t), elementwise.
// dy[0] is the scalar upstream gradient of the reduced loss.
__global__ void cross_entropy_backward(const float* x, float* dx, const float* t, float* dy, unsigned int len) {
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= len) return;  // grid tail guard
    dx[gid] = dy[0] * (x[gid] - t[gid]);
}
// Tree reduction: each block sums 2*blockDim.x input elements into
// output[blockIdx.x]. Requires blockDim.x <= 256 (shared buffer is 2*256
// floats) and a follow-up pass (reduce_sum_final) over the per-block sums.
__global__ void reduce_sum_partial(const float* input, float* output, unsigned int len) {
    // from http://www.techdarting.com/2014/06/parallel-reduction-in-cuda.html
    // Load a segment of the input vector into shared memory
    __shared__ float partialSum[2*256];
    int globalThreadId = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int t = threadIdx.x;
    unsigned int start = 2*blockIdx.x*blockDim.x;
    // each thread stages two elements, zero-padding past the end of input
    if ((start + t) < len)
    {
        partialSum[t] = input[start + t];
    }
    else
    {
        partialSum[t] = 0.0;
    }
    if ((start + blockDim.x + t) < len)
    {
        partialSum[blockDim.x + t] = input[start + blockDim.x + t];
    }
    else
    {
        partialSum[blockDim.x + t] = 0.0;
    }
    // Traverse reduction tree: halve the active stride each pass, with a
    // whole-block barrier before every level (uniform across the block).
    for (unsigned int stride = blockDim.x; stride > 0; stride /= 2)
    {
        __syncthreads();
        if (t < stride)
            partialSum[t] += partialSum[t + stride];
    }
    __syncthreads();
    // Write the computed sum of the block to the output vector at correct index
    if (t == 0 && (globalThreadId*2) < len)
    {
        output[blockIdx.x] = partialSum[t];
    }
}
// Serial final pass: sums the len per-block partials in x into *y.
// Intended to be launched <<<1,1>>>.
// Fixed vs. original: every thread of the launch wrote and read *y, so any
// configuration other than a single thread raced on global memory; the work
// is now guarded to one thread and accumulated in a register, with a single
// store at the end (identical result for the intended <<<1,1>>> launch).
__global__ void reduce_sum_final(const float* x, float* y, unsigned int len) {
    if (blockIdx.x == 0 && threadIdx.x == 0) {
        float acc = 0.0f;
        for (unsigned int i = 0; i < len; ++i) {
            acc += x[i];
        }
        *y = acc;
    }
}
// Reverse each of the len filters of length filter_len from x into y:
// y[f][i] = x[f][filter_len-1-i], optionally blended with beta * old y
// (beta == 0 overwrites; beta != 0 accumulates). One thread per filter.
__global__ void reverse_conv_filter(const float* x, float beta, float* y, unsigned int filter_len, unsigned int len) {
    int gid = blockIdx.x*blockDim.x + threadIdx.x;
    if (gid >= len) return;  // grid tail guard
    const float* src = x + gid * filter_len;
    float* dst = y + gid * filter_len;
    if (beta == 0.0f) {
        for (unsigned int i = 0; i < filter_len; ++i) {
            dst[i] = src[(filter_len - 1) - i];
        }
    } else {
        for (unsigned int i = 0; i < filter_len; ++i) {
            dst[i] = src[(filter_len - 1) - i] + beta * dst[i];
        }
    }
}
// SGD with classical momentum: v <- momentum*v + dw, then w <- w - lr*v.
// One thread per parameter; v is persistent velocity state.
__global__ void sgd_with_momentum(float* w, const float* dw, float learning_rate, float momentum, float* v, unsigned int len) {
    int gid = blockIdx.x*blockDim.x + threadIdx.x;
    if (gid >= len) return;  // grid tail guard
    float vel = momentum * v[gid] + dw[gid];
    v[gid] = vel;
    w[gid] -= learning_rate * vel;
}
}
|
20,841 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#define N (1024 * 64)
// Elementwise c = a + b over N ints, using a grid-stride loop so any
// launch configuration covers the whole array.
__global__ void add(int* a, int* b, int* c)
{
    int stride = blockDim.x * gridDim.x;
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < N; i += stride)
    {
        c[i] = a[i] + b[i];
    }
}
int main()
{
    // Vector-add demo: fills a and b on the host, adds them on the GPU,
    // and verifies the result on the host.
    //
    // Fixed vs. original: the three N-element int arrays (256 KB each,
    // 768 KB total) lived on main's stack -- over the 1 MB default stack on
    // the MSVC toolchain this file targets. They are now static. Also added
    // a launch-error check and an explicit return value.
    static int a[N];
    static int b[N];
    static int c[N];
    int* dev_a;
    int* dev_b;
    int* dev_c;
    // Allocate memory for the GPU arrays
    cudaMalloc(&dev_a, N * sizeof(int));
    cudaMalloc(&dev_b, N * sizeof(int));
    cudaMalloc(&dev_c, N * sizeof(int));
    // Fill a and b with some "random" numbers
    for (int i = 0; i < N; i++)
    {
        a[i] = i;
        b[i] = i * i;
    }
    // Copy a and b to the gpu
    cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice);
    // Perform the addition (grid-stride kernel covers all N elements)
    add<<<128, 128>>>(dev_a, dev_b, dev_c);
    cudaError_t err = cudaGetLastError();  // catch bad launch configs
    if (err != cudaSuccess)
    {
        printf("Kernel launch failed: %s\n", cudaGetErrorString(err));
    }
    // Copy back the result to host (blocking copy also synchronizes)
    cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost);
    // Verify calculation
    bool success = true;
    for (int i = 0; i < N; i++)
    {
        if ((a[i] + b[i]) != c[i])
        {
            printf("Error at index: %d, %d + %d != %d\n", i, a[i], b[i], c[i]);
            success = false;
        }
    }
    if (success)
    {
        printf("Vector addition successful!\n");
    }
    //free memory
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return 0;
}
20,842 | #include <thrust/host_vector.h>
#include <iostream>

#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/random.h>
int my_rand() {
    // Deterministic pseudo-random int in [0, 9999]; the engine is static,
    // so successive calls walk the same fixed sequence every run.
    static thrust::default_random_engine engine;
    static thrust::uniform_int_distribution<int> uniform(0, 9999);
    return uniform(engine);
}
int main() {
    // Fill a host vector with 100 pseudo-random values.
    thrust::host_vector<int> h_vec(100);
    thrust::generate(h_vec.begin(), h_vec.end(), my_rand);
    // Transfer to the device and reduce there (initial value 0, plus).
    thrust::device_vector<int> d_vec = h_vec;
    int sum = thrust::reduce(d_vec.begin(), d_vec.end(), 0, thrust::plus<int>());
    std::cout << "Sum is " << sum << std::endl;
    return 0;
}
|
20,843 | #include <cstdio>
#include <cmath>
#define OCCUPIED(board, field) ((board) & (1L<<(field)))
#define ON_BOARD(field) (0 <= (field) && (field) < 64)
#define EVALUATE(p1, p2) ((builtin_popcount(p1))-(builtin_popcount(p2)))
extern "C" {
const int INF = 128;
const int BOARD_SIZE = 8;
const int WARP_SIZE = 32;
const int MAX_DEPTH = 1;
const int STACK_SIZE = MAX_DEPTH * BOARD_SIZE * BOARD_SIZE;
__device__ void print(long long int X) {
    // Debug helper: dump X both as a decimal and as an 8x8 bitboard,
    // row by row (bit i*8+j is row i, column j).
    printf(">>%lld\n", X);
    for (int row = 0; row < BOARD_SIZE; ++row) {
        for (int col = 0; col < BOARD_SIZE; ++col) {
            const bool set = (X & (1L << (row * BOARD_SIZE + col))) != 0;
            printf(set ? "1" : "0");
        }
        printf("\n");
    }
}
__device__ int builtin_popcount(long long int x) {
    // Population count (number of set bits) of a 64-bit value.
    // __popcll maps to a single hardware instruction and replaces the
    // original 64-iteration loop; the unsigned cast also avoids the
    // signed shift 1L<<63 the old loop performed at i == 63.
    return __popcll((unsigned long long)x);
}
/* args: Boards[N][2], N-count of boards, PossibleMoves[N][64], Results[N], player_token-'O' or 'X' */
// Negamax DFS over Reversi positions, one warp (32 threads) per board.
// Boards[i] / Boards[N+i] hold the two bitboards of board i; each thread
// owns two of the 64 fields. Per-board stacks live in shared memory,
// indexed by board_id % 4 (so at most 4 boards per block).
// Results[i] receives the root negamax value.
__global__ void generate_moves(long long *Boards, int N, long long *PossibleMoves, int *Results) {
// Direction deltas: X = column step, Y = row step pre-multiplied by
// BOARD_SIZE, so X[i] + Y[i] is the flat-index step for direction i.
int X[8] = {-1, -1, 0, 1, 1, 1, 0, -1};
int Y[8] = {0, -BOARD_SIZE, -BOARD_SIZE, -BOARD_SIZE, 0, BOARD_SIZE, BOARD_SIZE, BOARD_SIZE};
// 28KB
__shared__ long long int S[4][STACK_SIZE][2];
__shared__ int Result[4][STACK_SIZE];
__shared__ int Parent[4][STACK_SIZE];
__shared__ int Depth[4][STACK_SIZE];
__shared__ int Size[4];
/* INDICES */
int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
int board_id = thread_id / WARP_SIZE;
// NOTE(review): this early return is taken warp-uniformly (whole warps
// map to one board), but the __syncthreads() below then runs with fewer
// than blockDim.x threads when board_id >= N for part of a block —
// confirm launch config keeps all warps of a block in range.
if ( board_id >= N ) return;
int field_id = (int) threadIdx.x % WARP_SIZE;
int idx_board = board_id % 4;
/* COPY INPUT BOARD */
if (field_id == 0) {
S[idx_board][0][0] = Boards[board_id];
S[idx_board][0][1] = Boards[N + board_id];
Depth[idx_board][0] = MAX_DEPTH;
Parent[idx_board][0] = -1;
Result[idx_board][0] = -INF;
Size[idx_board] = 1;
}
__syncthreads();
/* DFS */
while (Size[idx_board] != 0) {
int end = Size[idx_board] - 1;  // top of this board's stack
long long player_pawns = S[idx_board][end][0];
long long opponent_pawns = S[idx_board][end][1];
int parent = Parent[idx_board][end];
int depth = Depth[idx_board][end];
// pop_vertex is per-thread but only ever set by field_id == 0; the other
// lanes of the warp still execute the move-generation section for a
// popped vertex (wasted work, results are simply not pushed).
bool pop_vertex = false;
if (depth == 0 && field_id == 0) Result[idx_board][end] = EVALUATE(player_pawns, opponent_pawns);
__syncthreads();
// terminal: propagate the negated score to the parent and pop
if (field_id == 0 && Result[idx_board][end] != -INF) {
if(parent != -1) Result[idx_board][parent] = max(Result[idx_board][parent], -Result[idx_board][end]);
Size[idx_board] -= 1;
pop_vertex = true;
}
__syncthreads();
// visit current node: seed its score with the static evaluation
if (field_id == 0 && !pop_vertex) Result[idx_board][end] = EVALUATE(player_pawns, opponent_pawns);
// thread #idInBoard processes fields idInBoard and idInBoard + 32
if(!pop_vertex)
for (int k = 0; k < 2; ++k) {
int field = WARP_SIZE * k + field_id;
// Move cannot be applied if the field is occupied
if (!OCCUPIED(player_pawns, field) && !OCCUPIED(opponent_pawns, field)) { //check whether field is free
bool flag = false;
long long tmp = 0;
// Try all 8 directions
for (int i = 0; i < 8; ++i) {
// Direction of the move.
// NOTE(review): flat-index stepping does not detect wrap-around at the
// left/right board edges (column 0 stepping -1 lands on the previous
// row) — confirm this is compensated elsewhere.
int shift = X[i] + Y[i];
int opponents_field = field + shift;
int j = 0;
// Continue as long as fields in the row are occupied by the opponent
while (ON_BOARD(opponents_field) && OCCUPIED(opponent_pawns, opponents_field)) {
j++;
opponents_field += shift;
}
// If move is possible, Reversi!
// NOTE(review): j > 1 requires at least TWO flanked opponent pawns;
// standard Reversi needs only one (j > 0) — verify intent.
if (ON_BOARD(opponents_field) && OCCUPIED(player_pawns, opponents_field) && j > 1) {
// Reversi!
while (opponents_field != field) {
tmp |= 1L << opponents_field; //all gained fields
opponents_field -= shift;
}
flag = true;
}
}
// Place the new pawn
tmp |= 1L << field;
// Avoid if: tmp *= flag zeroes tmp when no direction flipped anything
tmp *= flag;
PossibleMoves[board_id * 64 + field] = tmp; //save gained fields
}
}
__syncthreads();
//zero-thread in board pushes possible moves onto stack
if (field_id == 0 && !pop_vertex) {
for (int i = 0; i < BOARD_SIZE; ++i) {
for (int j = 0; j < BOARD_SIZE; ++j) {
long long move = PossibleMoves[board_id * 64 + i * BOARD_SIZE + j];
if (move) {
int top = Size[idx_board];
Size[idx_board] = top + 1;
// Child position is stored with the sides swapped (negamax):
// slot [1] gets the mover's pawns plus the gained fields,
// slot [0] the opponent's pawns with the flipped ones removed.
S[idx_board][top][1] = player_pawns | move; //old fields + gained
S[idx_board][top][0] = opponent_pawns ^ move; //old fields xor gained by opponent player
Parent[idx_board][top] = end;
Result[idx_board][top] = -INF;
Depth[idx_board][top] = depth - 1;
}
}
}
}
__syncthreads();
}
// Root value for this board (every lane writes the same value).
Results[board_id] = Result[idx_board][0];
}
}
|
20,844 | //xfail:ASSERTION_ERROR
//--blockDim=1024 --gridDim=1
__global__ void foo(int *H) {
    // GPUVerify regression case (see the //xfail header above): the pointer
    // H is round-tripped through an integer, advanced by sizeof(int), cast
    // back, and stepped back one element — so G == H and each thread writes
    // H[threadIdx.x]. The verifier is expected to report an assertion error
    // here; do not "fix" this kernel.
    size_t tmp = (size_t)H;
    tmp += sizeof(int);
    int *G = (int *)tmp;
    G -= 1;
    G[threadIdx.x] = threadIdx.x;
}
|
20,845 | /*
from http://http.developer.nvidia.com/GPUGems3/gpugems3_ch37.html
*/
/*
* Random nubmers on the GPU
*
*
* float RandUniform(unsigned *seeds, unsigned stride); // float, [0.0 1.0)
* unsigned RandUniformui(unsigned *seeds, unsigned stride); // unsigned, [0, RAND_MAX]
* float RandNormal(unsigned *seeds, unsigned stride); // float, gaussian mean = 0 std = 1
*
* seeds must point to 4 unsigned values with the specified stride
*/
__device__ __host__ inline unsigned TausStep(unsigned &z, int S1, int S2, int S3, unsigned M)
{
    // One step of a Tausworthe generator: updates the state word z in
    // place and returns the new value.
    unsigned feedback = ((z << S1) ^ z) >> S2;
    z = ((z & M) << S3) ^ feedback;
    return z;
}
__device__ __host__ inline unsigned LCGStep(unsigned &z, unsigned A, unsigned C)
{
    // One linear-congruential step, mod 2^32 via unsigned wraparound;
    // updates z in place and returns the new value.
    z = A * z + C;
    return z;
}
/* generate a random number, uses an array of 4 unsigned ints */
__device__ __host__ inline float HybridTaus(unsigned *z, unsigned stride)
{
return 2.3283064365387e-10 * (float)(
TausStep(z[0], 13, 19, 12, 4294967294UL) ^
TausStep(z[stride], 2, 25, 4, 4294967288UL) ^
TausStep(z[2*stride], 3, 11, 17, 4294967280UL) ^
LCGStep(z[3*stride], 16654525, 1013904223UL)
);
}
__device__ __host__ inline unsigned HybridTausui(unsigned *z, unsigned stride)
{
    // Unsigned variant of HybridTaus: same state update, raw 32-bit
    // XOR-combined output in [0, RAND_MAX] without the float scaling.
    unsigned t0 = TausStep(z[0],          13, 19, 12, 4294967294UL);
    unsigned t1 = TausStep(z[stride],      2, 25,  4, 4294967288UL);
    unsigned t2 = TausStep(z[2*stride],    3, 11, 17, 4294967280UL);
    unsigned l3 = LCGStep (z[3*stride], 16654525, 1013904223UL);
    return t0 ^ t1 ^ t2 ^ l3;
}
/*
Take two random [0,1) floats and produce 2 independent, gaussians with mean=0, var=1
*/
#define PI 3.14159265358979f
//__device__ inline void BoxMuller(float& u1, float& u2){
// float r = sqrtf(-2.0f * logf(u1));
// float phi = 2 * PI * u2;
// u1 = r * __cosf(phi);
// u2 = r * __sinf(phi);
//}
__device__ __host__ inline void BoxMuller(float& u1, float& u2){
    // Box-Muller transform: maps two uniform samples to two independent
    // standard normals, written back through u1/u2. u1 must be in (0, 1]
    // for logf to be finite.
    const float radius = sqrtf(-2.0f * logf(u1));
    const float angle = 2 * PI * u2;
    u1 = radius * cosf(angle);
    u2 = radius * sinf(angle);
}
// Uniform float in [0.0, 1.0); thin forwarder to the hybrid generator.
// Advances all four strided state words of z.
__device__ __host__ inline float RandUniform(unsigned *z, unsigned stride)
{
return HybridTaus(z, stride);
}
// Uniform unsigned in [0, RAND_MAX]; thin forwarder to the hybrid
// generator. Advances all four strided state words of z.
__device__ __host__ inline unsigned RandUniformui(unsigned *z, unsigned stride)
{
return HybridTausui(z, stride);
}
__device__ __host__ inline float RandNorm(unsigned *z, unsigned stride)
{
    // One standard-normal sample (mean 0, std 1): draw two uniforms and
    // apply the cosine branch of the Box-Muller transform; the sine
    // branch (a second independent normal) is discarded.
    const float u1 = RandUniform(z, stride);
    const float u2 = RandUniform(z, stride);
    return sqrtf(-2.0f * logf(u1)) * cosf(2 * PI * u2);
}
|
20,846 | //-----------------------------------------------------------------------------
//
//-----------------------------------------------------------------------------
#include <stdio.h>
#include <time.h>
#include <cuda.h>
const int MIN_SIZE=1280;   // smallest matrix dimension benchmarked
const int MAX_SIZE=10000;  // largest matrix dimension benchmarked
const int STEP_SIZE=256;   // dimension increment between benchmark runs
//-----------------------------------------------------------------------------
// Host-side matrix storage shared by matrixMultiply() and main().
// a, b, c: full n x n operands/result. a1/a2, b1/b2, c1..c4: half-size
// pieces used by main() when a full n x n pass would exceed device memory.
//-----------------------------------------------------------------------------
float* a;
float* b;
float* c;
float* a1;
float* b1;
float* c1;
float* a2;
float* b2;
float* c2;
float* c3;
float* c4;
int n;  // current matrix dimension; read implicitly by matrixMultiply()
//-----------------------------------------------------------------------------
//
//-----------------------------------------------------------------------------
// Tiled matrix multiply: cd = ad * bd for n x n matrices, using 32x32
// shared-memory tiles. Launch with 32x32 blocks and a ((n+31)/32)^2 grid.
// NOTE(review): s = n / 32 drops any remainder, so columns/rows beyond the
// last full 32-tile are ignored — correct only when n is a multiple of 32
// (main() uses multiples of 256). The __syncthreads() calls sit inside the
// (x<n)&(y<n) branch; that is only safe because the launch config makes the
// guard true for every thread — confirm before reusing with other grids.
__global__ void kernelFunc(float* ad, float* bd, float* cd, int n) {
__shared__ float as[32][32];
__shared__ float bs[32][32];
int tx = threadIdx.x;
int ty = threadIdx.y;
int x = (blockIdx.x * blockDim.x) + tx;  // output column
int y = (blockIdx.y * blockDim.y) + ty;  // output row
if ((x<n)&(y<n) )
{
float v = 0.0f;
int yn = y * n;
int s = n / 32;  // number of full 32-wide tiles along the shared dimension
for(int m=0; m<s; m++) {
int m32 = m * 32;
// Cooperative load of one tile from each operand.
as[ty][tx] = ad[yn + (m32 + tx)];
bs[ty][tx] = bd[(m32 + ty) * n + x];
__syncthreads();
for(int i=0; i<32; i++) {
v += as[ty][i] * bs[i][tx];
}
__syncthreads();
}
cd[yn + x] = v;
}
}
//-----------------------------------------------------------------------------
//
//-----------------------------------------------------------------------------
// Multiplies the global n x n matrices a * b into the global c on the GPU
// and reports host<->device copy bandwidths (MB/s) via the out-parameters.
// NOTE(review): the pointer parameters ad/bd/cd are immediately overwritten
// by cudaMalloc and the copies below read the GLOBALS a and b and write the
// GLOBAL c — the arguments are effectively ignored. In main()'s tiled path,
// matrixMultiply(a1,b1,c1,...) therefore never fills c1..c4 and reads
// unfilled a/b. Fixing this requires dimension parameters (the halves are
// not n x n), so it is flagged here rather than changed.
void matrixMultiply(float* ad,float* bd,float* cd,float *H2DBandWidthInMBs,float *D2HBandWidthInMbs) {
cudaMalloc((void**)&ad, n * n * sizeof(float));
cudaMalloc((void**)&bd, n * n * sizeof(float));
cudaMalloc((void**)&cd, n * n * sizeof(float));
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
cudaMemcpy(ad, a, n * n * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(bd, b, n * n * sizeof(float), cudaMemcpyHostToDevice);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time, start, stop );
cudaEventDestroy( start );
cudaEventDestroy( stop );
// 2*n*n floats moved host->device; time is in ms, hence the 1e3 factor.
*H2DBandWidthInMBs = (1e3f * 2*n*n* sizeof(float)) /
(time* (float)(1 << 20));
//printf("HostToDevice bandwidthInMBs %f\n",H2DBandWidthInMBs );
//printf("cudaMemcpyHostToDevice(ms) %f\n", 1000*(tc1 - tc0) / (float) CLOCKS_PER_SEC);
dim3 block(32, 32);
dim3 grid((n+31)/32, (n+31)/32);
kernelFunc<<<grid, block>>>(ad, bd, cd, n);
float time1;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
// The D2H copy also implicitly waits for the kernel (same stream).
cudaMemcpy(c, cd, n * n * sizeof(float), cudaMemcpyDeviceToHost);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time1, start, stop );
cudaEventDestroy( start );
cudaEventDestroy( stop );
*D2HBandWidthInMbs = (1e3f * n*n* sizeof(float)) /
(time1* (float)(1 << 20));
//printf("DeviceToHostbandwidthInMBs %f\n",D2HBandWidthInMbs );
//printf("cudaMemcpyDeviceToHost(ms) %f\n", 1000*(tg1 - tg0) / (float) CLOCKS_PER_SEC);
cudaFree(ad);
cudaFree(bd);
cudaFree(cd);
}
//-----------------------------------------------------------------------------
//
//-----------------------------------------------------------------------------
void fill(float* data, int size) {
    // Populate with uniform pseudo-randoms in [0, 1]; sequence is fixed
    // by the caller's srand() seed.
    for (int idx = 0; idx < size; ++idx) {
        data[idx] = rand() / (float) RAND_MAX;
    }
}
//-----------------------------------------------------------------------------
//
//-----------------------------------------------------------------------------
void save(char* name, int n, float H2DBandWidthInMBs,float D2HBandWidthInMbs) {
    // Append one bandwidth-measurement line for matrix size n to
    // "runtime-mm-<name>.txt".
    char fname[128];
    sprintf(fname, "runtime-mm-%s.txt", name);
    FILE* f = fopen(fname, "a");
    fprintf(f, "size of matrix%d : H2DBandWidthInMBs %f D2HBandWidthInMbs %f \n",
            n, H2DBandWidthInMBs, D2HBandWidthInMbs);
    fclose(f);
}
//-----------------------------------------------------------------------------
//
//-----------------------------------------------------------------------------
int main(int argc, char** argv) {
// Start the result file fresh for this run.
remove("runtime-mm-gpu-optimized.txt");
int devicenum,dev;
struct cudaDeviceProp p;
cudaGetDeviceCount(&devicenum);
for (dev=0;dev<devicenum;dev++){
cudaGetDeviceProperties(&p, dev);
// NOTE(review): the format string "\%s\"" has a stray escape and an
// unmatched quote in the output — looks like a typo.
printf("\nDevice %d: \%s\"\n",dev,p.name);
}
cudaGetDeviceProperties(&p, 0);
printf("maxGridSize: %d %d %d\n", p.maxGridSize[0], p.maxGridSize[1], p.maxGridSize[2]);
printf("maxThreadsDim: %d %d %d\n", p.maxThreadsDim[0], p.maxThreadsDim[1], p.maxThreadsDim[2]);
printf("maxThreadsPerBlock: %d\n", p.maxThreadsPerBlock);
printf("warpSize: %d\n", p.warpSize);
// NOTE(review): totalGlobalMem and sharedMemPerBlock are size_t; %d is
// the wrong specifier (should be %zu).
printf("totalGlobalMem(MB): %d\n", p.totalGlobalMem/1024/1024);
printf("sharedMemPerBlock: %d\n", p.sharedMemPerBlock);
float H2DBandWidthInMBs,D2HBandWidthInMbs;
// Sweep matrix sizes; "*8" budgets roughly double buffers + slack.
for(n=MIN_SIZE; n<=MAX_SIZE; n+=STEP_SIZE) {
a = (float*)malloc(n * n * sizeof(float));
b = (float*)malloc(n * n * sizeof(float));
c = (float*)malloc(n * n * sizeof(float));
if(n*n*sizeof(float)*8<=p.totalGlobalMem){
srand(0);
fill(a, n * n);
fill(b, n * n);
printf("gpu N*N=%d\n",n);
//clock_t t0 = clock();
matrixMultiply(a,b,c,&H2DBandWidthInMBs,&D2HBandWidthInMbs);
//clock_t t1 = clock();
//printf("gpu time(ms)%f\n",(1000*(t1-t0)/(float) CLOCKS_PER_SEC));
save("gpu-optimized", n, H2DBandWidthInMBs,D2HBandWidthInMbs);
}
// Too big for one pass: split into half-size pieces.
// NOTE(review): matrixMultiply ignores its arguments (see its header
// note), so this path re-measures the full a*b, not the halves.
if (n*n*sizeof(float)*8>p.totalGlobalMem){
a1 = (float*)malloc(n/2 * n * sizeof(float));a2 = (float*)malloc(n/2 * n * sizeof(float));
b1 = (float*)malloc(n* n/2 * sizeof(float));b2 = (float*)malloc(n * n/2 * sizeof(float));
c1 = (float*)malloc(n/2 * n/2 * sizeof(float));c2 = (float*)malloc(n/2 * n/2 * sizeof(float));
c3 = (float*)malloc(n/2 * n/2 * sizeof(float));c4 = (float*)malloc(n/2 * n/2 * sizeof(float));
// NOTE(review): this break exits before the frees at the bottom of the
// loop, leaking the current iteration's buffers (harmless at exit).
if (n/2*n*sizeof(float)*8> (p.totalGlobalMem-50*1024*1024))
break;
srand(0);
fill(a1, n/2 * n);fill(a2, n/2 * n);
fill(b1, n * n/2);fill(b2, n * n/2);
printf("gpu N*N %d\n",n);
//clock_t Mt0 = clock();
matrixMultiply(a1,b1,c1,&H2DBandWidthInMBs,&D2HBandWidthInMbs);
save("gpu-optimized", n, H2DBandWidthInMBs,D2HBandWidthInMbs);
matrixMultiply(a1,b2,c2,&H2DBandWidthInMBs,&D2HBandWidthInMbs);
save("gpu-optimized", n, H2DBandWidthInMBs,D2HBandWidthInMbs);
matrixMultiply(a2,b1,c3,&H2DBandWidthInMBs,&D2HBandWidthInMbs);
save("gpu-optimized", n, H2DBandWidthInMBs,D2HBandWidthInMbs);
matrixMultiply(a2,b2,c4,&H2DBandWidthInMBs,&D2HBandWidthInMbs);
save("gpu-optimized", n, H2DBandWidthInMBs,D2HBandWidthInMbs);
//clock_t Mt1 = clock();
//printf("gpu Matrix time(ms)%f\n",(1000*(Mt1-Mt0)/(float) CLOCKS_PER_SEC));
}
// a1..c4 are file-scope globals (zero-initialized), so free(NULL) here is
// safe on iterations that never took the split branch.
free(a);free(a2);free(a1);
free(b);free(b2);free(b1);
free(c);free(c2);free(c1);
free(c4);free(c3);
}
cudaThreadExit();
return 0;
}
|
20,847 | /*
* Uloha pro cviceni 3 - CUDA - B4M39GPU (zima 2020/2021):
*
* Napiste kernel, ktery otoci pole celych cisel:
*
* a) pro pripad kdy je vstupni pole i vystupni pole ulozeno v globalni pameti
* -> kernel reverseArrayI(int *devIn, int *devOut)
* pouzijte pouze jednorozmernou mrizku
*
* b) to same jako a), ale nyni pouzijte dvourozmernou mrizku
* -> kernel reverseArrayII(int *devIn, int *devOut)
*
* c) kazdy blok otoci svuj kus vstupniho pole ve sdilene pameti a vysledek zapise do globalni pameti sekvencne
* -> reverseArraySM(int *devIn, int *devOut)
*
* based on the code published in Dr Dobb's Online Journal:
* - by Rob Farber , May 13, 2008
* - CUDA, Supercomputing for the Masses: Part 3
* - http://drdobbs.com/high-performance-computing/207603131
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
// Fatal CUDA error reporter: on any failure, print the message with its
// source location and terminate the process.
static void handleCUDAError(
    cudaError_t error, // error code
    const char *file,  // file within error was generated
    int line )         // line where error occurs
{
    if (error == cudaSuccess)
        return;
    printf( "%s in %s at line %d\n", cudaGetErrorString( error ), file, line );
    exit( EXIT_FAILURE );
}
#define CHECK_ERROR( error ) ( handleCUDAError( error, __FILE__, __LINE__ ) )
// Kernel to reverse array directly in the global memory (1D grid).
__global__ void reverseArrayI(int *devIn, int *devOut) {
    // Source: plain linear index of this thread.
    int src = blockDim.x * blockIdx.x + threadIdx.x;
    // Destination: mirror both the block order across the grid and the
    // thread order within the block.
    int mirroredBlock = gridDim.x - 1 - blockIdx.x;
    int mirroredThread = blockDim.x - 1 - threadIdx.x;
    int dst = blockDim.x * mirroredBlock + mirroredThread;
    devOut[dst] = devIn[src]; // non-coalesced write
}
// Kernel to reverse array directly in the global memory (2D grid).
__global__ void reverseArrayII(int *devIn, int *devOut) {
    const int threadsPerBlock = blockDim.x * blockDim.y;
    // Linearized block ids: forward, and mirrored across the whole grid.
    int blockLin = blockIdx.y * gridDim.x + blockIdx.x;
    int blockMirr = (gridDim.y - 1 - blockIdx.y) * gridDim.x + (gridDim.x - 1 - blockIdx.x);
    // Linearized thread ids within the block: forward and mirrored.
    int threadLin = threadIdx.y * blockDim.x + threadIdx.x;
    int threadMirr = (blockDim.y - 1 - threadIdx.y) * blockDim.x + (blockDim.x - 1 - threadIdx.x);
    int src = blockLin * threadsPerBlock + threadLin;
    int dst = blockMirr * threadsPerBlock + threadMirr;
    devOut[dst] = devIn[src]; // non-coalesced write
}
// Kernel to reverse array using shared memory: coalesced on both the
// global read and the global write; only the shared-memory access is
// permuted. Requires blockDim.x * sizeof(int) bytes of dynamic shared
// memory (third launch parameter).
__global__ void reverseArraySM(int *devIn, int *devOut) {
    extern __shared__ int shData[];
    // Coalesced read: consecutive threads fetch consecutive elements and
    // park them mirrored inside the block's shared buffer.
    int src = blockDim.x * blockIdx.x + threadIdx.x;
    shData[blockDim.x - 1 - threadIdx.x] = devIn[src];
    // Every shared-memory write must land before any thread reads.
    __syncthreads();
    // Coalesced write: forward order within the block, block offsets
    // mirrored across the grid.
    int dst = blockDim.x * (gridDim.x - 1 - blockIdx.x) + threadIdx.x;
    devOut[dst] = shData[threadIdx.x];
}
// Driver: reverses a 1 MB int array on the GPU twice — once with the
// plain global-memory kernel, then back with the shared-memory kernel —
// asserting after each pass that the data matches expectations.
int main( int argc, char** argv) {
// pointer for host memory and size
int *hostArray;
int arraySize = 256 * 1024; // 256K elements (1MB total)
int *devIn, *devOut; // pointers for device memory
int numThreadsPerBlock = 256; // define grid and block size
// compute number of blocks needed based on array size and desired block size
// (arraySize is an exact multiple of the block size, so no tail handling)
int numBlocks = arraySize / numThreadsPerBlock;
// allocate host memory
size_t memSize = numBlocks * numThreadsPerBlock * sizeof(int);
hostArray = (int *) malloc(memSize);
// allocate device memory
CHECK_ERROR( cudaMalloc( (void **) &devIn, memSize ) );
CHECK_ERROR( cudaMalloc( (void **) &devOut, memSize ) );
// initialize input array on host
for (int i = 0; i < arraySize; i++)
hostArray[i] = i;
// copy host array to device array
CHECK_ERROR( cudaMemcpy( devIn, hostArray, memSize, cudaMemcpyHostToDevice ) );
// grid configuration
dim3 gridRes(numBlocks, 1, 1);
dim3 blockRes(numThreadsPerBlock, 1, 1);
// launch kernel - reverse array in global memory
reverseArrayI<<< gridRes, blockRes >>>( devIn, devOut );
// (2D-grid variant left disabled)
//dim3 gridResII(numBlocks/16, 16, 1);
//dim3 blockResII(16, numThreadsPerBlock/16, 1);
//reverseArrayII <<< gridResII, blockResII >> > (devIn, devOut);
CHECK_ERROR( cudaGetLastError() );
// block until the device has completed
cudaDeviceSynchronize();
// device to host copy
CHECK_ERROR( cudaMemcpy( hostArray, devOut, memSize, cudaMemcpyDeviceToHost ) );
// verify the data returned to the host is correct
for (int i = 0; i < arraySize; i++)
assert( hostArray[i] == arraySize - 1 - i );
// compute number of bytes of shared memory needed per block
int sharedMemSize = numThreadsPerBlock * sizeof(int);
// launch kernel - reverse array using shared memory; note the swapped
// arguments: this reverses devOut back into devIn, so the final check
// expects the ORIGINAL ascending sequence.
reverseArraySM<<< gridRes, blockRes, sharedMemSize >>>( devOut, devIn );
CHECK_ERROR( cudaGetLastError() );
// block until the device has completed
cudaDeviceSynchronize();
// device to host copy
CHECK_ERROR( cudaMemcpy( hostArray, devIn, memSize, cudaMemcpyDeviceToHost ) );
// verify the data returned to the host is correct
for (int i = 0; i < arraySize; i++)
assert( hostArray[i] == i );
// free device memory
CHECK_ERROR( cudaFree(devIn) );
CHECK_ERROR( cudaFree(devOut) );
// free host memory
free(hostArray);
return 0;
}
|
20,848 | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda_runtime.h>
#include <sys/time.h>
#include <time.h>
#define NUM_THREADS 743511 // length of calculation
#define BLOCK_SIZE 256 // number of threads per block used in gpu calc
#define EPS 0.00005 // Epsilon for tolerance of diffs between cpu and gpu calculations
#define INCLUDE_MEMTIME false // Decides whether to include memory transfers to and from gpu in gpu timing
#define PRINTLINES 0 // Number of lines to print in output during validation
// Per-element computation: d_out[i] = (d_in[i] / (d_in[i] - 2.3))^3.
// NOTE(review): there is no `gid < n` guard, so every launched thread
// reads and writes its slot. gpucalc() rounds the grid up to whole
// blocks, so the device buffers must be padded to num_blocks*BLOCK_SIZE
// elements for the tail threads to stay in bounds — confirm the caller
// allocates accordingly. The 2.3 literal and pow() promote to double.
__global__ void calcKernel(float* d_in, float *d_out) {
const unsigned int lid = threadIdx.x; // local id inside a block
const unsigned int gid = blockIdx.x*blockDim.x + lid; // global id
d_out[gid] = pow((d_in[gid] / ( d_in[gid] - 2.3 )),3); // do computation
}
// result = t2 - t1 at microsecond resolution; returns 1 when the
// difference is negative, 0 otherwise.
int timeval_subtract( struct timeval* result,
                      struct timeval* t2,
                      struct timeval* t1) {
    const unsigned int usec_per_sec = 1000000;
    long int total_t2 = t2->tv_usec + usec_per_sec * t2->tv_sec;
    long int total_t1 = t1->tv_usec + usec_per_sec * t1->tv_sec;
    long int diff = total_t2 - total_t1;
    result->tv_sec = diff / usec_per_sec;
    result->tv_usec = diff % usec_per_sec;
    return (diff < 0);
}
// Runs calcKernel over num_threads elements and returns the elapsed time
// in microseconds — kernel only, or including the host<->device copies
// when INCLUDE_MEMTIME is set. mem_size is num_threads * sizeof(float).
unsigned int long gpucalc(float* h_in, float* h_out, unsigned int mem_size, unsigned int num_threads) {
    struct timeval t_start, t_end, t_diff;
    struct timeval t_startmem, t_endmem, t_diffmem;
    // device configuration
    unsigned int block_size = BLOCK_SIZE;
    unsigned int num_blocks = ((num_threads + (block_size - 1)) / block_size);
    // calcKernel has no bounds guard, so every launched thread writes its
    // slot; pad the device buffers up to a whole number of blocks so the
    // tail threads of the last block stay inside the allocation (the old
    // code allocated exactly mem_size and the tail threads ran off the
    // end of both buffers).
    unsigned int padded_size = num_blocks * block_size * sizeof(float);
    // allocate device memory
    float* d_in;
    float* d_out;
    cudaMalloc((void**)&d_in, padded_size);
    cudaMalloc((void**)&d_out, padded_size);
    gettimeofday(&t_startmem, NULL);
    // copy host memory to device (only the real payload)
    cudaMemcpy(d_in, h_in, mem_size, cudaMemcpyHostToDevice);
    // time and execute the kernel
    gettimeofday(&t_start, NULL);
    calcKernel<<< num_blocks, block_size >>>(d_in, d_out);
    cudaDeviceSynchronize();  // cudaThreadSynchronize() is deprecated
    gettimeofday(&t_end, NULL);
    // copy result from device to host (padding is never copied back)
    cudaMemcpy(h_out, d_out, sizeof(float)*num_threads, cudaMemcpyDeviceToHost );
    gettimeofday(&t_endmem, NULL);
    // clean up memory
    cudaFree(d_in); cudaFree(d_out);
    timeval_subtract(&t_diff, &t_end, &t_start);
    timeval_subtract(&t_diffmem, &t_endmem, &t_startmem);
    if (INCLUDE_MEMTIME) {
        return (t_diffmem.tv_sec*1e6+t_diffmem.tv_usec); // microseconds
    } else {
        return (t_diff.tv_sec*1e6+t_diff.tv_usec); // microseconds
    }
}
// CPU reference: h_out[i] = (h_in[i] / (h_in[i] - 2.3))^3 for calcsize
// elements; returns elapsed wall time in microseconds.
unsigned long int cpucalc(float* h_in, float* h_out, unsigned int calcsize) {
    struct timeval begin, finish, elapsed;
    gettimeofday(&begin, NULL);
    for (unsigned int idx = 0; idx < calcsize; ++idx) {
        // 2.3 promotes the division to double, matching the GPU kernel.
        double ratio = h_in[idx] / (h_in[idx] - 2.3);
        h_out[idx] = pow(ratio, 3);
    }
    gettimeofday(&finish, NULL);
    timeval_subtract(&elapsed, &finish, &begin);
    return elapsed.tv_sec*1e6 + elapsed.tv_usec; // microseconds
}
// Compares the GPU and CPU implementations element-wise and reports the
// maximum deviation plus timings for both.
int main(int argc, char** argv) {
    unsigned int num_threads = NUM_THREADS;
    unsigned int mem_size = num_threads*sizeof(float);
    unsigned long int cputime, gputime;
    float maxdev = 0;            // largest |cpu - gpu| difference seen
    unsigned int maxdevidx = 0;  // index where it occurred
    // allocate host memory
    float* h_in = (float*) malloc(mem_size);
    float* h_outgpu = (float*) malloc(mem_size);
    float* h_outcpu = (float*) malloc(mem_size);
    // initialize the memory with 1, 2, 3, ...
    for(unsigned int i = 0; i < num_threads; i++) {
        h_in[i] = (float)i+1;
    }
    // prepare timing and get the calculations done
    gputime = gpucalc(h_in, h_outgpu, mem_size, num_threads);
    cputime = cpucalc(h_in, h_outcpu, num_threads);
    // print and validate result
    int printevry = 1;
    if (PRINTLINES>0) {
        printevry = (NUM_THREADS / PRINTLINES);
        printf("cpu\t\tgpu\n");
    }
    for(unsigned int i=0; i<num_threads; i++) {
        if (i % printevry == 1) {printf("%.6f\t%.6f\n", h_outcpu[i], h_outgpu[i]);}
        // fabsf, not integer abs(): abs truncated the fractional
        // deviation to zero and made the EPS validation meaningless.
        float dev = fabsf(h_outcpu[i] - h_outgpu[i]);
        if (maxdev < dev) {
            maxdev = dev;
            maxdevidx = i;
        }
    }
    if (maxdev < EPS) {printf("VALID, max deviation: %.6f at calculation no. %d\n", maxdev, maxdevidx);}
    else {printf("INVALID, max deviation: %.6f\t at %d\n", maxdev, maxdevidx);}
    // %lu matches unsigned long int; the old %d was undefined behavior.
    printf("Time for cpu calculation: %lu microseconds (%.2f ms)\n", cputime, cputime/1000.0);
    printf("Time for gpu calculation: %lu microseconds (%.2f ms)\n", gputime, gputime/1000.0);
    // clean up memory
    free(h_in); free(h_outgpu); free(h_outcpu);
}
|
20,849 | #include <iostream>
int main() {
    // Emit the greeting, then report success to the shell.
    const char* greeting = "Hello world\n";
    std::cout << greeting;
    return 0;
} |
20,850 | /*
* This program uses the device CURAND API to calculate what
* proportion of pseudo - random ints have low bit set.
*/
# include <stdio.h>
# include <stdlib.h>
# include <cuda.h>
# include "curand_kernel.h"
# include <vector>
# define CUDA_CALL(x) do { if ((x) != cudaSuccess ) { \
printf (" Error at %s:%d\n", __FILE__ , __LINE__ ); \
return EXIT_FAILURE ;}} while (0) \
__global__ void setup_kernel ( curandState * state )
{
    // One curand state per thread of the 2D launch: fixed seed 1234 with
    // the flat thread index as the sequence number, so every thread gets
    // an independent stream.
    const int col = threadIdx.x + blockIdx.x * blockDim.x;
    const int row = threadIdx.y + blockIdx.y * blockDim.y;
    const int idx = col + row * blockDim.x * gridDim.x;
    curand_init(1234, idx, 0, &state[idx]);
}
__global__ void generate_kernel ( curandState* state , float* result )
{
    // The curand_normal draw is disabled in this version: each slot just
    // records its own flat index. The state copy is kept for parity with
    // the original sampling flow.
    const int col = threadIdx.x + blockIdx.x * blockDim.x;
    const int row = threadIdx.y + blockIdx.y * blockDim.y;
    const int idx = col + row * blockDim.x * gridDim.x;
    curandState localState = state[idx];
    // result[idx] = curand_normal(&localState);
    result[idx] = idx;
}
// Records every built-in index/dimension value at this thread's flat
// offset so the host can inspect the launch geometry afterwards.
__global__ void look( int* threadIdxx
, int* threadIdxy
, int* blockIdxx
, int* blockIdxy
, int* blockDimx
, int* blockDimy
, int* gridDimx
, int* gridDimy
, int* doffset
)
{
    const int cx = threadIdx.x + blockIdx.x * blockDim.x;
    const int cy = threadIdx.y + blockIdx.y * blockDim.y;
    const int o = cx + cy * blockDim.x * gridDim.x;
    threadIdxx[o] = threadIdx.x;
    threadIdxy[o] = threadIdx.y;
    blockIdxx[o] = blockIdx.x;
    blockIdxy[o] = blockIdx.y;
    blockDimx[o] = blockDim.x;
    blockDimy[o] = blockDim.y;
    gridDimx[o] = gridDim.x;
    gridDimy[o] = gridDim.y;
    doffset[o] = o;
}
// Launches the `look` kernel on a 5-block x (2x2)-thread grid, copies all
// recorded launch-geometry values back into std::vectors, and cleans up.
// Fixes over the original: devStates/devResults/doffset/hoffset were
// leaked, the int-array memcpys used sizeof(float), and `i` was unused.
int main (int argc , char * argv [])
{
    curandState * devStates ;
    float * devResults , * hostResults ;
    int gridsize = 5;
    int blocksize = 2;
    int num = gridsize*blocksize*blocksize;   // total threads in the launch
    dim3 block(gridsize);
    dim3 threads(blocksize,blocksize);
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop,0);
    // ideally we can use 8,5 Million threads to generate the random numbers
    hostResults = (float *) calloc (num, sizeof ( float));
    CUDA_CALL ( cudaMalloc (( void **)& devResults , num* sizeof ( float)));
    CUDA_CALL ( cudaMemset ( devResults , 0, num * sizeof (float)) );
    CUDA_CALL ( cudaMalloc (( void **)& devStates , num *sizeof ( curandState )));
    // One device + host array per built-in variable we want to inspect.
    int *dthreadIdxx, *dthreadIdxy, *dblockIdxx, *dblockIdxy,
        *dblockDimx, *dblockDimy, *dgridDimx, *dgridDimy, *doffset;
    CUDA_CALL ( cudaMalloc (( void **)& dthreadIdxx , num* sizeof ( int)));
    CUDA_CALL ( cudaMalloc (( void **)& dthreadIdxy , num* sizeof ( int)));
    CUDA_CALL ( cudaMalloc (( void **)& dblockIdxx , num* sizeof ( int)));
    CUDA_CALL ( cudaMalloc (( void **)& dblockIdxy , num* sizeof ( int)));
    CUDA_CALL ( cudaMalloc (( void **)& dblockDimx , num* sizeof ( int)));
    CUDA_CALL ( cudaMalloc (( void **)& dblockDimy , num* sizeof ( int)));
    CUDA_CALL ( cudaMalloc (( void **)& dgridDimx , num* sizeof ( int)));
    CUDA_CALL ( cudaMalloc (( void **)& dgridDimy , num* sizeof ( int)));
    CUDA_CALL ( cudaMalloc (( void **)& doffset , num* sizeof ( int)));
    int *hthreadIdxx = (int *) calloc (num, sizeof ( int));
    int *hthreadIdxy = (int *) calloc (num, sizeof ( int));
    int *hblockIdxx = (int *) calloc (num, sizeof ( int));
    int *hblockIdxy = (int *) calloc (num, sizeof ( int));
    int *hblockDimx = (int *) calloc (num, sizeof ( int));
    int *hblockDimy = (int *) calloc (num, sizeof ( int));
    int *hgridDimx = (int *) calloc (num, sizeof ( int));
    int *hgridDimy = (int *) calloc (num, sizeof ( int));
    int *hoffset = (int *) calloc (num, sizeof ( int));
    look<<<block,threads>>>( dthreadIdxx
    , dthreadIdxy
    , dblockIdxx
    , dblockIdxy
    , dblockDimx
    , dblockDimy
    , dgridDimx
    , dgridDimy
    , doffset);
    // These are int arrays; copy with sizeof(int) (same byte count as the
    // original sizeof(float), but type-correct).
    CUDA_CALL ( cudaMemcpy ( hthreadIdxx , dthreadIdxx, num *sizeof (int), cudaMemcpyDeviceToHost ));
    CUDA_CALL ( cudaMemcpy ( hthreadIdxy , dthreadIdxy, num *sizeof (int), cudaMemcpyDeviceToHost ));
    CUDA_CALL ( cudaMemcpy ( hblockIdxx , dblockIdxx , num *sizeof (int), cudaMemcpyDeviceToHost ));
    CUDA_CALL ( cudaMemcpy ( hblockIdxy , dblockIdxy , num *sizeof (int), cudaMemcpyDeviceToHost ));
    CUDA_CALL ( cudaMemcpy ( hblockDimx , dblockDimx , num *sizeof (int), cudaMemcpyDeviceToHost ));
    CUDA_CALL ( cudaMemcpy ( hblockDimy , dblockDimy , num *sizeof (int), cudaMemcpyDeviceToHost ));
    CUDA_CALL ( cudaMemcpy ( hgridDimx , dgridDimx , num *sizeof (int), cudaMemcpyDeviceToHost ));
    CUDA_CALL ( cudaMemcpy ( hgridDimy , dgridDimy , num *sizeof (int), cudaMemcpyDeviceToHost ));
    CUDA_CALL ( cudaMemcpy ( hoffset , doffset , num *sizeof (int), cudaMemcpyDeviceToHost ));
    std::vector<int> sthreadIdxx(hthreadIdxx,hthreadIdxx+num)
    , sthreadIdxy(hthreadIdxy,hthreadIdxy+num)
    , sblockIdxx(hblockIdxx,hblockIdxx+num)
    , sblockIdxy(hblockIdxy,hblockIdxy+num)
    , sblockDimx(hblockDimx,hblockDimx+num)
    , sblockDimy(hblockDimy,hblockDimy+num)
    , sgridDimx(hgridDimx,hgridDimx+num)
    , sgridDimy(hgridDimy,hgridDimy+num)
    , soffset(hoffset,hoffset+num);
    /* Cleanup (doffset, devResults and devStates were leaked before) */
    CUDA_CALL ( cudaFree ( dthreadIdxx));
    CUDA_CALL ( cudaFree ( dthreadIdxy));
    CUDA_CALL ( cudaFree ( dblockIdxx));
    CUDA_CALL ( cudaFree ( dblockIdxy));
    CUDA_CALL ( cudaFree ( dblockDimx));
    CUDA_CALL ( cudaFree ( dblockDimy));
    CUDA_CALL ( cudaFree ( dgridDimx));
    CUDA_CALL ( cudaFree ( dgridDimy));
    CUDA_CALL ( cudaFree ( doffset));
    CUDA_CALL ( cudaFree ( devResults ));
    CUDA_CALL ( cudaFree ( devStates ));
    free ( hostResults );
    free ( hthreadIdxx);
    free ( hthreadIdxy);
    free ( hblockIdxx);
    free ( hblockIdxy);
    free ( hblockDimx);
    free ( hblockDimy);
    free ( hgridDimx);
    free ( hgridDimy);
    free ( hoffset);
    return EXIT_SUCCESS ;
} |
20,851 | #include "cuda_runtime.h"
#include <iostream>
using namespace std;
__global__ void add(int *d_a,int *d_b,int *d_c){
    // Scalar addition on the device: *d_c = *d_a + *d_b.
    int sum = *d_a + *d_b;
    *d_c = sum;
}
int main(void){
    // Add two ints on the device: copy operands over, run a single-thread
    // kernel, copy the result back and print it.
    const int bytes = sizeof(int);
    int host_a = 4;
    int host_b = 6;
    int host_c = 0;
    int *dev_a, *dev_b, *dev_c;
    cudaMalloc((void **)&dev_a, bytes);
    cudaMalloc((void **)&dev_b, bytes);
    cudaMalloc((void **)&dev_c, bytes);
    cudaMemcpy(dev_a, &host_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, &host_b, bytes, cudaMemcpyHostToDevice);
    add<<<1,1>>>(dev_a, dev_b, dev_c);
    // Blocking copy also synchronizes with the kernel.
    cudaMemcpy(&host_c, dev_c, bytes, cudaMemcpyDeviceToHost);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    cout << host_c << endl;
    return 0;
}
|
20,852 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__ void add1(int *a, int *b, int *c){
    // One block per element: the block index selects the element.
    const int i = blockIdx.x;
    c[i] = a[i] + b[i];
}
__global__ void add2(int* a, int* b, int* c){
    // Single block: the thread index selects the element.
    const int i = threadIdx.x;
    c[i] = a[i] + b[i];
}
// Multi-block 1D launch: the global thread index selects the element.
// NOTE(review): there is no `idx < len` guard and no length parameter, so
// every launched thread writes its slot — the caller must size the
// allocations to cover the full grid (blocks * blockDim) or the tail
// threads of the last block write out of bounds.
__global__ void add3(int* a, int* b, int* c){
int idx = blockIdx.x*blockDim.x + threadIdx.x;
c[idx] = a[idx] + b[idx];
}
// Reads N element pairs from stdin and adds them three times, once per
// kernel variant. Fixes over the original: the ADD2 and ADD3 sections
// actually launch add2/add3 (both used to launch add1), the ADD3 block
// count uses integer ceil-division (ceilf(N/256) truncated first and gave
// 0 blocks for N < 256), device buffers are padded to whole 256-thread
// blocks so unguarded add3 tail threads stay in bounds, and the host
// arrays are freed.
int main(){
    int N;                  // number of elements
    int *d_a, *d_b, *d_c;   // device copies
    int size = sizeof(int);
    int *A, *B, *C;         // host vectors
    printf("Enter number of elements: ");
    scanf("%d",&N);
    A = (int*)malloc(sizeof(int)*N);
    B = (int*)malloc(sizeof(int)*N);
    C = (int*)malloc(sizeof(int)*N);
    printf("Enter elements a <space> b:\n");
    for(int i=0; i<N; i++){
        scanf("%d %d",&A[i],&B[i]);
    }
    // Pad device buffers up to a whole number of 256-thread blocks: add3
    // has no bounds guard, so the last block's tail threads must still
    // land inside the allocation.
    int padded = ((N + 255) / 256) * 256;
    cudaMalloc((void**)&d_a,size*padded);
    cudaMalloc((void**)&d_b,size*padded);
    cudaMalloc((void**)&d_c,size*padded);
    // setup input values
    cudaMemcpy(d_a,A,size*N,cudaMemcpyHostToDevice);
    cudaMemcpy(d_b,B,size*N,cudaMemcpyHostToDevice);
    printf("\n// LAUNCHING ADD1 //\n");
    // one block per element
    dim3 Blocks1(N,1,1);
    dim3 Threads1(1,1,1);
    add1<<<Blocks1,Threads1>>>(d_a,d_b,d_c);
    cudaMemcpy(C,d_c,size*N,cudaMemcpyDeviceToHost);
    printf("Result:\n");
    for(int i=0; i<N; i++)
        printf("%d ",C[i]);
    printf("\n// LAUNCHING ADD2 //\n");
    // one thread per element in a single block
    dim3 Blocks2(1,1,1);
    dim3 Threads2(N,1,1);
    add2<<<Blocks2,Threads2>>>(d_a,d_b,d_c);
    cudaMemcpy(C,d_c,size*N,cudaMemcpyDeviceToHost);
    printf("Result:\n");
    for(int i=0; i<N; i++)
        printf("%d ",C[i]);
    printf("\n// LAUNCHING ADD3 //\n");
    // multi-block launch with 256 threads per block
    dim3 Blocks3((N+255)/256,1,1);
    dim3 Threads3(256,1,1);
    add3<<<Blocks3,Threads3>>>(d_a,d_b,d_c);
    cudaMemcpy(C,d_c,size*N,cudaMemcpyDeviceToHost);
    printf("Result:\n");
    for(int i=0; i<N; i++)
        printf("%d ",C[i]);
    // Cleanup
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(A); free(B); free(C);
    return 0;
}
|
20,853 | /*This program implements the CUDA parallel version of matrix multiplication of two square matrices of equal size.
Shared Memory and thread granularity is used for optimizing performance.*/
#include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#define TILE_WIDTH 8 /*Block Dimension of TILE_WIDTH x TILE_WIDTH*/
#define WIDTH 4096 /*WIdth of square Matrix*/
#define GRAN 2 /*Granularity - Number of blocks merged. Verified for powers of 2 i.e. 2, 4, 8, ...*/
void Matrix_Mul_gran(float *, float *, float *, int);
/* Tiled matrix multiply d_P = d_M x d_N with thread granularity: each
   block computes GRAN vertically-stacked TILE_WIDTH x TILE_WIDTH output
   tiles, so one shared-memory load of d_M is reused for GRAN products.
   NOTE(review): the loads are not bounds-checked, so Width must be a
   multiple of TILE_WIDTH*GRAN -- confirm at call sites. */
__global__ void Matrix_Mul_gran_Kernel(float* d_M, float* d_N, float* d_P, int Width)
{
int i, j, k;
/*allocate shared memory*/
__shared__ float ds_M[TILE_WIDTH*GRAN][TILE_WIDTH*GRAN];
__shared__ float ds_N[TILE_WIDTH*GRAN][TILE_WIDTH];
int bx, by, tx, ty, Row, Col;
/*Each thread evaluates multiple number of product elements depending on degree of granularity*/
float Pvalue[GRAN];
bx = blockIdx.x; by = blockIdx.y;
tx = threadIdx.x; ty = threadIdx.y;
Col = bx * TILE_WIDTH + tx;
Row = GRAN* by * TILE_WIDTH + ty;
for(i=0;i<GRAN;i++) Pvalue[i] = 0;
/* March GRAN tile-steps at a time along the shared dimension. */
for (int m = 0; m < Width/TILE_WIDTH; m+=GRAN){
/*Load shared memory*/
for(i=0;i<GRAN;i++){
ds_N[ty+i*TILE_WIDTH][tx] = d_N[Col+(m*TILE_WIDTH+ty+i*TILE_WIDTH)*Width];
for(j=0;j<GRAN;j++){
ds_M[ty+i*TILE_WIDTH][tx+j*TILE_WIDTH] = d_M[(Row+i*TILE_WIDTH)*Width + (m+j)*TILE_WIDTH+tx];
}
}
__syncthreads();
/*Evaluate product elements*/
for(k=0;k<TILE_WIDTH*GRAN;k++){
for(i=0;i<GRAN;i++){
Pvalue[i] += ds_M[ty+i*TILE_WIDTH][k] * ds_N[k][tx];
}
}
__syncthreads();
}
/*Write to device memory*/
for(i=0;i<GRAN;i++)
d_P[(Row+i*TILE_WIDTH)*Width+Col] = Pvalue[i];
}
/* Builds two WIDTH x WIDTH identity matrices, multiplies them on the GPU
   and prints "<GRAN>\t<elapsed microseconds>". */
int main(int argc, char** argv){
const int W = WIDTH;
struct timeval start, end;
/* Host matrices, row-major. */
float *M = (float*)malloc(W*W*sizeof(float));
float *N = (float*)malloc(W*W*sizeof(float));
float *P = (float*)malloc(W*W*sizeof(float));
/* Identity inputs: the product should again be the identity, which makes
   the result easy to verify. */
for(int i = 0; i < W; i++){
for(int j = 0; j < W; j++){
float v = (i == j) ? 1 : 0;
M[i*W+j] = v;
N[i*W+j] = v;
}
}
gettimeofday(&start, NULL);
/*Call function for matrix multiplication*/
Matrix_Mul_gran(M, N, P, W);
gettimeofday(&end, NULL);
/* Elapsed wall time in microseconds. */
long seconds = end.tv_sec - start.tv_sec;
long useconds = end.tv_usec - start.tv_usec;
long utime = seconds * 1000000 + useconds;
printf("%d\t%ld\n", GRAN,utime);
free(M); free(N); free(P);
return 0;
}
/*Matrix multiplication function*/
/* Matrix multiplication host wrapper: copies M and N to the device,
   launches the granularity kernel and copies the W x W product into P.
   The kernel's loads are unguarded, so W should be a multiple of
   TILE_WIDTH*GRAN. */
void Matrix_Mul_gran(float *M, float *N, float *P, int W){
size_t size = (size_t)W*W*sizeof(float);  /* size_t: W*W*4 can overflow int for large W */
float *Md, *Nd, *Pd;
/*Initialize memory and copy data from host to device*/
cudaMalloc((void**)&Md, size);
cudaMemcpy(Md,M,size,cudaMemcpyHostToDevice);
cudaMalloc((void**)&Nd, size);
cudaMemcpy(Nd,N,size,cudaMemcpyHostToDevice);
cudaMalloc((void**)&Pd, size);
/*Launch Kernel*/
/*Blocks along Grid Dimension 'Y' i.e. blockIdx.y are merged as per required granularity*/
/*BUG FIX: ceil(W/TILE_WIDTH) divided two ints first, making the ceil a
  no-op and leaving the grid one block short whenever W was not an exact
  multiple. Use integer ceiling division instead.*/
dim3 dimGrid((W + TILE_WIDTH - 1)/TILE_WIDTH, (W + TILE_WIDTH*GRAN - 1)/(TILE_WIDTH*GRAN), 1);
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
Matrix_Mul_gran_Kernel<<<dimGrid, dimBlock>>>(Md, Nd, Pd, W);
/*Copy result from device to host and free device memory*/
cudaMemcpy(P,Pd,size,cudaMemcpyDeviceToHost);
cudaFree(Md); cudaFree(Nd); cudaFree(Pd);
}
|
20,854 | #include <stdio.h>
/* Enumerates CUDA devices and dumps one property per line for each. */
int main(void){
int counter, i;
cudaDeviceProp properties;
cudaGetDeviceCount(&counter);
printf("Device count:%d\n", counter);
for(i=0; i<counter; i++){
cudaGetDeviceProperties(&properties, i);
printf("\n\nDEVICE %d: \n",i);
printf("name: %s\n", properties.name);
/* BUG FIX: size_t fields were printed with %d (undefined behaviour on
   LP64); use %zu. */
printf("totalGlobalMem: %zu\n", properties.totalGlobalMem);
printf("sharedMemPerBlock: %zu\n", properties.sharedMemPerBlock);
printf("regsPerBlock: %d\n", properties.regsPerBlock);
printf("warpSize: %d\n", properties.warpSize);
printf("memPitch: %zu\n", properties.memPitch);
printf("maxThreadsPerBlock: %d\n", properties.maxThreadsPerBlock);
/* BUG FIX: maxThreadsDim and maxGridSize are int[3]; indexing [3] read
   past the end of the arrays. Print all three dimensions instead. */
printf("maxThreadsDim: %d %d %d\n", properties.maxThreadsDim[0],
properties.maxThreadsDim[1], properties.maxThreadsDim[2]);
printf("maxGridSize: %d %d %d\n", properties.maxGridSize[0],
properties.maxGridSize[1], properties.maxGridSize[2]);
printf("totalConstMem: %zu\n", properties.totalConstMem);
printf("major: %d\n", properties.major);
printf("minor: %d\n", properties.minor);
printf("clockRate: %d\n", properties.clockRate);
printf("textureAlignment: %zu\n", properties.textureAlignment);
printf("deviceOverlap: %d\n", properties.deviceOverlap);
printf("multiProcessorCount: %d\n", properties.multiProcessorCount);
printf("kernelExecTimeoutEnabled: %d\n", properties.kernelExecTimeoutEnabled);
printf("integrated: %d\n", properties.integrated);
printf("canMapHostMemory: %d\n", properties.canMapHostMemory);
printf("computeMode: %d\n", properties.computeMode);
printf("concurrentKernels: %d\n", properties.concurrentKernels);
printf("ECCEnabled: %d\n", properties.ECCEnabled);
printf("pciBusID: %d\n", properties.pciBusID);
printf("pciDeviceID: %d\n", properties.pciDeviceID);
printf("tccDriver: %d\n", properties.tccDriver);
}
return 0;
} |
20,855 | #include "includes.h"
// Laplacian edge detector over a width x height 8-bit image (width/height
// come from includes.h). One thread per pixel, 72 threads per block by the
// launch convention encoded in the index below. Border pixels are forced
// to 0; interior pixels become 255 where the 4-neighbour Laplacian is
// negative, else 0.
__global__ void detect_edges(unsigned char *input, unsigned char *output) {
int i = (blockIdx.x * 72) + threadIdx.x;
int x, y; // the pixel of interest
int b, d, f, h; // the pixels adjacent to the x,y used to calculate
int r; // the calculation result
y = i / width;;
x = i - (width * y);
if (x == 0 || y == 0 || x == width - 1 || y == height - 1) {
output[i] = 0;
} else {
b = i + width; // below
d = i - 1; // left
f = i + 1; // right
h = i - width; // above
r = (input[i] * 4) + (input[b] * -1) + (input[d] * -1) + (input[f] * -1)
+ (input[h] * -1);
if (r >= 0) {
output[i] = 0;
} else {
output[i] = 255;
}
}
} |
20,856 | #include <stdio.h>
// Mandelbrot escape-time kernel: one thread per pixel. The view spans 3.5
// units, offset to roughly centre the set; points still bounded after
// max_iter iterations are stored as 0.
// NOTE(review): `column` is derived from the y launch dimensions and `row`
// from x, and the store indexes row*disp_height + column -- this is only
// self-consistent when disp_width == disp_height; confirm for non-square
// displays.
__global__ void mandelgpu(int disp_width, int disp_height, int *array, int max_iter) {
double scale_real, scale_imag;
double x, y, u, v, u2, v2;
int row,column, iter;
column = threadIdx.y + blockIdx.y*blockDim.y;
row= threadIdx.x + blockIdx.x*blockDim.x;
scale_real = 3.5 / (double)disp_width;
scale_imag = 3.5 / (double)disp_height;
x = (column * scale_real) - 2.25;
y = (row * scale_imag) - 1.75;
u = 0.0;
v = 0.0;
u2 = 0.0;
v2 = 0.0;
iter = 0;
// Iterate z = z^2 + c until |z|^2 >= 4 (escape) or max_iter is reached.
while ( u2 + v2 < 4.0 && iter < max_iter ) {
v = 2 * v * u + y;
u = u2 - v2 + x;
u2 = u*u;
v2 = v*v;
iter = iter + 1;
}
// if we exceed max_iter, reset to zero
iter = iter == max_iter ? 0 : iter;
//printf("This is Thread %d and Block %d. \n The value for this Cell is %d. \n", column, row, iter);
array[row*disp_height + column] = iter;
}
|
20,857 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda.h>
#define SIZE 102400
#define MOD 102399
#define STEP 128
/* ARRAY A INITIALIZER */
/* ARRAY A INITIALIZER: fill a[0..SIZE-1] with the constant 1. */
void init_a(int * a)
{
    for (int k = 0; k < SIZE; k++)
        a[k] = 1;
}
/* ARRAY B INITIALIZER */
/* ARRAY B INITIALIZER: scatter the values 0..SIZE-2 through b along a
   stride-STEP walk modulo MOD, then pin the final slot to SIZE-1.
   Write order matches the original exactly. */
void init_b(int * b)
{
    int pos = 0;
    for (int v = 0; v < SIZE - 1; v++)
    {
        b[pos] = v;
        pos = (pos + STEP) % MOD;
    }
    b[SIZE - 1] = SIZE - 1;
}
/* CHECKING A VALUES */
/* CHECKING A VALUES: return 1 iff a[i] == i+1 for every i in [0, SIZE). */
int check_a(int * a)
{
    int ok = 1;
    for (int i = 0; i < SIZE; i++)
    {
        if (a[i] != i + 1)
            ok = 0;
    }
    return ok;
}
/* CUDA FUNCTION */
/* CUDA FUNCTION: each of the blockDim.x threads walks 100 strided indices,
   so a single block of 1024 threads covers 100*1024 = SIZE elements.
   For every covered i it does a[b[i]] += b[i].
   NOTE(review): this is only race-free if concurrently processed b values
   are distinct -- TODO confirm init_b's scatter guarantees that. */
__global__ void mykernel(int * a, int * b, int N)
{
int j;
for(j=0; j<100; j++){
int i = threadIdx.x + j*blockDim.x;
if(i < N){
// printf("a[%d] = %d, b[%d] = %d \n",i,a[i], i, b[i] );
a[ b[i] ] += b[i];
}
}
}
/* Runs the scatter-add exercise: initialise a and b, push to the device,
   launch one 1024-thread block, pull a back and verify it. */
int main(int argc, char * argv[])
{
int taille = sizeof(int)*SIZE;
int * h_a = (int *)malloc(taille);
int * h_b = (int *)malloc(taille);
init_a(h_a);
init_b(h_b);
int* d_a;
int* d_b;
/* Allocate device copies and push the inputs. */
cudaMalloc((void**)&d_a, taille);
cudaMalloc((void**)&d_b, taille);
cudaMemcpy (d_a, h_a, taille, cudaMemcpyHostToDevice);
cudaMemcpy (d_b, h_b, taille, cudaMemcpyHostToDevice);
/* One block of 1024 threads; the kernel's j-loop covers 100*1024 = SIZE. */
dim3 nBlocks;
dim3 nThperBlock;
nBlocks.x = 1;
nThperBlock.x = 1024;
mykernel<<< nBlocks , nThperBlock >>>(d_a, d_b, SIZE);
/* Copy the result back (blocking memcpy also synchronises the kernel). */
cudaMemcpy (h_a, d_a, taille, cudaMemcpyDeviceToHost);
int correct = check_a(h_a);
if(0 == correct)
{
printf("\n\n ******************** \n ***/!\\ ERROR /!\\ *** \n ******************** \n\n");
}
else
{
printf("\n\n ******************** \n ***** SUCCESS! ***** \n ******************** \n\n");
}
free(h_a);
free(h_b);
cudaFree(d_a);
cudaFree(d_b);
/* BUG FIX: the original returned 1 unconditionally, which shells read as
   failure even on success. Return 0 on success, 1 on verification error. */
return correct ? 0 : 1;
}
|
20,858 | #include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/inner_product.h>
#include <thrust/reduce.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/sort.h>
#include <iostream>
typedef thrust::device_vector<int> int_vec;
// Prints every element of a thrust host or device vector on one line,
// space-separated, followed by a newline.
template <typename Vector>
void print_vector(const Vector& v) {
typedef typename Vector::value_type T;
thrust::copy(v.begin(), v.end(), std::ostream_iterator<T>(std::cout, " "));
std::cout << std::endl;
}
// Builds a sparse histogram of `data`: hist_values receives the distinct
// values in ascending order and hist_counts their multiplicities.
// Side effect: `data` is sorted in place. Requires data.size() > 0.
void sparse_histogram(int_vec &data,
int_vec &hist_values,
int_vec &hist_counts) {
// sort data
thrust::sort(data.begin(), data.end());
// number of bins = number of unique values in data (assumes data.size() > 0)
// (counts boundaries between unequal neighbours, plus one)
int num_bins = thrust::inner_product(data.begin(), data.end() - 1,
data.begin() + 1,
1,
thrust::plus<int>(),
thrust::not_equal_to<int>());
// resize histogram to number of unique elements
hist_values.resize(num_bins);
hist_counts.resize(num_bins);
// calculate number of elements per bin
thrust::reduce_by_key(data.begin(), data.end(),
thrust::constant_iterator<int>(1),
hist_values.begin(),
hist_counts.begin());
}
// Dense histogram: dense_hist[i] = number of occurrences of value i in
// `data`. Every data value must lie in [0, dense_hist.size()).
// Side effect: `data` is sorted in place by the sparse pass.
void histogram(int_vec &data, int_vec &dense_hist) {
thrust::device_vector<int> sparse_hist_values;
thrust::device_vector<int> sparse_hist_counts;
sparse_histogram(data, sparse_hist_values, sparse_hist_counts);
thrust::fill(dense_hist.begin(), dense_hist.end(), 0);
thrust::scatter(sparse_hist_counts.begin(),
sparse_hist_counts.end(),
sparse_hist_values.begin(),
dense_hist.begin());
}
int main() {
    const int num_bins = 10;
    // Sample data on the host.
    thrust::host_vector<int> H(10);
    const int samples[10] = {1, 1, 3, 6, 1, 1, 5, 6, 7, 6};
    for (int i = 0; i < 10; ++i)
        H[i] = samples[i];
    // Copy host_vector H to device_vector D.
    thrust::device_vector<int> D = H;
    // Histogram D (this also sorts D in place).
    int_vec hist(num_bins);
    histogram(D, hist);
    std::cout << "Values:" << std::endl;
    print_vector(D);
    std::cout << "Histogram:" << std::endl;
    print_vector(hist);
    return 0;
}
/*
https://www.youtube.com/watch?v=cGffGYBbtbk
https://github.com/thrust/thrust/blob/master/examples/histogram.cu
*/
|
20,859 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define n 512
// Elementwise vector add; launched below with exactly n threads in a
// single block, so threadIdx.x alone indexes the arrays.
__global__ void bmk_add(int *a, int *b, int *result)
{
int i = threadIdx.x;
result[i] = a[i] + b[i];
}
/* Adds two n-element vectors on the GPU (one block of n threads) and
   prints the result. */
int main()
{
int num_blocks = 1, num_threads = n;
int *a, *b, *c;
int *dev_a, *dev_b, *dev_c;
int size = n * sizeof(int);
a = (int*)malloc(size);
b = (int*)malloc(size);
c = (int*)malloc(size);
cudaMalloc((void**)&dev_a,size);
cudaMalloc((void**)&dev_b,size);
cudaMalloc((void**)&dev_c,size);
for(int i = 0;i<n;i++)
{
//a[i] = rand()%1024;
//b[i] = rand()%1024;
a[i] = i;
b[i] = i;
}
cudaMemcpy(dev_a,a,size,cudaMemcpyHostToDevice);
cudaMemcpy(dev_b,b,size,cudaMemcpyHostToDevice);
bmk_add <<<num_blocks, num_threads>>>(dev_a,dev_b,dev_c);
/* Blocking copy: also synchronises with the kernel before c is read. */
cudaMemcpy(c,dev_c,size,cudaMemcpyDeviceToHost);
for(int i = 0;i<n;i++)
printf("%d ",c[i]);
printf("\n");
/* BUG FIX: the original leaked the host buffers a, b and c. */
free(a);
free(b);
free(c);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
}
|
20,860 |
#include <stdio.h>
int main(void) {
    // Enumerate CUDA devices and report a few key properties of each.
    int deviceCount = 0;
    cudaGetDeviceCount(&deviceCount);
    printf("Number of Devices: %d\n", deviceCount);
    cudaDeviceProp prop;
    for (int dev = 0; dev < deviceCount; dev++) {
        cudaGetDeviceProperties(&prop, dev);
        printf("Device Number: %d\n", dev);
        printf("Device Name: %s\n", prop.name);
        printf("Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
        printf("Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
        printf("Global Memory size (bytes): %zu\n", prop.totalGlobalMem);
    }
    return 0;
}
|
20,861 | //#include <omp.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
// Bottom-up tree pass: thread b sums one pair of children from row a+1
// into node (a, b) of the binary tree. prefixsums is unused here.
__global__ void parallel1(int a, int** binaryTree, int** prefixsums)
{
int b = threadIdx.x;
int sum;
sum = binaryTree[a+1][2*b] + binaryTree[a+1][2*b+1];
binaryTree[a][b] = sum;
}
// Top-down pass over row a (pow(2, a) nodes): combines the parent row of
// prefixsums with the current tree row to produce this row's prefix sums.
__global__ void parallel2(int a, int** binaryTree, int** prefixsums) //pow(2, x) nodes
{
int b = threadIdx.x;
if(b == 0)
prefixsums[a][b] = binaryTree[a][b]; // leftmost node: whole-row sum so far
else if(b % 2 == 1)
prefixsums[a][b] = prefixsums[a-1][(b-1)/2]; // right child: parent's prefix
else
prefixsums[a][b] = prefixsums[a-1][b/2-1] + binaryTree[a][b];
}
/* Builds a binary tree of pair sums over 8 leaf values on the GPU and then
   computes prefix sums per level, printing both tables. */
int main()
{
const int num = 8;
int height;
int binaryTreeSize = 0;
if(num%2 == 0)
{
binaryTreeSize = 2*num - 1;
height = (int)(log2((float)binaryTreeSize+2) + .5);
}
else
{
binaryTreeSize = 2*num;
height = (int)(log2((float)binaryTreeSize+1) + .5);
}
printf("%d\n", height);
int x, y;
/* BUG FIX: the original cudaMalloc'd binaryTree/prefixsums and then
   dereferenced them on the HOST (cudaMalloc(&binaryTree[x], ...), leaf
   writes, printf reads). Dereferencing device pointers on the host is
   undefined behaviour. Stage the per-row device pointers in host arrays
   and copy the pointer tables to the device for the kernels. */
int **h_tree = (int**)malloc(height * sizeof(int*));
int **h_sums = (int**)malloc(height * sizeof(int*));
for(x = 0; x < height; x++)
{
cudaMalloc(&h_tree[x], num * sizeof(int));
cudaMalloc(&h_sums[x], num * sizeof(int));
}
int **binaryTree;
int **prefixsums;
cudaMalloc(&binaryTree, height * sizeof(int*));
cudaMalloc(&prefixsums, height * sizeof(int*));
cudaMemcpy(binaryTree, h_tree, height * sizeof(int*), cudaMemcpyHostToDevice);
cudaMemcpy(prefixsums, h_sums, height * sizeof(int*), cudaMemcpyHostToDevice);
/* Leaf values live in the last tree row. */
int nums[8] = {2, 4, 6, 3, 4, 1, 0, 2};
cudaMemcpy(h_tree[height-1], nums, num * sizeof(int), cudaMemcpyHostToDevice);
/* Bottom-up pair sums, then per-level prefix sums. */
for(x = height-2; x >= 0; x--)
{
parallel1<<<1, (int)pow(2,x)>>>(x, binaryTree, prefixsums);
}
for(x = 0; x < height; x++)
{
parallel2<<<1, (int)pow(2, x)>>>(x, binaryTree, prefixsums);
}
cudaDeviceSynchronize();
/* Copy each row back before printing: device memory is not host-readable. */
int *row = (int*)malloc(num * sizeof(int));
for(x = 0; x < height; x++)
{
cudaMemcpy(row, h_tree[x], num * sizeof(int), cudaMemcpyDeviceToHost);
for(y = 0; y < pow(2, x); y++)
printf("%d ", row[y]);
printf("\n");
}
puts("");
for(x = 0; x < height; x++)
{
cudaMemcpy(row, h_sums[x], num * sizeof(int), cudaMemcpyDeviceToHost);
for(y = 0; y < pow(2, x); y++)
printf("%d ", row[y]);
printf("\n");
}
puts("");
free(row);
for(x = 0; x < height; x++)
{
cudaFree(h_tree[x]);
cudaFree(h_sums[x]);
}
cudaFree(binaryTree);
cudaFree(prefixsums);
free(h_tree);
free(h_sums);
return 0;
}
|
20,862 | #include "matrix.cuh"
// Moves the backing buffer from host to device memory; no-op if it is
// already device-resident. The host copy is released and `matrix` becomes
// a device pointer (so it must not be dereferenced on the host afterwards).
// Throws a C-string on allocation failure.
void Matrix::to_gpu(void)
{
if (!gpu_enabled)
{
gpu_enabled = true;
float* d_matrix;
if (cudaMalloc((void**)&d_matrix, sizeof(float)*dim1*dim2) != cudaSuccess)
throw "memory allocation failed\n";
cudaMemcpy(d_matrix, matrix, sizeof(float)*dim1*dim2, cudaMemcpyHostToDevice);
delete[] matrix;
matrix = d_matrix;
}
}
// Moves the backing buffer from device back to host memory; no-op if it is
// already host-resident. The device copy is freed and `matrix` becomes a
// regular heap pointer again.
void Matrix::to_cpu(void)
{
if (gpu_enabled)
{
gpu_enabled = false;
float *h_matrix = new float[dim1*dim2];
cudaMemcpy(h_matrix, matrix, sizeof(float)*dim1*dim2, cudaMemcpyDeviceToHost);
cudaFree(matrix);
matrix = h_matrix;
}
}
// Returns a new Matrix built from this host-resident buffer; throws if the
// data already lives on the GPU.
// NOTE(review): presumably the three-argument constructor produces the
// GPU-resident copy -- confirm against the constructor in matrix.cuh.
Matrix Matrix::get_gpu(void) const
{
if (gpu_enabled)
throw "cannot get another gpu matrix if it is already gpu";
return Matrix(matrix, dim1, dim2);
}
// Returns a host-resident copy of this matrix. If the data lives on the
// device it is first copied back with cudaMemcpy.
// NOTE(review): the trailing `false` flag presumably marks the result as
// CPU-resident -- confirm against the constructor in matrix.cuh.
Matrix Matrix::get_cpu(void) const
{
if (gpu_enabled)
{
float* h_matrix = new float[dim1*dim2];
cudaMemcpy(h_matrix, matrix, sizeof(float)*dim1*dim2, cudaMemcpyDeviceToHost);
return Matrix(h_matrix, dim1, dim2, false);
}
return Matrix(matrix, dim1, dim2, false);
} |
20,863 | #include<iostream>
#include<vector>
// Naive dense matrix multiply C = A * B for row-major N x N matrices; one
// thread per output element (thread y -> row, thread x -> column).
__global__ void matMultiply(float *A, float *B, float *C, int N){
auto i = blockDim.y * blockIdx.y + threadIdx.y;
auto j = blockDim.x * blockIdx.x + threadIdx.x;
// BUG FIX: guard against partial tiles -- without this, any launch whose
// grid over-covers N reads and writes out of bounds.
if (i >= N || j >= N) return;
float temp = 0.0f;
for (int k = 0; k < N; k++){
temp += A[i*N+k]*B[k*N+j];
}
C[i*N+j] = temp;
}
int main(){
    // NOTE: N has to be >= 32 because the launch uses 32x32 thread tiles;
    // smaller N would leave the output vector as 0.
    int N = 32;
    std::vector<float> h_A(N*N, 0);
    std::vector<float> h_B(N*N, 0);
    std::vector<float> h_C(N*N, 0);
    for (auto i = 0; i < N*N; i++) {
        h_A[i] = 2*i;
        h_B[i] = 3*i;
    }
    const size_t bytes = N*N*sizeof(float);
    float *d_A, *d_B, *d_C;
    cudaMalloc(&d_A, bytes);
    cudaMalloc(&d_B, bytes);
    cudaMalloc(&d_C, bytes);
    cudaMemcpy(d_A, h_A.data(), bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B.data(), bytes, cudaMemcpyHostToDevice);
    // 32 threads per row and per column of each block.
    dim3 threadsPerBlock(32, 32);
    dim3 blocksPerGrid(N/32, N/32);
    matMultiply<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
    cudaMemcpy(h_C.data(), d_C, bytes, cudaMemcpyDeviceToHost);
    // Uncomment to see the output
    // for(auto& i:h_C){
    //     std::cout << i << std::endl;
    // }
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    return 0;
}
|
20,864 | #include "includes.h"
// Gaussian kernel matrix k(i,j) = exp(-g * sum((x_i - s_j)^2)) for sparse
// inputs x (mx x nd) and s (nd x ns) in 1-based CSC format; k (mx x ns) is
// dense and must be pre-zeroed. Grid-stride loop over the columns of s.
// NOTE(review): only coordinates that are nonzero in BOTH operands'
// patterns contribute to the squared distance -- confirm that callers rely
// on matching sparsity patterns.
__global__ void _kgauss32(int mx, int ns, float *xval, int *xrow, int *xcol, float *sval, int *srow, int *scol, float g, float *k) {
// assume x(mx,nd) and s(nd,ns) are in 1-based csc format
// assume k(mx,ns) has been allocated and zeroed out
int s0, s1, sp, sc, sr, x0, x1, xp, xc, xr, k0, k1, kp;
float sv, xv, xs;
sc = threadIdx.x + blockIdx.x * blockDim.x;
k0 = mx*sc; // k[k0]: first element of k[:,sc]
k1 = k0+mx; // k[k1-1]: last element of k[:,sc]
while (sc < ns) { // sc: 0-based column for s
s0 = scol[sc]-1; // first element of s[:,sc] is at sval[s0] (scol entries are 1-based)
s1 = scol[sc+1]-1; // last element of s[:,sc] is at sval[s1-1]
for (sp = s0; sp < s1; sp++) {
sr = srow[sp]-1; // sr: 0-based row for s (srow entries are 1-based)
sv = sval[sp]; // sv: s[sr,sc] (0-based)
xc = sr; // xc: 0-based column for x (=sr)
x0 = xcol[xc]-1; // first element of x[:,xc] is at xval[x0]
x1 = xcol[xc+1]-1; // last element of x[:,xc] is at xval[x1-1]
for (xp = x0; xp < x1; xp++) {
xr = xrow[xp]-1; // xr: 0-based row for x
xv = xval[xp]; // xv: x[xr,xc=sr], now we can set k[xr,sc]
xs = xv - sv;
k[k0+xr] += xs*xs; // k += (xi-si)^2
}
}
for (kp = k0; kp < k1; kp++) {
k[kp] = exp(-g*k[kp]); // k = exp(-g*sum((xi-si)^2))
}
sc += blockDim.x * gridDim.x;
}
} |
20,865 | //pass
//--gridDim=64 --blockDim=256
#include "common.h"
#define MERGE_THREADBLOCK_SIZE 256
// Merges `histogramCount` partial 64-bin histograms: block b reduces bin b
// across all partials with a shared-memory tree reduction and writes the
// total to d_Histogram[b]. Requires blockDim.x == MERGE_THREADBLOCK_SIZE.
__global__ void mergeHistogram64Kernel(
    uint *d_Histogram,
    uint *d_PartialHistograms,
    uint histogramCount
)
{
    __shared__ uint data[MERGE_THREADBLOCK_SIZE];
    uint sum = 0;
    for (uint i = threadIdx.x; i < histogramCount; i += MERGE_THREADBLOCK_SIZE)
    {
        sum += d_PartialHistograms[blockIdx.x + i * HISTOGRAM64_BIN_COUNT];
    }
    data[threadIdx.x] = sum;
    for (uint stride = MERGE_THREADBLOCK_SIZE / 2; stride > 0; stride >>= 1)
    {
        // BUG FIX: this barrier was commented out, leaving a data race
        // between the write of data[] above / in the previous iteration
        // and the read of data[threadIdx.x + stride] below.
        __syncthreads();
        if (threadIdx.x < stride)
        {
            data[threadIdx.x] += data[threadIdx.x + stride];
        }
    }
    if (threadIdx.x == 0)
    {
        d_Histogram[blockIdx.x] = data[0];
    }
}
|
20,866 | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define PI 3.1415926536f
/*
* Paint a 2D surface with a moving bulls-eye pattern. The "face" parameter selects
* between 6 different colors to use. We will use a different color on each face of a
* cube map.
*/
/*
 * One thread per pixel (launched in 16x16 blocks by cuda_texture_cube).
 * Writes 4 bytes per pixel (BGRA order): an animated cosine bulls-eye whose
 * color channel is selected by `face`, phase-shifted by time t.
 */
__global__ void cuda_kernel_texture_cube(char *surface, int width, int height, size_t pitch, int face, float t)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
unsigned char *pixel;
// in the case where, due to quantization into grids, we have
// more threads than pixels, skip the threads which don't
// correspond to valid pixels
if (x >= width || y >= height) return;
// get a pointer to this pixel
pixel = (unsigned char *)(surface + y*pitch) + 4*x;
// populate it
// theta_x/theta_y map pixel coordinates to [-1, 1]; theta is the ring angle.
float theta_x = (2.0f*x)/width - 1.0f;
float theta_y = (2.0f*y)/height - 1.0f;
float theta = 2.0f*PI*sqrt(theta_x*theta_x + theta_y*theta_y);
unsigned char value = 255*(0.6f + 0.4f*cos(theta + t));
pixel[3] = 255; // alpha
if (face%2)
{
pixel[0] = // blue
pixel[1] = // green
pixel[2] = 0; // red
pixel[face/2] = value;
}
else
{
pixel[0] = // blue
pixel[1] = // green
pixel[2] = value; // red
pixel[face/2] = 0;
}
}
extern "C"
void cuda_texture_cube(void *surface, int width, int height, size_t pitch, int face, float t)
{
    // Fixed 16x16 (= 256-thread) blocks; enough blocks to cover every pixel.
    dim3 block = dim3(16, 16);
    dim3 grid = dim3((width + block.x - 1) / block.x, (height + block.y - 1) / block.y);
    cuda_kernel_texture_cube<<<grid, block>>>((char *)surface, width, height, pitch, face, t);
    // Report launch-configuration failures (execution errors surface later).
    cudaError_t error = cudaGetLastError();
    if (error != cudaSuccess)
    {
        printf("cuda_kernel_texture_cube() failed to launch error = %d\n", error);
    }
}
|
20,867 | //
// Created by heidies on 7/7/18.
//
#include <cuda_runtime.h>
#include <iostream>
#include <sys/time.h>
using namespace std;
// Elementwise C = A + B over an nx-by-ny grid; one thread per element,
// guarded so extra threads in partial blocks write nothing.
__global__ void sumMatrixOnGPU2D(float *A, float *B, float *C, const int nx, const int ny){
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int idx = iy * nx + ix;
if(ix < nx && iy < ny)
C[idx] = A[idx] + B[idx];
}
// Fills ip[0..size) with pseudo-random floats in [0, 25.5], reseeding the
// C PRNG from the wall clock each call.
void initialData(float* ip, unsigned long long size){
    time_t seed;
    srand((unsigned)time(&seed));
    for(unsigned long long k = 0; k < size; ++k)
        ip[k] = (float)(rand() & 0xFF) / 10.0f;
}
// Wall-clock timestamp in milliseconds (gettimeofday-based), used below to
// time kernel execution by differencing two calls.
double cpuSecond(){
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return ((double)tv.tv_sec + (double)tv.tv_usec * 1e-6) * 1e+3;
}
int main(int argc, char **argv){
    // 16384 x 16384 element matrices.
    int nx = 1 << 14;
    int ny = 1 << 14;
    unsigned long long size = nx * ny;
    size_t nBytes = size * sizeof(float);
    // Host buffers, filled with random data.
    float *h_A = (float*)malloc(nBytes);
    float *h_B = (float*)malloc(nBytes);
    float *h_C = (float*)malloc(nBytes);
    initialData(h_A, size);
    initialData(h_B, size);
    // Device buffers.
    float *d_A, *d_B, *d_C;
    cudaMalloc((float**)&d_A, nBytes);
    cudaMalloc((float**)&d_B, nBytes);
    cudaMalloc((float**)&d_C, nBytes);
    cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_C, h_C, nBytes, cudaMemcpyHostToDevice);
    // Block shape is overridable from the command line: <prog> dimx dimy.
    int blockdimx = 32;
    int blockdimy = 16;
    if(argc > 2){
        blockdimx = atoi(argv[1]);
        blockdimy = atoi(argv[2]);
    }
    dim3 block(blockdimx, blockdimy);
    dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
    // Time the kernel; cpuSecond reports milliseconds.
    double iStart = cpuSecond();
    sumMatrixOnGPU2D<<<grid, block>>>(d_A, d_B, d_C, nx, ny);
    cudaDeviceSynchronize();
    double iElaps = cpuSecond() - iStart;
    cout << "sumMatrixOnGPU2D <<< (" << grid.x << ", " << grid.y << "), " << "(" << block.x << ", " << block.y << ") >>> " <<
         "elapsed " << iElaps << " ms" << endl;
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    free(h_A);
    free(h_B);
    free(h_C);
}
|
20,868 | #include "includes.h"
/***********************************************************
By Huahua Wang, the University of Minnesota, twin cities
***********************************************************/
// ADMM-style dual update: Y += X - Z over `size` elements (grid-stride),
// with each thread accumulating its own squared residual into err[idx].
// NOTE(review): err[idx] is written by every launched thread, so err must
// hold at least gridDim.x*blockDim.x floats even when size is smaller --
// confirm the caller allocates it that way.
__global__ void dual( float* err, float* Y, float* X, float* Z, unsigned int size)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int stride = blockDim.x * gridDim.x;
float temp;
err[idx] = 0.0;
for (unsigned int i = idx; i < size; i += stride) {
temp = X[i] - Z[i];
Y[i] += temp;
err[idx] += temp*temp;
}
// __syncthreads();
} |
20,869 | #include "includes.h"
// Numerically stable logistic function: expf is never evaluated on a large
// positive argument, so neither branch can overflow to inf.
__device__ inline float stableSigmoid(float x) {
  if(x >= 0) {
    float z = expf(-x);
    // Float literals (1.0f): the original 1.0 promoted the arithmetic to
    // double, which is much slower on most GPUs for no precision benefit.
    return 1.0f / (1.0f + z);
  } else {
    float z = expf(x);
    return z / (1.0f + z);
  }
}
// Backward pass of an LSTM output gate over a (rows x cols) batch: blocks
// stride over rows, threads over columns. Gradients are accumulated into
// outCell/outXW/outSU (each may be null to skip it) and atomically into
// outB. xW/sU/b pack 4 gates per row; k = i + 3*cols selects the output
// gate's slot. `adj` is the incoming adjoint (dL/dh).
__global__ void gLSTMOutputBackward(float* outCell, float* outXW, float* outSU, float* outB, const float* cell, const float* xW, const float* sU, const float* b, const float* adj, size_t rows, size_t cols) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float* rowOutCell = outCell + j * cols;
float* rowOutXW = outXW + j * cols * 4;
float* rowOutSU = outSU + j * cols * 4;
const float* rowCell = cell + j * cols;
const float* xWrow = xW + j * cols * 4;
const float* sUrow = sU + j * cols * 4;
const float* rowAdj = adj + j * cols;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols) {
int k = i + 3 * cols;
float go = stableSigmoid(xWrow[k] + sUrow[k] + b[k]);
float t = tanhf(rowCell[i]);
float adj = rowAdj[i];
// dc/dc_{t-1}
if(outCell)
rowOutCell[i] += go * (1 - t * t) * adj;
// dc/d(b_o) = dc/d(xW_f) ...
float dcdxo = t * go * (1 - go) * adj;
if(outXW)
rowOutXW[k] += dcdxo;
if(outSU)
rowOutSU[k] += dcdxo;
if(outB)
atomicAdd(outB + k, dcdxo); // atomic: bias gradient is shared across rows
}
}
}
}
} |
20,870 | #include "includes.h"
/*
 * Feed-forward evaluation of one fixed-topology neural net per block:
 * blockIdx.x selects the network (its weights, inputs and outputs) and
 * threadIdx.x the neuron within a layer. `buffer` is dynamic shared memory
 * holding the current layer's activations (>= num_per_layer floats).
 * A sigmoid with temperature `response` is applied after every layer.
 */
__global__ void cuda_neural_net(float *Weights_D, int num_per_sweeper, int num_per_layer, int num_per_input, int num_per_output, int num_weights, int num_layers, float response, float *inputs_d, float *outputs_d)
{
extern __shared__ float buffer[];
int start_of_weights = blockIdx.x * num_weights;
int start_of_hidden_layers = start_of_weights + (num_per_input * num_per_layer);
//input layer
buffer[threadIdx.x] = 0;
for (int i = 0; i < num_per_input; ++i)
{
buffer[threadIdx.x] += inputs_d[(blockIdx.x * num_per_input) + i] * Weights_D[start_of_weights + (threadIdx.x * num_per_input) + i];
}
buffer[threadIdx.x] = 1.0 / (1.0 + exp(-buffer[threadIdx.x] / response));
__syncthreads();
//subsequent hidden layers
float temp;
for (int i = 0; i < num_layers; ++i)
{
temp = 0;
for (int j = 0; j < num_per_layer; ++j)
{
temp += buffer[j] * Weights_D[start_of_hidden_layers + (num_per_layer * num_per_layer * i) + (num_per_layer * threadIdx.x) + j];
}
temp = 1.0 / (1.0 + exp(-temp / response));
__syncthreads();  // everyone has read the old activations...
buffer[threadIdx.x] = temp;
__syncthreads();  // ...before anyone sees the new ones
}
//output layer
if (threadIdx.x < num_per_output)
{
temp = 0;
for (int i = 0; i < num_per_layer; ++i)
{
temp += buffer[i] * Weights_D[start_of_hidden_layers + (num_per_layer * num_per_layer * num_layers) + (num_per_layer * threadIdx.x) + i];
}
temp = 1.0 / (1.0 + exp(-temp / response));
/* BUG FIX: the original called __syncthreads() here, inside a branch only
   threads with threadIdx.x < num_per_output reach. A barrier in divergent
   control flow is undefined behaviour (deadlock on many GPUs). No thread
   reads shared memory after this point, so the barrier is removed. */
//copy the result back out to the outputs vector
outputs_d[(blockIdx.x * num_per_output) + threadIdx.x] = temp;
}
} |
20,871 | #include <iostream>
#include <ctime>
#include <cuda.h>
#include <cuda_runtime.h> // Stops underlining of __global__
#include <device_launch_parameters.h> // Stops underlining of threadIdx etc.
using namespace std;
// Brute-force nearest neighbour: thread idx scans all `count` points and
// stores the index of the closest other point into indices[idx].
// Squared distances are compared directly (sqrt is monotone, so argmin is
// unchanged and the sqrt can be skipped).
__global__ void FindClosestGPU(float3* points, int* indices, int count)
{
if(count <= 1) return;
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < count)
{
float3 thisPoint = points[idx];
float smallestSoFar = 3.40282e38f; // ~FLT_MAX sentinel
for(int i = 0; i < count; i++)
{
if(i == idx) continue;
float dist = (thisPoint.x - points[i].x)*(thisPoint.x - points[i].x);
dist += (thisPoint.y - points[i].y)*(thisPoint.y - points[i].y);
dist += (thisPoint.z - points[i].z)*(thisPoint.z - points[i].z);
if(dist < smallestSoFar)
{
smallestSoFar = dist;
indices[idx] = i;
}
}
}
}
|
20,872 | #include "includes.h"
// Grid-stride update a[i] += mult * sign(b[i]) over numEls elements
// (sign(0) contributes nothing).
__global__ void kAddMultSign(float* a, float* b, unsigned int numEls, float mult) {
const unsigned int start = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int stride = gridDim.x * blockDim.x;
for (unsigned int i = start; i < numEls; i += stride) {
float delta = 0;
if (b[i] > 0) {
delta = mult;
} else if (b[i] < 0) {
delta = -mult;
}
a[i] = a[i] + delta;
}
} |
20,873 | __global__ void _add_32_11(int n, float *x, float *y, float *z) {
// Grid-stride elementwise add (float): z[i] = x[i] + y[i] for i in [0, n).
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi=x[i];
float yi=y[i];
z[i] = xi+yi;
i += blockDim.x * gridDim.x;
}
}
/* The original repeated an identical grid-stride elementwise kernel and
   extern "C" launcher for every (operation, type) pair -- hundreds of
   copy-pasted lines. Generate them from one macro instead; every emitted
   symbol keeps its original name and signature, and the launch
   configuration (<<<128,128>>>) is unchanged. EXPR may use `xi` and `yi`. */
#define DEF_ELEMENTWISE(NAME, T, EXPR)                                       \
__global__ void _##NAME(int n, T *x, T *y, T *z) {                           \
  int i = threadIdx.x + blockIdx.x * blockDim.x;                             \
  while (i < n) { /* grid-stride loop: any n with a fixed launch */          \
    T xi = x[i];                                                             \
    T yi = y[i];                                                             \
    z[i] = (EXPR);                                                           \
    i += blockDim.x * gridDim.x;                                             \
  }                                                                          \
}                                                                            \
extern "C" void NAME(int n, T *x, T *y, T *z) {                              \
  _##NAME<<<128, 128>>>(n, x, y, z);                                         \
}

/* _add_32_11's kernel is defined above; only its launcher is needed here. */
extern "C" void add_32_11(int n, float *x, float *y, float *z) {
  _add_32_11<<<128, 128>>>(n, x, y, z);
}

DEF_ELEMENTWISE(add_64_11, double, xi + yi)
DEF_ELEMENTWISE(sub_32_11, float,  xi - yi)
DEF_ELEMENTWISE(sub_64_11, double, xi - yi)
DEF_ELEMENTWISE(mul_32_11, float,  xi * yi)
DEF_ELEMENTWISE(mul_64_11, double, xi * yi)
DEF_ELEMENTWISE(div_32_11, float,  xi / yi)
DEF_ELEMENTWISE(div_64_11, double, xi / yi)
DEF_ELEMENTWISE(pow_32_11, float,  pow(xi, yi))
DEF_ELEMENTWISE(pow_64_11, double, pow(xi, yi))
DEF_ELEMENTWISE(max_32_11, float,  (xi > yi ? xi : yi))
DEF_ELEMENTWISE(max_64_11, double, (xi > yi ? xi : yi))
DEF_ELEMENTWISE(min_32_11, float,  (xi < yi ? xi : yi))
DEF_ELEMENTWISE(min_64_11, double, (xi < yi ? xi : yi))
DEF_ELEMENTWISE(eq_32_11,  float,  xi == yi)
DEF_ELEMENTWISE(eq_64_11,  double, xi == yi)
DEF_ELEMENTWISE(ne_32_11,  float,  xi != yi)
DEF_ELEMENTWISE(ne_64_11,  double, xi != yi)
__global__ void _gt_32_11(int n, float *x, float *y, float *z) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi=x[i];
float yi=y[i];
z[i] = xi>yi;
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void gt_32_11(int n, float *x, float *y, float *z) {
_gt_32_11<<<128,128>>>(n,x,y,z);
}
#ifdef __cplusplus
}
#endif
__global__ void _gt_64_11(int n, double *x, double *y, double *z) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi=x[i];
double yi=y[i];
z[i] = xi>yi;
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void gt_64_11(int n, double *x, double *y, double *z) {
_gt_64_11<<<128,128>>>(n,x,y,z);
}
#ifdef __cplusplus
}
#endif
__global__ void _ge_32_11(int n, float *x, float *y, float *z) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi=x[i];
float yi=y[i];
z[i] = xi>=yi;
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void ge_32_11(int n, float *x, float *y, float *z) {
_ge_32_11<<<128,128>>>(n,x,y,z);
}
#ifdef __cplusplus
}
#endif
__global__ void _ge_64_11(int n, double *x, double *y, double *z) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi=x[i];
double yi=y[i];
z[i] = xi>=yi;
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void ge_64_11(int n, double *x, double *y, double *z) {
_ge_64_11<<<128,128>>>(n,x,y,z);
}
#ifdef __cplusplus
}
#endif
__global__ void _lt_32_11(int n, float *x, float *y, float *z) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi=x[i];
float yi=y[i];
z[i] = xi<yi;
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void lt_32_11(int n, float *x, float *y, float *z) {
_lt_32_11<<<128,128>>>(n,x,y,z);
}
#ifdef __cplusplus
}
#endif
__global__ void _lt_64_11(int n, double *x, double *y, double *z) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi=x[i];
double yi=y[i];
z[i] = xi<yi;
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void lt_64_11(int n, double *x, double *y, double *z) {
_lt_64_11<<<128,128>>>(n,x,y,z);
}
#ifdef __cplusplus
}
#endif
__global__ void _le_32_11(int n, float *x, float *y, float *z) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi=x[i];
float yi=y[i];
z[i] = xi<=yi;
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void le_32_11(int n, float *x, float *y, float *z) {
_le_32_11<<<128,128>>>(n,x,y,z);
}
#ifdef __cplusplus
}
#endif
__global__ void _le_64_11(int n, double *x, double *y, double *z) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi=x[i];
double yi=y[i];
z[i] = xi<=yi;
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void le_64_11(int n, double *x, double *y, double *z) {
_le_64_11<<<128,128>>>(n,x,y,z);
}
#ifdef __cplusplus
}
#endif
__global__ void _invxback_32_11(int n, float *x, float *y, float *z) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi=x[i];
float yi=y[i];
z[i] = (-xi*yi*yi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void invxback_32_11(int n, float *x, float *y, float *z) {
_invxback_32_11<<<128,128>>>(n,x,y,z);
}
#ifdef __cplusplus
}
#endif
__global__ void _invxback_64_11(int n, double *x, double *y, double *z) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi=x[i];
double yi=y[i];
z[i] = (-xi*yi*yi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void invxback_64_11(int n, double *x, double *y, double *z) {
_invxback_64_11<<<128,128>>>(n,x,y,z);
}
#ifdef __cplusplus
}
#endif
__global__ void _reluback_32_11(int n, float *x, float *y, float *z) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi=x[i];
float yi=y[i];
z[i] = (yi>0?xi:0);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void reluback_32_11(int n, float *x, float *y, float *z) {
_reluback_32_11<<<128,128>>>(n,x,y,z);
}
#ifdef __cplusplus
}
#endif
__global__ void _reluback_64_11(int n, double *x, double *y, double *z) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi=x[i];
double yi=y[i];
z[i] = (yi>0?xi:0);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void reluback_64_11(int n, double *x, double *y, double *z) {
_reluback_64_11<<<128,128>>>(n,x,y,z);
}
#ifdef __cplusplus
}
#endif
__global__ void _sigmback_32_11(int n, float *x, float *y, float *z) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi=x[i];
float yi=y[i];
z[i] = (xi*yi*(1-yi));
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void sigmback_32_11(int n, float *x, float *y, float *z) {
_sigmback_32_11<<<128,128>>>(n,x,y,z);
}
#ifdef __cplusplus
}
#endif
__global__ void _sigmback_64_11(int n, double *x, double *y, double *z) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi=x[i];
double yi=y[i];
z[i] = (xi*yi*(1-yi));
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void sigmback_64_11(int n, double *x, double *y, double *z) {
_sigmback_64_11<<<128,128>>>(n,x,y,z);
}
#ifdef __cplusplus
}
#endif
__global__ void _tanhback_32_11(int n, float *x, float *y, float *z) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi=x[i];
float yi=y[i];
z[i] = (xi*(1-yi*yi));
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void tanhback_32_11(int n, float *x, float *y, float *z) {
_tanhback_32_11<<<128,128>>>(n,x,y,z);
}
#ifdef __cplusplus
}
#endif
__global__ void _tanhback_64_11(int n, double *x, double *y, double *z) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi=x[i];
double yi=y[i];
z[i] = (xi*(1-yi*yi));
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void tanhback_64_11(int n, double *x, double *y, double *z) {
_tanhback_64_11<<<128,128>>>(n,x,y,z);
}
#ifdef __cplusplus
}
#endif
__global__ void _rpow_32_11(int n, float *x, float *y, float *z) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
float xi=x[i];
float yi=y[i];
z[i] = pow(yi,xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void rpow_32_11(int n, float *x, float *y, float *z) {
_rpow_32_11<<<128,128>>>(n,x,y,z);
}
#ifdef __cplusplus
}
#endif
__global__ void _rpow_64_11(int n, double *x, double *y, double *z) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
while (i < n) {
double xi=x[i];
double yi=y[i];
z[i] = pow(yi,xi);
i += blockDim.x * gridDim.x;
}
}
#ifdef __cplusplus
extern "C" {
#endif
void rpow_64_11(int n, double *x, double *y, double *z) {
_rpow_64_11<<<128,128>>>(n,x,y,z);
}
#ifdef __cplusplus
}
#endif
/* Element-wise BGH transform, single-precision variant.
   FIXES vs. original:
   - use the float math functions (expf/erfcf/logf) and float literals; the
     original called the double overloads inside a float kernel, promoting
     every expression to double — slow and inconsistent with the separate
     _64 variant below.
   - dropped a trailing __syncthreads(): the kernel uses no shared memory
     and the barrier sat after all work, so it had no effect. */
__global__ void _BGH_32_11(int n, float *x, float *y, float *z) {
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  while (i < n) {
    float B=x[i];
    float xi=y[i];
    float flag = 1.0f;
    if (B < 0.0f)
    {
      flag = -1.0f;
      B = -B;
      xi = -xi;
    }
    if (B > 1e6f) /*as saying infty*/
    {
      z[i] = xi > 30.0f ? flag*(xi + 1.0f/xi * (1.0f - 2.0f/(xi*xi) * (1.0f - 5.0f/(xi*xi) * (1.0f - 7.4f/(xi*xi))))) : flag * (expf(-(xi*xi)/2.0f) / (1.2533f * erfcf(xi / 1.4142f)));
    }
    else if (B > 15.0f)
    {
      float GH = xi > 30.0f ? flag*(xi + 1.0f/xi * (1.0f - 2.0f/(xi*xi) * (1.0f - 5.0f/(xi*xi) * (1.0f - 7.4f/(xi*xi))))) : flag * (expf(-(xi*xi)/2.0f) / (1.2533f * erfcf(xi / 1.4142f)));
      float H = 0.5f * erfcf(xi / 1.4142136f);
      z[i] = GH/(1.0f + expf(-2.0f*B - logf(H)));
    }
    else
    {
      float G = 0.398942f * expf(-xi*xi/2.0f);
      float H = 0.5f * erfcf(xi / 1.4142136f);
      z[i] = flag * G / (H + 1.0f/expm1f(2.0f*B));
    }
    i += blockDim.x * gridDim.x;
  }
}
#ifdef __cplusplus
extern "C" {
#endif
void BGH_32_11(int n, float *x, float *y, float *z) {
_BGH_32_11<<<128,128>>>(n,x,y,z);
}
#ifdef __cplusplus
}
#endif
/* Element-wise BGH transform, double-precision variant.
   FIXES vs. original:
   - BUG: the last branch called expm1f (float) on a double argument,
     silently truncating to single precision in a _64 kernel; now expm1.
   - dropped a trailing __syncthreads(): no shared memory is used and the
     barrier sat after all work, so it had no effect. */
__global__ void _BGH_64_11(int n, double *x, double *y, double *z) {
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  while (i < n) {
    double B=x[i];
    double xi=y[i];
    double flag = 1.0;
    if (B < 0.0)
    {
      flag = -1.0;
      B = -B;
      xi = -xi;
    }
    if (B > 1e6) /*as saying infty*/
    {
      z[i] = xi > 30 ? flag*(xi + 1/xi * (1 - 2/(xi*xi) * (1 - 5/(xi*xi) * (1 - 7.4/(xi*xi))))) : flag * (exp(-(xi*xi)/2) / (1.2533 * erfc(xi / 1.4142)));
    }
    else if (B > 15.0)
    {
      double GH = xi > 30 ? flag*(xi + 1/xi * (1 - 2/(xi*xi) * (1 - 5/(xi*xi) * (1 - 7.4/(xi*xi))))) : flag * (exp(-(xi*xi)/2) / (1.2533 * erfc(xi / 1.4142)));
      double H = 0.5 * erfc(xi / 1.4142136);
      z[i] = GH/(1 + exp(-2*B - log(H)));
    }
    else
    {
      double G = 0.398942 * exp(-xi*xi/2);
      double H = 0.5 * erfc(xi / 1.4142136);
      z[i] = flag * G / (H + 1/expm1(2*B));
    }
    i += blockDim.x * gridDim.x;
  }
}
#ifdef __cplusplus
extern "C" {
#endif
void BGH_64_11(int n, double *x, double *y, double *z) {
_BGH_64_11<<<128,128>>>(n,x,y,z);
}
#ifdef __cplusplus
}
#endif
|
20,874 | //Alfred Shaker
//10-13-2015
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// CUDA kernel
// Element-wise vector addition: c[i] = a[i] + b[i].
// One thread per element; threads whose global id falls past the end of
// the vectors return without touching memory.
__global__ void vectorSum(int *a, int *b, int *c, int n)
{
    int id = blockDim.x * blockIdx.x + threadIdx.x;
    if (id >= n)
        return;
    c[id] = a[id] + b[id];
}
int main(int argc, char* argv[])
{
//size of vectors
int size = 100;
//host side vectors
int *h_a, *h_b, *h_c;
//device side vectors
int *d_a, *d_b, *d_c;
//size of each vector in bytes
size_t bytes = size*sizeof(int);
//allocate memory for host side vectors
h_a = (int*)malloc(bytes);
h_b = (int*)malloc(bytes);
h_c = (int*)malloc(bytes);
//allocate memory for device side vectors
cudaMalloc(&d_a, bytes);
cudaMalloc(&d_b, bytes);
cudaMalloc(&d_c, bytes);
//initiate the vectors with random numbers between 0 and 9
int i;
for(i = 0; i < size; i++)
{
h_a[i] = rand() % 10;
h_b[i] = rand() % 10;
}
//copy host vectors to device vectors
cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);
//number of threads in each block
int blockSize = 1024;
//number of thread blocks in grid
int gridSize = (int)ceil((float)size/blockSize);
//execute kernel function
vectorSum<<<gridSize, blockSize >>>(d_a, d_b, d_c, size);
//copy the vector back to the host from the device only for the result vector
cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);
//print out each result, preceeding it with the values of the added vectors for that index
for(i = 0; i < size; i++)
printf("a: %d, b: %d, c[%d] = %d\n",h_a[i], h_b[i], i, h_c[i] );
//release the device side memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
//release the host side memory
free(h_a);
free(h_b);
free(h_c);
return 0;
}
|
20,875 | /*
@Author: 3sne ( Mukur Panchani )
@FileName: q3MatrixMul.cu
@Task: CUDA program computes product of two matrices, using different parallelism techniques.
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
__global__ void MatMulRowThreads(int *a, int *b, int *c, int m, int n, int q) {
int id = threadIdx.x; //row id
for ( int i = 0; i < q; i++ ) {
c[id * q + i] = 0;
for ( int k = 0; k < n; k++ ) {
c[id * q + i] += a[id * n + k] * b[k * q + i];
}
}
}
// One thread computes one full COLUMN of C (m x q), walking down the rows.
// Expected launch: <<<1, q>>> — threadIdx.x is the column index.
__global__ void MatMulColThreads(int *a, int *b, int *c, int m, int n, int q) {
  int id = threadIdx.x; //column id
  for ( int i = 0; i < m; i++ ) {
    c[i * q + id] = 0;
    for ( int k = 0; k < n; k++ ) {
      // BUG FIX: A is m x n, so row i starts at a[i * n], not a[i * m].
      // The original read garbage (or out of bounds) whenever m != n.
      c[i * q + id] += a[i * n + k] * b[k * q + id];
    }
  }
}
// One thread computes one ELEMENT of C (m x q).
// Expected launch: <<<1, dim3(q, m)>>> — threadIdx.x is the column,
// threadIdx.y the row.
__global__ void MatMulElementThread(int *a, int *b, int *c, int m, int n, int q) {
  int ci = threadIdx.x;
  int ri = threadIdx.y;
  // BUG FIX: C has q columns, so element (ri, ci) lives at ri * q + ci.
  // The original used ri * m + ci, which scatters/overlaps whenever m != q.
  int id = ri * q + ci;
  c[id] = 0;
  for ( int k = 0; k < n; k++ ) {
    c[id] += a[ri * n + k] * b[k * q + ci];
  }
}
// Driver: reads A (m x n) and B (p x q), requires n == p, and multiplies
// them on the GPU with a user-selected parallelism scheme.
int main() {
  int *matA, *matB, *matC;
  int *da, *db, *dc;
  int m, n, p, q;
  printf("== Enter Dimension of Matrix A (m x n) ==\n");
  printf("m >> "); scanf("%d", &m);
  printf("n >> "); scanf("%d", &n);
  matA = (int*)malloc(sizeof(int) * m * n);
  printf("== Matrix A Elements ==\n");
  for(int i = 0; i < m * n; i++) {
    scanf("%d", &matA[i]);
  }
  printf("== Enter Dimension of Matrix B (p x q) ==\n");
  printf("p >> "); scanf("%d", &p);
  printf("q >> "); scanf("%d", &q);
  matB = (int*)malloc(sizeof(int) * p * q);
  // Inner dimensions must agree for A*B to exist.
  if ( n != p ) {
    printf("[ERROR] n & p must be equal, Exiting ...\n");
    exit(EXIT_FAILURE);
  }
  printf("== Matrix B Elements ==\n");
  for(int i = 0; i < p * q; i++) {
    scanf("%d", &matB[i]);
  }
  // Result C is m x q.
  matC = (int*)malloc(sizeof(int) * m * q);
  cudaMalloc((void **) &da, sizeof(int) * m * n);
  cudaMalloc((void **) &db, sizeof(int) * p * q);
  cudaMalloc((void **) &dc, sizeof(int) * m * q);
  cudaMemcpy(da, matA, sizeof(int) * m * n, cudaMemcpyHostToDevice);
  cudaMemcpy(db, matB, sizeof(int) * p * q, cudaMemcpyHostToDevice);
  printf("\nChoose a degree of parallelism >> \n");
  printf("1. Thread handles row\n");
  printf("2. Thread handles column\n");
  printf("3. Thread handles element\nChoice >> ");
  int choice = 0;
  scanf("%d", &choice);
  // 2-D block for the per-element scheme: x = column (q), y = row (m).
  dim3 block_conf (q, m);
  switch(choice) {
    case 1://Part A: 1 Thread handles 1 row >>
      printf("Chose: Thread handles row\n");
      MatMulRowThreads<<<1,m>>>(da, db, dc, m, n, q);
      break;
    case 2://Part B: 1 Thread handles 1 column >>
      printf("Chose: Thread handles column\n");
      MatMulColThreads<<<1,q>>>(da, db, dc, m, n, q);
      break;
    case 3://Part C: 1 Thread handles 1 element >>
      printf("Chose: Thread handles element\n");
      MatMulElementThread<<<1, block_conf>>>(da, db, dc, m, n, q);
      break;
    default:
      printf("Bad Option, exiting ...\n");
      exit(EXIT_FAILURE);
      break;
  }
  // BUG FIX: C holds m * q ints; the original copied m * n, which reads
  // past the device buffer (or truncates the result) whenever n != q.
  cudaMemcpy(matC, dc, sizeof(int) * m * q, cudaMemcpyDeviceToHost);
  printf("== Matrix C Elements (computed by choice %d)==\n", choice);
  for ( int i = 0; i < m; i++ ) {
    for ( int j = 0; j < q; j++ ) {
      printf("%d ", matC[i * q + j]);
    }
    printf("\n");
  }
  cudaFree(da);
  cudaFree(db);
  cudaFree(dc);
  free(matA);
  free(matB);
  free(matC);
  return 0;
}
20,876 | #include <stdio.h>
#include <cuda.h>
#include <random>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <iostream>
int main(int argc, char *argv[]) {
int n = atol(argv[1]);
// set up random number from -1 to 1 generator
std::random_device entropy_source;
std::mt19937_64 generator(entropy_source());
const float min = -1.0, max = 1.0; // The range for the random number
std::uniform_real_distribution<float> dist(min, max);
thrust::host_vector<float> hostVec(n); // creat host vector
for (int i = 0; i < n; i++) { // fill host vector based on random gen
hostVec[i] = dist(generator);
}
thrust::device_vector<float> deviceVec = hostVec; // copy to device
//start timer
// set up timer
cudaEvent_t start;
cudaEvent_t stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
//do the timing
cudaEventRecord(start);
float result = thrust::reduce(deviceVec.begin() , deviceVec.end(), 0.0, thrust::plus<float>());
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float ms;
cudaEventElapsedTime(&ms, start, stop);
std::cout << result << std::endl;
std::cout << ms << std::endl;
}
|
20,877 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,int var_2,int var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16) {
for (int i=0; i < var_1; ++i) {
for (int i=0; i < var_2; ++i) {
for (int i=0; i < var_3; ++i) {
comp += (-1.1191E-36f / var_4);
if (comp <= (-1.6213E5f / var_5)) {
comp += var_6 / tanhf(+1.8095E-42f);
comp += log10f(atanf(+1.4939E-41f));
comp += (var_7 / var_8);
comp += (var_9 - (-1.6014E34f - (+0.0f * sinf((var_10 / sinhf(atanf(atan2f((-1.5099E35f + -1.8193E36f), (+0.0f / (var_11 / (var_12 * var_13 * -1.0279E-44f * +1.5324E35f)))))))))));
}
if (comp == sqrtf((var_14 - var_15 - (+1.4097E-44f * -1.3049E34f)))) {
comp += (+1.5851E-44f / var_16);
}
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
int tmp_4 = atoi(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17);
cudaDeviceSynchronize();
return 0;
}
|
20,878 | #include<bits/stdc++.h>
using namespace std;
const int MAX_ARRAY_SIZE = 266;
// Stencil sum with radius R: A[g] += sum of its R neighbours on each side,
// staged through shared memory with halo cells of width R on both ends.
// Launch: grid covers N elements with blockDim.x = block_size threads.
__global__ void stanSum(int N, int *A, int R){
    int i = blockIdx.x, j = threadIdx.x, block_size = blockDim.x;
    __shared__ int tmp[MAX_ARRAY_SIZE];
    // Shared tile must hold the block plus both halos.
    assert(MAX_ARRAY_SIZE >= block_size + 2*R);
    int gidx = i*block_size + j;
    int lidx = R + j;
    // Centre cells: zero-pad past the end of A.
    tmp[lidx] = gidx<N? A[gidx]:0;
    if(j<R){
        // BUG FIX: the left-halo load also needs gidx-R < N — in the last
        // (partial) block, gidx-R can itself be past the end of A.
        tmp[lidx - R] = (gidx>=R && gidx-R<N)? A[gidx-R]:0;
        tmp[lidx + block_size] = gidx+block_size<N? A[gidx+block_size]:0;
    }
    __syncthreads();
    // BUG FIX: guard the write — threads of the last block with gidx >= N
    // previously wrote past the end of A.
    if(gidx < N){
        for(int k=1;k<=R;++k) A[gidx] += tmp[lidx+k] + tmp[lidx-k];
    }
    return;
}
// Driver: fills A with 0..N-1, runs the stencil twice on the GPU, and
// prints ~10 sampled results plus the elapsed wall-clock time.
// Usage: prog [N] [R] [block_size]
int main(int argc, char *argv[]){
    int N = 256, R = 10, block_size = 32;
    if(argc > 1) N = stoi(argv[1]);
    if(argc > 2) R = stoi(argv[2]);
    if(argc > 3) block_size = stoi(argv[3]);
    // Ceiling division so a partial last block still covers the tail.
    int n_block = (N + block_size - 1)/block_size;
    int *A = new int [N];
    // Initialize A with its own indices.
    for(int i=0;i<N;++i) A[i] = i;
    clock_t start_time, end_time;
    // Record the starting time.
    start_time = clock();
    int *dA;
    cudaMalloc((void **)&dA, N*sizeof(int));
    cudaMemcpy(dA, A, N*sizeof(int), cudaMemcpyHostToDevice);
    for(int i=0;i<2;++i){
        stanSum<<<n_block, block_size>>>(N, dA, R);
    }
    cudaMemcpy(A, dA, N*sizeof(int), cudaMemcpyDeviceToHost);
    // BUG FIX: the original called cudaFree(A) on the HOST pointer (invalid)
    // and leaked the device buffer; free the device pointer instead.
    cudaFree(dA);
    // Record the ending time.
    end_time = clock();
    double dt = double(end_time - start_time)/CLOCKS_PER_SEC;
    cout<<"Time Usage: "<<dt<<"s\nResults:\n";
    // BUG FIX: for N < 10 the original stride was 0, looping forever.
    int stride = N/10;
    if(stride == 0) stride = 1;
    for(int i=0;i<N;i+=stride) cout<<A[i]<<' ';
    cout<<endl;
    delete [] A;
    return 0;
}
|
20,879 |
#include "NA_MathsLib.cuh"
#include <math.h>//used to generate lookup tables when object is constructed
#include <random>
#include <time.h>
const float NA_MathsLib::PI = 3.14f;//this is a stupid compiler rule in my opinion
NA_MathsLib na_maths; //contructs itself, access with extern NA_MathsLib na_maths;
NA_MathsLib::NA_MathsLib(void)
{
for(int i=0; i<NA_M_LOOKUP_MAX; i++)//walk through lookup table assigning values from math.h
{
sinLookup[(int)i] = (float) sinf(degToRad((float)i*NA_M_LOOKUP_UNIT));
cosLookup[(int)i] = (float) cosf(degToRad((float)i*NA_M_LOOKUP_UNIT));
tanLookup[(int)i] = (float) tanf(degToRad((float)i*NA_M_LOOKUP_UNIT));
}
}
NA_MathsLib::~NA_MathsLib(void)
{
}
#pragma region dice
void NA_MathsLib::seedDice(unsigned int seed)
{
srand(seed);
}
int NA_MathsLib::dice(int bottom, int top)
{
//https://answers.yahoo.com/question/index?qid=20100615173601AAMHzBy
return (rand() % ((top + 1) - bottom)) + bottom;//between bottom and top, inclusive
}
int NA_MathsLib::dice(int top)
{
return (rand() % (top+1)); //between 0 and top, inclusive
}
#pragma endregion
#pragma region trig
float NA_MathsLib::degToRad(float d)
{
return (PI*d)/180;
}
float NA_MathsLib::radToDeg(float r)
{
return (r*180)/PI;
}
int NA_MathsLib::degToLookupIndex(float d)
{
return (int) d*(NA_M_LOOKUP_UNIT);
}
float NA_MathsLib::sin(float d)
{
return sinLookup[degToLookupIndex(d)];
}
float NA_MathsLib::cos(float d)
{
return cosLookup[degToLookupIndex(d)];
}
float NA_MathsLib::tan(float d)
{
return tanLookup[degToLookupIndex(d)];
}
#pragma endregion
//returns true if the two floats are within 0.001 of each other
bool NA_MathsLib::aboutEqual(float v1, float v2, float epsilon)
{
if (v1 > v2)
{
if( v1 - v2 < epsilon) return true;
}
else
{
if( v2 - v1 < epsilon) return true; //this returns true if they are exactly equal too, therefore a if(v1 == v2) statement is not required
}
return false; //if this line is executed then the two values are not similar enough to be considered the same
} |
20,880 | /*
* simple.cu
* includes setup funtion called from "driver" program
* also includes kernel function 'cu_fillArray()'
*/
#include <stdio.h>
#include <stdlib.h>
//#include <string.h>
#define BLOCK_SIZE 32
// The __global__ directive identifies this function as a kernel
// Note: all kernels must be declared with return type void
// Kernel: writes each element's own global index into array_d.
// Launch contract: gridDim.x * BLOCK_SIZE must equal the array length —
// there is no bounds check, so any surplus thread writes out of bounds.
// NOTE(review): the host wrapper launches arraySize/BLOCK_SIZE blocks,
// which only covers the full array when arraySize is a multiple of
// BLOCK_SIZE — confirm callers guarantee that.
__global__ void cu_fillArray (int *array_d)
{
	int x;
	// Note: CUDA contains several built-in variables
	// blockIdx.x returns the blockId in the x dimension
	// threadIdx.x returns the threadId in the x dimension
	x = blockIdx.x * BLOCK_SIZE + threadIdx.x;
	array_d[x] = x;
}
// This function is called from the host computer.
// It manages memory and calls the function that is executed on the GPU
// Host driver: copies `array` to the device, runs cu_fillArray to overwrite
// it with index values, and copies the result back. Every CUDA call's
// return status is checked; any failure prints a message and exits.
// NOTE(review): dimgrid = arraySize/BLOCK_SIZE truncates — if arraySize is
// not a multiple of BLOCK_SIZE the tail elements are never written (and a
// ceil-div here would make the unguarded kernel write out of bounds);
// confirm callers always pass a multiple of BLOCK_SIZE.
extern "C" void fillArray (int *array, int arraySize)
{
	//a_d is the GPU counterpart of the array that exists in host memory
	int *array_d;
	cudaError_t result;
	// allocate space in the device
	result = cudaMalloc ((void**) &array_d, sizeof(int) * arraySize);
	if (result != cudaSuccess) {
		fprintf(stderr, "cudaMalloc failed.");
		exit(1);
	}
	//copy the array from host to array_d in the device
	result = cudaMemcpy (array_d, array, sizeof(int) * arraySize, cudaMemcpyHostToDevice);
	if (result != cudaSuccess) {
		fprintf(stderr, "cudaMemcpy failed.");
		exit(1);
	}
	// set execution configuration
	dim3 dimblock (BLOCK_SIZE);
	dim3 dimgrid (arraySize/BLOCK_SIZE);
	// actual computation: Call the kernel
	cu_fillArray <<<dimgrid, dimblock>>> (array_d);
	// transfer results back to host (blocking copy also syncs the kernel)
	result = cudaMemcpy (array, array_d, sizeof(int) * arraySize, cudaMemcpyDeviceToHost);
	if (result != cudaSuccess) {
		fprintf(stderr, "cudaMemcpy failed.");
		exit(1);
	}
	// release the memory on the GPU
	result = cudaFree (array_d);
	if (result != cudaSuccess) {
		fprintf(stderr, "cudaFree failed.");
		exit(1);
	}
}
|
20,881 | #include "includes.h"
// Fused in-place update: WHAT[i] = WHAT[i] * MULT[i] + AMOUNT * WITH[i],
// then MULT[i] is reset to 1.0f (the multiplier is "consumed").
// NOTE(review): no bounds check on idx — assumes the launched grid covers
// exactly the array length; confirm at the call site.
__global__ void UpdateSecond(float *WHAT , float *WITH , float AMOUNT , float *MULT)
{
	int idx = threadIdx.x + blockIdx.x * blockDim.x;
	WHAT[idx] *=MULT[idx];
	WHAT[idx] +=AMOUNT*WITH[idx];
	MULT[idx] = 1.0f;
}
20,882 | #include<iostream>
//#include<stdio.h>
//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// Copies d_pixel into d_temp using a 2-D thread grid.
// NOTE(review): tmp = x_index + 2*y_index looks like it was meant to be
// x_index + width*y_index with width = gridDim.x*blockDim.x (8 in the
// caller's launch). With the factor 2, distinct (x, y) pairs collide on
// the same linear index and most of the 8x8 buffer is never touched —
// confirm intent before relying on the output.
__global__ void evalJulia(int *d_pixel,
                          int *d_temp){
 int x_index = threadIdx.x + blockIdx.x*blockDim.x;
 int y_index = threadIdx.y + blockIdx.y*blockDim.y;
 int tmp = x_index + 2*y_index;
 d_temp[tmp] = d_pixel[tmp];
}
//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#define N 64
// Driver: fills an 8x8 host buffer, runs evalJulia on a 4x4 grid of 2x2
// blocks (8x8 threads total), and prints the buffer before and after.
int main()
{
  int *d_pixel;
  int *d_temp;
  int size = N * sizeof(int);
  int *temp = new int[N];
  int *h_temp = new int[N];
  // Fill and echo the 8x8 input pattern (value == linear index).
  for (int y=0;y<8;y++)
    for(int x=0;x<8;x++)
    {
      temp[x + 8*y] = x + 8*y;
      std::cout<<temp[x+8*y]<<std::endl;
    }
  std::cout<<"test begins"<<std::endl;
  dim3 gridDim(4,4);
  dim3 blockDim(2,2);
  cudaMalloc((void**)&d_pixel, size);
  cudaMalloc((void**)&d_temp, size);
  // BUG FIX: cudaMemcpy takes (dst, src, ...). The original passed the host
  // buffer as the destination, so d_pixel was never initialized on device.
  cudaMemcpy(d_pixel, temp, size, cudaMemcpyHostToDevice);
  evalJulia<<<gridDim,blockDim>>>(d_pixel, d_temp);
  cudaMemcpy(h_temp, d_temp, size, cudaMemcpyDeviceToHost);
  // BUG FIX: print the result copied back from the device (h_temp);
  // the original re-printed the untouched input buffer.
  for (int y=0;y<8;y++)
    for(int x=0;x<8;x++)
    {
      std::cout<<h_temp[x+8*y]<<std::endl;
    }
  std::cout<<"last kernel thread printed"<<std::endl;
  cudaFree(d_pixel);
  cudaFree(d_temp);
  // BUG FIX: memory from new[] must be released with delete[], not delete.
  delete [] h_temp;
  delete [] temp;
  return 0;
}
|
20,883 | #include <stdio.h>
__global__
void matAddKernel(float *A, float *B, float *C, int n){
int i = threadIdx.x + blockDim.x * blockIdx.x, j;
if(i < n){
for(j = 0; j < n; j++){
C[i+j*n] = A[i+j*n] + B[i+j*n];
}
}
}
// Host wrapper for element-wise addition of two n x n matrices:
// allocates device buffers, copies A and B over, launches one thread per
// column (1024 threads/block), and copies the sum back into C.
void matAdd(float* A, float* B, float* C, int n){
    int size = n*n*sizeof(float);
    float *d_A, *d_B, *d_C;
    cudaMalloc((void **) &d_A, size);
    cudaMalloc((void **) &d_B, size);
    cudaMalloc((void **) &d_C, size);
    cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);
    matAddKernel<<<ceil(n/1024.0), 1024>>>(d_A, d_B, d_C, n);
    // Blocking copy also synchronizes with the kernel above.
    cudaMemcpy(C, d_C, size, cudaMemcpyDeviceToHost);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
int main(){
int n,i,j;
float *h_A,*h_B,*h_C;
printf("ingrese el tamaño de la matriz:\n");
scanf("%d", &n);
h_A = (float*) malloc(n*n*sizeof(float));
h_B = (float*) malloc(n*n*sizeof(float));
h_C = (float*) malloc(n*n*sizeof(float));
for(i = 0; i < n; i++){
for(j = 0; j < n; j++){
h_A[i*n+j] = 1;
h_B[i*n+j] = 1;
}
}
matAdd(h_A,h_B,h_C,n);
for(i = 0; i < n; i++){
for(j = 0; j < n; j++){
printf("%f ", h_C[i*n+j]);
}
printf("\n");
}
printf("\n");
return 0;
} |
20,884 | #include<stdio.h>
// Prints every even global thread id in the launched grid.
__global__ void evenNum_gpu()
{
    // Global id across the 1-D grid.
    int gid = threadIdx.x + blockDim.x*blockIdx.x;
    // Even check via the low bit (ids are non-negative).
    if ((gid & 1) == 0)
    {
        printf("Even number: %d\n", gid);
    }
}
int main() {
 int numUpperBound = 10;
 printf("\nEven numbers less than %d (GPU version):\n", numUpperBound);
 // NOTE(review): this launches 3 blocks x 10 threads, so global ids run
 // 0..29 and the kernel prints even numbers up to 28 — more than the
 // "less than 10" the banner promises. <<<1, numUpperBound>>> would match
 // the message; confirm which was intended.
 evenNum_gpu<<<3, numUpperBound>>>();
 // cudaDeviceReset flushes device-side printf output before exit.
 cudaDeviceReset();
 return 0;
}
|
20,885 | #include <stdio.h>
#define NUM 1024
__shared__ int v[NUM];
// Intentionally demonstrates a barrier-divergence bug: only the even
// threads reach the __syncthreads() below, while odd threads take the else
// branch whose barrier is commented out. Executing a block-wide barrier
// under divergent control flow is undefined behavior (typically a hang).
// Do NOT "fix" this — it exists to trigger the mismatch.
__global__ void deadlock() {
  if (threadIdx.x % 2 == 0) {
    v[threadIdx.x]++;
    __syncthreads();
  }
  else {
    v[threadIdx.x]--;
    //__syncthreads(); // remove this one to incur a barrier dismatch
  }
}
// Launches the intentionally-broken kernel with one block of NUM threads.
// NOTE(review): there is no synchronization or error check after the
// launch, so the process may exit before the hang/fault is observed.
int main() {
  deadlock<<<1,NUM>>>();
  return 0;
}
|
20,886 | #include "stdio.h"
// Device-side scalar addition: stores a + b into *c.
__global__ void add(int a,int b,int *c)
{
    int sum = a + b;
    *c = sum;
}
// Adds two integers on the GPU and prints the result.
int main()
{
    int a = 3;
    int b = 4;
    int c;
    int *dev_c;
    // One int of device memory holds the result.
    cudaMalloc((void**)&dev_c, sizeof(int));
    // A single thread is enough for a scalar add.
    add<<<1,1>>> (a, b, dev_c);
    // Blocking copy also synchronizes with the kernel.
    cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost);
    printf("%d + %d is %d \n", a, b, c);
    cudaFree(dev_c);
    return 0;
}
|
20,887 | #include <stdio.h>
#include <cuda_runtime.h>
#include <chrono>
#include <iostream>
// RAII wrapper around a pair of CUDA events for timing GPU work.
// Usage: t.Start(); <work>; t.Stop(); float ms = t.Elapsed();
class GpuTimer {
  public:
    cudaEvent_t start;
    cudaEvent_t stop;
    // Creates both events.
    GpuTimer()
    {
      cudaEventCreate(&start);
      cudaEventCreate(&stop);
    }
    // Destroys both events when the timer goes out of scope.
    ~GpuTimer()
    {
      cudaEventDestroy(start);
      cudaEventDestroy(stop);
    }
    // Records the start event on the default stream.
    void Start()
    {
      cudaEventRecord(start, 0);
    }
    // Records the stop event on the default stream.
    void Stop()
    {
      cudaEventRecord(stop, 0);
    }
    // Blocks until the stop event has completed, then returns the time
    // between the two events in milliseconds.
    float Elapsed()
    {
      float elapsed;
      cudaEventSynchronize(stop);
      cudaEventElapsedTime(&elapsed, start, stop);
      return elapsed;
    }
};
// C[i] = alpha * A[i] + B[i] for every i in [0, numElements).
// One thread per element; out-of-range threads exit immediately.
__global__ void
zaxpy(const double alpha, const double *A, const double *B, double *C, int numElements)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= numElements)
        return;
    C[idx] = alpha * A[idx] + B[idx];
}
int
main(int argc, char **argv)
{
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
auto timer = GpuTimer();
// Print the vector length to be used, and compute its size
long numElements = atoi(argv[1]);
size_t size = numElements * sizeof(double);
printf("%ld ", numElements);
srand48(time(0));
double *h_A = (double *)malloc(size);
double *h_B = (double *)malloc(size);
double *h_C = (double *)malloc(size);
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < numElements; ++i)
{
h_A[i] = 1.5;
h_B[i] = 2.3;
}
// Allocate the device input vector A
double *d_A = NULL;
err = cudaMalloc((void **)&d_A, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
double *d_B = NULL;
err = cudaMalloc((void **)&d_B, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector C
double *d_C = NULL;
err = cudaMalloc((void **)&d_C, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the CUDA Kernel
int threadsPerBlock = 256;
int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
//Warmup
zaxpy<<<blocksPerGrid, threadsPerBlock>>>(24.0, d_A, d_B, d_C, numElements);
for(int i=0; i<10; i++) {
timer.Start();
std::chrono::steady_clock::time_point begin = std::chrono::steady_clock::now();
zaxpy<<<blocksPerGrid, threadsPerBlock>>>(24.0, d_A, d_B, d_C, numElements);
cudaDeviceSynchronize();
std::chrono::steady_clock::time_point end= std::chrono::steady_clock::now();
timer.Stop();
std::cout << std::chrono::duration_cast<std::chrono::microseconds>(end - begin).count() << "|" << timer.Elapsed() *1000 << std::endl;
}
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch zaxpy kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// printf("%.1f %.1f %.1f ", h_C[0], h_C[1000], h_C[numElements-1]);
// Verify that the result vector is correct
for (int i = 0; i < numElements; ++i)
{
if (fabs((24.0f*h_A[i]) + h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
// Free device global memory
err = cudaFree(d_A);
err = cudaFree(d_B);
err = cudaFree(d_C);
// Free host memory
free(h_A);
free(h_B);
free(h_C);
return 0;
}
|
20,888 | #pragma once
#include <limits>
#include <curand.h>
#include <curand_kernel.h>
#define INF FLT_MAX
#define EPS 1e-8
#define INT_INF INT_MAX
// Math/RNG helper declarations for the ray tracer.
namespace RayTracing
{
// Converts an angle in degrees to radians (host-only declaration).
float DegreesToRadians(const float degrees);
// Clamps x into [xMin, xMax]; callable from both host and device code.
__host__ __device__
float Clamp(
    const float x,
    const float xMin,
    const float xMax
);
// uniform distribution
// NOTE(review): the range is not stated here -- presumably [0, 1);
// confirm against the definition.
float GenRandom();
// uniform distribution
// Uniform random float between a and b; confirm endpoint handling in the
// definition.
float GenRandom(
    const float a,
    const float b
);
} // namespace RayTracing
|
20,889 | //%%cu
/**************************************************************************
C-DAC Tech Workshop : hyPACK-2013
October 15-18, 2013
Objective : Program to solve a solution of Poisson Eq. (PDE) on GPU
Input : No. of Grid Points in X-Dir, No. of Grid Points in Y-Dir
and maximum number of iterations
Output : Solution Vector.
Created : August-2013
E-mail : hpcfte@cdac.in
*****************************************************************************/
#include<stdio.h>
#include<cuda.h>
#include<stdlib.h>
#include<sys/time.h>
//---------------------------------------------------------------------------
#define BLOCKSIZE 16
#define TOLERANCE 1.0E-06
//#define TOPBOUNDARYVALUE 4.1f
//#define BOTTOMBOUNDARYVALUE 3.1f
//#define LEFTBOUNDARYVALUE 1.1f
//#define RIGHTBOUNDARYVALUE 2.1f
#define TOPBOUNDARYVALUE 1.0f
#define BOTTOMBOUNDARYVALUE 1.0f
#define LEFTBOUNDARYVALUE 1.0f
#define RIGHTBOUNDARYVALUE 1.0f
//----------------------------------------------------------------------------
void IntializeAndSetBoundaryConditions(float **, float **, int , int , int );
void SetBoundaryCondition(int , int , float , int , float *, float *);
void IntializeUInteriorIndex(int **, int , int , int );
float GetTheMaximumValue(float *,int );
void IntializeUDifference(float **, int );
//------------------------------------------------------------------------
//Pragma routine to report the detail of cuda error
#define CUDA_SAFE_CALL(call) \
do{ \
cudaError_t err = call; \
if(err != cudaSuccess) \
{ \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, cudaGetErrorString( err) ); \
exit(1); \
} \
} while (0) \
//------------------------------------------------------------------------------------------
//Kernel that performs the Jacobi Iteration
// One sweep of the Jacobi iteration over the interior grid points.
// DeviceUInteriorIndex maps a compact interior index to a flattened grid
// index; each thread strides through that list in steps of
// ThreadDim*ThreadDim (the host launches a single ThreadDim x ThreadDim block).
// NOTE(review): DeviceUOld[Center] is overwritten in the same sweep in which
// other threads still read it as a neighbour, with no synchronization inside
// the loop, so updates can mix old and new values (Gauss-Seidel-like,
// nondeterministic) rather than being a pure Jacobi step -- confirm intent.
// NOTE(review): DeviceUDifference stores the SIGNED difference; the host-side
// maximum (GetTheMaximumValue) ignores negative differences.
__global__ void JacobiIteration(float *DeviceUOld, float *DeviceUNew, int *DeviceUInteriorIndex, float *DeviceUDifference, int NoPointsX, int Size, int ThreadDim)
{
    // Linearize the 2-D thread index inside the (single) block.
    int tidx = threadIdx.x;
    int tidy = threadIdx.y;
    int ThreadIndex = (ThreadDim * tidx) + tidy;
    int MaxNumThread = ThreadDim * ThreadDim;
    int CurrentColumnIndex;
    int pass = 0;
    int Center, Left, Right, Bottom, Top;
    // Stride through the compact interior-point list, MaxNumThread at a time.
    while( (CurrentColumnIndex = (ThreadIndex + MaxNumThread * pass)) < Size )
    {
        Center = DeviceUInteriorIndex[CurrentColumnIndex];
        Left = Center - 1;
        Right = Center + 1;
        Top = Center - NoPointsX;
        Bottom = Center + NoPointsX;
        //Updating the UNew values
        DeviceUNew[Center] = 0.25 * (DeviceUOld[Left] + DeviceUOld[Right] + DeviceUOld[Top] + DeviceUOld[Bottom]);
        //Finding the Difference between UNew and UOld
        DeviceUDifference[CurrentColumnIndex] = DeviceUNew[Center] - DeviceUOld[Center];
        //Assigning UNew to UOld
        DeviceUOld[Center] = DeviceUNew[Center];
        pass++;
    }
    // Barrier has no observable effect here: the kernel ends right after it.
    __syncthreads();
}//End of Jacobi Iteration Device function
//----------------------------------------------------------------------------------------------
// Driver: builds a NoPointsX x NoPointsY grid with fixed boundary values,
// runs Jacobi sweeps on the GPU until the largest UNew-UOld difference drops
// below TOLERANCE or MaxIterations is reached, then prints the solution.
// Returns 0 on success; CUDA_SAFE_CALL aborts the process on any CUDA error.
int main(int argc, char **argv)
{
    //Host Variables Declaration
    float *UOld, *UNew, *UDifference;
    int *UInteriorIndex;
    float MaxError = 0.0f;
    struct timeval TV;
    double StartTime,EndTime,ActualTime;
    int NoPointsX, NoPointsY, MaxIterations, NoPointsInterior, Index, PresentIteration,NoPointsTotal;
    //Device Variables Declaration
    float *DeviceUOld, *DeviceUNew, *DeviceUDifference;
    int *DeviceUInteriorIndex;
    // Problem size is hard-coded; the original argv-based configuration is
    // intentionally disabled upstream.
    NoPointsX = 10;
    NoPointsY = 10;
    MaxIterations = 10;
    // Total grid points and interior (non-boundary) points.
    NoPointsTotal = NoPointsX * NoPointsY;
    NoPointsInterior = (NoPointsTotal) - (((2 * NoPointsX) + (2 * NoPointsY)) - 4);
    //Intializing the UOld and seting the Boundary conditions
    IntializeAndSetBoundaryConditions( &UOld, &UNew, NoPointsX, NoPointsY, NoPointsTotal );
    //Intializing the UDifference
    IntializeUDifference( &UDifference,NoPointsInterior );
    //Filling the UInteriorIndex with Index Values of Interior Points
    IntializeUInteriorIndex( &UInteriorIndex, NoPointsX, NoPointsY,NoPointsInterior );
    //Allocating Memory on Device
    CUDA_SAFE_CALL( cudaMalloc( (void **)&DeviceUOld, NoPointsTotal * sizeof(float)));
    CUDA_SAFE_CALL( cudaMalloc( (void **)&DeviceUNew, NoPointsTotal * sizeof(float)));
    CUDA_SAFE_CALL( cudaMalloc( (void **)&DeviceUInteriorIndex, NoPointsInterior * sizeof(int)));
    CUDA_SAFE_CALL( cudaMalloc( (void **)&DeviceUDifference, NoPointsInterior * sizeof(float)));
    //Copying Data from Host to Device
    CUDA_SAFE_CALL( cudaMemcpy((void *)DeviceUOld, (void *)UOld, NoPointsTotal * sizeof(float), cudaMemcpyHostToDevice) );
    CUDA_SAFE_CALL( cudaMemcpy((void *)DeviceUNew, (void *)UNew, NoPointsTotal * sizeof(float), cudaMemcpyHostToDevice) );
    // Fix: this copy previously used sizeof(float) for an int array; the two
    // sizes coincide on mainstream ABIs, but the element type should match.
    CUDA_SAFE_CALL( cudaMemcpy((void *)DeviceUInteriorIndex, (void *)UInteriorIndex, NoPointsInterior * sizeof(int), cudaMemcpyHostToDevice) );
    CUDA_SAFE_CALL( cudaMemcpy((void *)DeviceUDifference, (void *)UDifference, NoPointsInterior * sizeof(float), cudaMemcpyHostToDevice) );
    // A single BLOCKSIZE x BLOCKSIZE block; the kernel strides over all
    // interior points itself.
    dim3 DimGrid( 1,1 );
    dim3 DimBlock( BLOCKSIZE,BLOCKSIZE );
    PresentIteration = 0;
    //start timing computation
    gettimeofday(&TV, NULL);
    StartTime = TV.tv_sec+( TV.tv_usec/1000000.0 );
    while(1)
    {
        //Incrementing the Iteration Number
        PresentIteration++;
        //Invoking the Kernel
        JacobiIteration<<<DimGrid, DimBlock>>>( DeviceUOld, DeviceUNew, DeviceUInteriorIndex, DeviceUDifference, NoPointsX, NoPointsInterior, BLOCKSIZE );
        // The blocking memcpy below also synchronizes with the kernel launch.
        CUDA_SAFE_CALL( cudaMemcpy((void *)UDifference, (void *)DeviceUDifference, NoPointsInterior * sizeof(float), cudaMemcpyDeviceToHost) );
        //Finding the Maximum among the UDifference values
        MaxError = GetTheMaximumValue( UDifference, NoPointsInterior );
        //Checking for the convergence
        if((MaxError < TOLERANCE) || (PresentIteration == MaxIterations))
            break;
    }
    //stop timing computation
    gettimeofday(&TV,NULL);
    EndTime = TV.tv_sec+(TV.tv_usec/1000000.0);
    //calculate difference between start and stop times
    ActualTime = EndTime - StartTime;
    //Copying UNew from Device to Host
    CUDA_SAFE_CALL(cudaMemcpy((void *)UNew, (void *)DeviceUNew, NoPointsTotal * sizeof(float), cudaMemcpyDeviceToHost));
    //Printing the solution
    for(Index = 0; Index < NoPointsTotal; Index++)
        printf(" %f", UNew[Index]);
    printf("Output Vector given above calculated in %d Iterations and in %lf secs.\n",PresentIteration,ActualTime);
    //Freeing the Allocated Memory on Device
    CUDA_SAFE_CALL( cudaFree( DeviceUOld ) );
    CUDA_SAFE_CALL( cudaFree( DeviceUNew ) );
    CUDA_SAFE_CALL( cudaFree( DeviceUInteriorIndex ) );
    CUDA_SAFE_CALL( cudaFree( DeviceUDifference ) );
    //Freeing the Allocated Memory on Host
    free( UOld );
    free( UNew );
    free( UInteriorIndex );
    free( UDifference );
    return(0);
}//End of Main
//-----------------------------------------------------------------------------------------------------
// Allocates UOld/UNew (NoPointsTotal floats each), zero-fills UOld and writes
// the four fixed boundary values along the grid edges of both arrays.
// UNew's interior is left unwritten, as in the original allocation scheme.
void IntializeAndSetBoundaryConditions( float **UOld, float **UNew, int NoPointsX, int NoPointsY, int NoPointsTotal )
{
    // calloc's all-zero bytes are exactly 0.0f for IEEE-754 floats, so this
    // replaces the explicit zeroing loop.
    float *TempUOld = (float *)calloc( NoPointsTotal, sizeof(float) );
    if(TempUOld == NULL)
    {
        printf("Can't allocate the memory for the variable TempUOld \n");
        exit(-1);
    }
    float *TempUNew = (float *)malloc( NoPointsTotal * sizeof(float) );
    if(TempUNew == NULL)
    {
        printf("Can't allocate the memory for the variable TempUNew \n");
        exit(-1);
    }
    // Left and right columns first...
    for(int Row = 0; Row < NoPointsY; Row++)
    {
        SetBoundaryCondition(0, Row, LEFTBOUNDARYVALUE, NoPointsX, TempUOld, TempUNew);
        SetBoundaryCondition((NoPointsX - 1), Row, RIGHTBOUNDARYVALUE, NoPointsX, TempUOld, TempUNew);
    }
    // ...then bottom and top rows (so row values win at the corners, exactly
    // as in the original ordering).
    for(int Col = 0; Col < NoPointsX; Col++)
    {
        SetBoundaryCondition(Col, 0, BOTTOMBOUNDARYVALUE, NoPointsX, TempUOld, TempUNew);
        SetBoundaryCondition(Col, (NoPointsY - 1), TOPBOUNDARYVALUE, NoPointsX, TempUOld, TempUNew);
    }
    *UOld = TempUOld;
    *UNew = TempUNew;
}
//---------------------------------------------------------------------------------------------------
// Writes the fixed boundary value into both grids at cell (i, j), where the
// grid is stored row-major with row length NoPointsX.
void SetBoundaryCondition(int i, int j, float Value, int NoPointsX, float *UOld, float *UNew)
{
    const int FlatIndex = (j * NoPointsX) + i;
    UOld[FlatIndex] = Value;
    UNew[FlatIndex] = Value;
}
//------------------------------------------------------------------------------------------------
// Builds the list of flattened grid indices for all interior points (every
// point except the outermost ring), in row-major order. Exits on OOM.
void IntializeUInteriorIndex(int **UInteriorIndex, int NoPointsX, int NoPointsY,int NoPointsInterior)
{
    int *Indices = (int *)malloc( NoPointsInterior * sizeof(int) );
    if( Indices == NULL )
    {
        printf("Can't allocate memory for the variable TempUInteriorIndex \n");
        exit(-1);
    }
    int Count = 0;
    for(int Row = 1; Row < (NoPointsY - 1); Row++)
        for(int Col = 1; Col < (NoPointsX - 1); Col++)
            Indices[Count++] = (Row * NoPointsX) + Col;
    *UInteriorIndex = Indices;
}
//--------------------------------------------------------------------------------------------------------
// Returns the largest element of Array, floored at 0.0f (an all-negative
// input therefore yields 0.0f, matching the original accumulator seed).
float GetTheMaximumValue(float *Array,int NumberOfElements)
{
    float Largest = 0.0f;
    for(int Position = 0; Position < NumberOfElements; Position++)
        Largest = (Array[Position] >= Largest) ? Array[Position] : Largest;
    return Largest;
}
//---------------------------------------------------------------------------------------------------------------
// Allocates the per-interior-point difference buffer and zero-fills it.
// Exits the process if allocation fails.
void IntializeUDifference(float **UDifference, int NoPointsInterior)
{
    // calloc's all-zero bytes are exactly 0.0f for IEEE-754 floats.
    float *Buffer = (float *)calloc( NoPointsInterior, sizeof(float) );
    if( Buffer == NULL )
    {
        printf("Can't allocate the memory for the variable TempUDifference \n");
        exit(-1);
    }
    *UDifference = Buffer;
}
//------------------------------------------------------------------------------------------------------- |
20,890 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#define NUM_THREADS 512
#define OUTPUT_FILE_NAME "q3.txt"
#define NUM_BLOCKS 1
// int* fileToArray(char file1[], int* n){
// FILE* fptr = fopen(file1, "r");
// FILE* fptr_cpy = fptr;
// char* str = (char*) malloc(sizeof(char)*2048);
// int token;
// int count = 0;
// while (fscanf(fptr, "%d ,", &token) != EOF) {
// count++;
// }
// count++;
// *n = count;
// //fscanf(fptr, "%d,", n);
// int* array;
// cudaMallocManaged(&array, sizeof(int)*(*n));
// int i = 0
// while (fscanf(fptr, "%d ,", &token) != EOF) {
// array[i] = token;
// i++;
// }
// i++;
// fscanf(fptr, "%d", &token);
// array[i] = token;
// // for(int i = 0; i < *n; i++){
// // fscanf(fptr, "%d,", &token);
// // array[i] = token;
// // }
// fclose(fptr);
// return array;
// }
// Reads a comma-separated list of ints from file1 into CUDA managed memory.
// On return *n holds the element count; the caller frees with cudaFree().
// Fixes: the original dereferenced a NULL FILE* when the file was missing,
// and its counting loop (`!= EOF`) spun forever on non-numeric input because
// fscanf returns 0 without consuming the bad characters.
int* fileToArray(char file1[], int* n){
    FILE* fptr = fopen(file1, "r");
    if (fptr == NULL) {
        fprintf(stderr, "failed to open input file %s\n", file1);
        exit(EXIT_FAILURE);
    }
    // First pass: count the elements (stop on EOF or malformed input).
    int token;
    int count = 0;
    while (fscanf(fptr, "%d, ", &token) == 1) {
        count++;
    }
    *n = count;
    int* array;
    cudaMallocManaged(&array, sizeof(int)*(*n));
    // Second pass: fill the managed buffer from the start of the file.
    rewind(fptr);
    for(int i = 0; i < *n; i++){
        if (fscanf(fptr, "%d, ", &token) != 1) break;  // defensive: stop on malformed tail
        array[i] = token;
    }
    fclose(fptr);
    return array;
}
// we want to keep track of how many elements have a 0 in the current bit that is to be masked.
// For each element, record whether the bit(s) selected by `mask` are set:
// result[i] = 1 when (array[i] & mask) == mask, else 0; result2[i] is the
// complement. One thread per element; out-of-range threads exit early.
__global__
void maskArray(int* result2, int* result, int* array, int mask, int n) {
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= n) return;
    int bitSet = ((array[gid] & mask) == mask);
    result[gid] = bitSet ? 1 : 0;
    result2[gid] = bitSet ? 0 : 1;
}
// Per-block inclusive (Hillis-Steele style) prefix sum over `indices`, done
// in place in global memory; the host-side `map` pass stitches blocks
// together afterwards.
// NOTE(review): within one d-iteration a thread reads indices[index-d] while
// the thread owning that slot may be writing it; __syncthreads() only
// separates iterations, not the read from the write inside one iteration, so
// this in-place scan is racy without double buffering -- verify with
// compute-sanitizer racecheck.
// NOTE(review): __syncthreads() cannot order threads of different blocks, so
// cross-block correctness rests entirely on the separate `map` launches.
__global__
void prescan(int* indices, int n) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    //extern __shared__ int local_scan[];
    int from = blockIdx.x * blockDim.x;  // first index owned by this block
    int to = blockIdx.x * blockDim.x + blockDim.x;  // NOTE(review): unused
    for (int d = 1; d < blockDim.x; d *= 2) {
        // Only threads at least d past the block start accumulate.
        if (index + 1 - from > d && index < n) {
            indices[index] += indices[index-d];
        }
        __syncthreads();
    }
}
// Adds the running total of the previous chunk (result[from-1]) to each of
// the blockDim.x elements starting at `from`. Launched with a single block;
// result[from-1] is never written by this launch, so the read is race-free.
__global__
void map(int* result, int from) {
    int target = from + threadIdx.x;
    result[target] += result[from - 1];
}
// Scatter pass of one radix-sort bit: elements whose masked bit is clear go
// to the front of `result` (at zeroes[i]), elements whose bit is set go after
// `pivot` (at ones[i] + pivot). One thread per element.
// Bug fix: `array[index] & mask != 0` parsed as `array[index] & (mask != 0)`,
// i.e. it always tested bit 0 regardless of the current mask. Parenthesized
// so the predicate matches maskArray's `(array[index] & mask) == mask`.
// NOTE(review): ones/zeroes come from an inclusive scan upstream, so these
// destination indices look 1-based -- confirm the intended offset convention.
__global__
void copy(int* result, int* array, int* ones, int* zeroes, int n, int pivot, int mask) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < n) {
        if ((array[index] & mask) != 0) {
            int idx = ones[index];
            result[idx + pivot] = array[index];
        }
        else {
            int idx = zeroes[index];
            result[idx] = array[index];
        }
    }
}
// Parallel LSD radix sort over `array` (managed memory), one bit per pass:
// maskArray builds 0/1 flags, prescan + map form an inclusive prefix sum per
// flag array, copy scatters into `result`, and the final `result` is written
// to OUTPUT_FILE_NAME as a comma-separated list.
// NOTE(review): `result` is never copied back into `array` between passes, so
// every pass re-partitions the ORIGINAL order -- the written output is only
// partitioned by the last bit, not fully sorted; confirm intent.
// NOTE(review): the mask loop stops at bit 10 (i <= 1024), so only the low 11
// bits of the values participate.
// NOTE(review): the debug printf's index zeroes[n - j] with j == 0 (reads
// element n, one past the end) and hard-coded element 999999 -- out of bounds
// unless n > 999999.
// NOTE(review): result_copy is allocated but never used; none of the four
// managed buffers is cudaFree'd; a NULL `output` is still written to after
// the failure message.
void radixSort(int* array, int n) {
    int threads = 1024;
    int blocks = (n + (threads-1)) / threads;  // ceil-div launch sizing
    // stores whether each element in array is odd or not (denoted 1 or 0)
    int* ones;
    int* zeroes;
    int* result_copy; // stores prefix sum of each element
    int* result; // stores final result, sizeof prefix[n-1]
    int local_array_bytes = sizeof(int)*threads;
    cudaMallocManaged(&result_copy, sizeof(int) * n);
    cudaMallocManaged(&result, sizeof(int) * n);
    cudaMallocManaged(&ones, sizeof(int) * n);
    cudaMallocManaged(&zeroes, sizeof(int) * n);
    for (unsigned int i = 1; i <= 1024; i <<= 1) {
        printf("\n\nMask: %x\n\n", i);
        maskArray<<<blocks, threads>>>(zeroes, ones, array, i, n);
        cudaDeviceSynchronize();
        for(int j = 0; j < 10; j++){
            printf("zeroes[%d]: %d - ", n - j, zeroes[n - j]);
            printf("ones[%d]: %d\n", n - j, ones[n - j]);
        }
        printf("zeroes[%d]: %d", 999999, zeroes[999999]);
        printf("ones[%d]: %d", 999999, ones[999999]);
        // Inclusive per-chunk scan of the zero flags, then serially propagate
        // each chunk's total into the next chunk.
        prescan<<<blocks, threads, local_array_bytes>>>(zeroes, n);
        cudaDeviceSynchronize();
        for(int j = threads; j < n; j+=threads) {
            map<<<1, threads>>>(zeroes, j); //map last value of previous group of 1024 onto next group of 1024
            cudaDeviceSynchronize();
        }
        for(int j = 0; j < 10; j++){
            printf("zeroes[%d]: %d - ", j, zeroes[j]);
            printf("zeroes[%d]: %d\n", threads - 2 + j, zeroes[threads - 2 + j]);
        }
        // Same scan + propagate for the one flags.
        prescan<<<blocks, threads, local_array_bytes>>>(ones, n);
        cudaDeviceSynchronize();
        for(int j = threads; j < n; j+=threads) {
            map<<<1, threads>>>(ones, j); //map last value of previous group of 1024 onto next group of 1024
            cudaDeviceSynchronize();
        }
        for(int j = 0; j < 10; j++){
            printf("ones[%d]: %d - ", j, ones[j]);
            printf("ones[%d]: %d\n", threads - 2 + j, ones[threads - 2 + j]);
        }
        printf("zeroes[%d]: %d", 999999, zeroes[999999]);
        printf("ones[%d]: %d", 999999, ones[999999]);
        // Total number of zero-bit elements = start of the one-bit region.
        int pivot = zeroes[n-1];
        // so far we've only calculated the positions of elements with 0 in the bit of interest
        // we need to use new index of the last element with a 0 in the bit of interest as an offset
        // to calculate the positions of elements with 1 in the bit of interest
        // also I think we need to copy results -> input array at the end of each iteration for each bit of interest
        cudaDeviceSynchronize();
        copy<<<blocks, threads>>>(result, array, ones, zeroes, n, pivot, i);
        cudaDeviceSynchronize();
        for(int j = 0; j < 10; j++) {
            printf("result[%d]: %d\n", j, result[j]);
        }
    }
    FILE *output = fopen(OUTPUT_FILE_NAME, "w");
    if(output == NULL) printf("failed to open file %s\n", OUTPUT_FILE_NAME);
    fprintf(output, "%d", result[0]);
    for(int i = 1; i < n ; i++) {
        fprintf(output, ", %d", result[i]);
    }
    fclose(output);
}
// Entry point: loads "inp.txt" into managed memory, echoes the input, runs
// the radix sort (which writes its own output file), then releases the
// managed buffer.
int main(int argc, char* argv[]){
    int count;
    int* data = fileToArray("inp.txt", &count);
    printf("Number of elements in array: %d\n", count);
    for (int k = 0; k < count; k++) {
        printf("%d, ", data[k]);
    }
    radixSort(data, count);
    cudaFree(data);
}
|
20,891 | #include <stdio.h>
#include <time.h>
#define TSK 16
#define WPTM 8
#define WPTN 8
#define TSM (TSK * WPTM)
#define TSN (TSK * WPTN)
#define RTSM (TSM/WPTM)
#define RTSN (TSN/WPTN)
#define LPTA (TSK*TSM)
#define LPTB (TSK*TSN)
// Use 2D register blocking (further increase in work per thread)
//C=A*B
__global__ void oreoreSGEMM(int M, int N, int K, float* A, float* B, float* C) {
// Thread identifiers
int tidm = threadIdx.x; // Local row ID (max: TSM/WPTM)
int tidn = threadIdx.y; // Local col ID (max: TSN/WPTN)
int offsetM = TSM * blockIdx.x + tidm; // Work-group offset
int offsetN = TSN * blockIdx.y + tidn; // Work-group offset
// Local memory to fit a tile of A and B
//2 is to avoid bank conflict ?
__shared__ float4 Asub[TSM*TSK / 4];
__shared__ float4 Bsub[TSK*TSN / 4];
// Allocate register space
float4 Areg;
float4 Breg[2];
float acc[WPTM*WPTN];
// Initialise the accumulation registers
for (int wm = 0; wm < WPTM; wm++) {
for (int wn = 0; wn < WPTN; wn++) {
acc[wm * 8 + wn] = 0.0f;
}
}
// Loop over all tiles
int numTiles = K / TSK;
int tid = tidn * 16 + tidm;
int Boffset = tidn / 2 * N + (tidn % 2) * 64 + offsetM; //+TSK*t*N+la*8*N
int Aoffset = tidm + offsetN * K;//+TSK*t+0*K
for (int t = 0; t < numTiles; t++) {
// Load one tile of A and B into local memory
//A
float4 dt;
dt.x = A[Aoffset]; Aoffset += 16 * K;
dt.y = A[Aoffset]; Aoffset += 16 * K;
dt.z = A[Aoffset]; Aoffset += 16 * K;
dt.w = A[Aoffset]; Aoffset += 16 * K;
Asub[tid] = dt;
dt.x = A[Aoffset]; Aoffset += 16 * K;
dt.y = A[Aoffset]; Aoffset += 16 * K;
dt.z = A[Aoffset]; Aoffset += 16 * K;
dt.w = A[Aoffset]; Aoffset -= 112 * K - 16;
Asub[tid + 256] = dt;
//B
dt.x = B[Boffset];
dt.y = B[Boffset + 16];
dt.z = B[Boffset + 32];
dt.w = B[Boffset + 48];
Bsub[tid] = dt;
Boffset += 8 * N;
dt.x = B[Boffset];
dt.y = B[Boffset + 16];
dt.z = B[Boffset + 32];
dt.w = B[Boffset + 48];
Bsub[tid + 256] = dt;
Boffset += 8 * N;
// Synchronise to make sure the tile is loaded
__syncthreads();
int tidmk = tidm;//+k*TSM
int tidnk = tidn * 16;
// Loop over the values of a single tile
for (int k = 0; k < TSK; k++) {
// Cache the values of Bsub in registers
Breg[0] = Bsub[tidmk]; tidmk += 16;
Breg[1] = Bsub[tidmk]; tidmk += 16;
// Perform the computation
Areg = Asub[tidnk]; tidnk += 256;
acc[0] += Areg.x * Breg[0].x;
acc[1] += Areg.x * Breg[0].y;
acc[2] += Areg.x * Breg[0].z;
acc[3] += Areg.x * Breg[0].w;
acc[4] += Areg.x * Breg[1].x;
acc[5] += Areg.x * Breg[1].y;
acc[6] += Areg.x * Breg[1].z;
acc[7] += Areg.x * Breg[1].w;
acc[8 + 0] += Areg.y * Breg[0].x;
acc[8 + 1] += Areg.y * Breg[0].y;
acc[8 + 2] += Areg.y * Breg[0].z;
acc[8 + 3] += Areg.y * Breg[0].w;
acc[8 + 4] += Areg.y * Breg[1].x;
acc[8 + 5] += Areg.y * Breg[1].y;
acc[8 + 6] += Areg.y * Breg[1].z;
acc[8 + 7] += Areg.y * Breg[1].w;
acc[16 + 0] += Areg.z * Breg[0].x;
acc[16 + 1] += Areg.z * Breg[0].y;
acc[16 + 2] += Areg.z * Breg[0].z;
acc[16 + 3] += Areg.z * Breg[0].w;
acc[16 + 4] += Areg.z * Breg[1].x;
acc[16 + 5] += Areg.z * Breg[1].y;
acc[16 + 6] += Areg.z * Breg[1].z;
acc[16 + 7] += Areg.z * Breg[1].w;
acc[24 + 0] += Areg.w * Breg[0].x;
acc[24 + 1] += Areg.w * Breg[0].y;
acc[24 + 2] += Areg.w * Breg[0].z;
acc[24 + 3] += Areg.w * Breg[0].w;
acc[24 + 4] += Areg.w * Breg[1].x;
acc[24 + 5] += Areg.w * Breg[1].y;
acc[24 + 6] += Areg.w * Breg[1].z;
acc[24 + 7] += Areg.w * Breg[1].w;
Areg = Asub[tidnk]; tidnk -= 255;
acc[32 + 0] += Areg.x * Breg[0].x;
acc[32 + 1] += Areg.x * Breg[0].y;
acc[32 + 2] += Areg.x * Breg[0].z;
acc[32 + 3] += Areg.x * Breg[0].w;
acc[32 + 4] += Areg.x * Breg[1].x;
acc[32 + 5] += Areg.x * Breg[1].y;
acc[32 + 6] += Areg.x * Breg[1].z;
acc[32 + 7] += Areg.x * Breg[1].w;
acc[40 + 0] += Areg.y * Breg[0].x;
acc[40 + 1] += Areg.y * Breg[0].y;
acc[40 + 2] += Areg.y * Breg[0].z;
acc[40 + 3] += Areg.y * Breg[0].w;
acc[40 + 4] += Areg.y * Breg[1].x;
acc[40 + 5] += Areg.y * Breg[1].y;
acc[40 + 6] += Areg.y * Breg[1].z;
acc[40 + 7] += Areg.y * Breg[1].w;
acc[48 + 0] += Areg.z * Breg[0].x;
acc[48 + 1] += Areg.z * Breg[0].y;
acc[48 + 2] += Areg.z * Breg[0].z;
acc[48 + 3] += Areg.z * Breg[0].w;
acc[48 + 4] += Areg.z * Breg[1].x;
acc[48 + 5] += Areg.z * Breg[1].y;
acc[48 + 6] += Areg.z * Breg[1].z;
acc[48 + 7] += Areg.z * Breg[1].w;
acc[56 + 0] += Areg.w * Breg[0].x;
acc[56 + 1] += Areg.w * Breg[0].y;
acc[56 + 2] += Areg.w * Breg[0].z;
acc[56 + 3] += Areg.w * Breg[0].w;
acc[56 + 4] += Areg.w * Breg[1].x;
acc[56 + 5] += Areg.w * Breg[1].y;
acc[56 + 6] += Areg.w * Breg[1].z;
acc[56 + 7] += Areg.w * Breg[1].w;
}
// Synchronise before loading the next tile
__syncthreads();
}
// Store the final results in C
for (int wm = 0; wm < WPTM; wm++) {
int globalRow = offsetM + wm * RTSM;
for (int wn = 0; wn < WPTN; wn++) {
int globalCol = offsetN + wn * RTSN;
C[globalCol*M + globalRow] = acc[wn * 8 + wm];
}
}
}
// Fills a row-major nr_rows_ x nr_cols_ buffer with pseudo-random floats
// scaled by 1/32768 (0.000030517578125f). Uses the C rand() stream, so call
// order determines the values; rand() is never negative, so neither are they.
__host__ void Generaterand(float* h_,int nr_rows_ ,int nr_cols_) {
    const int total = nr_rows_ * nr_cols_;
    // Same element order and same number of rand() calls as the original
    // nested loops, just flattened.
    for (int idx = 0; idx < total; idx++) {
        h_[idx] = (float)rand()*0.000030517578125f;
    }
}
//Print matrix C
// Spot-checks the GPU result: recomputes C[0] = dot(row 0 of A, column 0 of
// B) on the CPU and prints it next to the device-computed C[0]. Only A's
// first row, B's first column and C[0] are read; the size arguments for the
// unused dimensions are accepted for signature compatibility.
__host__ void print_matrix(const float* A, int nr_rows_A, int nr_cols_A, const float* B, int nr_rows_B, int nr_cols_B, const float* C, int nr_rows_C, int nr_cols_C) {
    float reference = 0.0f;
    int k = 0;
    // Same left-to-right float accumulation order as the original loop.
    while (k < nr_cols_A) {
        reference += A[k] * B[k*nr_cols_B];
        k++;
    }
    printf("out check\n");
    printf("CPU_C[0] %f\n", reference);
    printf("GPU_C[0] %f\n", C[0]);
}
// Benchmark driver for the register-blocked SGEMM kernel: allocates three
// NxN matrices, fills A and B with random data, times `loops` kernel
// launches with CUDA events, and spot-checks C[0] against a CPU dot product.
int main() {
    // N varies 4096..16384 depending on the current wall-clock second -- a
    // crude way to exercise several sizes across runs.
    int N = 4096*(time(NULL)%4+1);
    printf("start N=%d\n", N);
    // Allocate 3 arrays on CPU
    int nr_rows_A, nr_cols_A, nr_rows_B, nr_cols_B, nr_rows_C, nr_cols_C;
    nr_rows_A = nr_cols_A = nr_rows_B = nr_cols_B = nr_rows_C = nr_cols_C = N;
    float* const h_A = (float *)malloc(nr_rows_A * nr_cols_A * sizeof(float));
    float* const h_B = (float *)malloc(nr_rows_B * nr_cols_B * sizeof(float));
    float* const h_C = (float *)malloc(nr_rows_C * nr_cols_C * sizeof(float));
    // Allocate 3 arrays on GPU
    // NOTE(review): no CUDA error checking anywhere in this driver; a failed
    // allocation or kernel launch surfaces only as a wrong spot-check value.
    float *d_A, * d_B, * d_C;
    cudaMalloc(&d_A, nr_rows_A * nr_cols_A * sizeof(float));
    cudaMalloc(&d_B, nr_rows_B * nr_cols_B * sizeof(float));
    cudaMalloc(&d_C, nr_rows_C * nr_cols_C * sizeof(float));
    //rand
    Generaterand(h_A, nr_rows_A, nr_cols_A);
    Generaterand(h_B, nr_rows_B, nr_cols_B);
    //HostToDevice
    cudaMemcpy(d_A, h_A, nr_rows_A * nr_cols_A * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, nr_rows_B * nr_cols_B * sizeof(float), cudaMemcpyHostToDevice);
    // init
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    //first kernel overhead
    // Warm-up launch so JIT/driver overhead is excluded from the timing.
    dim3 block(TSM / WPTM, TSN / WPTN);
    dim3 grid(N / TSM, N / TSN);
    oreoreSGEMM <<<grid, block >>> (nr_rows_A, nr_cols_B, nr_cols_A, d_A, d_B, d_C);
    cudaDeviceSynchronize();//wait
    // Integer division: 16 launches for N=4096 down to 4 for N=16384.
    int loops = 65536/ N; //SGEMM loop
    cudaEventRecord(start);
    for (int i = 0; i < loops; i++) {
        oreoreSGEMM <<<grid, block >>> (nr_rows_A, nr_cols_B, nr_cols_A, d_A, d_B, d_C);
    }
    //cudaDeviceSynchronize();
    // cudaEventSynchronize(stop) below makes the event timing valid without
    // an explicit device-wide sync.
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    printf("end\n");
    float elapsed;
    cudaEventElapsedTime(&elapsed, start, stop);
    // 2*N^3 flops per GEMM, times `loops` launches, over elapsed ms.
    printf( "Time: %fms, %f TFLOPS\n", elapsed, (double)N*N*N*2 / elapsed / 1000000000* loops);
    //output
    cudaMemcpy(h_C, d_C, nr_rows_C * nr_cols_C * sizeof(float), cudaMemcpyDeviceToHost);
    print_matrix(h_A,nr_rows_A,nr_cols_A,h_B,nr_rows_B,nr_cols_B,h_C,nr_rows_C,nr_cols_C);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    // NOTE(review): h_A/h_B/h_C are not freed; harmless at process exit.
    return 0;
} |
20,892 | #include<stdio.h>
#include<math.h>
#include<stdlib.h>
//#include<cuda.h>
#include<unistd.h>
#include<time.h>
/*
for(i=0;i<N/c;i++)
{
for(j=0;j<cols[i];j++)
{
result[i*c+0]+=scval_flat[cs[i]+(j*c)]*vecX[sccol_flat[cs[i]+(j*2)]];
result[i*c+1]+=scval_flat[cs[i]+(j*c)+1]*vecX[sccol_flat[cs[i]+(j*2)+1]];
}
}
*/
// SELL-C style sparse matrix-vector product: block `tid` handles one chunk
// of blockDim.x consecutive (permuted) rows. cs[tid] is the chunk's start
// offset in the flattened value/column arrays, cols[tid] its padded column
// count; values are interleaved so element j of lane threadIdx.x lives at
// cs[tid] + j*blockDim.x + threadIdx.x.
__global__ void multiply(int *scval, int *sccol, int *vec, int *result, int *cols, int *cs)
{
    int tid=blockIdx.x;
    int sum1=0;
    int j;
    //int colidx=tid/2;
    //printf("\n tid= %d", tid);
    //printf("\n Writing to %d",tid*c+threadIdx.x);
    // Accumulate this lane's row: value * x[column] per padded column.
    for(j=0;j<cols[tid];j++)
    {
        sum1 += scval[cs[tid]+(j*blockDim.x)+threadIdx.x]*vec[sccol[cs[tid]+(j*blockDim.x)+threadIdx.x]];
        // sum2 += scval[cs[tid]+(j*2)+1]*vec[sccol[cs[tid]+(j*2)+1]];
    }
    // NOTE(review): this barrier is not needed for correctness -- each thread
    // writes only its own result slot below.
    __syncthreads();
    // Results are stored in the permuted row order; the host reorders them.
    result[tid*blockDim.x+threadIdx.x]=sum1;
    // result[tid*c+1]=sum2;
}
// Debug kernel: prints the first N ints of a device array. Intended for a
// <<<1,1>>> launch; multiple threads would interleave their output.
__global__ void printmatscreen(int* mat, int N)
{
    for (int k = 0; k < N; k++)
    {
        printf("%d ",mat[k]);
    }
    printf("\n");
}
// Allocates an arraySizeX x arraySizeY matrix as an array of row pointers,
// zero-initialized. No allocation-failure checks (matching the rest of the
// file's allocators); caller frees each row and then the pointer array.
int** Make2DIntArray(int arraySizeX, int arraySizeY)
{
    int** rowPtrs = (int**) malloc(arraySizeX*sizeof(int*));
    // calloc's zero bytes give int 0 for every cell, replacing the explicit
    // zero-fill loops of the original.
    for (int r = 0; r < arraySizeX; r++)
    {
        rowPtrs[r] = (int*) calloc(arraySizeY, sizeof(int));
    }
    return rowPtrs;
}
// Allocates a "ragged" matrix of `rows` row pointers organized in `blocks`
// groups of `blocksize` rows; every row in group i gets columns[i] ints,
// zero-initialized.
// NOTE(review): only the first blocks*blocksize row pointers are populated;
// callers must ensure rows == blocks*blocksize or the tail pointers remain
// uninitialized. No malloc-failure checks.
int** Make2DVariableIntArray(int rows, int blocks, int blocksize, int* columns)
{
    int** theArray;
    theArray = (int**) malloc(rows*sizeof(int*));
    int i, j, k;
    for (i = 0; i < blocks; i++)
    {
        k=columns[i];
        for (j=0; j < blocksize; j++)
        {
            theArray[i*blocksize+j] = (int*) malloc(k*sizeof(int));
        }
    }
    //int j;
    // Zero-fill each row up to its group's column count.
    for (i=0;i<blocks;i++)
    {
        for (j=0;j<blocksize;j++)
        {
            for (k=0;k<columns[i];k++)
            {
                theArray[i*blocksize+j][k]=0;
            }
        }
    }
    return theArray;
}
// Copies the leading columns[i] entries of each row of theArray into a fresh
// ragged matrix with the same block layout (see Make2DVariableIntArray).
// The input matrix is left untouched; the copy is returned.
int** Changeto2DVariableIntArray(int** theArray,int rows, int blocks, int blocksize, int* columns)
{
    int** Ragged = Make2DVariableIntArray(rows,blocks,blocksize,columns);
    for (int b = 0; b < blocks; b++)
    {
        for (int r = 0; r < blocksize; r++)
        {
            int* dst = Ragged[b*blocksize+r];
            int* src = theArray[b*blocksize+r];
            for (int c = 0; c < columns[b]; c++)
            {
                dst[c] = src[c];
            }
        }
    }
    printf("changed to multiple matrixes");
    return Ragged;
}
// Zeroes an N x N matrix stored as an array of row pointers.
void init_zeros(int** matrix, int N)
{
    for (int r = 0; r < N; r++)
    {
        int* row = matrix[r];
        for (int c = 0; c < N; c++)
        {
            row[c] = 0;
        }
    }
}
// Prints an N x Nj matrix to stdout, one row per line (each row is preceded
// by a newline, matching the original output format).
// Fix: the column loop previously used N (the row count) instead of Nj, so a
// non-square matrix was printed incorrectly; existing call sites pass square
// matrices and are unaffected.
void printmat(int** matrix, int N, int Nj)
{
    int i,j;
    for (i=0;i<N;i++)
    {
        printf("\n");
        for (j=0;j<Nj;j++)
        {
            printf("%d ",matrix[i][j]);
        }
    }
    printf("\n");
}
/*
Prints original 2D matrices to file
*/
// Writes the leading K x K block of `matrix` to `filename`, tab-separated,
// with each row preceded by a newline (unchanged output format).
// Fixes: the FILE* was never closed (handle leak; buffered data could stay
// unflushed on some paths), and a failed fopen was dereferenced.
void printtofile(int** matrix, int K, char* filename)
{
    FILE *fp;
    fp=fopen(filename,"wt");
    if (fp == NULL)
    {
        printf("failed to open file %s\n", filename);
        return;
    }
    int i,j;
    for (i=0;i<K;i++)
    {
        fprintf(fp, "\n");
        for (j=0;j<K;j++)
        {
            fprintf(fp, "%d\t", matrix[i][j]);
        }
    }
    fclose(fp);
}
/*
Prints resultant matrix to a file
*/
// Writes K ints to `filename`, one per line (unchanged output format).
// Fixes: the FILE* was never closed (handle leak; data could remain
// unflushed), and a failed fopen was dereferenced.
void printtofile1D(int* matrix, int K, char* filename)
{
    FILE *fp;
    fp=fopen(filename,"wt");
    if (fp == NULL)
    {
        printf("failed to open file %s\n", filename);
        return;
    }
    int i;
    for (i=0;i<K;i++)
    {
        fprintf(fp, "%d\n", matrix[i]);
    }
    fclose(fp);
}
// Allocates a zero-initialized int vector of length arraySizeX.
// calloc replaces the original malloc + zero-fill loop; the all-zero bytes
// are int 0 for every element. No failure check (matches original).
int* Make1DIntArray(int arraySizeX)
{
    return (int*)calloc(arraySizeX, sizeof(int));
}
// Frees a 2D double matrix laid out as sizeX row pointers, then the pointer
// array itself. sizeY is unused but kept for signature compatibility.
void freese(int sizeX, int sizeY, double** ptr)
{
    for (int r = 0; r < sizeX; r++)
    {
        free(ptr[r]);
    }
    free(ptr);
}
int main()
{
int N=5000;
// const int Dsize=1000;
FILE *arr, *vec;
int i,j,maxrowwidth=0,tint=0;
int** a=Make2DIntArray(N,N);
// int* val=Make1DIntArray(Dsize);
// int* col=Make1DIntArray(Dsize);
// int* row=Make1DIntArray(Dsize);
int* result=Make1DIntArray(N);
int* vecX=Make1DIntArray(N);
int** scval=Make2DIntArray(N,N); //sell c value
int** sccol=Make2DIntArray(N,N); //sell c col
int* rowwidth=Make1DIntArray(N); //number of elements in each row
int* temp=Make1DIntArray(N);
int* rows=Make1DIntArray(N);
int* resultsordered=Make1DIntArray(N);
int sig=4,c=2;
// int* rowwidth=Make1DIntArray(N);
int *dev_vec, *dev_scval, *dev_result, *dev_sccol, *dev_cols, *dev_cs;
//int val[10],col[10],row[10];
arr=fopen("matrix5000.txt","r");
int k=0;
// struct timeval start, end;
// gettimeofday(&start, NULL);
//Reading the vector
vec=fopen("vector5000.txt","r");
for (i=0;i<N;i++)
{
fscanf(vec,"%d",&vecX[i]);
rows[i]=i;
}
//Reading the matrix
for(i=0;i<N;i++)
{
//printf("\n");
for(j=0;j<N;j++)
{
fscanf(arr,"%d",&a[i][j]);
// printf("%d ",a[i][j]);
}
}
printf("\n");
//row[i]=k;
//printf("\n k = %d\n ", k);
//sleep(10);
// gettimeofday(&end, NULL);
// double delta = ((end.tv_sec - start.tv_sec) * 1000000u +
// end.tv_usec - start.tv_usec) / 1.e6;
// printf("\nTime spent=%f\n", delta);
for(i=0;i<N;i++)
{
for(j=0;j<N;j++)
{
if(a[i][j]!=0)
{
scval[i][k]=a[i][j];
sccol[i][k]=j;
rowwidth[i]=k+1;
if(rowwidth[i]>maxrowwidth)
{
maxrowwidth=rowwidth[i];
}k++;
}
}
//printf("\nRow width %d = %d", i, rowwidth[i]);
k=0;
}
if(sig>1&&c!=sig)
{
for(i=0;i<N;i=i+sig)
{
for(k=0;k<sig-1;k++)
{
for(j=i;(j<i+sig-1) && (j<N);j++)
{
if(rowwidth[j]<rowwidth[j+1])
{
temp=scval[j];
scval[j]=scval[j+1];
scval[j+1]=temp;
temp=sccol[j];
sccol[j]=sccol[j+1];
sccol[j+1]=temp;
tint=rowwidth[j];
rowwidth[j]=rowwidth[j+1];
rowwidth[j+1]=tint;
tint=rows[j];
rows[j]=rows[j+1];
rows[j+1]=tint;
}
}
}
}
}
/* for(i=0;i<N;i++)
{
if(scval[i][0]==0)
{
break;
}
}
N=i;
*/
printf("\nmaxrowwidth=%d\n",maxrowwidth);
// printmat(scval,N,N);
// printtofile(scval,N,"scval.txt");
// printtofile(sccol,N,"sccol.txt");
/* printf("\n Shuffled rows is:\n");
for (i=0;i<N;i++)
{
printf("%d\n",rows[i]);
}
*/
//printmatscreen<<<1,1>>>(dev_b,N);
/* multiply<<<N,N>>>(dev_a, dev_b, dev_c, N, N);
cudaMemcpy(result, dev_c, sizeof(int)*N, cudaMemcpyDeviceToHost);
for (i=0;i<N;i++)
{
printf("\n%d",result[i]);
}
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
// NEED TO FIGURE OUT A WAY TO POPULATE cols SO AS TO HAVE varmat CREATED PROPERLY. SYSTEM CRASHES OTHERWISE
*/
int* cols=Make1DIntArray(N/c);
j=0;
int colsum=0;
for(i=0;i<N;i=i+c)
{
cols[j]=rowwidth[i];
colsum+=cols[j];
j++;
}
int** varscval=Changeto2DVariableIntArray(scval,N,N/c,c,cols);
int** varsccol=Changeto2DVariableIntArray(sccol,N,N/c,c,cols);
/* for (i=0;i<N/c;i++)
{
for(j=0;j<c;j++)
{
printf("\n");
for (k=0;k<cols[i];k++)
{
printf("%d ",varscval[i*c+j][k]);
//printf("%d ",varsccol[i*c+j][k]);
}
}
}
*/
int varsize=colsum*c;
//flattening scval and sccol
int counters=0;
int* scval_flat=Make1DIntArray(varsize);
int* sccol_flat=Make1DIntArray(varsize);
int* cs=Make1DIntArray((N/c)+1);
cs[0]=0;
int countcols=0;
int z=0;
printf("\n");
printf("\n");
printf("\n");
for (i=0;i<N/c;i++)
{
countcols=0;
for(j=0;j<cols[i];j++)
{
for (k=0;k<c;k++)
{
scval_flat[counters]=varscval[i*c+k][j];
sccol_flat[counters]=varsccol[i*c+k][j];
//printf("%d ",scval_flat[counters]);
//printf("%d\n", sccol_flat[counters]);
counters=counters+1;
countcols=countcols+1;
}
}
cs[z+1]=cs[z]+countcols;
z=z+1;
}
/* printf("\ncs:");
for(i=1;i<(N/c)+1;i++)
printf("%d ", cs[i]);
/*
for(i=0;i<N/c;i++)
{
for(j=0;j<cols[i];j++)
{
result[i*c+0]+=scval_flat[cs[i]+(j*c)]*vecX[sccol_flat[cs[i]+(j*2)]];
result[i*c+1]+=scval_flat[cs[i]+(j*c)+1]*vecX[sccol_flat[cs[i]+(j*2)+1]];
}
}
printtofile1D(result,N,"resultstest.txt");
*/
cudaEvent_t start, stop, start_kernel, stop_kernel;
float time_kernel;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventCreate(&start_kernel);
cudaEventCreate(&stop_kernel);
cudaMalloc((void**)&dev_vec, sizeof(int)*N);
cudaMalloc((void**)&dev_scval, sizeof(int)*varsize);
cudaMalloc((void**)&dev_result, sizeof(int)*N);
cudaMalloc((void**)&dev_sccol, sizeof(int)*varsize);
cudaMalloc((void**)&dev_cols, sizeof(int)*(N/c));
cudaMalloc((void**)&dev_cs, sizeof(int)*(N/c));
//cudaEventRecord(start,0);
cudaMemcpy(dev_vec, vecX, sizeof(int)*N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_scval, scval_flat, sizeof(int)*varsize, cudaMemcpyHostToDevice);
cudaMemcpy(dev_result, result, sizeof(int)*N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_sccol, sccol_flat, sizeof(int)*varsize, cudaMemcpyHostToDevice);
cudaMemcpy(dev_cols, cols, sizeof(int)*(N/c), cudaMemcpyHostToDevice);
cudaMemcpy(dev_cs, cs, sizeof(int)*(N/c), cudaMemcpyHostToDevice);
cudaEventRecord(start_kernel,0);
multiply<<<N/c,c>>>(dev_scval, dev_sccol, dev_vec, dev_result, dev_cols, dev_cs);
//sleep(10);
cudaEventRecord(stop_kernel,0);
cudaMemcpy(result, dev_result, sizeof(int)*N, cudaMemcpyDeviceToHost);
//cudaEventRecord(stop,0);
cudaEventSynchronize(stop_kernel);
//cudaEventElapsedTime(&time, start, stop);
cudaEventElapsedTime(&time_kernel, start_kernel, stop_kernel);
//printf("\nTime for kernel with data transfer = %f ms \n", time);
printf("\nTime for kernel without data transfer = %f ms \n", time_kernel);
for (i=0;i<N;i++)
{
resultsordered[rows[i]]=result[i];
}
printtofile1D(resultsordered,N,"results.txt");
// CODE TO RESHUFFLE BACK
cudaFree(dev_vec);
cudaFree(dev_scval);
cudaFree(dev_result);
cudaFree(dev_sccol);
cudaFree(dev_cols);
return 0;
/*
cudaMalloc((void**)&dev_vec, sizeof(int)*N);
cudaMalloc((void**)&dev_scval_flat, sizeof(int)*varsize);
cudaMalloc((void**)&dev_result, sizeof(int)*N);
cudaMalloc((void**)&dev_sccol_flat, sizeof(int)*varsize);
cudaMemcpy(dev_a, vecX, sizeof(int)*N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, varscval, sizeof(int)*varsize, cudaMemcpyHostToDevice);
cudaMemcpy(dev_c, result, sizeof(int)*N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_d, varsccol, sizeof(int)*varsize, cudaMemcpyHostToDevice);
*/
}
|
20,893 | #include <math.h>
#include <stdint.h>
#include <stdio.h>
__device__ uint8_t median_pixel(uint8_t *pixels, int stride_H, int stride_W, int size_H, int size_W) {
    // Median of a size_H x size_W window of 8-bit pixels via a 256-bin
    // histogram. `pixels` points at the window's top-left element; stride_H /
    // stride_W are the element strides between window rows / columns.
    int hist[256];
    for (int i = 0; i < 256; i++) {
        hist[i] = 0;
    }
    for (int i = 0; i < size_H; i++) {
        for (int j = 0; j < size_W; j++) {
            uint8_t pix = pixels[i*stride_H + j*stride_W];
            hist[pix] += 1;
        }
    }
    // The (lower) median of n samples is the value of 1-based rank (n+1)/2, so
    // scan until the cumulative count reaches that rank. The previous bound,
    // (n-1)/2, stopped one rank early (e.g. rank 4 instead of 5 for n == 9).
    int rank = (size_H*size_W + 1) / 2;
    int lower_half_count = 0;
    int i;
    for (i = 0; i < 256; i++) {
        lower_half_count += hist[i];
        if (lower_half_count >= rank) break;
    }
    return (uint8_t)i;
}
__global__ void apply_median_filter_kernel(uint8_t *src, uint8_t *dst, int src_H, int src_W, int src_C, int fil_H, int fil_W) {
    // One thread per output pixel/channel ("valid" filtering: output is
    // (src_H-fil_H+1) x (src_W-fil_W+1) x src_C). Each block cooperatively
    // stages a (blockDim.x+fil_H-1) x (blockDim.y+fil_W-1) x blockDim.z uint8
    // tile (window + halo) in dynamic shared memory, then each in-range thread
    // takes the median of its fil_H x fil_W window.
    // Expected launch: blockDim.z == src_C, gridDim.z == 1.
    int h_idx = threadIdx.x + blockIdx.x*blockDim.x;
    int w_idx = threadIdx.y + blockIdx.y*blockDim.y;
    int c_idx = threadIdx.z + blockIdx.z*blockDim.z;
    // Do NOT return early for out-of-range threads: they still load halo
    // pixels that fall inside the source image, and every thread of the block
    // must reach the __syncthreads() below. (The previous early return left
    // edge rows of the tile uninitialized and made the barrier divergent.)
    bool in_range = (h_idx < (src_H-fil_H+1)) && (w_idx < (src_W-fil_W+1)) && (c_idx < src_C);
    extern __shared__ int s[];
    uint8_t *shared = (uint8_t*)s;
    int shared_W = blockDim.y + fil_W - 1;  // tile width including halo
    int src_idx = h_idx*src_W*src_C + w_idx*src_C + c_idx;
    for (int i = 0; (h_idx + i*blockDim.x < src_H) && (threadIdx.x + i*blockDim.x < fil_H + blockDim.x - 1); i++) {
        for (int j = 0; (w_idx + j*blockDim.y < src_W) && (threadIdx.y + j*blockDim.y < fil_W + blockDim.y - 1); j++) {
            int shared_idx = (threadIdx.x + i*blockDim.x)*shared_W*blockDim.z + (threadIdx.y + j*blockDim.y)*blockDim.z + threadIdx.z;
            shared[shared_idx] = src[src_idx + i*blockDim.x*src_W*src_C + j*blockDim.y*src_C];
        }
    }
    // Barrier: all shared-tile writes must be visible before any window read.
    __syncthreads();
    if (!in_range) return;
    uint8_t *pixels = shared + threadIdx.x*shared_W*blockDim.z + threadIdx.y*blockDim.z + threadIdx.z;
    uint8_t pixel = median_pixel(pixels, shared_W*blockDim.z, blockDim.z, fil_H, fil_W);
    dst[h_idx*(src_W-fil_W+1)*(src_C) + w_idx*src_C + c_idx] = pixel;
}
__host__ void apply_median_filter(uint8_t *h_src, uint8_t *h_dst, int src_H, int src_W, int src_C, int fil_H, int fil_W, int block_size) {
    // Applies a fil_H x fil_W median filter ("valid" mode: output shrinks by
    // fil-1 in each spatial dimension) to an H x W x C interleaved uint8 image.
    // Launch layout: grid (ceil(outH/bs), ceil(outW/bs), 1),
    // block (block_size, block_size, src_C).
    uint8_t *d_src;
    uint8_t *d_dst;
    cudaMalloc(&d_src, src_H*src_W*src_C);
    cudaMalloc(&d_dst, (src_H-fil_H+1)*(src_W-fil_W+1)*src_C);
    cudaMemcpy(d_src, h_src, src_H*src_W*src_C, cudaMemcpyHostToDevice);
    int x_blocks = (src_H-fil_H+1);
    x_blocks = x_blocks/block_size + (x_blocks%block_size ? 1 : 0);  // ceil-div
    int y_blocks = (src_W-fil_W+1);
    y_blocks = y_blocks/block_size + (y_blocks%block_size ? 1 : 0);  // ceil-div
    // The shared tile is uint8 and carries all src_C channels: the kernel
    // indexes it as (bs+fil_H-1) x (bs+fil_W-1) x blockDim.z bytes. The
    // previous sizeof(float) sizing under-allocated whenever src_C > 4.
    size_t smem = (size_t)(block_size+fil_H-1)*(block_size+fil_W-1)*src_C*sizeof(uint8_t);
    apply_median_filter_kernel<<<
        dim3(x_blocks,y_blocks,1),
        dim3(block_size,block_size,src_C),
        smem
    >>>(d_src, d_dst, src_H, src_W, src_C, fil_H, fil_W);
    // Synchronize before the copy so kernel faults surface here rather than
    // being masked inside the (itself blocking) memcpy below.
    cudaDeviceSynchronize();
    cudaMemcpy(h_dst, d_dst, (src_H-fil_H+1)*(src_W-fil_W+1)*src_C, cudaMemcpyDeviceToHost);
    cudaFree(d_src);
    cudaFree(d_dst);
}
|
20,894 | #include "includes.h"
#define TILE_WIDTH 7
// Tiled square matrix multiply: Pd = Md * Nd, all Width x Width, row-major.
// Each block computes one TILE_WIDTH x TILE_WIDTH tile of Pd; the tile loop
// stages matching tiles of Md and Nd in shared memory.
// Preconditions (visible from the loop bound and unguarded indexing):
//   - blockDim == (TILE_WIDTH, TILE_WIDTH)
//   - Width is an exact multiple of TILE_WIDTH (no tail handling)
//   - grid covers Width/TILE_WIDTH blocks in x and y
__global__ void MatrixMulKernel(float* Md, float* Nd, float* Pd, int Width)
{
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
//Identify the row and column of the Pd element to work on
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
//printf("Row : %d, Col : %d\n", Row, Col);
float Pvalue = 0;
//Loop over the Md and Nd tiles required to compute the Pd element
for (int m = 0; m < Width / TILE_WIDTH; m++)
{
//Collaborative loading of Md and Nd tiles into shared memory
Mds[ty][tx] = Md[Row * Width + (m * TILE_WIDTH + tx)];
Nds[ty][tx] = Nd[(m * TILE_WIDTH + ty) * Width + Col];
// Barrier: the whole tile must be loaded before anyone reads it.
__syncthreads();
for (int k = 0; k < TILE_WIDTH; k++)
Pvalue += Mds[ty][k] * Nds[k][tx];
// Barrier: all reads of this tile must finish before the next load overwrites it.
__syncthreads();
}
Pd[Row * Width + Col] = Pvalue;
}
20,895 | #include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <cufft.h>
#include <math.h>
#define BLOCK_SIZE 1024*1024
#define LOOPS 10
// how many loops of block size to do
cudaEvent_t t_start, t_stop;
cufftHandle plan;
// Polyphase filter-bank pre-filter: each thread computes one output sample as
// a P-tap FIR over input samples spaced N apart (its channel), writing a float
// ready for the following batched R2C FFT.
// Expected launch: blockDim.x == N (one thread per channel); input_buffer has
// P*N extra "history" bytes prepended for boundary handling (see main()).
// NOTE(review): taps are indexed taps[threadIdx.x + x*N], i.e. the flattened
// layout must be [P][N] (tap-major) despite the comment below saying [N,P] --
// confirm against the tap-file generator.
__global__ void filter_block(char *input_buffer, float *taps, float *output_buffer, int N, int P)
{
float temp_output = 0;
for (int x=0; x < P; x++) {
temp_output += taps[threadIdx.x + x*N]*(int)input_buffer[(P*N) + threadIdx.x - x*N + (blockIdx.x*blockDim.x)];
// input buffer of continuous voltage samples should be distributed amongst N channels
// index into input buffer:
// current thread index indicates which channel we are currently processing
// x which tap we are getting data for
// blockIdx.x * blockDim.x is our depth into the block of data in multiples of channel number
// structure of taps vector is actually [N,P]. So each thread will read taps[channel_number:channel_number+8] for taps
}
output_buffer[threadIdx.x + (blockIdx.x*blockDim.x)] = temp_output;
}
// Rescales FFT output floats to 8-bit chars, compacting by `divider` and
// skipping extra elements between batches.
// `in` is a cufftComplex buffer viewed as floats; the `+ 2*blockIdx.x` offset
// presumably steps over the one extra complex element ((N/2+1)th bin) that
// each R2C batch produces -- TODO confirm against the cufftPlan1d batch layout.
// NOTE(review): the /512 scale is a magic constant; its provenance is not
// visible here.
__global__ void float_cast(float *in, char *out, int divider)
{
int idx = (blockDim.x * divider) * blockIdx.x + threadIdx.x;
out[blockDim.x*blockIdx.x + threadIdx.x] = (char) (in[idx + (2 * blockIdx.x)] / 512);
// skip the (N/2 +1)'th fft output...
}
// Streams LOOPS blocks of raw 8-bit voltage samples from argv[1] through a
// polyphase filter bank (FIR taps from argv[2], then batched R2C FFT),
// emitting rescaled 8-bit output on stdout and timing/progress on stderr.
// NOTE(review): return values of read()/open() and all CUDA/cuFFT calls are
// unchecked; device buffers, events, the cuFFT plan and the data-file fd are
// never released (reclaimed by the OS at exit); float_buffer is only used by
// the commented-out debug path.
int main(int argc, char **argv) {
int write_block = BLOCK_SIZE;
// 1 MB worth of data at a time...
int N = 512;
// # of frequency channels in the PFB
int P = 8;
// length of pre filtering
int divider = 1;
// output divider to reduce rate
int write_size = sizeof(float) * write_block;
int tap_size = sizeof(float) * P * N;
int fft_output_size = (write_block / N) * sizeof(cufftComplex) * (N/2 + 1);
// hold BATCH (write_block / N) worth of complex FFT outputs
int fh;
unsigned int i=0;
char *data_file;
char *fir_taps_file;
char *base_buffer;
float et;
// counter for elapsed time of cuda ops
float *fir_taps;
char *device_char_buffer;
char *host_char_buffer;
float *device_float_buffer;
cufftComplex *device_complex_buffer;
float *device_fir_taps;
short first = 1;
long int start = 0;
if (argc > 2) {
data_file = argv[1];
fir_taps_file = argv[2];
} else { printf("Please supply both data and fir_taps filenames...\n"); return -1;}
fprintf(stderr,"Sizes %li, %li\n", sizeof(char), sizeof(int));
base_buffer = (char*)malloc(write_block*LOOPS);
host_char_buffer = (char*)malloc(write_block);
memset(host_char_buffer, (char) 0, write_block);
// zero as we use part of this for our initial zero padding block
float *float_buffer = (float*)malloc(fft_output_size);
fir_taps = (float*)malloc(tap_size);
fh = open(fir_taps_file, O_RDONLY);
read(fh, fir_taps, tap_size);
// source of taps vector should be flattened [P,N] array
close(fh);
//for (i=0; i < P*N; i++) { fprintf(stderr,"%f ",(float)*(fir_taps+i));}
cudaEventCreate(&t_start);
cudaEventCreate(&t_stop);
// Device input carries P*N extra history bytes for FIR boundary handling.
cudaMalloc((void**)&device_char_buffer, write_block + (P*N));
cudaMalloc((void**)&device_float_buffer, write_size);
cudaMalloc((void**)&device_complex_buffer, fft_output_size);
cudaMalloc((void**)&device_fir_taps, tap_size);
// allocate the device storage
cudaMemcpy(device_fir_taps, fir_taps, tap_size, cudaMemcpyHostToDevice);
// copy the filter taps to the device
int threadsPerBlock = N;
int blocksPerGrid = write_block / N;
fprintf(stderr,"Blocks per grid: %i, Threads per block: %i\n",blocksPerGrid, threadsPerBlock);
cufftPlan1d(&plan, N, CUFFT_R2C, int(write_block/N));
fprintf(stderr,"FFT Plan has length %i with batch size %i\n",N, int(write_block/N));
fh = open(data_file, O_LARGEFILE);
read(fh, base_buffer, write_block * LOOPS);
fprintf(stderr,"Reading %i bytes of data...\n", write_block * LOOPS);
for (i=0; i < LOOPS; i++) {
start = i * write_block;
fprintf(stderr,"Loop %i (start: %li).\n",i,start);
cudaEventRecord(t_start, 0);
if (first == 1) {
// First block: history region is zero-filled (host_char_buffer was memset).
fprintf(stderr,"Appending zero pad block to start of stream...\n");
cudaMemcpy(device_char_buffer, host_char_buffer, P*N, cudaMemcpyHostToDevice);
// copy the pad block in first (we have zero'ed this block earlier)
cudaMemcpy(device_char_buffer+(P*N), base_buffer + start, write_block, cudaMemcpyHostToDevice);
first = 0;
} else {
// Subsequent blocks: history is the tail of the previous block.
cudaMemcpy(device_char_buffer, base_buffer+start - (P*N), write_block + (P*N), cudaMemcpyHostToDevice);
// copy data to GPU. Add extra N*P block for boundary handling...
}
filter_block<<<blocksPerGrid, threadsPerBlock>>>(device_char_buffer, device_fir_taps, device_float_buffer, N, P);
// kernel applies pre filtering to entire block leaving it in device_output_buffer ready for FFT
cufftExecR2C(plan, (cufftReal*)device_float_buffer, (cufftComplex*)device_complex_buffer);
// Do FFT's over the entire block, one column at a time
//cudaMemcpy(float_buffer, device_complex_buffer, fft_output_size, cudaMemcpyDeviceToHost);
// get the intermediate results...
//write(1, float_buffer, fft_output_size);
// output the intermediate results...
// device_char_buffer is reused here as the 8-bit output staging area.
float_cast<<<write_block/N, N/divider>>>((float*)device_complex_buffer, device_char_buffer, divider);
// prepare our output stream...
cudaMemcpy(host_char_buffer, device_char_buffer, write_block/divider, cudaMemcpyDeviceToHost);
//for (i=0; i < 100 * (N/2 + 1); i++) { fprintf(stderr,"Complex value %i has x=%f, y=%f\n", i, fft_buffer[i].x, fft_buffer[i].y); }
cudaEventRecord(t_stop, 0);
cudaEventSynchronize(t_stop);
cudaEventElapsedTime(&et, t_start, t_stop);
fprintf(stderr,"Loop done. CUDA time is %f ms\n", et);
write(1, host_char_buffer, write_block/divider);
// emit to stdout (which has hopefully been redirected...)
}
return 0;
}
|
20,896 | #include <iostream>
#include <set>
#include "../include/gpu_set.h"
#include <thrust/device_vector.h>
#define def_dvec(t) thrust::device_vector<t>
#define to_ptr(x) thrust::raw_pointer_cast(&x[0])
using namespace std;
const int SET_SIZE = 100;
// Exercises gpu_set<int, SET_SIZE> from a single thread, recording
// (key, find-result) pairs into `output` (4 * 100 ints) for host inspection.
// NOTE(review): gpu_set is a project-local header; whether find() returns a
// bool-like flag or something else is not visible here -- confirm in gpu_set.h.
__global__ void test(int *output){
gpu_set<int, SET_SIZE> set;
// Seed with the even numbers 0, 2, ..., 2*(SET_SIZE-1).
for(int i=0;i<SET_SIZE;++i){
set.insert(2*i);
}
int idx = 0;
// Phase 1: probe 0..99, erase each key after probing, re-insert odd keys.
for(int i=0;i<100;++i){
output[idx++] = i;
output[idx++] = set.find(i);
set.erase(i);
if(i%2) set.insert(i);
}
// Phase 2: re-probe 0..99 after the erase/re-insert churn above.
for(int i=0;i<100;++i) {
output[idx++] = i;
output[idx++] = set.find(i);
}
}
int main(){
    // Run the single-thread gpu_set exercise and dump its 400-int trace.
    thrust::device_vector<int> dev_out(400, 0);
    test<<<1, 1>>>(thrust::raw_pointer_cast(&dev_out[0]));
    // Host-side iteration over a device_vector copies each element back
    // (implicitly synchronizing with the kernel on the default stream).
    for (auto value : dev_out) {
        cout << value << ' ';
    }
    cout << endl;
    return 0;
}
20,897 | #include<iostream>
#include<string>
#include<cuda.h>
using namespace std;
int main(){
    // Queries CUDA device 0 and prints a summary of its hardware properties.
    struct cudaDeviceProp prop;
    cudaError_t err;
    err = cudaGetDeviceProperties(&prop,0);
    if(err!=cudaSuccess){
        cout<<"Get failed. Exiting."<<endl;
        // Propagate the failure through the exit status (previously this fell
        // through to `return 0`, reporting success despite printing nothing).
        return 1;
    }
    cout<<"Name : "<<string(prop.name)<<endl;
    cout<<"Total global memory : "<<prop.totalGlobalMem/(1024*1024*1024.0)<<" GB"<<endl;
    cout<<"Shared memmory per block : "<<prop.sharedMemPerBlock/(1024.0)<<" KB"<<endl;
    cout<<"32 bit registers per block : "<<prop.regsPerBlock<<endl;
    cout<<"Warp size (in threads) : "<<prop.warpSize<<endl;
    cout<<"Max pitch allowed by mem copy : "<<prop.memPitch/(1024*1024*1024.0)<<" GB"<<endl;
    cout<<"Max threads per block : "<<prop.maxThreadsPerBlock<<endl;
    cout<<"Max thread dimensions : "<<"("<<prop.maxThreadsDim[0]<<","<<prop.maxThreadsDim[1]<<","<<prop.maxThreadsDim[2]<<")"<<endl;
    cout<<"Max grid dimensions : "<<"("<<prop.maxGridSize[0]<<","<<prop.maxGridSize[1]<<","<<prop.maxGridSize[2]<<")"<<endl;
    cout<<"Max const memory : "<<prop.totalConstMem/1024.0<<" KB"<<endl;
    cout<<"Major compute capability : "<<prop.major<<endl;
    cout<<"Minor compute capability : "<<prop.minor<<endl;
    cout<<"Clock frequency : "<<prop.clockRate/1000.0<<" MHz"<<endl;
    cout<<"Alignment requirement for textures : "<<prop.textureAlignment<<endl;
    cout<<"Device can concurrently copy memory and execute a kernel : "<<(bool)prop.deviceOverlap<<endl;
    cout<<"Number of multiprocessors on device : "<<prop.multiProcessorCount<<endl;
    cout<<"Specified whether there is a run time limit on kernels : "<<(bool)prop.kernelExecTimeoutEnabled<<endl;
    cout<<"Integrated : "<<(bool)prop.integrated<<endl;
    cout<<"Can map host memory : "<<(bool)prop.canMapHostMemory<<endl;
    cout<<"Compute Mode : "<<prop.computeMode<<endl;
    cout<<"Concurrent kernels : "<<(bool)prop.concurrentKernels<<endl;
    cout<<"ECC support : "<<(bool)prop.ECCEnabled<<endl;
    cout<<"PCI bus id : "<<prop.pciBusID<<endl;
    cout<<"PCI device id : "<<prop.pciDeviceID<<endl;
    cout<<"TCC Driver : "<<(bool)prop.tccDriver<<endl;
    return 0;
}
|
20,898 | #include <math.h>
#include <iostream>
#include <array>
#include <cmath>
#include <cstdint>
#include "cuda_runtime.h"
#include <stdlib.h>
#include <cuda_runtime_api.h>
#include <cuda.h>
using namespace std;
template<int E, int M, int T, int P, int B = (1 << (E - 1)) - 1>
static inline __device__ uint64_t compress(float* xyz)
{
    // Packs an xyz vector into 64 bits as spherical coordinates:
    // theta (T bits: sign + quantised magnitude), phi (P bits), and the radius
    // as a truncated float (M mantissa bits, E exponent bits rebased to bias B).
    // Shifts use 1ULL: with a plain `1` (32-bit int) `1 << (T-1)` is undefined
    // behaviour whenever T-1 >= 31, which the template parameters permit.
    static_assert(E + M + P + T == 64, "Invalid number of bits");
    const float rpis = 1.0/static_cast<float>(M_PI);
    const double told = 1e-13;
    // Azimuth theta in (-pi, pi]: quantised |theta|/pi plus an explicit sign bit.
    float ts = atan2(static_cast<float>(xyz[1]),static_cast<float>(xyz[0]));
    uint64_t nt = round(static_cast<float>((1ULL << (T-1)) - 1)*(abs(ts)*rpis));
    if(ts >= 0) nt = nt | (1ULL << (T-1));
    // Radius computed in double to limit cancellation before the float cast.
    double rd = sqrt((double)xyz[0]*xyz[0] + (double)xyz[1]*xyz[1] + (double)xyz[2]*xyz[2]);
    float rs = (float)rd;
    // Polar angle phi in [0, pi]; forced to 0 for a near-zero radius.
    float pd = (rd < told) ? 0 : static_cast<float>(acos(static_cast<double>(xyz[2])/rd));
    uint64_t np = round(static_cast<float>((1ULL << P)-1)*pd*rpis);
    // Extract exponent/mantissa straight from the IEEE-754 bits of the radius.
    uint32_t* ptr = reinterpret_cast<uint32_t*>(&rs);
    uint64_t fre = (*ptr & 0x7f800000) >> 23;
    uint64_t frm = (*ptr & 0x007fffff) >> (23-M);
    // Layout, LSB to MSB: theta | phi | mantissa | rebased exponent.
    return nt | (np << (T)) | (frm << (T + P)) | ((fre - 127 + B) << (T + P + M));
}
template<int E, int M, int T, int P, int B = (1 << (E - 1)) - 1>
static inline __device__ void decompress(const uint64_t& C, float& x, float& y, float& z)
{
    // Inverse of compress(): unpacks theta/phi/radius fields from C and
    // reconstructs Cartesian coordinates.
    static_assert(E + M + P + T == 64, "Invalid number of bits");
    const float pi = static_cast<float>(M_PI);
    // Field masks built from 64-bit shifts: `1 << T` (32-bit int) is undefined
    // behaviour for T >= 31; 1ULL keeps the shift defined for all valid widths.
    uint32_t tm = (uint32_t)((1ULL << T) - 1);
    uint32_t pm = (uint32_t)((1ULL << P) - 1);
    uint32_t mm = (uint32_t)((1ULL << M) - 1);
    uint32_t em = (uint32_t)((1ULL << E) - 1);
    uint32_t td = C & tm;
    uint32_t pd = (C >> T) & pm;
    uint32_t frm = (C >> (T+P)) & mm;
    uint32_t fre = (C >> (T+P+M)) & em;
    // Rebuild the IEEE-754 radius from the rebased exponent and truncated mantissa.
    uint32_t ri = ((fre - B + 127) << 23) | (frm << (23-M));
    float r = *reinterpret_cast<float*>(&ri);
    // Top theta bit is the sign; the remaining bits are the quantised magnitude.
    float t = pi*(-1.0 + 2.0*(td >> (T-1)))*(td & (tm >> 1))/static_cast<float>((1ULL << (T-1))-1);
    float p = pi*pd/static_cast<float>((1ULL << P)-1);
    x = r*cosf(t)*sinf(p);
    y = r*sinf(t)*sinf(p);
    z = r*cosf(p);
}
|
20,899 |
#include <stdio.h>
// Naive substring search: one thread per candidate start position `idx` in
// string c (launch uses size_o = |c| - |sub| + 1 threads). Counts characters
// of sub that match c at offset idx; on a full match writes a flag to o[idx]:
// -1 when the match is at position 0, 1 otherwise; 0 means no match at idx.
// NOTE(review): the -1 sentinel lets the host distinguish "match at index 0"
// from "no match" -- the host must test o[i]'s VALUE, not the index.
__global__ void add(char *c, char *sub, int *o,int sub_len) {
int idx=threadIdx.x;
int ctr=0;
// Compare sub against c starting at offset idx.
for (int i = 0; i < sub_len; ++i)
{
if(c[idx+i]==sub[i])
ctr++;
}
o[idx]=0;
if(idx==0 && ctr==sub_len)
o[idx]=-1;
else if(ctr==sub_len)
o[idx]=1;
}
int main(void) {
    // Reads a string and a substring from stdin, launches one GPU thread per
    // candidate start position, and reports the first match position.
    char c[100];
    char sub[100];
    int *o;        // host copy of per-position match flags
    char *d_c;     // device copy of the haystack
    char *d_sub;   // device copy of the needle
    int *d_o;      // device per-position match flags
    int d_sub_len;
    printf("Enter String\n");
    fgets (c, 100, stdin);
    printf("%s\n",c);
    int size=strlen(c)-1;        // drop the trailing newline kept by fgets
    printf("Enter Sub String\n");
    fgets (sub, 100, stdin);
    printf("%s\n",sub);
    int size_sub=strlen(sub)-1;  // drop the trailing newline kept by fgets
    d_sub_len=size_sub;
    int size_o=size-size_sub+1;  // number of candidate start positions
    // Allocate device copies and the host result buffer.
    cudaMalloc((void **)&d_c, size*sizeof(char));
    cudaMalloc((void **)&d_sub, size_sub*sizeof(char));
    cudaMalloc((void **)&d_o, size_o*sizeof(int));
    o=(int*)calloc(size_o,sizeof(int));
    // Copy inputs to device.
    cudaMemcpy(d_c, c, size*sizeof(char), cudaMemcpyHostToDevice);
    cudaMemcpy(d_sub, sub, size_sub*sizeof(char), cudaMemcpyHostToDevice);
    // One thread per candidate start position.
    add<<<1,size_o>>>(d_c, d_sub, d_o,d_sub_len);
    // Copy result flags back to host (blocking, so it also synchronizes).
    cudaMemcpy(o, d_o, size_o*sizeof(int), cudaMemcpyDeviceToHost);
    // Find the first matching position. The kernel marks a match with any
    // non-zero flag (-1 at index 0, 1 elsewhere), so we track the INDEX of the
    // first non-zero entry. This fixes the old bug where a match at position 0
    // was reported as "Not found" because the code tested the index variable
    // (0) instead of the flag value (-1).
    int pos = -1;
    for (int i = 0; i < size_o; ++i)
    {
        if (o[i] != 0) {
            pos = i;
            break;
        }
    }
    printf("LOLOL\n");
    if (pos != -1) {
        printf("found at %d\n",pos);
    } else {
        printf("Not found\n");
    }
    // Cleanup
    free(o);
    cudaFree(d_o);
    cudaFree(d_sub);
    cudaFree(d_c);
    return 0;
}
20,900 | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <exception>
#include <iostream>
#include <map>
#include <sstream>
#include <string>
using duration_t = unsigned long long;
constexpr std::size_t SHARED_MEM_CAPACITY = 49152;
constexpr std::size_t ITERATIONS = 10;
#define CE(err) \
{ \
if (err != cudaSuccess) \
{ \
std::stringstream err_ss; \
err_ss << "CUDA error in " << __FUNCTION__ << " (" << __FILE__ \
<< ":" << __LINE__ << ") - " << cudaGetErrorString(err); \
throw std::runtime_error(err_ss.str()); \
} \
}
// Payload of Count floats followed by Padding floats of dead space; the
// padding lets the benchmark vary the stride between consecutive array
// elements in shared memory.
template <std::size_t Count, std::size_t Padding>
struct foo
{
float d[Count];
float padding[Padding];
};
// Zero-padding specialisation (a `float padding[0]` member would be ill-formed).
template <std::size_t Count>
struct foo<Count, 0>
{
float d[Count];
};
// Times a single increment of one element of a shared-memory array of
// foo<4,3> (7 floats = 28 bytes per element) using the SM clock() counter.
// Launched as <<<1, 256>>>.
// NOTE(review): `arr` is never initialised -- the incremented value is
// indeterminate, which is harmless since only the latency matters here.
// NOTE(review): all 256 threads store to *duration; the surviving value is
// whichever thread's write lands last.
__global__ void k1(duration_t* const duration)
{
using subject_t = foo<4, 3>;
constexpr std::size_t SHARED_MEM_SUBJECT_CAPACITY =
SHARED_MEM_CAPACITY / sizeof(subject_t);
__shared__ subject_t arr[SHARED_MEM_SUBJECT_CAPACITY];
duration_t start_time = clock();
++arr[threadIdx.x].d[0];
duration_t end_time = clock();
*duration = end_time - start_time;
}
// Times one increment of a shared-memory double at a runtime-chosen stride
// `factor`, so the host can sweep access patterns. Launched as <<<1, 256>>>.
// NOTE(review): factor == 0 makes every thread increment arr[0] (a data race);
// like k1, arr is uninitialised and *duration is written by all threads --
// both tolerated because only the measured latency is of interest.
__global__ void k2(duration_t* const duration, std::size_t factor)
{
using subject_t = double;
constexpr std::size_t SHARED_MEM_SUBJECT_CAPACITY =
SHARED_MEM_CAPACITY / sizeof(subject_t);
__shared__ subject_t arr[SHARED_MEM_SUBJECT_CAPACITY];
duration_t start_time = clock();
++arr[threadIdx.x * factor];
duration_t end_time = clock();
*duration = end_time - start_time;
}
void run_k1(duration_t* const d_duration)
{
    // Launch k1 ITERATIONS times, printing the measured duration of each run.
    std::cout << "===== k1 =====\n";
    for (std::size_t iter = 0; iter < ITERATIONS; ++iter)
    {
        duration_t measured = 0;
        k1<<<1, 256>>>(d_duration);
        CE(cudaMemcpy(&measured, d_duration, sizeof(duration_t),
                      cudaMemcpyDeviceToHost));
        std::cout << "Duration: " << (measured / 100.) << '\n';
    }
}
void run_k2(duration_t* const d_duration, std::size_t factor)
{
    // Launch k2 at the given stride factor ITERATIONS times, printing each reading.
    std::cout << "===== k2: " << factor << " =====\n";
    for (std::size_t iter = 0; iter < ITERATIONS; ++iter)
    {
        duration_t measured = 0;
        k2<<<1, 256>>>(d_duration, factor);
        CE(cudaMemcpy(&measured, d_duration, sizeof(duration_t),
                      cudaMemcpyDeviceToHost));
        std::cout << "Duration: " << (measured / 100.) << '\n';
    }
}
void run_kn()
{
    // Allocate one device slot for the timing result, run the k1 baseline,
    // then sweep k2 over stride factors 0..19, and tear the device down.
    duration_t* d_duration = nullptr;
    CE(cudaMalloc(&d_duration, sizeof(duration_t)));
    run_k1(d_duration);
    std::size_t factor = 0;
    while (factor < 20)
    {
        run_k2(d_duration, factor);
        ++factor;
    }
    CE(cudaFree(d_duration));
    CE(cudaDeviceReset());
}
int main()
{
    // Run the benchmark suite; surface any CUDA error (thrown by CE) as a
    // message on stdout and a non-zero exit status.
    try
    {
        run_kn();
        return 0;
    }
    catch (std::exception const& ex)
    {
        std::cout << "exception: " << ex.what() << "\n";
        return 1;
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.