serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
14,001 | #include "includes.h"
// Explicit time-integration step for a node system: accumulate up to
// maxNumForces per-node forces from Fi, then compute the new displacement
// from the precomputed per-node integration coefficients ABC
// (central-difference form: A*(R - F) + B*U(t) + C*U(t-dt)).
// The result overwrites Ui_tminusdt[me_idx] in place; one thread per node.
// NOTE(review): the mass array M is accepted but never read here — confirm
// whether it is folded into ABC upstream or simply dead.
__global__ void updateDisplacements_k(float4 *Ui_t, float4 *Ui_tminusdt, float *M, float4 *Ri, float4 *Fi, int maxNumForces, float4 *ABC, unsigned int numPoints)
{
// Flat global thread index; guard against the partial last block.
int me_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (me_idx>=numPoints)
return;
// Sum this node's force slots (Fi is laid out node-major: node * maxNumForces).
float4 F = make_float4(0,0,0,0);
// printf("Max num forces: %i\n", maxNumForces);
for (int i=0; i<maxNumForces; i++)
{
float4 force_to_add = Fi[me_idx*maxNumForces+i];
F.x += force_to_add.x;
F.y += force_to_add.y;
F.z += force_to_add.z;
}
// printf("Accumulated node %i force: %f, %f, %f \n", me_idx, F.x, F.y, F.z);
// Load per-node coefficients and current/previous displacements once.
float4 ABCi = ABC[me_idx];
float4 Uit = Ui_t[me_idx];
float4 Uitminusdt = Ui_tminusdt[me_idx];
float4 R = Ri[me_idx];
float x = ABCi.x * (R.x - F.x) + ABCi.y * Uit.x + ABCi.z * Uitminusdt.x;
float y = ABCi.x * (R.y - F.y) + ABCi.y * Uit.y + ABCi.z * Uitminusdt.y;
float z = ABCi.x * (R.z - F.z) + ABCi.y * Uit.z + ABCi.z * Uitminusdt.z;
/* float x = ABCi.x * (-F.x) + ABCi.y * Ui_t[me_idx].x + ABCi.z * Ui_tminusdt[me_idx].x;
float y = ABCi.x * (-F.x) + ABCi.y * Ui_t[me_idx].y + ABCi.z * Ui_tminusdt[me_idx].y;
float z = ABCi.x * (-F.x ) + ABCi.y * Ui_t[me_idx].z + ABCi.z * Ui_tminusdt[me_idx].z;
*/
// The "t-dt" slot becomes the new displacement (caller presumably swaps
// the Ui_t / Ui_tminusdt buffers between steps — TODO confirm).
Ui_tminusdt[me_idx] = make_float4(x,y,z,0);//XXXXXXXXXXXXXXXXXXXXX
} |
14,002 | #include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#define BLOCK_SIZE 8
#define TILE_SIZE 32
#define MATRIX_SIZE 2048
#define CUDA_SAFE_CALL(call) \
{ \
cudaError err = call; \
if (cudaSuccess != err) \
{ \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, cudaGetErrorString(err)); \
exit(EXIT_FAILURE); \
} \
}
/* Current wall-clock time in seconds, with microsecond resolution. */
double timestamp()
{
    struct timeval now;
    gettimeofday(&now, 0);
    return (double)now.tv_sec + (double)now.tv_usec * 1e-6;
}
/* Dump every entry of an N x N matrix as "mat[i] = v, " pairs, then "done". */
void showMatrix(unsigned int *mat, const size_t N)
{
    const size_t total = N * N;
    for (size_t idx = 0; idx < total; idx++)
        printf("mat[%d] = %d, ", (int)idx, mat[idx]);
    printf("done\n");
}
/*
 * In-place Floyd-Warshall all-pairs shortest paths on an N x N adjacency
 * matrix stored row-major in `mat`. After the call, mat[i*N+j] holds the
 * length of the shortest path from i to j.
 */
void Floyd_sequential(unsigned int *mat, const size_t N)
{
    for (size_t k = 0; k < N; k++)
    {
        for (size_t i = 0; i < N; i++)
        {
            const size_t row = i * N;
            const unsigned int via_k = mat[row + k];
            for (size_t j = 0; j < N; j++)
            {
                const unsigned int relaxed = via_k + mat[k * N + j];
                if (relaxed < mat[row + j])
                    mat[row + j] = relaxed;
            }
        }
    }
}
// Phase 1 of blocked Floyd-Warshall: relax the pivot tile
// [start, start+TILE_SIZE) x [start, start+TILE_SIZE) against itself.
// Launch: one TILE_SIZE x TILE_SIZE block; `start` = k * TILE_SIZE.
// NOTE(review): successive k iterations read values that other threads of
// the same tile update, but there is no __syncthreads() between k steps
// (the shared-memory variant that had one is commented out) — confirm the
// relaxation order is acceptable for the intended inputs.
__global__ void gpu_primary(unsigned int *result, int N, int start)
{
int x = threadIdx.y;
int y = threadIdx.x;
int row = x + start;
int col = y + start;
//__shared__ int s_mat[TILE_SIZE][TILE_SIZE];
//printf("x: %d, y: %d\n", x, y);
int i0 = row * N + col;
//printf("i0: %d\n", i0);
//s_mat[x][y] = result[i0];
//__syncthreads();
for (int k = start; k < start + TILE_SIZE; k++)
{
// Bounds guard handles the ragged edge when N is not a tile multiple.
if (row < N && col < N){
int i1 = row * N + k;
int i2 = k * N + col;
//printf("result[%d][%d]: %d, result[%d][%d]: %d, result[%d][%d]: %d\n", row, col,result[i0], row, k, result[i1],k, col, result[i2]);
result[i0] = (result[i0] < result[i1] + result[i2]) ? result[i0] : (result[i1] + result[i2]);
//result[i0] = (s_mat[x][y] < s_mat[x][k] + s_mat[k][y]) ? s_mat[x][y] : (s_mat[x][k] + s_mat[k][y]);
//printf("result[%d]=%d\n ",i0, result[i0]);
}
}
}
// Phase 2 of blocked Floyd-Warshall: relax the tiles that share the pivot
// row (blockIdx.y == 0) or the pivot column (blockIdx.y == 1), excluding
// the pivot tile itself. `skip` is the signed tile offset from pivot tile
// k (negative for tiles before it, positive after).
// Launch: grid (block_num - 1, 2), block TILE_SIZE x TILE_SIZE.
__global__ void gpu_phase2(unsigned int *result, int N, int start, int k)
{
int skip = blockIdx.x < k ? (blockIdx.x - k): (blockIdx.x - k + 1);
int row_start = 0;
int col_start = 0;
//printf("skip: %d\n",skip);
if(blockIdx.y == 0){
// Pivot-row tile: rows fixed to the pivot band, columns offset by skip.
row_start = start;
col_start = skip * TILE_SIZE + start;
}
else{
// Pivot-column tile: columns fixed to the pivot band, rows offset.
row_start = skip * TILE_SIZE + start;
col_start = start;
}
//printf("row_start: %d, col_start: %d\n", row_start, col_start);
int row = threadIdx.y + row_start;
int col = threadIdx.x + col_start;
//__shared__ int unsigned s_mat[TILE_SIZE][TILE_SIZE];
//printf("row: %d, col: %d\n", row, col);
int i0 = row * N + col;
//printf("i0: %d\n", i0);
//s_mat[threadIdx.y][threadIdx.x] = result[i0];
//__syncthreads();
// Relax against every k of the pivot band (already finalized by phase 1).
for (int idx = start; idx < TILE_SIZE + start; idx++)
{
if (row < N && col < N){
int i1 = row * N + idx;
int i2 = idx * N + col;
//printf("result[%d][%d]: %d, result[%d][%d]: %d, result[%d][%d]: %d\n", row, col,result[i0], row, k, result[i1],k, col, result[i2]);
result[i0] = (result[i0] < result[i1] + result[i2]) ? result[i0] : (result[i1] + result[i2]);
/*
result[i0] = (s_mat[threadIdx.y][threadIdx.x] < s_mat[threadIdx.y][idx] + s_mat[idx][threadIdx.x]) ?
s_mat[threadIdx.y][threadIdx.x] : (s_mat[threadIdx.y][idx] + s_mat[idx][threadIdx.x]);
*/
/*
if(idx % 2 == 1)
printf("idx=%d, mat[%d][%d]=%d\n ",idx, row, col, result[i0]);
*/
}
}
}
// Phase 3 of blocked Floyd-Warshall: relax every tile that is in neither
// the pivot row nor the pivot column. skip_x / skip_y are the signed tile
// offsets from pivot tile k along each axis (phase-2 results are the
// inputs, so this kernel must run after phase 2 completes).
// Launch: grid (block_num - 1, block_num - 1), block TILE_SIZE x TILE_SIZE.
__global__ void gpu_phase3(unsigned int *result, int N, int start, int k)
{
int skip_x = blockIdx.x < k ? (blockIdx.x - k): (blockIdx.x - k + 1);
int skip_y = blockIdx.y < k ? (blockIdx.y - k): (blockIdx.y - k + 1);
int row_start = skip_y * TILE_SIZE + start;
int col_start = skip_x * TILE_SIZE + start;
int row = threadIdx.y + row_start;
int col = threadIdx.x + col_start;
//__shared__ int s_mat[TILE_SIZE][TILE_SIZE];
//printf("row: %d, col: %d\n", row, col);
int i0 = row * N + col;
//s_mat[threadIdx.y][threadIdx.x] = result[i0];
//__syncthreads();
for (int idx = start; idx < start + TILE_SIZE; idx++)
{
//printf("result[%d][%d]: %d, result[%d][%d]: %d, result[%d][%d]: %d\n", x, y,result[i0], x, k, result[i1],k, y, result[i2]);
if (row < N && col < N){
int i1 = row * N + idx;
int i2 = idx * N + col;
//printf("result[%d][%d]: %d, result[%d][%d]: %d, result[%d][%d]: %d\n", row, col,result[i0], row, k, result[i1],k, col, result[i2]);
result[i0] = (result[i0] < result[i1] + result[i2]) ? result[i0] : (result[i1] + result[i2]);
/*
result[i0] = (s_mat[threadIdx.y][threadIdx.x] < s_mat[threadIdx.y][idx] + s_mat[idx][threadIdx.x]) ?
s_mat[threadIdx.y][threadIdx.x] : (s_mat[threadIdx.y][idx] + s_mat[idx][threadIdx.x]);
*/
/*
if(idx % 2 == 1)
printf("phase3: idx=%d, mat[%d][%d]=%d\n ",idx, row, col, result[i0]);
*/
}
}
}
/*
 * Fill an N x N adjacency matrix with random edge weights in [1, 31].
 * A drawn weight of 0 means "no edge" and is stored as INT_MAX/2 — large
 * enough to act as infinity while two of them can still be added without
 * overflowing. Diagonal entries are forced to 0.
 * NOTE(review): uses INT_MAX, so <limits.h> must be available upstream.
 */
void GenMatrix(unsigned int *mat, const size_t N)
{
    const size_t total = N * N;
    for (size_t idx = 0; idx < total; idx++)
    {
        unsigned int w = rand() % 32;
        if (w == 0)
            w = INT_MAX / 2;       /* no edge */
        if (idx % N == idx / N)
            w = 0;                 /* diagonal: distance to self */
        mat[idx] = w;
    }
}
/* Element-wise comparison of two arrays; reports the first mismatch and
 * returns false, or returns true when all eleNum entries agree. */
bool CmpArray(const unsigned int *l, const unsigned int *r, const size_t eleNum)
{
    size_t i = 0;
    while (i < eleNum)
    {
        if (l[i] != r[i])
        {
            printf("ERROR: l[%d] = %d, r[%d] = %d\n", (int)i, l[i], (int)i, r[i]);
            return false;
        }
        ++i;
    }
    return true;
}
// Driver: build a random MATRIX_SIZE^2 adjacency matrix, run the CPU
// Floyd-Warshall as the reference, then the 3-phase blocked GPU version,
// time both, and compare the results element-wise.
// Assumes N is a multiple of TILE_SIZE (block_num = N / TILE_SIZE).
int main(int argc, char **argv)
{
// generate a random matrix.
size_t N = MATRIX_SIZE;
unsigned int *mat = (unsigned int *)malloc(sizeof(int) * N * N);
GenMatrix(mat, N);
// compute the reference result.
unsigned int *ref = (unsigned int *)malloc(sizeof(int) * N * N);
memcpy(ref, mat, sizeof(int) * N * N);
// ---------------- primary module test -----------------
/*
unsigned int *ref_primary = (unsigned int *)malloc(sizeof(int) * 3 * 3);
ref_primary[0] = mat[0];
ref_primary[1] = mat[1];
ref_primary[2] = mat[N];
ref_primary[3] = mat[N + 1];
Floyd_sequential(ref_primary, 2);
showMatrix(ref_primary, 2);
*/
// ------------------------------------------------------
//printf("mat\n");
//showMatrix(mat, N);
// ------------- sequential ---------------
double time1 = timestamp();
Floyd_sequential(ref, N);
double time2 = timestamp();
// ----------------------------------------
//printf("ref\n");
//showMatrix(ref, N);
//CUDA Portion
unsigned int *result = (unsigned int *)malloc(sizeof(int) * N * N);
memcpy(result, mat, sizeof(int) * N * N);
unsigned int *d_result;
// compute your results
CUDA_SAFE_CALL(cudaMalloc((void **)&d_result, sizeof(int) * N * N));
CUDA_SAFE_CALL(cudaMemcpy(d_result, result, sizeof(int) * N * N, cudaMemcpyHostToDevice));
double time3 = timestamp();
int block_num = N / TILE_SIZE;
printf("block_num: %d\n", block_num);
int k = 0;
double p_cost = 0, s_cost = 0, o_cost = 0;
// One outer iteration per pivot tile: phase 1 (pivot), phase 2 (pivot
// row/column), phase 3 (everything else). Kernels in the same stream, so
// each launch implicitly waits for the previous one.
// NOTE(review): the per-phase timestamps bracket only the asynchronous
// launch calls (no sync in between), so p/s/o_cost measure launch
// overhead, not GPU execution time — confirm that is intended.
for (; k < block_num; k += 1)
{
//if(k)break;
// primary modules
dim3 block(TILE_SIZE, TILE_SIZE);
dim3 grid(1,1);
int start = k * TILE_SIZE;
double time_p = timestamp();
gpu_primary<<<grid, block>>>(d_result, N, start);
double time_p_done = timestamp();
p_cost += time_p_done - time_p;
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err)
{
printf("Cuda error in file '%s' in line %i : %s.\n",
__FILE__, __LINE__, cudaGetErrorString(err));
}
// phase2 modules
dim3 block2(TILE_SIZE, TILE_SIZE);
dim3 grid2(block_num - 1, 2);
double time_s = timestamp();
gpu_phase2<<<grid2, block2>>>(d_result, N, start, k);
double time_s_done = timestamp();
s_cost += time_s_done - time_s;
cudaError_t err2 = cudaGetLastError();
if (cudaSuccess != err2)
{
printf("Cuda error in file '%s' in line %i : %s.\n",
__FILE__, __LINE__, cudaGetErrorString(err2));
}
//phase3 modules
dim3 block3(TILE_SIZE, TILE_SIZE);
dim3 grid3(block_num - 1, block_num - 1);
double time_o = timestamp();
gpu_phase3<<<grid3, block3>>>(d_result, N, start, k);
double time_o_done = timestamp();
o_cost += time_o_done - time_o;
cudaError_t err3 = cudaGetLastError();
if (cudaSuccess != err3)
{
printf("Cuda error in file '%s' in line %i : %s.\n",
__FILE__, __LINE__, cudaGetErrorString(err3));
}
}
double time4 = timestamp();
// The blocking device-to-host copy also drains the pending kernels.
CUDA_SAFE_CALL(cudaMemcpy(result, d_result, sizeof(int) * N * N, cudaMemcpyDeviceToHost));
//printf("result\n");
//showMatrix(result, N);
printf("p_cost: %f, s_cost: %f, o_cost: %f\n", p_cost, s_cost, o_cost);
printf("cuda compute time use: %f\n", p_cost + s_cost + o_cost);
printf("sequential time use: %f, cuda time use: %f\nspeedup-rate: %f\n", time2 - time1, time4 - time3, (time2 - time1) / (time4 - time3));
// compare cuda result with reference result
if (CmpArray(result, ref, N * N))
printf("The matrix matches.\n");
else
printf("The matrix do not match.\n");
free(ref);
free(mat);
free(result);
cudaFree(d_result);
}
|
14,003 | // System includes
#include <stdio.h>
#include <assert.h>
#include <iostream>
#include <math.h>
// CUDA runtime
#include <cuda_runtime.h>
#define BILLION 1000000000L
// Final unrolled warp-level stage of the shared-memory tree reduction:
// adds the upper half of each remaining stride into sdata[tid] without
// __syncthreads(). `sdata` must be volatile so each store is visible to
// the other lanes. The blockSize checks are compile-time constants, so
// unused steps are elided.
// NOTE(review): this implicit-warp-synchrony idiom predates Volta's
// independent thread scheduling; on SM70+ it needs __syncwarp() between
// steps (or a shuffle-based reduction) — confirm the target architecture.
template <unsigned int blockSize>
__device__ void warpReduce(volatile float *sdata, unsigned int tid) {
if (blockSize >= 64) sdata[tid] += sdata[tid + 32];
if (blockSize >= 32) sdata[tid] += sdata[tid + 16];
if (blockSize >= 16) sdata[tid] += sdata[tid + 8];
if (blockSize >= 8) sdata[tid] += sdata[tid + 4];
if (blockSize >= 4) sdata[tid] += sdata[tid + 2];
if (blockSize >= 2) sdata[tid] += sdata[tid + 1];
}
// Multi-block sum reduction ("reduce6" pattern): each thread accumulates a
// grid-strided slice of g_idata into shared memory, the block tree-reduces
// it, and block b writes its partial sum to g_odata[b].
// Launch requirements: blockDim.x == blockSize (power of two) and
// blockSize * sizeof(float) bytes of dynamic shared memory.
template <unsigned int blockSize>
__global__ void reduce6(float *g_idata, float *g_odata, unsigned int n) {
extern __shared__ float sdata[];
unsigned int tid = threadIdx.x;
unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int i = blockIdx.x*(blockSize*2) + tid;
unsigned int gridSize = blockSize*2*gridDim.x;
sdata[tid] = 0;
// Grid-stride accumulation, two elements per iteration. The second load
// is now guarded so an `n` that is not a multiple of 2*blockSize no
// longer reads past the end of g_idata (the original read
// g_idata[i+blockSize] unconditionally).
while (i < n) {
sdata[tid] += g_idata[i];
if (i + blockSize < n) sdata[tid] += g_idata[i + blockSize];
i += gridSize;
}
__syncthreads();
if (blockSize >= 1024) { if (tid < 512) { sdata[tid] += sdata[tid + 512]; } __syncthreads(); }
if (blockSize >= 512) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); }
if (blockSize >= 256) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); }
if (blockSize >= 128) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); }
if (tid < 32)warpReduce<blockSize>(sdata, tid);
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
// Fold the per-block partials into g_odata[0]. There are gridDim.x
// partials, not blockDim.x — the original loop bound read far past the
// end of g_odata.
// NOTE(review): thread (0,0) can still race with other blocks that have
// not yet written their partial (no grid-wide barrier exists here); a
// second kernel launch or an atomicAdd per block would be the robust fix.
if (index == 0) { for (int j=1; j < gridDim.x; j++){g_odata[0] +=g_odata[j];}}
}
// Single-block sum reduction: every thread strides over the whole array
// accumulating a private partial, then the blockSize partials are
// tree-reduced in dynamically sized shared memory.
// Launch requirements: ONE block of blockSize threads (power of two) and
// blockSize * sizeof(float) bytes of dynamic shared memory — the kernel
// reads r[idx + size] up to index blockSize - 1.
template <unsigned int blockSize>
__global__ void sumCommSingleBlock(float *a, int arraySize, float *out) {
int idx = threadIdx.x;
float sum = 0.0;
for (int i = idx; i < arraySize; i += blockSize)
sum += a[i];
extern __shared__ float r[];
r[idx] = sum;
__syncthreads();
for (int size = blockSize/2; size>0; size/=2) { //uniform
if (idx<size)
r[idx] += r[idx+size];
__syncthreads();
}
if (idx == 0)
*out = r[0];
}
// Integrand for the pi quadrature: f(a) = 4 / (1 + a^2).
__device__ double f( double a )
{
const double denom = 1.0 + a * a;
return 4.0 / denom;
}
// Midpoint-rule estimate of pi = integral_0^1 4/(1+x^2) dx using a single
// block: each thread sums every blockSize-th subinterval (width h = 1/N),
// the per-thread partials are tree-reduced in shared memory, and thread 0
// writes the final estimate to *piest.
// Launch requirements: ONE block with blockDim.x == blockSize, and
// blockSize <= 1024 (static shared array p), blockSize a power of two.
template <unsigned int blockSize>
__global__ void PiEstSingleBlock(long N, double *piest) {
int idx = threadIdx.x;
double h;
double sum = 0.0;
h = 1.0/(double)N;
// Do the parallel partial sums for pi
for (long i = idx+1; i <= N; i += blockSize)
sum += f(h * ((double)i - 0.5));
__shared__ double p[1024];
// Now add the partial sums together
p[idx] = h*sum;
__syncthreads();
for (int size = blockSize/2; size>0; size/=2) { //uniform
if (idx<size)
p[idx] += p[idx+size];
__syncthreads();
}
if (idx == 0)
*piest = p[0];
}
// Fill a[0..n-1] with 1.0f, 2.0f, ... using a grid-stride loop so any
// launch configuration covers the whole array.
__global__ void init(float *a, unsigned int n){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride) {
// 1.0f keeps the arithmetic in single precision; the original's bare
// 1.0 promoted every element to double and back.
a[i] = (float)i + 1.0f;
}
}
// Driver: initializes a managed array, times the multi-block reduction,
// the single-block reduction, and a single-block pi estimate, and prints
// each result against a CPU reference.
int main(void) {
struct timespec start , stop ; // variables for timing
double accum ; // elapsed time variable
const unsigned int blockSize=1024;
dim3 numThreads;
dim3 numBlocks;
unsigned int m=32768;
float *a;
float *partsums;
double *mypi;
long N = 1000000;
double sum, h;
h = 1.0/(double)N; // For CPU version of loop
cudaMallocManaged(&a, m * sizeof(float));
numThreads.x = blockSize;
numBlocks.x = (m + numThreads.x - 1) / numThreads.x;
cudaMallocManaged(&partsums, numBlocks.x * sizeof(float));
printf("Numblks x %d Blksize x %d\n",numBlocks.x, numThreads.x);
init<<<numBlocks,numThreads>>>(a,m);
cudaDeviceSynchronize(); // This is critical if going to look at output on CPU!
for (int i = 0; i < 4; i++) {
printf(" %10.1f", a[i]);
}
printf("\n");
clock_gettime ( CLOCK_REALTIME ,&start );
reduce6<blockSize><<<numBlocks,numThreads,blockSize*sizeof(float)>>>(a,partsums,m);
cudaDeviceSynchronize(); // This is critical if going to look at output on CPU!
clock_gettime ( CLOCK_REALTIME ,&stop );
for (int i = 0; i < 4; i++) {
printf(" %10.1f", partsums[i]);
}
// printf(" %10.1f", partsums[0]);
printf("\n");
accum =( stop.tv_sec - start.tv_sec )+
( stop.tv_nsec - start.tv_nsec )/(double)BILLION ;
printf (" Multiblock reduce : %lf sec %lf MBytes/s.\n",accum, 1.e-6*m*sizeof(float)/accum);
clock_gettime ( CLOCK_REALTIME ,&start );
// Bug fixes: sumCommSingleBlock declares `extern __shared__ float r[]`,
// so the launch must supply blockSize*sizeof(float) bytes of dynamic
// shared memory (the original passed none, leaving r zero-sized), and
// the kernel must complete before the CPU reads partsums[0] from
// managed memory, so synchronize before stopping the clock.
sumCommSingleBlock<blockSize><<<1,numThreads,blockSize*sizeof(float)>>>(a,m,partsums);
cudaDeviceSynchronize();
clock_gettime ( CLOCK_REALTIME ,&stop );
accum =( stop.tv_sec - start.tv_sec )+
( stop.tv_nsec - start.tv_nsec )/(double)BILLION ;
printf("1 block sum %10.1f", partsums[0]);
printf("\n");
printf (" Single block reduce : %lf sec %lf MBytes/s.\n",accum , 1.e-6*m*sizeof(float)/accum);
printf("True answer %10.1f \n",0.5*(float)m*((float)m+1.0));
cudaMallocManaged(&mypi, sizeof(double));
clock_gettime ( CLOCK_REALTIME ,&start );
PiEstSingleBlock<blockSize><<<1,numThreads>>>(N, mypi);
cudaDeviceSynchronize();
clock_gettime ( CLOCK_REALTIME ,&stop );
accum =( stop.tv_sec - start.tv_sec )+
( stop.tv_nsec - start.tv_nsec )/(double)BILLION ;
printf("Pi estimate %.16f", mypi[0]);
printf("\n");
printf("Time to compute mypi is %.16f sec.\n",accum);
// CPU reference for pi using the same midpoint rule.
sum = 0.0;
for (long i=1; i <= N; i++){
sum += 4.0/(1.0+h * ((double)i - 0.5)*h * ((double)i - 0.5));
}
printf("CPU pi is %.16f \n",h*sum);
// Release managed allocations (the original leaked all three).
cudaFree(a);
cudaFree(partsums);
cudaFree(mypi);
return 0;
}
|
14,004 | #include "includes.h"
// Sobel edge detection with an _UNROLL_ x _UNROLL_ pixel patch per thread,
// staged through shared memory. For interior patches, computes the x and
// y Sobel responses and writes 255 where the squared gradient magnitude
// exceeds `thresh`, else 0.
// NOTE(review): _TILESIZE_, _TILESIZE_2 and _UNROLL_ are macros defined
// outside this view; the loads assume input[(j+y)*width + (i+x)] is valid
// for the whole patch (no image-border guard) — confirm halo handling at
// the call site. Border threads of each tile load data but produce no
// output, so tiles must overlap for full coverage.
__global__ void sobelEdgeDetectionSharedMemUnroll(int *input, int *output, int width, int height, int thresh) {
__shared__ int shMem[4 * _TILESIZE_2 * _TILESIZE_2 ];
int num = _UNROLL_;
int size = num * _TILESIZE_2;
int i = blockIdx.x * num * _TILESIZE_ + threadIdx.x * num;
int j = blockIdx.y * num * _TILESIZE_ + threadIdx.y * num;
int xind = num * threadIdx.x;
int yind = num * threadIdx.y;
// Cooperative load of this thread's num x num patch into shared memory.
for(int x = 0; x < num; x++)
{
for(int y = 0; y < num; y++)
{
shMem[ size * (yind + y) + (xind + x)] = input[(j + y) * width + (i + x)];
}
}
__syncthreads();
// Only interior patches (full 3x3 neighborhood in shared memory) emit output.
if ( xind > 0 && yind > 0 && xind < (size - 2) && yind < (size - 2))
{
for(int x = 0; x < num; x++)
{
for(int y = 0; y < num; y++)
{
// Horizontal (sum1) and vertical (sum2) Sobel responses.
int sum1 = shMem[(xind + 1 + x) + size * (yind - 1 + y)] - shMem[(xind - 1 + x) + size * (yind - 1 + y)]
+ 2 * shMem[(xind + 1 + x) + size * (yind + y)] - 2 * shMem[(xind - 1 + x) + size * (yind + y)]
+ shMem[(xind + 1 + x) + size * (yind + 1 + y)] - shMem[(xind - 1 + x) + size * (yind + 1 + y)];
int sum2 = shMem[(xind - 1 + x) + size * (yind - 1 + y)] + 2 * shMem[(xind + x) + size * (yind - 1 + y)] + shMem[(xind + 1 + x) + size * (yind - 1 + y)]
- shMem[(xind - 1 + x) + size * (yind + 1 + y)] - 2 * shMem[(xind + x) + size * (yind + 1 + y)] - shMem[(xind + 1 + x) + size * (yind + 1 + y)];
// Squared magnitude avoids the sqrt; thresh must be in squared units.
int magnitude = sum1 * sum1 + sum2 * sum2;
int index = (j + y) * width + (i + x);
if(magnitude > thresh)
output[index] = 255;
else
output[index] = 0;
}
}
}
} |
14,005 | #include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
// Abort the program with file/line context when a CUDA API call failed;
// no-op on cudaSuccess.
void handleError(cudaError_t error, int lineno) {
    if (error == cudaSuccess) {
        return;
    }
    printf("Error: %s:%d\n", __FILE__, lineno);
    printf("Code: %d, Reason: %s\n", error, cudaGetErrorString(error));
    exit(EXIT_FAILURE);
}
// Fill iptr[0..size-1] with the identity sequence 0, 1, 2, ...
void initializeInt(int *iptr, int size) {
    int k = 0;
    while (k < size) {
        iptr[k] = k;
        ++k;
    }
}
// Print an nx-by-ny matrix, one row per output line.
// Fix: the original format string "%3d\n" put every element on its own
// line, which defeated the per-row layout implied by the trailing
// printf("\n") after each row.
void printMatrix(int *iptr, const int nx, const int ny) {
    int *C = iptr;
    for(int i = 0; i < nx; i++) {
        for(int j = 0; j < ny; j++) {
            printf("%3d ", C[j]);
        }
        C += ny;   // advance to the next row
        printf("\n");
    }
}
// Debug kernel: each thread prints its thread/block coordinates, its 2D
// matrix coordinate (ix, iy), the flattened row-major index, and the value
// it maps to in A.
// NOTE(review): there is no bounds guard, so the launch grid must not
// exceed nx x ny or A[idx] reads out of bounds — confirm at the call site.
__global__ void printThreadIndex(int *A, const int nx, const int ny) {
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
unsigned int idx = iy * nx + ix;
printf("thread_id (%d, %d) block_id (%d, %d) coordinate (%d, %d) global index %2d ival %2d\n", threadIdx.x, threadIdx.y, blockIdx.x, blockIdx.y, ix, iy, idx, A[idx]);
}
// Driver: fills an 8x6 int matrix host-side, prints it, copies it to the
// device, and launches a debug kernel that prints each thread's mapping
// onto the matrix.
int main(int argc, char *argv[]) {
int dev = 0;
cudaDeviceProp deviceProp;
handleError(cudaGetDeviceProperties(&deviceProp, dev), __LINE__);
printf("Using device %d:%s\n", dev, deviceProp.name);
handleError(cudaSetDevice(dev), __LINE__);
int nx = 8;
int ny = 6;
int nxy = nx * ny;
// The matrix holds ints, so size it with sizeof(int) (same width as the
// original's sizeof(float), but no longer lying about the element type).
int nBytes = nxy * sizeof(int);
int *h_A;
h_A = (int *)malloc(nBytes);
// Bug fix: initialize nxy ELEMENTS, not nBytes. The original passed the
// byte count, writing 4x past the end of the allocation (heap overflow).
initializeInt(h_A, nxy);
printMatrix(h_A, nx, ny);
int *d_A;
handleError(cudaMalloc((void **)&d_A, nBytes), __LINE__);
handleError(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice), __LINE__);
dim3 block(4, 2);
dim3 grid((nx + block.x - 1)/ block.x, (ny + block.y - 1)/ block.y);
printThreadIndex<<<grid, block>>>(d_A, nx, ny);
handleError(cudaDeviceSynchronize(), __LINE__);
cudaFree(d_A);
free(h_A);
cudaDeviceReset();
return 0;
}
|
14,006 | #include <iostream>
using namespace std;
// Print the human-readable descriptions of two CUDA error codes and of
// cudaSuccess via cudaGetErrorString (compiled as CUDA, so the runtime
// symbols are available without an explicit include).
int main() {
cout << cudaGetErrorString(cudaErrorMemoryAllocation) << endl;
cout << cudaGetErrorString(cudaErrorInvalidValue) << endl;
cout << cudaGetErrorString(cudaSuccess) << endl;
return 0;
}
|
14,007 | //
// Created by root on 2020/11/23.
//
#include "stdio.h"
#include "cuda_runtime.h"
#define N 256
int n_streams = 4;
// Busy-work kernel used to visualize stream scheduling in a profiler;
// the accumulated value is never stored, so the loop only burns cycles.
__global__ void kernel_1() {
double acc = 0.0;
for (int iter = 0; iter < N; ++iter) {
acc += tan(0.1) * tan(0.2);
}
}
// Busy-work kernel, identical workload to kernel_1 (stream-overlap demo).
__global__ void kernel_2() {
double acc = 0.0;
for (int iter = 0; iter < N; ++iter) {
acc += tan(0.1) * tan(0.2);
}
}
// Busy-work kernel, identical workload to kernel_1 (stream-overlap demo).
__global__ void kernel_3() {
double acc = 0.0;
for (int iter = 0; iter < N; ++iter) {
acc += tan(0.1) * tan(0.2);
}
}
// Busy-work kernel, identical workload to kernel_1 (stream-overlap demo).
__global__ void kernel_4() {
double acc = 0.0;
for (int iter = 0; iter < N; ++iter) {
acc += tan(0.1) * tan(0.2);
}
}
// Demonstrates breadth-first kernel scheduling across n_streams CUDA
// streams and times the whole batch with CUDA events.
int main() {
// Allow up to 32 concurrent kernel connections to the device so the
// streams below can actually overlap. (The original comment claimed
// "set max_connections to 1", contradicting the value being set.)
setenv("CUDA_DEVICE_MAX_CONNECTIONS", "32", 1);
printf("CUDA_DEVICE_MAX_CONNECTIONS: %s\n", getenv("CUDA_DEVICE_MAX_CONNECTIONS"));
cudaStream_t *streams = (cudaStream_t *) malloc(n_streams * sizeof(cudaStream_t));
for (int i = 0; i < n_streams; i++) {
cudaStreamCreate(&streams[i]);
}
float elapsed_time = 0.0f;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
dim3 block(1);
dim3 grid(1);
// cudaEventRecord(start);
// // Deep-first schedule 0.6253ms
// for (int i = 0; i < n_streams; i++) {
// kernel_1<<<grid, block, 0, streams[i]>>>();
// kernel_2<<<grid, block, 0, streams[i]>>>();
// kernel_3<<<grid, block, 0, streams[i]>>>();
//// kernel_3<<<grid, block, 0>>>();
// kernel_4<<<grid, block, 0, streams[i]>>>();
// }
// cudaEventRecord(stop);
//
// cudaEventSynchronize(stop);
// cudaEventElapsedTime(&elapsed_time, start, stop);
// printf("time elapsed between start and stop: %.4f\n", elapsed_time);
cudaEventRecord(start);
// Breadth-first schedule 0.2397ms
for (int i = 0; i < n_streams; i++) {
kernel_1<<<grid, block, 0, streams[i]>>>();
}
for (int i = 0; i < n_streams; i++) {
kernel_2<<<grid, block, 0, streams[i]>>>();
}
for (int i = 0; i < n_streams; i++) {
kernel_3<<<grid, block, 0, streams[i]>>>();
}
for (int i = 0; i < n_streams; i++) {
kernel_4<<<grid, block, 0, streams[i]>>>();
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
elapsed_time = 0.0f;
cudaEventElapsedTime(&elapsed_time, start, stop);
printf("time elapsed between start and stop: %.4f", elapsed_time);
// Release CUDA resources — the original leaked the events, the streams,
// and the host array holding them.
cudaEventDestroy(start);
cudaEventDestroy(stop);
for (int i = 0; i < n_streams; i++) {
cudaStreamDestroy(streams[i]);
}
free(streams);
return 0;
}
|
14,008 | #include <assert.h> // assert
#include <float.h> // DECIMAL_DIG
#include <stdio.h> // fprintf, printf, stderr
#include <stddef.h> // size_t, NULL
#include <stdlib.h> // free, malloc, EXIT_SUCCESS
#include <math.h> // cos, fabs, isfinite, sin
#include "cuda_runtime.h" // __global__, __restrict__, cuda*
#ifndef DECIMAL_DIG
#define DECIMAL_DIG (21)
#endif // DECIMAL_DIG
typedef double real_type;
// Element-wise vector addition: c[i] = a[i] + b[i] for i in [0, count).
// One thread per element; the guard handles the partial last block.
__global__ void
vectorAddition (
  const real_type * __restrict__ a, const real_type * __restrict__ b, real_type * __restrict__ c, size_t count
)
{
  const unsigned gid = blockDim.x * blockIdx.x + threadIdx.x;
  if (gid >= count)
  {
    return;
  }
  c [gid] = a [gid] + b [gid];
}
// Approximate floating-point equality: returns nonzero when |x - y| is
// within abs_tol, or within rel_tol of either operand's magnitude.
// Non-finite inputs (inf/NaN) compare with plain ==, so NaN never matches
// and same-signed infinities do. Both tolerances must be non-negative.
int
isClose (real_type x, real_type y, real_type rel_tol, real_type abs_tol)
{
  assert (rel_tol >= 0.0);
  assert (abs_tol >= 0.0);
  if (! (isfinite (x) && isfinite (y)))
  {
    return x == y;
  }
  if (x == y)
  {
    return 1;
  }
  const real_type diff = fabs (x - y);
  if (diff <= abs_tol)
  {
    return 1;
  }
  return (diff <= rel_tol * fabs (x)) || (diff <= rel_tol * fabs (y));
}
// Vector-addition smoke test: fills a[i] = sin^2(i) and b[i] = cos^2(i),
// adds them on the device, and checks every element of the result is
// (approximately) a[i] + b[i] with isClose. Every CUDA call is checked
// explicitly and failures abort with a message.
// NOTE(review): the device_* pointers are declared `const` yet written
// through cudaMalloc's (void **) cast, and host allocations are not freed
// on the early-return error paths — both tolerated here since every error
// path exits the process.
int
main (int argc, char * argv [])
{
const size_t count = 65536;
const size_t bytes = count * sizeof (real_type);
real_type * __restrict__ const host_a = (real_type *) malloc (bytes);
if (host_a == NULL)
{
fprintf (stderr, "Error: couldn't allocate memory for host vector `a' (%zu bytes).\n", bytes);
return EXIT_FAILURE;
}
real_type * __restrict__ const host_b = (real_type *) malloc (bytes);
if (host_b == NULL)
{
fprintf (stderr, "Error: couldn't allocate memory for host vector `b' (%zu bytes).\n", bytes);
return EXIT_FAILURE;
}
real_type * __restrict__ const host_c = (real_type *) malloc (bytes);
if (host_c == NULL)
{
fprintf (stderr, "Error: couldn't allocate memory for host vector `c' (%zu bytes).\n", bytes);
return EXIT_FAILURE;
}
real_type * __restrict__ const device_a = NULL;
cudaError_t allocated = cudaMalloc ((void **) & device_a, bytes);
if (allocated != cudaSuccess)
{
fprintf (stderr, "Error: couldn't allocate memory for device vector `a' (%zu bytes): %s.\n", bytes, cudaGetErrorString (allocated));
return EXIT_FAILURE;
}
real_type * __restrict__ const device_b = NULL;
allocated = cudaMalloc ((void **) & device_b, bytes);
if (allocated != cudaSuccess)
{
fprintf (stderr, "Error: couldn't allocate memory for device vector `b' (%zu bytes): %s.\n", bytes, cudaGetErrorString (allocated));
return EXIT_FAILURE;
}
real_type * __restrict__ const device_c = NULL;
allocated = cudaMalloc ((void **) & device_c, bytes);
if (allocated != cudaSuccess)
{
fprintf (stderr, "Error: couldn't allocate memory for device vector `c' (%zu bytes): %s.\n", bytes, cudaGetErrorString (allocated));
return EXIT_FAILURE;
}
// sin^2 + cos^2 == 1, so every output element has a known expected value.
for (size_t i = 0; i < count; ++ i)
{
/* host_a [i] = i;
host_b [i] = count - i - 1;*/
host_a [i] = sin (i) * sin (i);
host_b [i] = cos (i) * cos (i);
}
cudaError_t copied = cudaMemcpy (device_a, host_a, bytes, cudaMemcpyHostToDevice);
if (copied != cudaSuccess)
{
fprintf (stderr, "Error: couldn't copy host vector `a' to device: %s.\n", cudaGetErrorString (copied));
return EXIT_FAILURE;
}
copied = cudaMemcpy (device_b, host_b, bytes, cudaMemcpyHostToDevice);
if (copied != cudaSuccess)
{
fprintf (stderr, "Error: couldn't copy host vector `b' to device: %s.\n", cudaGetErrorString (copied));
return EXIT_FAILURE;
}
const unsigned threads_per_block = 256;
const unsigned blocks_per_grid = (count + threads_per_block - 1) / threads_per_block;
// Clear any sticky prior error so the post-launch check is meaningful.
cudaGetLastError ();
vectorAddition <<<blocks_per_grid, threads_per_block>>> (device_a, device_b, device_c, count);
cudaDeviceSynchronize ();
const cudaError_t added = cudaGetLastError ();
if (added != cudaSuccess)
{
fprintf (stderr, "Error: couldn't launch kernel: %s.\n", cudaGetErrorString (added));
return EXIT_FAILURE;
}
copied = cudaMemcpy (host_c, device_c, bytes, cudaMemcpyDeviceToHost);
if (copied != cudaSuccess)
{
fprintf (stderr, "Error: couldn't copy vector `c' to host: %s.\n", cudaGetErrorString (copied));
return EXIT_FAILURE;
}
// Validate element-wise against the host-computed sum.
for (size_t i = 0; i < count; ++ i)
{
const real_type expected = host_a [i] + host_b [i];
const real_type actual = host_c [i];
if (! isClose (expected, actual, 1e-8, 1e-16))
{
fprintf (
stderr, "Test failed at element %zu: expected=%.*f;actual=%.*f;.\n",
i, DECIMAL_DIG, expected, DECIMAL_DIG, actual
);
return EXIT_FAILURE;
}
}
cudaError_t freed = cudaFree (device_a);
if (freed != cudaSuccess)
{
fprintf (stderr, "Error: couldn't free device vector `a': %s.\n", cudaGetErrorString (freed));
return EXIT_FAILURE;
}
freed = cudaFree (device_b);
if (freed != cudaSuccess)
{
fprintf (stderr, "Error: couldn't free device vector `b': %s.\n", cudaGetErrorString (freed));
return EXIT_FAILURE;
}
freed = cudaFree (device_c);
if (freed != cudaSuccess)
{
fprintf (stderr, "Error: couldn't free device vector `c': %s.\n", cudaGetErrorString (freed));
return EXIT_FAILURE;
}
free (host_a);
free (host_b);
free (host_c);
printf ("Done.\n");
return EXIT_SUCCESS;
}
|
14,009 | #include "includes.h"
// PPCG solver step: for one interior grid cell, computes the 5-point
// stencil matrix-vector product smvp = (A * sd) at that cell, then updates
// the residual r -= smvp and the solution u += sd.
// Grid layout: row-major with halo_depth ghost cells on each side, so the
// padded row width is x_inner + 2*halo_depth; gid enumerates interior
// cells only and is mapped past the halo via off0.
__global__ void ppcg_calc_ur( const int x_inner, const int y_inner, const int halo_depth, const double* kx, const double* ky, const double* sd, double* u, double* r)
{
const int gid = threadIdx.x+blockIdx.x*blockDim.x;
if(gid >= x_inner*y_inner) return;
const int x = x_inner + 2*halo_depth;
const int col = gid % x_inner;
const int row = gid / x_inner;
const int off0 = halo_depth*(x + 1);
const int index = off0 + col + row*x;
// Diagonal term plus the four face-coupling terms (kx east/west,
// ky north/south at stride x).
const double smvp = (1.0
+ (kx[index+1]+kx[index])
+ (ky[index+x]+ky[index]))*sd[index]
- (kx[index+1]*sd[index+1]+kx[index]*sd[index-1])
- (ky[index+x]*sd[index+x]+ky[index]*sd[index-x]);
r[index] -= smvp;
u[index] += sd[index];
} |
14,010 | #include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#include <time.h>
// Writes the element-wise square of a into b (b[i] = a[i] * a[i]);
// one thread per element, guarded against the partial last block.
// (Kept the historical name `add` — callers use it.)
__global__ void add(int N, double *a,double *b)
{
int gid = blockIdx.x*blockDim.x + threadIdx.x;
if(gid >= N)
return;
double v = a[gid];
b[gid] = v * v;
}
// Benchmark: for N from 10M to 100M, squares an array on the GPU and on
// the CPU and reports the speedup. GPU timing covers launch + sync, so it
// measures kernel execution (but not the memcpys).
// NOTE(review): `ceil` is used below but <math.h> is not included in this
// translation unit (only time.h/stdio/stdlib/cuda.h) — likely compiles via
// transitive includes; confirm.
int main(int argc, char *argv[])
{
int N; //Problem Size
int T = 10, B = 1; // threads per block/blocks per grid
double *a,*b;
double *dev_a, *dev_b;
for(N=10000000;N<=100000000;N=N+10000000)
{
printf("N = %d\n",N);
// Fresh host and device allocations each problem size (freed below).
a = (double*)malloc(sizeof(double)*N);
b = (double*)malloc(sizeof(double)*N);
cudaMalloc((void**)&dev_a,N * sizeof(double));
cudaMalloc((void**)&dev_b,N * sizeof(double));
for(int i=0;i<N;i++)
{
// load arrays with some numbers
a[i] = i;
}
cudaMemcpy(dev_a, a , N*sizeof(double),cudaMemcpyHostToDevice);
T = 1024;
B = ceil(double(N)/T);
clock_t start_time = clock();
add<<<B,T>>>(N,dev_a,dev_b);
cudaDeviceSynchronize();
clock_t end_time = clock();
double parallel_time = (double(end_time-start_time)/CLOCKS_PER_SEC);
cudaMemcpy(b,dev_b,N*sizeof(double),cudaMemcpyDeviceToHost);
// CPU reference overwrites b in place (the GPU result is not compared).
start_time = clock();
int i;
for(i=0;i<N;i++)
{
b[i] = a[i] * a[i];
}
end_time = clock();
double serial_time = (double(end_time-start_time)/CLOCKS_PER_SEC);
double speedup = serial_time/parallel_time;
printf("N=%d, parallel_time = %lf, serial_time = %lf, speedup = %lf\n",N,parallel_time,serial_time,speedup);
//printf("%lf %lf\n",a[5],b[5]);
free(a);
free(b);
cudaFree(dev_a); // clean up
cudaFree(dev_b);
}
return 0;
} |
14,011 | #include "includes.h"
// Finite-volume update: qold -= dt/dx * (fm_right - fp_left)
//                            + dt/dy * (gm_top - gp_bottom)
// for one interior cell (threadIdx.x, threadIdx.y) and one conserved field
// mq = threadIdx.z.
// Memory layout (derived from the strides): field index fastest, then x
// including 2*mbc ghost cells, then y — i.e. q[y][x][mq] with
// x_stride = blockDim.z fields per cell.
// NOTE(review): assumes the whole patch fits one block (strides come from
// blockDim, and blockIdx is never used) — confirm launch configuration.
__global__ void cudaclaw5_update_q_cuda(int mbc, double dtdx, double dtdy, double* qold, double* fm, double* fp, double* gm, double* gp)
{
int mq = threadIdx.z;
int x = threadIdx.x;
int x_stride = blockDim.z;
int y = threadIdx.y;
int y_stride = (blockDim.x + 2*mbc)*x_stride;
int i = mq + (x+mbc)*x_stride + (y+mbc)*y_stride;
qold[i] = qold[i] - dtdx * (fm[i+x_stride] - fp[i])
- dtdy * (gm[i+y_stride] - gp[i]);
} |
14,012 | #define BLK_CHN 64
#define WLEN 1
#define WLEN_N 2
// Normalize a 256x256 L-channel image from [0, 100] to [-0.5, 0.5].
// Each thread handles WLEN_N consecutive pixels along its row.
__global__ void NormalizeL(const float *input, float *output) {
const int H = 256;
const int W = 256;
const int h = blockIdx.x * blockDim.x + threadIdx.x;
const int w = (blockIdx.y * blockDim.y + threadIdx.y)*WLEN_N;
if (h >= H || w >= W) return;
const int hW_w = h * W + w;
for(int j=0;j<WLEN_N;j++) {
// Single-precision literals: the original's `/100 - 0.5` promoted the
// subtraction to double for every pixel.
output[hW_w+j] = input[hW_w+j]/100.0f - 0.5f;
}
}
// Direct 2D convolution for a single NCHW image: for each output channel k
// and output position, accumulates input * weight over all C input
// channels and the R x S kernel window, adds the (optional) bias, and
// (optionally) applies ReLU. Square input (W = H), square kernel (S = R),
// and square output (OW = OH) are assumed by construction.
// Each thread computes WLEN consecutive output columns (register blocking).
// NOTE(review): the output store writes WLEN columns without re-checking
// ow+j < OW, so OW must be a multiple of WLEN (currently WLEN == 1).
__global__ void Conv2d(const float *input, const float *weight, const float *bias, float *output,
const int stride, const int pad, const int dilation, const int has_bias_int,
const int C, const int H,
const int K, const int R,
const int OH,
const int with_relu_int) {
const int W = H;
const int S = R;
const int OW = OH;
const int k = blockIdx.x * blockDim.x + threadIdx.x;
const int oh = blockIdx.y * blockDim.y + threadIdx.y;
const int ow = (blockIdx.z * blockDim.z+ threadIdx.z)*WLEN;
if (k >= K || oh >= OH || ow >= OW) return;
const int RS = R * S;
const int kCRS = k * C * RS;
// Start each accumulator at the bias (or 0 when the layer has none).
const float b_val = (has_bias_int==1) ? bias[k] : 0;
//float o[WLEN] = {b_val, b_val};
float o[WLEN] = {b_val};
for (int c = 0; c < C; ++c) {
const int kCRS_cRS = kCRS + c * RS;
const int cHW = c * H * W;
for (int r = 0; r < R; ++r) {
const int kCRS_cRS_rS = kCRS_cRS + r * S;
for (int s = 0; s < S; ++s) {
const int h = oh * stride - pad + r * dilation;
// w[j] = input column for the j-th output column of this thread.
int w[WLEN];
w[0] = ow * stride - pad + s * dilation;
for(int j=1;j<WLEN;j++) {
w[j] = w[j-1] + stride;
}
const float wt = weight[kCRS_cRS_rS + s];
const int cHW_hW = cHW + h * W;
for (int j=0;j<WLEN;j++) {
// Zero-padding: out-of-bounds taps contribute nothing.
if (h < 0 || h >= H || w[j] < 0 || w[j] >= W) {}
else {
o[j] += input[cHW_hW + w[j]] * wt;
}
}
}
}
}
const int kOHOW_ohOW_ow = k * OH * OW + oh * OW + ow;
for (int j=0;j<WLEN;j++) {
output[kOHOW_ohOW_ow+j] = (with_relu_int==1) ? fmaxf(o[j],0) : o[j];
}
}
// 3x3 convolution fused with ReLU, specialized for input channel counts
// that are a multiple of BLK_CHN (64): the block cooperatively stages the
// weights for BLK_CHN channels at a time into shared memory, then each
// thread accumulates its WLEN consecutive output columns from them.
// NOTE(review): correctness of the shared staging requires (a) every
// thread of the block to share the same output channel k (weight_local is
// filled from this thread's kCRS), and (b) blockDim.y*blockDim.z to divide
// BLK_CHN — confirm the launch configuration. Also note the early `return`
// on the k/oh/ow guard happens before the loop's __syncthreads(); if that
// guard ever diverges within a block the barrier would hang, so the grid
// must tile K/OH/OW exactly.
__global__ void Conv2d64(const float *input, const float *weight, const float *bias, float *output,
const int stride, const int pad, const int dilation,
const int C, const int H,
const int K,
const int OH) {
const int W = H;
const int R = 3;
const int S = 3;
const int OW = OH;
const int k = blockIdx.x * blockDim.x + threadIdx.x;
const int oh = blockIdx.y * blockDim.y + threadIdx.y;
const int ow = (blockIdx.z * blockDim.z+ threadIdx.z)*WLEN;
if (k >= K || oh >= OH || ow >= OW) return;
const int blen_1 = blockDim.y;
const int blen_2 = blockDim.z;
// Flattened (y, z) thread id used to partition the weight-staging work.
const int index_flattened = threadIdx.y*blen_2 + threadIdx.z;
const int DIV = C/BLK_CHN;
const int ch_div = BLK_CHN/blen_1/blen_2;
const int RS = R * S;
const int kCRS = k * C * RS;
//float o[WLEN] = {bias[k], bias[k]};
float o[WLEN] = {bias[k]};
__shared__ float weight_local[BLK_CHN*3*3];
// Process the input channels in chunks of BLK_CHN.
for (int i=0;i<DIV;i++) {
// Barrier before overwriting the tile: previous chunk must be consumed.
__syncthreads();
for (int c = ch_div*index_flattened; c < ch_div*(index_flattened+1); ++c) {
const int kCRS_cRS = kCRS + (c + i*BLK_CHN) * RS;
const int _cRS = c * RS;
for (int r = 0; r < R; ++r) {
const int rS = r * S;
const int _cRS_rS = _cRS + rS;
for (int s = 0; s < S; ++s) {
weight_local[_cRS_rS + s] = weight[kCRS_cRS + rS + s];
}
}
}
__syncthreads();
// Accumulate this chunk's channels from the staged weights.
for (int c = i*BLK_CHN; c < (i+1)*BLK_CHN; ++c) {
const int cHW = c * H * W;
const int _cRS = (c-i*BLK_CHN) * RS;
for (int r = 0; r < R; ++r) {
const int _cRS_rS = _cRS + r * S;
for (int s = 0; s < S; ++s) {
const int h = oh * stride - pad + r * dilation;
const int cHW_hW = cHW + h * W;
const float wt = weight_local[_cRS_rS + s];
int w[WLEN];
w[0] = ow * stride - pad + s * dilation;
for(int j=1;j<WLEN;j++) {
w[j] = w[j-1] + stride;
}
for (int j=0;j<WLEN;j++) {
// Zero-padding: skip out-of-bounds taps.
if (h >= 0 && h < H && w[j] >= 0 && w[j] < W) {
o[j] += input[cHW_hW + w[j]] * wt;
}
}
}
}
}
}
const int ind = k * OH * OW + oh * OW + ow;
// Fused ReLU on the way out.
for (int j=0;j<WLEN;j++) {
output[ind+j] = fmaxf(o[j],0);
}
}
// Inference-mode batch normalization over a CxHxW tensor (W == H):
// out = (in - mean) / sqrt(var + eps) * gamma + beta, one thread per element.
// Fix: use the single-precision sqrtf and a float eps literal — the original
// `sqrt`/`1e-5` promoted the whole expression to double in a float kernel.
__global__ void BatchNorm2d(const float *input, const float *weight, const float *bias, const float *running_mean, const float *running_var, float *output,
const int C, const int H) {
const float eps = 1e-5f;
const int W = H;
const int c = blockIdx.x * blockDim.x + threadIdx.x;
const int h = blockIdx.y * blockDim.y + threadIdx.y;
const int w = blockIdx.z * blockDim.z+ threadIdx.z;
if (c >= C || h >= H || w >= W) return;
const int cHW_hW_w = c * H * W + h * W + w;
output[cHW_hW_w] = (input[cHW_hW_w] - running_mean[c]) / sqrtf(running_var[c] + eps) * weight[c] + bias[c];
}
// Transposed 4x4 convolution (stride 2, pad 1) 512->256 channels, 32x32 ->
// 64x64, fused with ReLU. One thread per output element (k, oh, ow).
// Weights for BLK_CHN input channels at a time are staged in shared memory.
// NOTE(review): weight_local is loaded using this thread's kRS but indexed
// only by (c, r, s) — this assumes blockDim.x == 1 so all threads in a block
// share one k; confirm the launch configuration.
// NOTE(review): the early return before the in-loop __syncthreads() assumes
// the launch dims exactly tile K/OH/OW so no thread in a block exits early.
__global__ void ConvTranspose2dReLU(const float *input, const float *weight, const float *bias, float *output) {
const int stride = 2;
const int pad = 1;
const int C = 512;
const int H = 32;
const int W = 32;
const int K = 256;
const int R = 4;
const int S = 4;
const int OH = 64;
const int OW = 64;
const int k = blockIdx.x * blockDim.x + threadIdx.x;
const int oh = blockIdx.y * blockDim.y + threadIdx.y;
const int ow = blockIdx.z * blockDim.z+ threadIdx.z;
if (k >= K || oh >= OH || ow >= OW) return;
const int blen_2 = blockDim.z;
const int index_flattened = threadIdx.z;
const int DIV = C/BLK_CHN;
const int ch_div = BLK_CHN/blen_2;
const int RS = R * S;
const int HW = H * W;
const int KRS = K * RS;
const int kRS = k * RS;
float o = bias[k];
__shared__ float weight_local[BLK_CHN*4*4];
for (int i=0;i<DIV;i++) {
__syncthreads();
// cooperative staging: each thread copies its slice of the weight block.
// Weight layout is [C][K][R][S], hence the c * KRS stride.
for (int c = ch_div*index_flattened; c < ch_div*(index_flattened+1); ++c) {
const int kRS_cKRS = kRS + (c + i*BLK_CHN) * KRS;
const int _cRS = c * RS;
for (int r = 0; r < R; ++r) {
const int rS = r * S;
const int _cRS_rS = _cRS + rS;
for (int s = 0; s < S; ++s) {
weight_local[_cRS_rS + s] = weight[kRS_cKRS + rS + s];
}
}
}
__syncthreads();
for (int c = i*BLK_CHN; c < (i+1)*BLK_CHN; ++c) {
const int _cRS = (c-i*BLK_CHN) * RS;
const int cHW = c * HW;
for (int r = 0; r < R; ++r) {
const int _cRS_rS = _cRS + r * S;
for (int s = 0; s < S; ++s) {
// Each output pixel gathers the input pixels that would have
// scattered onto it; only (oh+pad-r, ow+pad-s) positions divisible
// by stride map back to a real input location.
const int oh_pad__r = oh + pad - r;
const int ow_pad__s = ow + pad - s;
const int h = oh_pad__r / stride;
const int w = ow_pad__s / stride;
// For negative numerators C's truncating % is nonzero, so those
// positions are rejected here before the h/w bounds checks.
if ((oh_pad__r % stride != 0) || (ow_pad__s % stride != 0) ||
(h < 0 || h >= H || w < 0 || w >= W)) continue;
o += input[cHW + h * W + w] * weight_local[_cRS_rS + s];
}
}
}
}
// fused ReLU
output[k * OH * OW + oh * OW + ow] = fmaxf(o,0);
}
// Channel-wise softmax over a 313 x 64 x 64 tensor: one thread per spatial
// position (h, w), normalizing across the 313 channels.
// Fixes: use the single-precision expf (the original `exp` promoted to
// double), and subtract the per-pixel channel maximum before exponentiating —
// the standard shift that leaves the softmax value mathematically unchanged
// while preventing overflow for large logits.
// NOTE(review): the 313-float per-thread array likely spills to local
// memory — acceptable here, but worth profiling.
__global__ void Softmax(const float *input, float *output) {
const int C = 313;
const int H = 64;
const int W = 64;
const int h = blockIdx.x * blockDim.x + threadIdx.x;
const int w = blockIdx.y * blockDim.y + threadIdx.y;
if (h >= H || w >= W) return;
const int HW = H * W;
const int hW_w = h * W + w;
// channel maximum for this pixel (numerical-stability shift)
float max_val = input[hW_w];
for(int c=1;c<C;++c) {
max_val = fmaxf(max_val, input[c * HW + hW_w]);
}
float exp_input_reg[313];
float sum = 0.0f;
for(int c=0;c<C;++c) {
exp_input_reg[c] = expf(input[c * HW + hW_w] - max_val);
sum += exp_input_reg[c];
}
for(int c=0;c<C;++c) {
output[c * HW + hW_w] = exp_input_reg[c] / sum;
}
}
__global__ void UpsampleUnnormalize(const float *input, float *output) {
const float scale_factor = 4;
const int C = 2;
const int H = 64;
const int W = 64;
const int OH = 256;
const int OW = 256;
const int c = blockIdx.x * blockDim.x + threadIdx.x;
const int oh = blockIdx.y * blockDim.y + threadIdx.y;
const int ow = blockIdx.z * blockDim.z+ threadIdx.z;
if (c >= C || oh >= OH || ow >= OW) return;
const float miv = 0.5 / scale_factor - 0.5;
const float h = oh / scale_factor + miv;
const float w = ow / scale_factor + miv;
int h0 = floor(h), w0 = floor(w);
int h1 = h0 + 1, w1 = w0 + 1;
const float h_offset = h - h0, w_offset = w - w0;
const float om_ho = 1 - h_offset;
const float om_wo = 1 - w_offset;
const float w00 = om_ho * om_wo;
const float w01 = om_ho * w_offset;
const float w10 = h_offset * om_wo;
const float w11 = h_offset * w_offset;
const int hm1 = H - 1;
const int wm1 = W - 1;
h0 = h0 < 0 ? 0 : (h0 > hm1 ? hm1 : h0);
h1 = h1 < 0 ? 0 : (h1 > hm1 ? hm1 : h1);
w0 = w0 < 0 ? 0 : (w0 > wm1 ? wm1 : w0);
w1 = w1 < 0 ? 0 : (w1 > wm1 ? wm1 : w1);
const int cHW = c * H * W;
const int h0W = h0 * W;
const int h1W = h1 * W;
const int cHW_h0W = cHW + h0W;
const int cHW_h1W = cHW + h1W;
float mid = w00 * input[cHW_h0W + w0]
+ w01 * input[cHW_h0W + w1]
+ w10 * input[cHW_h1W + w0]
+ w11 * input[cHW_h1W + w1];
output[c * OH * OW + oh * OW + ow] = mid*110;
}
|
14,013 | /*
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
void SuperCategoryFMLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
caffe_copy(bottom[0]->count(), bottom[0]->gpu_data(), top[depth_-1]->mutable_gpu_data());
for(int i = 0; i < depth_-1; ++i)
caffe_gpu_set(top[i]->count(), (Dtype)0., top[i]->mutable_gpu_data());
for(int m = 0; m < M_; ++m) {
for( int i = depth_-2; i >= 0; --i ) {
Blob<Dtype> * tops = top[i];
Blob<Dtype> * bottoms = top[i+1];
int base_idx = base_index_per_level_[i];
for(int j = 0; j < node_num_per_level_[i]; ++j) {
Tree * node = serialized_tree_[base_idx + j];
const std::vector<shared_ptr<Tree> >* children = node->GetChildren();
Dtype * top_data = &tops->mutable_gpu_data()[tops->offset(m,node->GetLabel())];
for(std::vector<shared_ptr<Tree> >::const_iterator it = children->begin(); it != children->end(); ++it) {
int offset = bottoms->offset(m,(*it)->GetLabel());
const Dtype * bottom_data = &bottoms->gpu_data()[offset];
caffe_gpu_axpy(H_*W_,(Dtype)(1.),bottom_data,top_data);
}
caffe_gpu_scal(H_*W_,(Dtype)(1./children->size()),top_data);
}
}
}
}
template <typename Dtype>
void SuperCategoryFMLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if( propagate_down[0] == false )
return;
for(int m = 0; m < M_; ++m) {
for( int i = 0; i < depth_-1; ++i ) {
Blob<Dtype> * tops = top[i];
Blob<Dtype> * bottoms = top[i+1];
int base_idx = base_index_per_level_[i];
for(int j = 0; j < node_num_per_level_[i]; ++j) {
Tree * node = serialized_tree_[base_idx + j];
const std::vector<shared_ptr<Tree> >* children = node->GetChildren();
const Dtype * top_diff = &tops->gpu_diff()[tops->offset(m,node->GetLabel())];
for(std::vector<shared_ptr<Tree> >::const_iterator it = children->begin(); it != children->end(); ++it) {
int offset = bottoms->offset(m,(*it)->GetLabel());
Dtype * bottom_diff = &bottoms->mutable_gpu_diff()[offset];
caffe_gpu_axpy(H_*W_,(Dtype)(1./children->size()),top_diff,bottom_diff);
}
}
}
}
caffe_copy(bottom[0]->count(), top[depth_-1]->gpu_diff(), bottom[0]->mutable_gpu_diff());
}
INSTANTIATE_LAYER_GPU_FUNCS(SuperCategoryFMLayer);
} // namespace caffe
*/
|
14,014 | #include <stdio.h>
#include<stdlib.h>
#define nths 1024
#define n 8192 //size of the matrix
#define dim 32
typedef void(*kernelFunc)(double*,double*,int);
// Naive transpose: writes m_out = m_in^T for the n x n matrix.
// NOTE(review): the `siz` parameter is ignored — the kernel hard-codes the
// file-level macro `n` (8192); confirm callers intend that.
// NOTE(review): only the x launch dimensions are used: blockIdx.x selects
// column j and threadIdx.x strides over rows, so the whole matrix is covered
// only when gridDim.x == n — verify against TestCuda's 2D grid.
__global__ void mat_transp(double* m_in, double *m_out, int siz)
{
int j=blockIdx.x;
int i=threadIdx.x;
while(i<n)
{
// element (j, i) of the input lands at (i, j) of the output
m_out[j+i*n]=m_in[i+j*n];
i+=blockDim.x;
}
}
// Tiled transpose: one element per thread, staged through a shared tile.
// NOTE(review): each thread writes tile[tx][ty] and immediately reads back
// the very same element, so the commented-out __syncthreads() really is
// unnecessary — but for the same reason the shared tile does not change
// which global addresses are touched, and the m_out store stays uncoalesced;
// confirm whether the usual swap of tile indices was intended.
// Expects a 2D launch with blockDim.{x,y} <= dim (32) and gridDim covering
// siz/blockDim in each axis.
__global__ void mat_fast_transp(double *m_in, double *m_out, int siz)
{
__shared__ double tile[dim][dim];
int col=blockIdx.x*blockDim.x +threadIdx.x;
int row=blockIdx.y*blockDim.y +threadIdx.y;
tile[threadIdx.x][threadIdx.y]=m_in[row*siz+col];
// __syncthreads(); not needed: each thread reads only the element it wrote
m_out[col*siz+row]=tile[threadIdx.x][threadIdx.y];
}
// Returns 1 when m_out is the exact element-wise transpose of the n x n
// matrix m_in, 0 at the first mismatch.
int correctness(double *m_in,double *m_out)
{
for (int row = 0; row < n; ++row) {
for (int col = 0; col < n; ++col) {
if (m_in[col*n + row] != m_out[row*n + col])
return 0;
}
}
return 1;
}
// Runs one transpose kernel on an n x n matrix with the given block shape,
// verifies the result against the CPU definition of a transpose, and reports
// elapsed time plus effective bandwidth. Always returns 0.
int TestCuda(kernelFunc kernel,const char *kernelName, int block_x, int block_y)
{
size_t size= (size_t)n*n*sizeof(double); // 512 MB per matrix: keep in size_t
double *mat_in_h, *mat_out_h, *mat_in_d, *mat_out_d;
dim3 block(block_x, block_y);
dim3 grid(n/block.x, n/block.y);
mat_in_h=(double*)malloc(size);
mat_out_h=(double*)malloc(size);
cudaMalloc((void **)&mat_in_d,size);
cudaMalloc((void **)&mat_out_d,size);
//initialize the matrix
for(int i=0;i<(n*n);i++) mat_in_h[i]= i;
//move data from CPU to GPU
cudaMemcpy( mat_in_d, mat_in_h, size, cudaMemcpyHostToDevice);
//timing
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
//run a kernel. Bug fix: pass the matrix size n — the tile dimension `dim`
//(32) was passed before, so kernels that honor `siz` transposed garbage.
kernel<<<grid,block>>>( mat_in_d, mat_out_d, n);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
//move data from GPU to CPU
cudaMemcpy( mat_out_h, mat_out_d, size, cudaMemcpyDeviceToHost);
//verify the correctness. Bug fix: correctness() returns 1 on success —
//the two labels were previously swapped.
printf("%s: %s\n",kernelName,correctness(mat_in_h,mat_out_h)? "Correct":"Fail");
float milliseconds=0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("Time in milliseconds: %f\n", milliseconds);
printf("Bandwidth: %f GB/s\n", 2*size/milliseconds/1e6);
printf("-------------------\n");
//Cleanup. Bug fix: free the device output buffer (the host pointer
//mat_out_h was passed to cudaFree before).
free(mat_in_h);
free(mat_out_h);
cudaFree(mat_in_d);
cudaFree(mat_out_d);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
}
// Benchmarks both transpose kernels under several block shapes.
int main(int argc, char*argv[])
{
struct { kernelFunc fn; const char *label; int bx; int by; } runs[] = {
{ &mat_transp,      "Naive Transpose threads per block 64",         8,  8 },
{ &mat_transp,      "Naive Transpose threads per block 512",       16, 32 },
{ &mat_transp,      "Naive Transpose threads per block 512",       32, 16 },
{ &mat_transp,      "Naive Transpose threads per block 1024",      32, 32 },
{ &mat_fast_transp, "Optimized Transpose threads per block 64",     8,  8 },
{ &mat_fast_transp, "Optimized Transpose threads per blocks 512",  16, 32 },
{ &mat_fast_transp, "Optimized Transpose threads per block 512",   32, 16 },
{ &mat_fast_transp, "Optimized Transpose threads per blocks 1024", 32, 32 },
};
for (size_t i = 0; i < sizeof(runs)/sizeof(runs[0]); ++i)
TestCuda(runs[i].fn, runs[i].label, runs[i].bx, runs[i].by);
return 0;
}
|
14,015 | #include <stdio.h>
#include <stdlib.h>
/*
Function: pillar pooling
Args:
b : batch size
d : depth of the feature map
h : height of pooled feature map
w : width of pooled feature map
n : number of input points
c : number of channels
n_intervals : number of unique points
x : input features, FloatTensor[n, c]
geom_feats : input coordinates, IntTensor[n, 4]
interval_lengths : starting position for pooled point, IntTensor[n_intervals]
interval_starts : how many points in each pooled point, IntTensor[n_intervals]
out : output features, FloatTensor[b, d, h, w, c]
*/
// Pillar pooling: one thread per (interval, channel) pair sums the features
// of all points falling into that pillar and writes the pooled value to the
// BEV grid. See the comment block above for the argument layout.
__global__ void bev_pool_kernel(int b, int d, int h, int w, int n, int c, int n_intervals,
const float *__restrict__ x,
const int *__restrict__ geom_feats,
const int *__restrict__ interval_starts,
const int *__restrict__ interval_lengths,
float* __restrict__ out) {
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
const int interval = tid / c;
const int chan = tid % c;
if (interval >= n_intervals) return;
const int start = interval_starts[interval];
const int length = interval_lengths[interval];
// coordinates of this pillar; the output index below uses them as
// [3]=batch, [2]=depth, [0]=row, [1]=column
const int* coords = geom_feats + start * 4;
const float* feats = x + start * c + chan;
float acc = 0;
for (int i = 0; i < length; i++) {
acc += feats[i * c];
}
out[coords[3] * d * h * w * c + coords[2] * h * w * c +
coords[0] * w * c + coords[1] * c + chan] = acc;
}
/*
Function: pillar pooling backward
Args:
b : batch size
d : depth of the feature map
h : height of pooled feature map
w : width of pooled feature map
n : number of input points
c : number of channels
n_intervals : number of unique points
out_grad : gradient of the BEV fmap from top, FloatTensor[b, d, h, w, c]
geom_feats : input coordinates, IntTensor[n, 4]
interval_lengths : starting position for pooled point, IntTensor[n_intervals]
interval_starts : how many points in each pooled point, IntTensor[n_intervals]
x_grad : gradient of the image fmap, FloatTensor
*/
// Backward pass of the pillar pooling: every point of an interval receives
// the gradient of the single pooled output cell it contributed to.
// One thread per (interval, channel) pair, mirroring bev_pool_kernel.
__global__ void bev_pool_grad_kernel(int b, int d, int h, int w, int n, int c, int n_intervals,
const float *__restrict__ out_grad,
const int *__restrict__ geom_feats,
const int *__restrict__ interval_starts,
const int *__restrict__ interval_lengths,
float* __restrict__ x_grad) {
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
const int interval = tid / c;
const int chan = tid % c;
if (interval >= n_intervals) return;
const int start = interval_starts[interval];
const int length = interval_lengths[interval];
const int* coords = geom_feats + start * 4;
float* grads = x_grad + start * c + chan;
// gradient of the pooled cell this interval wrote in the forward pass
const float g = out_grad[coords[3] * d * h * w * c + coords[2] * h * w * c +
coords[0] * w * c + coords[1] * c + chan];
for (int i = 0; i < length; i++) {
grads[i * c] = g;
}
}
// Host launcher for bev_pool_kernel: one thread per (interval, channel).
void bev_pool(int b, int d, int h, int w, int n, int c, int n_intervals, const float* x,
const int* geom_feats, const int* interval_starts, const int* interval_lengths, float* out) {
const long long total = (long long)n_intervals * c;
const int threads = 256;
const int blocks = (int)((total + threads - 1) / threads); // ceil-div
bev_pool_kernel<<<blocks, threads>>>(
b, d, h, w, n, c, n_intervals, x, geom_feats, interval_starts, interval_lengths, out
);
}
// Host launcher for bev_pool_grad_kernel: one thread per (interval, channel).
void bev_pool_grad(int b, int d, int h, int w, int n, int c, int n_intervals, const float* out_grad,
const int* geom_feats, const int* interval_starts, const int* interval_lengths, float* x_grad) {
const long long total = (long long)n_intervals * c;
const int threads = 256;
const int blocks = (int)((total + threads - 1) / threads); // ceil-div
bev_pool_grad_kernel<<<blocks, threads>>>(
b, d, h, w, n, c, n_intervals, out_grad, geom_feats, interval_starts, interval_lengths, x_grad
);
}
|
14,016 | #include "includes.h"
// Copies the first `dim` entries of each walker's row of `lst` into the
// dim x nwl matrix `xx`. Rows of `lst` are (dim + 4) floats wide; the four
// trailing entries per row are skipped. 2D launch: x over parameters,
// y over walkers.
__global__ void setWalkersAtLast ( const int dim, const int nwl, const float *lst, float *xx ) {
const int param  = threadIdx.x + blockDim.x * blockIdx.x;
const int walker = threadIdx.y + blockDim.y * blockIdx.y;
if ( param >= dim || walker >= nwl )
return;
xx[param + walker * dim] = lst[param + walker * (dim + 4)];
}
14,017 | #include "matrix_multiplication.cuh"
#define TILE_WIDTH 32
// Host driver: computes out_mat = m * n (width x width, row-major) on the GPU.
// width must be a multiple of TILE_WIDTH — the kernel has no bounds guard.
void matrix_multiplication( float* m, float* n, float* out_mat, int width) {
const int size = width * width * sizeof(float);
float *d_m = 0, *d_n = 0, *d_out = 0;
// device buffers for both operands and the result
cudaMalloc( (void**) &d_m, size);
cudaMalloc( (void**) &d_n, size);
cudaMalloc( (void**) &d_out, size);
// upload the operands
cudaMemcpy( d_m, m, size, cudaMemcpyHostToDevice);
cudaMemcpy( d_n, n, size, cudaMemcpyHostToDevice);
// one TILE_WIDTH x TILE_WIDTH block per output tile
dim3 threads( TILE_WIDTH, TILE_WIDTH);
dim3 blocks( width/TILE_WIDTH, width/TILE_WIDTH);
matrix_multiplication_kernel<<<blocks, threads>>>( d_m, d_n, d_out, width);
// download the result (this blocking copy also synchronizes with the kernel)
cudaMemcpy( out_mat, d_out, size, cudaMemcpyDeviceToHost);
// release device memory
cudaFree(d_m);
cudaFree(d_n);
cudaFree(d_out);
}
// Shared-memory tiled matrix multiply: od = md * nd (width x width,
// row-major). Requires blockDim == (TILE_WIDTH, TILE_WIDTH) and
// width % TILE_WIDTH == 0.
__global__ void matrix_multiplication_kernel( float* md, float* nd, float* od, int width) {
__shared__ float tileM[TILE_WIDTH][TILE_WIDTH];
__shared__ float tileN[TILE_WIDTH][TILE_WIDTH];
// output element owned by this thread
const int row = blockIdx.y * TILE_WIDTH + threadIdx.y;
const int col = blockIdx.x * TILE_WIDTH + threadIdx.x;
float acc = 0.0f;
const int numTiles = width / TILE_WIDTH;
for( int t = 0; t < numTiles; ++t) {
// each thread stages one element of each operand tile
tileM[threadIdx.y][threadIdx.x] = md[row*width + (t*TILE_WIDTH + threadIdx.x)];
tileN[threadIdx.y][threadIdx.x] = nd[(t*TILE_WIDTH + threadIdx.y)*width + col];
__syncthreads(); // tiles fully loaded before any thread reads them
for( int j = 0; j < TILE_WIDTH; ++j)
acc += tileM[threadIdx.y][j] * tileN[j][threadIdx.x];
__syncthreads(); // all reads done before the next iteration overwrites
}
od[row*width + col] = acc;
}
// Naive (non-tiled) reference multiply: od = md * nd, row-major.
// Fix/generalization: the original indexed only with threadIdx, so it was
// correct solely for a single-block launch; it now derives the global
// row/column (identical results for the old single-block case) and guards
// against out-of-range threads.
__global__ void matrix_multiplication_kernel_old( float* md, float* nd, float* od, int width) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if( row >= width || col >= width)
return;
// dot product of row of md with column of nd
float v(0);
for( int i=0; i<width; ++i) {
v += md[row*width + i] * nd[i*width + col];
}
od[row*width + col] = v;
}
14,018 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <CL/cl.h>
#include <math.h>
#define MARGIN (1e-6)
// Wall-clock time in seconds with microsecond resolution (host-side timing).
double cpuSecond() {
  struct timeval t;
  gettimeofday(&t, NULL);
  return (double)t.tv_sec + (double)t.tv_usec * 1.e-6;
}
// This is a macro for checking the error variable.
#define CHK_ERROR(err) if (err != CL_SUCCESS) fprintf(stderr, "Error: %s\n", clGetErrorString(err));
// A errorCode to string converter (forward declaration)
// Maps an OpenCL status code to its symbolic name for error reporting.
// Covers core codes plus KHR/EXT/Intel extension codes; unknown values
// fall through to "CL_UNKNOWN_ERROR".
const char *clGetErrorString(int errorCode) {
  switch (errorCode) {
  case 0: return "CL_SUCCESS";
  case -1: return "CL_DEVICE_NOT_FOUND";
  case -2: return "CL_DEVICE_NOT_AVAILABLE";
  case -3: return "CL_COMPILER_NOT_AVAILABLE";
  case -4: return "CL_MEM_OBJECT_ALLOCATION_FAILURE";
  case -5: return "CL_OUT_OF_RESOURCES";
  case -6: return "CL_OUT_OF_HOST_MEMORY";
  case -7: return "CL_PROFILING_INFO_NOT_AVAILABLE";
  case -8: return "CL_MEM_COPY_OVERLAP";
  case -9: return "CL_IMAGE_FORMAT_MISMATCH";
  case -10: return "CL_IMAGE_FORMAT_NOT_SUPPORTED";
  case -12: return "CL_MAP_FAILURE";
  case -13: return "CL_MISALIGNED_SUB_BUFFER_OFFSET";
  case -14: return "CL_EXEC_STATUS_ERROR_FOR_EVENTS_IN_WAIT_LIST";
  case -15: return "CL_COMPILE_PROGRAM_FAILURE";
  case -16: return "CL_LINKER_NOT_AVAILABLE";
  case -17: return "CL_LINK_PROGRAM_FAILURE";
  case -18: return "CL_DEVICE_PARTITION_FAILED";
  case -19: return "CL_KERNEL_ARG_INFO_NOT_AVAILABLE";
  case -30: return "CL_INVALID_VALUE";
  case -31: return "CL_INVALID_DEVICE_TYPE";
  case -32: return "CL_INVALID_PLATFORM";
  case -33: return "CL_INVALID_DEVICE";
  case -34: return "CL_INVALID_CONTEXT";
  case -35: return "CL_INVALID_QUEUE_PROPERTIES";
  case -36: return "CL_INVALID_COMMAND_QUEUE";
  case -37: return "CL_INVALID_HOST_PTR";
  case -38: return "CL_INVALID_MEM_OBJECT";
  case -39: return "CL_INVALID_IMAGE_FORMAT_DESCRIPTOR";
  case -40: return "CL_INVALID_IMAGE_SIZE";
  case -41: return "CL_INVALID_SAMPLER";
  case -42: return "CL_INVALID_BINARY";
  case -43: return "CL_INVALID_BUILD_OPTIONS";
  case -44: return "CL_INVALID_PROGRAM";
  case -45: return "CL_INVALID_PROGRAM_EXECUTABLE";
  case -46: return "CL_INVALID_KERNEL_NAME";
  case -47: return "CL_INVALID_KERNEL_DEFINITION";
  case -48: return "CL_INVALID_KERNEL";
  case -49: return "CL_INVALID_ARG_INDEX";
  case -50: return "CL_INVALID_ARG_VALUE";
  case -51: return "CL_INVALID_ARG_SIZE";
  case -52: return "CL_INVALID_KERNEL_ARGS";
  case -53: return "CL_INVALID_WORK_DIMENSION";
  case -54: return "CL_INVALID_WORK_GROUP_SIZE";
  case -55: return "CL_INVALID_WORK_ITEM_SIZE";
  case -56: return "CL_INVALID_GLOBAL_OFFSET";
  case -57: return "CL_INVALID_EVENT_WAIT_LIST";
  case -58: return "CL_INVALID_EVENT";
  case -59: return "CL_INVALID_OPERATION";
  case -60: return "CL_INVALID_GL_OBJECT";
  case -61: return "CL_INVALID_BUFFER_SIZE";
  case -62: return "CL_INVALID_MIP_LEVEL";
  case -63: return "CL_INVALID_GLOBAL_WORK_SIZE";
  case -64: return "CL_INVALID_PROPERTY";
  case -65: return "CL_INVALID_IMAGE_DESCRIPTOR";
  case -66: return "CL_INVALID_COMPILER_OPTIONS";
  case -67: return "CL_INVALID_LINKER_OPTIONS";
  case -68: return "CL_INVALID_DEVICE_PARTITION_COUNT";
  case -69: return "CL_INVALID_PIPE_SIZE";
  case -70: return "CL_INVALID_DEVICE_QUEUE";
  case -71: return "CL_INVALID_SPEC_ID";
  case -72: return "CL_MAX_SIZE_RESTRICTION_EXCEEDED";
  // vendor/extension error codes below
  case -1002: return "CL_INVALID_D3D10_DEVICE_KHR";
  case -1003: return "CL_INVALID_D3D10_RESOURCE_KHR";
  case -1004: return "CL_D3D10_RESOURCE_ALREADY_ACQUIRED_KHR";
  case -1005: return "CL_D3D10_RESOURCE_NOT_ACQUIRED_KHR";
  case -1006: return "CL_INVALID_D3D11_DEVICE_KHR";
  case -1007: return "CL_INVALID_D3D11_RESOURCE_KHR";
  case -1008: return "CL_D3D11_RESOURCE_ALREADY_ACQUIRED_KHR";
  case -1009: return "CL_D3D11_RESOURCE_NOT_ACQUIRED_KHR";
  case -1010: return "CL_INVALID_DX9_MEDIA_ADAPTER_KHR";
  case -1011: return "CL_INVALID_DX9_MEDIA_SURFACE_KHR";
  case -1012: return "CL_DX9_MEDIA_SURFACE_ALREADY_ACQUIRED_KHR";
  case -1013: return "CL_DX9_MEDIA_SURFACE_NOT_ACQUIRED_KHR";
  case -1093: return "CL_INVALID_EGL_OBJECT_KHR";
  case -1092: return "CL_EGL_RESOURCE_NOT_ACQUIRED_KHR";
  case -1001: return "CL_PLATFORM_NOT_FOUND_KHR";
  case -1057: return "CL_DEVICE_PARTITION_FAILED_EXT";
  case -1058: return "CL_INVALID_PARTITION_COUNT_EXT";
  case -1059: return "CL_INVALID_PARTITION_NAME_EXT";
  case -1094: return "CL_INVALID_ACCELERATOR_INTEL";
  case -1095: return "CL_INVALID_ACCELERATOR_TYPE_INTEL";
  case -1096: return "CL_INVALID_ACCELERATOR_DESCRIPTOR_INTEL";
  case -1097: return "CL_ACCELERATOR_TYPE_NOT_SUPPORTED_INTEL";
  case -1000: return "CL_INVALID_GL_SHAREGROUP_REFERENCE_KHR";
  case -1098: return "CL_INVALID_VA_API_MEDIA_ADAPTER_INTEL";
  case -1099: return "CL_INVALID_VA_API_MEDIA_SURFACE_INTEL";
  case -1100: return "CL_VA_API_MEDIA_SURFACE_ALREADY_ACQUIRED_INTEL";
  case -1101: return "CL_VA_API_MEDIA_SURFACE_NOT_ACQUIRED_INTEL";
  default: return "CL_UNKNOWN_ERROR";
  }
}
// Pseudo-random double in [0, 0.001) in steps of 1e-5 (driven by rand()).
double Uniform(){
  const int bucket = rand() % 100;
  return bucket * 0.00001;
}
// Deterministic pseudo-random value in [0, 1e-3 * NUM_PARTICLES), identical
// on host and device so CPU and GPU runs can be compared bit-for-bit.
double gen_random(int id, int iter, int NUM_PARTICLES) {
  const int seed = 1234 * id + iter;
  return (seed % NUM_PARTICLES) * 1e-3;
}
// Host-side 3-component double vector. The fourth member is padding so the
// struct matches the 32-byte layout of the kernel's double3 — TODO confirm
// against the target OpenCL implementation's double3 alignment.
typedef struct bbb{
double x,y,z;
double asda; // padding only (never read)
}_double3;
// One particle's state, shared byte-for-byte between host and device code.
typedef struct _Particle{
_double3 position,velocity;
}Particle;
// Allocates and initializes NUM_PARTICLES particles with pseudo-random
// positions and velocities (velocity = Uniform()/4 per component).
// Exits with an error message if the allocation fails (the original
// dereferenced an unchecked malloc result).
Particle* init(int NUM_PARTICLES){
  size_t nBytes = sizeof(Particle)*(size_t)NUM_PARTICLES;
  Particle* particles = (Particle*)malloc(nBytes);
  if (particles == NULL){
    fprintf(stderr, "init: failed to allocate %zu bytes\n", nBytes);
    exit(EXIT_FAILURE);
  }
  for (int i=0;i<NUM_PARTICLES;i++){
    particles[i].position.x=Uniform();
    particles[i].position.y=Uniform();
    particles[i].position.z=Uniform();
    particles[i].velocity.x=Uniform()/4;
    particles[i].velocity.y=Uniform()/4;
    particles[i].velocity.z=Uniform()/4;
  }
  return particles;
}
// CPU reference: advances all N particles by one timestep — position moves
// by the current velocity, then velocity is perturbed by the shared
// deterministic gen_random sequence (same as the GPU kernel).
void one_timestep_cpu(Particle* particles,int iter,int N) {
  for(int id=0;id<N;id++)
  {
    Particle *p = &particles[id];
    p->position.x += p->velocity.x;
    p->position.y += p->velocity.y;
    p->position.z += p->velocity.z;
    p->velocity.x += gen_random(id, iter, N)/5;
    p->velocity.y += gen_random(id, iter, N)/4;
    p->velocity.z += gen_random(id, iter, N)/3;
  }
}
// OpenCL kernel source (compiled at runtime): each work-item integrates one
// particle for NUM_ITERATIONS steps, mirroring one_timestep_cpu so results
// can be compared against the host loop.
const char *mykernel = "\
typedef struct _Particle{\
double3 position,velocity;\
}Particle;\
double gen_random(int id, int iter, int NUM_PARTICLES) {\
return 1e-3*((1234*id+iter) % NUM_PARTICLES);\
}\
__kernel void launch(__global Particle* particles,__global int* Nt,__global int* NUM_ITERATIONSt){\
int id =get_global_id(0);\
int N=*Nt;\
int NUM_ITERATIONS=*NUM_ITERATIONSt;\
if(id<N)\
for(int i=0;i<NUM_ITERATIONS;i++){\
particles[id].position.x+=particles[id].velocity.x;\
particles[id].position.y+=particles[id].velocity.y;\
particles[id].position.z+=particles[id].velocity.z;\
particles[id].velocity.x+=gen_random(id, i, N)/5;\
particles[id].velocity.y+=gen_random(id, i, N)/4;\
particles[id].velocity.z+=gen_random(id, i, N)/3;\
}\
}\
";
int main(int argc, char **argv) {
cl_platform_id *platforms; cl_uint n_platform;
// Find OpenCL Platforms
cl_int err = clGetPlatformIDs(0, NULL, &n_platform); CHK_ERROR(err);
platforms = (cl_platform_id *)malloc(sizeof(cl_platform_id) * n_platform);
err = clGetPlatformIDs(n_platform, platforms, NULL); CHK_ERROR(err);
// Find and sort devices
cl_device_id *device_list; cl_uint n_devices;
err = clGetDeviceIDs(platforms[0], CL_DEVICE_TYPE_GPU, 0, NULL, &n_devices); CHK_ERROR(err);
device_list = (cl_device_id *)malloc(sizeof(cl_device_id) * n_devices);
err = clGetDeviceIDs(platforms[0], CL_DEVICE_TYPE_GPU, n_devices, device_list, NULL); CHK_ERROR(err);
// Create and initialize an OpenCL context
cl_context context = clCreateContext(NULL, n_devices, device_list, NULL, NULL, &err); CHK_ERROR(err);
// Create a command queue
cl_command_queue cmd_queue = clCreateCommandQueue(context, device_list[0], 0, &err); CHK_ERROR(err);
double start,gpu_time=0,cpu_time=0;
int NUM_PARTICLES = atoi(argv[1]);
int NUM_ITERATIONS = atoi(argv[2]);
int BLOCK_SIZE = atoi(argv[3]);
// printf("NUM_PARTICLES:%d\nNUM_ITERATIONS:%d\nBLOCK_SIZE:%d\n",NUM_PARTICLES,NUM_ITERATIONS,BLOCK_SIZE);
int nBytes=sizeof(Particle)*NUM_PARTICLES;
int grid_size =(NUM_PARTICLES+BLOCK_SIZE-1)/BLOCK_SIZE;
Particle* particles=init(NUM_PARTICLES);
Particle* res=(Particle*)malloc(nBytes);
start=cpuSecond();
cl_mem p_dev = clCreateBuffer(context, CL_MEM_READ_WRITE, nBytes, NULL, &err); CHK_ERROR(err);
cl_mem p_N = clCreateBuffer(context, CL_MEM_READ_ONLY, sizeof(int), NULL, &err); CHK_ERROR(err);
cl_mem p_N_it = clCreateBuffer(context, CL_MEM_READ_ONLY, sizeof(int), NULL, &err); CHK_ERROR(err);
err = clEnqueueWriteBuffer(cmd_queue, p_dev, CL_TRUE, 0, nBytes, particles, 0, NULL, NULL); CHK_ERROR(err);
err = clEnqueueWriteBuffer(cmd_queue, p_N, CL_TRUE, 0, sizeof(int), &NUM_PARTICLES, 0, NULL, NULL); CHK_ERROR(err);
err = clEnqueueWriteBuffer(cmd_queue, p_N_it, CL_TRUE, 0, sizeof(int), &NUM_ITERATIONS, 0, NULL, NULL); CHK_ERROR(err);
gpu_time+=cpuSecond()-start;
start=cpuSecond();
for(int i=0;i<NUM_ITERATIONS;i++)
one_timestep_cpu(particles,i,NUM_PARTICLES);
cpu_time+=cpuSecond()-start;
printf("CPU costs:%lf\n",cpu_time);
int id=1000;
//printf("CPU:v:\n%f,%f,%f\n",particles[id].velocity.x,particles[id].velocity.y,particles[id].velocity.z);
//printf("p:\n%f,%f,%f\n",particles[id].position.x,particles[id].position.y,particles[id].position.z);
/* Insert your own code here */
cl_program program = clCreateProgramWithSource(context, 1, (const char **)&mykernel, NULL, &err);
err = clBuildProgram(program, 1, device_list, NULL, NULL, NULL);CHK_ERROR(err);
cl_kernel kernel = clCreateKernel(program, "launch", &err);CHK_ERROR(err);
err = clSetKernelArg(kernel, 0, sizeof(cl_mem), (void *)&p_dev); CHK_ERROR(err);
err = clSetKernelArg(kernel, 1, sizeof(cl_mem), (void *)&p_N); CHK_ERROR(err);
err = clSetKernelArg(kernel, 2, sizeof(cl_mem), (void *)&p_N_it); CHK_ERROR(err);
size_t workgroup_size = BLOCK_SIZE;
size_t n_workitem = grid_size*BLOCK_SIZE;
// printf("n_workitem=%ld,workgroup_size=%ld\n",n_workitem,workgroup_size);
start = cpuSecond();
err = clEnqueueNDRangeKernel(cmd_queue, kernel, 1, NULL, &n_workitem, &workgroup_size, 0, NULL, NULL);CHK_ERROR(err);
err = clEnqueueReadBuffer(cmd_queue, p_dev, CL_TRUE, 0, nBytes, res, 0, NULL, NULL); CHK_ERROR(err);
clFinish(cmd_queue);
gpu_time+=cpuSecond()-start;
printf("Done! GPU costs %lf\n", gpu_time);
err = clFlush(cmd_queue); CHK_ERROR(err);
err = clFinish(cmd_queue); CHK_ERROR(err);
// test the result
int c = 0;
for (int i=0;i<NUM_PARTICLES;i++){
double xCPU = particles[i].position.x;
double yCPU = particles[i].position.y;
double zCPU = particles[i].position.z;
double xGPU = res[i].position.x;
double yGPU = res[i].position.y;
double zGPU = res[i].position.z;
if(fabs(xCPU - xGPU) > MARGIN | fabs(yCPU - yGPU) > MARGIN | fabs(zCPU - zGPU) > MARGIN)
c++;
}
// printf("mismatch:%d\n",c);
// Finally, release all that we have allocated.
err = clReleaseCommandQueue(cmd_queue); CHK_ERROR(err);
err = clReleaseContext(context); CHK_ERROR(err);
free(platforms);
free(device_list);
return 0;
} |
// Computes the Mandelbrot escape iteration count for one pixel per thread of
// a disp_width x disp_height image over [-2.25, 1.25] x [-1.75, 1.75].
// 2D launch: the x dimension (i) runs along the real axis / image width, the
// y dimension (j) along the imaginary axis / image height — the original
// WIDTH/HEIGHT comments had these swapped relative to the bounds checks.
// The image buffer is laid out with idx = i*disp_height + j, i.e. the pixels
// of one column are contiguous. blk_size is unused.
void __global__ mandel_gpu(int disp_width, int disp_height, int *image, int max_iter, int blk_size) {
double scale_real, scale_imag;
double x, y, u, v, u2, v2;
// map one pixel step to one step in the complex plane
scale_real = 3.5 / (double)disp_width;
scale_imag = 3.5 / (double)disp_height;
int j = blockIdx.y * blockDim.y + threadIdx.y; // imaginary axis (height)
int i = blockIdx.x * blockDim.x + threadIdx.x; // real axis (width)
int idx = i * disp_height + j;
// check if inside picture
if (i >= disp_width || j >= disp_height) return;
// c = x + iy for this pixel
x = ((double)i * scale_real) - 2.25;
y = ((double)j * scale_imag) - 1.75;
u = 0.0;
v = 0.0;
u2 = 0.0;
v2 = 0.0;
int iter = 0;
// iterate z := z^2 + c until |z|^2 >= 4 or the iteration budget runs out
while ( u2 + v2 < 4.0 && iter < max_iter ) {
v = 2 * v * u + y;
u = u2 - v2 + x;
u2 = u*u;
v2 = v*v;
iter ++;
}
// if we exceed max_iter, reset to zero (point treated as inside the set)
iter = iter == max_iter ? 0 : iter;
image[idx] = iter;
}
|
14,020 | #include <stdio.h>
// Each thread prints its own (thread, block) coordinates via device printf.
__global__ void print_hello() {
  const int tid = threadIdx.x;
  const int bid = blockIdx.x;
  printf("hello from thread %d of block %d\n", tid, bid);
}
int main() {
  // 3 blocks of 5 threads -> 15 greetings in total
  const dim3 grid(3);
  const dim3 block(5);
  print_hello<<<grid, block>>>();
  // device printf output is flushed when the host synchronizes
  cudaDeviceSynchronize();
  return 0;
}
|
14,021 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Reports a CUDA error with its source location; exits when abort is set.
// Used through the gpuErrchk macro, which supplies __FILE__/__LINE__.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
   if (code == cudaSuccess)
      return;
   fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
   if (abort)
      exit(code);
}
// Element-wise vector addition C = A + B; one thread per element, with a
// tail guard for grids that overshoot size.
__global__ void vec_add(int* A, int* B, int* C, int size) {
  const int i = threadIdx.x + blockIdx.x * blockDim.x;
  if(i >= size)
    return;
  C[i] = A[i] + B[i];
}
// Adds two random int vectors on the GPU and verifies the result against a
// CPU sum. Improvements: ceil-div idiom for the grid size, an error check
// after the kernel launch, and comments translated to English.
int main(int argc, char *argv[]) {
  if(argc != 3) {
    // expects the array length and the number of threads per block
    fprintf(stderr,"Usage: %s <array size> <threads per block>\n", argv[0]);
    return EXIT_FAILURE;
  }
  // host/device buffers and launch parameters
  int array_size, thread, *a, *b, *c, *d;
  int *gpu_a, *gpu_b, *gpu_c;
  array_size = atoi(argv[1]);
  thread = atoi(argv[2]);
  // allocate the host arrays (d holds the CPU reference result)
  a = (int *) malloc(array_size*sizeof(int));
  b = (int *) malloc(array_size*sizeof(int));
  c = (int *) malloc(array_size*sizeof(int));
  d = (int *) malloc(array_size*sizeof(int));
  // allocate the device arrays
  gpuErrchk(cudaMalloc(&gpu_a, array_size*sizeof(int)));
  gpuErrchk(cudaMalloc(&gpu_b, array_size*sizeof(int)));
  gpuErrchk(cudaMalloc(&gpu_c, array_size*sizeof(int)));
  // fill the inputs
  for(int i=0; i<array_size; i++) {
    a[i] = rand();
    b[i] = rand();
    c[i] = 0;
  }
  // copy the inputs to the device
  gpuErrchk(cudaMemcpy(gpu_a, a, array_size*sizeof(int), cudaMemcpyHostToDevice));
  gpuErrchk(cudaMemcpy(gpu_b, b, array_size*sizeof(int), cudaMemcpyHostToDevice));
  gpuErrchk(cudaMemcpy(gpu_c, c, array_size*sizeof(int), cudaMemcpyHostToDevice));
  // number of blocks, rounded up so every element gets a thread
  int num_blocks = (array_size + thread - 1) / thread;
  // run the sum on the GPU and check for launch errors
  vec_add<<<num_blocks, thread>>>(gpu_a, gpu_b, gpu_c, array_size);
  gpuErrchk(cudaGetLastError());
  // copy the result back (blocking copy also synchronizes with the kernel)
  gpuErrchk(cudaMemcpy(c, gpu_c, array_size*sizeof(int), cudaMemcpyDeviceToHost));
  // CPU reference sum
  for(int i=0; i<array_size; i++) {
    d[i] = a[i] + b[i];
  }
  // report any mismatches
  for(int i=0; i<array_size; i++) {
    if(c[i] != d[i])
      printf("DIFFERENT --> Index: %d CPU / GPU = %d / %d\n", i, d[i], c[i]);
  }
  // release all memory
  free(a); free(b); free(c); free(d);
  cudaFree(gpu_a); cudaFree(gpu_b); cudaFree(gpu_c);
  return EXIT_SUCCESS;
}
|
14,022 | #include "includes.h"
// Projects a superposition vector onto per-value (dir, negDir, origin) basis
// vectors to decode each value and a reliability score. One thread per value.
// NOTE(review): thread 0 uses the X basis and every other thread the Y
// basis — with numOfValues > 2 all threads past 0 share dirY/negDirY/originY;
// confirm that is intended. squaredMode is unused.
// NOTE(review): the final division has no guard, so reliability == 0 would
// produce inf/NaN — presumably excluded by construction; verify.
__global__ void DecodeValues(float* superposition, int symbolSize, float* output, float* reliability, int numOfValues, int squaredMode, float* dirX, float* dirY, float* negDirX, float* negDirY, float* originX, float* originY)
{
int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid
+ blockDim.x*blockIdx.x //blocks preceeding current block
+ threadIdx.x;
if (threadId >= numOfValues)
return;
output[threadId] = 0;
reliability[threadId] = 0;
// select this value's basis vectors (see NOTE above)
float* dir = threadId == 0 ? dirX : dirY;
float* negDir = threadId == 0 ? negDirX : negDirY;
float* origin = threadId == 0 ? originX : originY;
for (int i = 0; i < symbolSize; i++)
{
// output = s.d - s.n = s.dir
// one of the values s.d or s.n will be (very close to) zero
output[threadId] += superposition[i] * dir[i] - superposition[i] * negDir[i];
// rel = s.o
reliability[threadId] += superposition[i] * origin[i];
}
// rel = s.o + s.dir
reliability[threadId] += fabs(output[threadId]);
// output = s.dir / (s.o + s.dir)
output[threadId] /= reliability[threadId];
// Since s = dir*t + o*(1-t) + noise, we get
// s.dir = dir.dir*t + o.dir*(1-t) + dir.noise = t + 0 + dir.noise
// s.o = o.dir*t + o.o*(1-t) + o.noise = 0 + (1-t) + o.noise
// output = t + dir.noise / (1 + dir.noise + o.noise)
// Note that dir.noise and o.noise should be very close to zero.
// This should make the decoding more precise when noise has similar dot product to dir and o.
}
14,023 |
//======================================
// Utility function
//======================================
// Period utility from consumption c, housing h, and labor l.
// uti == 1: CES aggregate of (c, h) minus separable labor disutility;
// uti == 2: Cobb-Douglas aggregate. Infeasible c or h returns a large
// negative penalty.
// Fix: all math now uses the double-precision pow — the original mixed the
// single-precision powf into a double-returning function, silently losing
// precision (and the penalty powf(-10, 15) is simply -1e15).
__device__ double u(const double c, const double h, const double l,
const double ssigma, const double ppsi, const int uti,
const double kkappa, const double tthetalab, const double eetalab){
  double utility = 0.0;
  if(uti == 1){
    // CES
    utility = (pow(pow(ppsi*pow(c, kkappa) + (1-ppsi)*pow(h, kkappa), (1/kkappa)), 1-ssigma) / (1-ssigma)) - (tthetalab*pow(l, 1 + eetalab)/(1 + eetalab)) ;
  } else if(uti == 2){
    // Cobb-Douglas
    utility = pow(pow(c, ppsi)*pow(h, 1-ppsi), 1-ssigma) / (1-ssigma);
  }
  if(c <= 0 || h <= 0){
    utility = -1.0e15;  // penalty for infeasible consumption/housing
  }
  return(utility);
}
// Funds received for issuing a mortgage of face value m priced at Pm,
// subject to the collateral constraint m*(1+repay_coeff) <= (1-oomega)*Ph*h.
// A violated constraint returns a large negative penalty instead of a price.
__device__ double mortg_function(const double m, const double Pm, const double oomega, const double h, const double Ph, const double repay_coeff){
const bool feasible = (m*(1+repay_coeff) <= (1-oomega)*Ph*h);
return feasible ? m*Pm : -10000.0;
}
// Returns the larger of a and b (b wins ties, as in the original branch).
__device__ double maximumab(const double a, const double b){
return (b >= a) ? b : a;
}
//======================================
// Pricing function
//======================================
// Mortgage pricing by backward induction over the life cycle.
// One thread per state (id, ih, iy) within one block per (im, ia):
//   launch layout: blockIdx.x = mortgage index, blockIdx.y = assets index,
//   threadIdx = (depreciation shock, housing, income).
// At the terminal age (it == T-1) the price is zero; otherwise the price is
// the discounted expected payoff of the loan tomorrow, averaging over income
// transitions P[iy][iyp] and uniform depreciation shocks (1/nd), using next
// period's default/renewal decisions and the savings policy.
// NOTE(review): the non-terminal branch accumulates with
// pricing[ind] = pricing[ind] + ... — this assumes pricing[] was
// zero-initialized by the caller before the kernel launch; confirm.
__global__ void Pmort(const int T, const int na, const int nm, const int nh, const int nd, const int ny,
const double rrho, const double r, const double Ph, const double ddeltabar, const double sunk, const double interm, const double rec_probab,
const double *P,
const double *dgrid,
const double *mgrid,
const double *hgrid,
const double *rgrid,
const double *survival,
const double *repay_coeff,
const int it,
const int* Def,
const int* Renew,
const int* Policya,
double* pricing,
double* pricing_guess){
const int id = threadIdx.x;
const int ih = threadIdx.y;
const int iy = threadIdx.z;
const int im = blockIdx.x;
const int ia = blockIdx.y;
// If mortgage is equal to zero, the price is not relevant.
if(im > 0){
int ind;
int ind2;
int ind3;
int iap = 0;
// For every state variable, I compute the pricing function
// State index layout: [it][iy][id][ih][im][ia], row-major.
ind = it*ny*nd*nh*nm*na + iy*nd*nh*nm*na + id*nh*nm*na + ih*nm*na + im*na + ia;
if(it == T-1){
pricing[ind] = 0.0;
} else{
// Expected value is sum over tomorrow's possible shocks times probabilities: P[iy][iyp]*(1/nd)
for(int iyp=0; iyp<ny; iyp++){
for(int idp=0; idp<nd; idp++){
// ind2: tomorrow's state at the same (ih, im, ia); used to read decisions.
ind2 = (it+1)*ny*nd*nh*nm*na + iyp*nd*nh*nm*na + idp*nh*nm*na + ih*nm*na + im*na + ia;
iap = Policya[ind2];
// ind3: tomorrow's state after applying the savings policy iap; used to
// read the continuation price guess.
ind3 = (it+1)*ny*nd*nh*nm*na + iyp*nd*nh*nm*na + idp*nh*nm*na + ih*nm*na + im*na + iap;
// pricing[ind] = pricing[ind] + ((survival[it]*rrho/(mgrid[im]*(1+r+interm)))*P[iy*ny+iyp]*(1/(double)nd)*(Def[ind2]*(1-Renew[ind2])*Ph*(1-dgrid[idp] - ddeltabar)*hgrid[ih]*(1-sunk) + // If he defaults
// Payoff: default → recovered collateral net of sunk costs and damages;
// pay & keep → principal plus continuation value of the loan;
// pay & renew → principal plus the remaining repayment coefficient.
pricing[ind] = pricing[ind] + ((survival[it]*rrho/(mgrid[im]*(1+r+interm)))*P[iy*ny+iyp]*(1/(double)nd)*(Def[ind2]*(1-Renew[ind2])*(Ph*(1-dgrid[idp] - ddeltabar)*hgrid[ih]*(1-sunk) - Ph*dgrid[idp]*hgrid[ih]) + // If he defaults
(1-Def[ind2])*((1-Renew[ind2])*(mgrid[im] + pricing_guess[ind3]*mgrid[im]) + // If he pays and continues with mortgage
(1-Def[ind2])*Renew[ind2]*(mgrid[im] + repay_coeff[it+1]*mgrid[im]))));
}
}
}
}
}
//======================================
// Value Function Computation
//======================================
// Value function iteration for one age `it`, one thread per state.
// Launch layout: blockIdx.x = mortgage (im), blockIdx.y = assets (ia);
// threadIdx = (depreciation id, housing ih, income iy).
// Each thread searches over labor, savings and rental choices, comparing
// three discrete options:
//   VVk — keep the current mortgage/house;
//   VVd — default (lose house, mortgage wiped, savings partially seized);
//   VVn — renew: pick a new mortgage imp and house ihp (paying issuance or
//         refinancing costs).
// It writes the maximizing value and the associated policies/indicators.
// When `equivalent` == 1, a parallel value with consumption scaled by
// (1+multiplier) is tracked for consumption-equivalent welfare comparisons.
__global__ void vfi(const int T, const int Tretirement, const int na, const int nm, const int nh, const int nr, const int nl,
const int nd, const int ny, const int uti, const double rrho,
const double bbeta, const double Ph, const double q,
const double Pa, const double ddeltabar, const double ssigma,
const double ppsi, const double kkappa, const double tthetalab, const double eetalab,
const double fcost, const double refcost, const double pension, const double sstax, const double ltax,
const double lumpsum, const double oomega, const double rec_probab, const double sunk,
const double *incshock, const double *mortsubsidy,
const double *agrid, const double *mgrid, const double *hgrid, const double *rgrid, const double *lgrid,
const double *dgrid, const double *ygrid, const double *P,
const double *eprocess, const double *survival, const double *repay_coeff,
const int it,
const int equivalent,
const double multiplier,
double* Value,
double* Value_equiv,
int* Default,
int* Renew,
int* Policya,
int* Policym,
int* Policyh,
int* Policyr,
int* Policyl,
double* Policyc,
double* Pricing_guess){
int ind;
int ind1;
int ind2;
int indsubs;
double VV = 0.0;
double VV_eq = 0.0;
// Value normal
// Candidate maxima start at a very large negative number (powf(-10,11) = -1e11).
double VVk = powf(-10,11); double VVn = powf(-10,11); double VVd = powf(-10,11);
double Vexk = 0.0; double Vexn = 0.0; double Vexd = 0.0;
// Value de consumption equivalent
double VVk_eq = powf(-10,11); double VVn_eq = powf(-10,11); double VVd_eq = powf(-10,11);
double Vexk_eq = 0.0; double Vexn_eq = 0.0; double Vexd_eq = 0.0;
double cck = 0; double ccn = 0; double ccd = 0; // Consumption
int hhk = 0; int hhn = 0; int hhd = 0; // Home ownership
int hrk = 0; int hrn = 0; int hrd = 0; // Home renting
int mmk = 0; int mmn = 0; int mmd = 0; // Mortgage
int aak = 0; int aan = 0; int aad = 0; // Savings
int llk = 0; int lln = 0; int lld = 0; // Labor
double yy;
double aa;
double ll;
double aaprime;
double mm;
double mmprime;
double hh;
double hhprime;
double hhrent;
double ddelta;
double cons;
double pprice;
double mort_received;
double mortgage_subsidy;
double refinance_cost;
// State variables that are parallelized
// const int im = blockIdx.x * blockDim.x + threadIdx.x;
const int im = blockIdx.x;
const int ia = blockIdx.y;
const int id = threadIdx.x;
const int ih = threadIdx.y;
const int iy = threadIdx.z;
aa = agrid[ia];
mm = mgrid[im];
hh = hgrid[ih];
ddelta = dgrid[id];
// State index layout: [it][iy][id][ih][im][ia], row-major.
ind = it*ny*nd*nh*nm*na + iy*nd*nh*nm*na + id*nh*nm*na + ih*nm*na + im*na + ia;
// Subsidy array has no depreciation dimension: [it][iy][ih][im][ia].
indsubs = it*ny*nh*nm*na + iy*nh*nm*na + ih*nm*na + im*na + ia;
mortgage_subsidy = mortsubsidy[indsubs];
// Control variables
for(int il=0; il<nl; il++){
for(int iap=0; iap<na; iap++){
for(int ihre=0; ihre<nr; ihre++){
ll = lgrid[il];
aaprime = agrid[iap];
hhrent = rgrid[ihre];
// Labor income (taxed) before retirement; pension after.
if(it < Tretirement){
yy = ygrid[iy]*eprocess[it]*ll*(1-sstax-ltax);
} else{
yy = ygrid[iy]*pension;
}
// Keeping the same mortgage
cons = aa + q*hh + yy*(1-incshock[it]) - mm - q*hhrent - Pa*aaprime - Ph*(ddelta + ddeltabar)*hh - lumpsum;
Vexk = 0.0;
Vexk_eq = 0.0;
if(it < T-1){
for(int idp=0; idp<nd; idp++){
if(it < Tretirement){ // Income uncertainty before retirement
for(int iyp=0; iyp<ny; iyp++){
ind1 = (it+1)*ny*nd*nh*nm*na + iyp*nd*nh*nm*na + idp*nh*nm*na + ih*nm*na + im*na + iap;
ind2 = (it+1)*ny*nd*nh*nm*na + iyp*nd*nh*nm*na + idp*nh*nm*na + ih*nm*na + 0*na + iap;
Vexk = Vexk + P[iy*ny+iyp]*(1/(double)nd)*(rrho*Value[ind1] + // Keeps mortg
(1-rrho)*Value[ind2]); // Mortg disappears
Vexk_eq = Vexk_eq + P[iy*ny+iyp]*(1/(double)nd)*(rrho*Value_equiv[ind1] + // Keeps mortg
(1-rrho)*Value_equiv[ind2]); // Mortg disappears
}
} else{ // Certainty after retirement
ind1 = (it+1)*ny*nd*nh*nm*na + iy*nd*nh*nm*na + idp*nh*nm*na + ih*nm*na + im*na + iap;
ind2 = (it+1)*ny*nd*nh*nm*na + iy*nd*nh*nm*na + idp*nh*nm*na + ih*nm*na + 0*na + iap;
Vexk = Vexk + (1/(double)nd)*(rrho*Value[ind1] + // Keeps mortg
(1-rrho)*Value[ind2]); // Mortg disappears
Vexk_eq = Vexk_eq + (1/(double)nd)*(rrho*Value_equiv[ind1] + // Keeps mortg
(1-rrho)*Value_equiv[ind2]); // Mortg disappears
}
}
}
VV = u(cons, hhrent, ll, ssigma, ppsi, uti, kkappa, tthetalab, eetalab) + bbeta*survival[it]*Vexk;
if(equivalent == 1){
VV_eq = u(cons*(1+multiplier), hhrent, ll, ssigma, ppsi, uti, kkappa, tthetalab, eetalab) + bbeta*survival[it]*Vexk_eq;
}
if(VV > VVk){
VVk = VV;
VVk_eq = VV_eq;
hhk = ih;
hrk = ihre;
mmk = im;
aak = iap;
cck = cons;
llk = il;
}
// Defaulting => Household loses savings
cons = maximumab(aa - rec_probab*((1+repay_coeff[it])*mm - Ph*(1-ddelta - ddeltabar)*hh*(1-sunk)), 0) + yy*(1-incshock[it]) - q*hhrent - Pa*aaprime - lumpsum;
Vexd = 0.0;
Vexd_eq = 0.0;
if(it < T-1){
for(int idp=0; idp<nd; idp++){
if(it < Tretirement){ // Income uncertainty before retirement
for(int iyp=0; iyp<ny; iyp++){
// Continuation state after default: no house (ih=0), no mortgage (im=0).
ind1 = (it+1)*ny*nd*nh*nm*na + iyp*nd*nh*nm*na + idp*nh*nm*na + 0*nm*na + 0*na + iap;
Vexd = Vexd + P[iy*ny+iyp]*(1/(double)nd)*Value[ind1];
Vexd_eq = Vexd_eq + P[iy*ny+iyp]*(1/(double)nd)*Value_equiv[ind1];
}
} else{ // Certainty after retirement
ind1 = (it+1)*ny*nd*nh*nm*na + iy*nd*nh*nm*na + idp*nh*nm*na + 0*nm*na + 0*na + iap;
Vexd = Vexd + (1/(double)nd)*Value[ind1];
Vexd_eq = Vexd_eq + (1/(double)nd)*Value_equiv[ind1];
}
}
}
VV = u(cons, hhrent, ll, ssigma, ppsi, uti, kkappa, tthetalab, eetalab) + bbeta*survival[it]*Vexd;
if(equivalent == 1){
VV_eq = u(cons*(1+multiplier), hhrent, ll, ssigma, ppsi, uti, kkappa, tthetalab, eetalab) + bbeta*survival[it]*Vexd_eq;
}
if(VV > VVd){
VVd = VV;
VVd_eq = VV_eq;
hhd = 0;
hrd = ihre;
mmd = 0;
aad = iap;
ccd = cons;
lld = il;
}
// New mortgage
for(int imp=0; imp<nm; imp++){
for(int ihp=0; ihp<nh; ihp++){
// Refinancing cost schedule: fcost for a brand-new mortgage,
// refcost when replacing an existing one, zero when fully repaying.
if(im == 0){
if(imp > 0){
refinance_cost = fcost; // Issuing new mortgage
} else{
refinance_cost = 0.0;
}
} else{
if(imp > 0){
refinance_cost = refcost; // Refinancing mortgage
} else{
refinance_cost = 0.0; // Paying total debt
}
}
mmprime = mgrid[imp];
hhprime = hgrid[ihp];
ind = it*ny*nd*nh*nm*na + iy*nd*nh*nm*na + id*nh*nm*na + ihp*nm*na + imp*na + iap;
pprice = Pricing_guess[ind];
mort_received = mortg_function(mmprime, pprice, oomega, hhprime, Ph, repay_coeff[it]);
cons = aa + Ph*(1-ddelta - ddeltabar)*hh + q*hhprime + yy*(1-incshock[it]) + mort_received - refinance_cost*(1+repay_coeff[it])*mmprime + mortgage_subsidy - (1+repay_coeff[it])*mm - q*hhrent - Ph*hhprime - Pa*aaprime - lumpsum;
Vexn = 0.0;
Vexn_eq = 0.0;
if(it < T-1){
for(int idp=0; idp<nd; idp++){
if(it < Tretirement){ // Income uncertainty before retirement
for(int iyp=0; iyp<ny; iyp++){
ind1 = (it+1)*ny*nd*nh*nm*na + iyp*nd*nh*nm*na + idp*nh*nm*na + ihp*nm*na + imp*na + iap;
ind2 = (it+1)*ny*nd*nh*nm*na + iyp*nd*nh*nm*na + idp*nh*nm*na + ihp*nm*na + 0*na + iap;
Vexn = Vexn + P[iy*ny+iyp]*(1/(double)nd)*((rrho * Value[ind1]) +
((1-rrho) * Value[ind2]));
Vexn_eq = Vexn_eq + P[iy*ny+iyp]*(1/(double)nd)*((rrho * Value_equiv[ind1]) +
((1-rrho) * Value_equiv[ind2]));
}
} else{ // Certainty after retirement
ind1 = (it+1)*ny*nd*nh*nm*na + iy*nd*nh*nm*na + idp*nh*nm*na + ihp*nm*na + imp*na + iap;
ind2 = (it+1)*ny*nd*nh*nm*na + iy*nd*nh*nm*na + idp*nh*nm*na + ihp*nm*na + 0*na + iap;
Vexn = Vexn + (1/(double)nd)*((rrho * Value[ind1]) +
((1-rrho) * Value[ind2]));
Vexn_eq = Vexn_eq + (1/(double)nd)*((rrho * Value_equiv[ind1]) +
((1-rrho) * Value_equiv[ind2]));
}
}
}
VV = u(cons, hhrent, ll, ssigma, ppsi, uti, kkappa, tthetalab, eetalab) + bbeta*survival[it]*Vexn;
if(equivalent == 1){
VV_eq = u(cons*(1+multiplier), hhrent, ll, ssigma, ppsi, uti, kkappa, tthetalab, eetalab) + bbeta*survival[it]*Vexn_eq;
}
if(VV > VVn){
VVn = VV;
VVn_eq = VV_eq;
hhn = ihp;
hrn = ihre;
mmn = imp;
aan = iap;
ccn = cons;
lln = il;
}
}
}
}
}
}
// Pick the best of the three options and write value + policies.
// NOTE(review): '&' (bitwise) on comparison results behaves like '&&' here
// since both operands are bool, but '&&' would be conventional.
ind = it*ny*nd*nh*nm*na + iy*nd*nh*nm*na + id*nh*nm*na + ih*nm*na + im*na + ia;
if((VVk >= VVd) & (VVk >= VVn)){
Value[ind] = VVk;
Value_equiv[ind]= VVk_eq;
Policya[ind] = aak;
Policyh[ind] = hhk;
Policyr[ind] = hrk;
Policyl[ind] = llk;
Policym[ind] = mmk;
Policyc[ind] = cck;
Default[ind] = 0;
Renew[ind] = 0;
// NOTE(review): this branch uses strict (VVd > VVn), while vfi_continuation
// uses (VVd >= VVn) — a tie between defaulting and renewing resolves
// differently in the two kernels; confirm which tie-break is intended.
} else if((VVd > VVk) & (VVd > VVn)){
Value[ind] = VVd;
Value_equiv[ind]= VVd_eq;
Policya[ind] = aad;
Policyh[ind] = hhd;
Policyr[ind] = hrd;
Policyl[ind] = lld;
Policym[ind] = mmd;
Policyc[ind] = ccd;
Default[ind] = 1;
Renew[ind] = 0;
} else{
Value[ind] = VVn;
Value_equiv[ind]= VVn_eq;
Policya[ind] = aan;
Policyh[ind] = hhn;
Policyr[ind] = hrn;
Policyl[ind] = lln;
Policym[ind] = mmn;
Policyc[ind] = ccn;
Default[ind] = 0;
Renew[ind] = 1;
}
}
//==================================================================
// Value Function Computation with different continuation
//==================================================================
// Variant of vfi() that takes the continuation values explicitly
// (Value_future / Value_equiv_future) instead of reading/writing the same
// arrays, and uses today's prices Ph_today / ddeltabar_today. Used for
// transition experiments where tomorrow's value function differs from the
// one being filled in. Launch layout and option structure (keep / default /
// renew) are identical to vfi().
__global__ void vfi_continuation(const int T, const int Tretirement, const int na, const int nm, const int nh, const int nr, const int nl,
const int nd, const int ny, const int uti,
const double rrho, const double bbeta, const double Ph_today, const double q, const double Pa, const double ddeltabar_today,
const double ssigma, const double ppsi, const double kkappa, const double tthetalab, const double eetalab,
const double fcost, const double refcost, const double pension, const double sstax, const double ltax,
const double lumpsum, const double oomega, const double rec_probab, const double sunk,
const double *incshock, const double *mortsubsidy,
const double *agrid,
const double *mgrid,
const double *hgrid,
const double *rgrid,
const double *lgrid,
const double *dgrid,
const double *ygrid,
const double *P,
const double *eprocess,
const double *survival,
const double *repay_coeff,
const int it,
const int equivalent,
const double multiplier,
const double* Value_future,
const double* Value_equiv_future,
double* Value,
double* Value_equiv,
int* Default,
int* Renew,
int* Policya,
int* Policym,
int* Policyh,
int* Policyr,
int* Policyl,
double* Policyc,
double* Pricing_guess){
int ind;
int ind1;
int ind2;
int indsubs;
double VV = 0.0;
double VV_eq = 0.0;
// Candidate maxima start at a very large negative number (powf(-10,11) = -1e11).
double VVk = powf(-10,11); double VVn = powf(-10,11); double VVd = powf(-10,11);
double Vexk = 0.0; double Vexn = 0.0; double Vexd = 0.0;
double VVk_eq = powf(-10,11); double VVn_eq = powf(-10,11); double VVd_eq = powf(-10,11);
double Vexk_eq = 0.0; double Vexn_eq = 0.0; double Vexd_eq = 0.0;
double cck = 0; double ccn = 0; double ccd = 0; // Consumption
int hhk = 0; int hhn = 0; int hhd = 0; // Home ownership
int hrk = 0; int hrn = 0; int hrd = 0; // Home renting
int mmk = 0; int mmn = 0; int mmd = 0; // Mortgage
int aak = 0; int aan = 0; int aad = 0; // Savings
int llk = 0; int lln = 0; int lld = 0; // Labor
double yy;
double aa;
double ll;
double aaprime;
double mm;
double mmprime;
double hh;
double hhprime;
double hhrent;
double ddelta;
double cons;
double pprice;
double mort_received;
double mortgage_subsidy;
double refinance_cost;
// State variables that are parallelized
// const int im = blockIdx.x * blockDim.x + threadIdx.x;
const int im = blockIdx.x;
const int ia = blockIdx.y;
const int id = threadIdx.x;
const int ih = threadIdx.y;
const int iy = threadIdx.z;
aa = agrid[ia];
mm = mgrid[im];
hh = hgrid[ih];
ddelta = dgrid[id];
// State index layout: [it][iy][id][ih][im][ia], row-major.
ind = it*ny*nd*nh*nm*na + iy*nd*nh*nm*na + id*nh*nm*na + ih*nm*na + im*na + ia;
indsubs = it*ny*nh*nm*na + iy*nh*nm*na + ih*nm*na + im*na + ia;
mortgage_subsidy = mortsubsidy[indsubs];
// Control variables
for(int il=0; il<nl; il++){
for(int iap=0; iap<na; iap++){
for(int ihre=0; ihre<nr; ihre++){
ll = lgrid[il];
aaprime = agrid[iap];
hhrent = rgrid[ihre];
if(it < Tretirement){
yy = ygrid[iy]*eprocess[it]*ll*(1-sstax-ltax);
} else{
yy = ygrid[iy]*pension;
}
// Keeping the same mortgage
cons = aa + q*hh + yy*(1-incshock[it]) - mm - q*hhrent - Pa*aaprime - Ph_today*(ddelta + ddeltabar_today)*hh - lumpsum;
Vexk = 0.0;
Vexk_eq = 0.0;
if(it < T-1){
for(int idp=0; idp<nd; idp++){
if(it < Tretirement){ // Income uncertainty before retirement
for(int iyp=0; iyp<ny; iyp++){
ind1 = (it+1)*ny*nd*nh*nm*na + iyp*nd*nh*nm*na + idp*nh*nm*na + ih*nm*na + im*na + iap;
ind2 = (it+1)*ny*nd*nh*nm*na + iyp*nd*nh*nm*na + idp*nh*nm*na + ih*nm*na + 0*na + iap;
Vexk = Vexk + P[iy*ny+iyp]*(1/(double)nd)*(rrho*Value_future[ind1] + // Keeps mortg
(1-rrho)*Value_future[ind2]); // Mortg disappears
Vexk_eq = Vexk_eq + P[iy*ny+iyp]*(1/(double)nd)*(rrho*Value_equiv_future[ind1] + // Keeps mortg
(1-rrho)*Value_equiv_future[ind2]); // Mortg disappears
}
} else{ // Certainty after retirement
ind1 = (it+1)*ny*nd*nh*nm*na + iy*nd*nh*nm*na + idp*nh*nm*na + ih*nm*na + im*na + iap;
ind2 = (it+1)*ny*nd*nh*nm*na + iy*nd*nh*nm*na + idp*nh*nm*na + ih*nm*na + 0*na + iap;
Vexk = Vexk + (1/(double)nd)*(rrho*Value_future[ind1] + // Keeps mortg
(1-rrho)*Value_future[ind2]); // Mortg disappears
Vexk_eq = Vexk_eq + (1/(double)nd)*(rrho*Value_equiv_future[ind1] + // Keeps mortg
(1-rrho)*Value_equiv_future[ind2]); // Mortg disappears
}
}
}
VV = u(cons, hhrent, ll, ssigma, ppsi, uti, kkappa, tthetalab, eetalab) + bbeta*survival[it]*Vexk;
if(equivalent == 1){
VV_eq = u(cons*(1+multiplier), hhrent, ll, ssigma, ppsi, uti, kkappa, tthetalab, eetalab) + bbeta*survival[it]*Vexk_eq;
}
if(VV > VVk){
VVk = VV;
VVk_eq = VV_eq;
hhk = ih;
hrk = ihre;
mmk = im;
aak = iap;
cck = cons;
llk = il;
}
// Defaulting => Household loses savings
cons = maximumab(aa - rec_probab*((1+repay_coeff[it])*mm - Ph_today*(1-ddelta - ddeltabar_today)*hh*(1-sunk)), 0) + yy*(1-incshock[it]) - q*hhrent - Pa*aaprime - lumpsum;
Vexd = 0.0;
Vexd_eq = 0.0;
if(it < T-1){
for(int idp=0; idp<nd; idp++){
if(it < Tretirement){ // Income uncertainty before retirement
for(int iyp=0; iyp<ny; iyp++){
// Continuation after default: ih = 0 and im = 0, so those index terms vanish.
ind1 = (it+1)*ny*nd*nh*nm*na + iyp*nd*nh*nm*na + idp*nh*nm*na + iap;
Vexd = Vexd + P[iy*ny+iyp]*(1/(double)nd)*Value_future[ind1];
Vexd_eq = Vexd_eq + P[iy*ny+iyp]*(1/(double)nd)*Value_equiv_future[ind1];
}
} else{ // Certainty after retirement
ind1 = (it+1)*ny*nd*nh*nm*na + iy*nd*nh*nm*na + idp*nh*nm*na + iap;
Vexd = Vexd + (1/(double)nd)*Value_future[ind1];
Vexd_eq = Vexd_eq + (1/(double)nd)*Value_equiv_future[ind1];
}
}
}
VV = u(cons, hhrent, ll, ssigma, ppsi, uti, kkappa, tthetalab, eetalab) + bbeta*survival[it]*Vexd;
if(equivalent == 1){
VV_eq = u(cons*(1+multiplier), hhrent, ll, ssigma, ppsi, uti, kkappa, tthetalab, eetalab) + bbeta*survival[it]*Vexd_eq;
}
if(VV > VVd){
VVd = VV;
VVd_eq = VV_eq;
hhd = 0;
hrd = ihre;
mmd = 0;
aad = iap;
ccd = cons;
lld = il;
}
// New mortgage
for(int imp=0; imp<nm; imp++){
for(int ihp=0; ihp<nh; ihp++){
if(im == 0){
if(imp > 0){
refinance_cost = fcost; // Issuing new mortgage
} else{
refinance_cost = 0.0;
}
} else{
if(imp > 0){
refinance_cost = refcost; // Refinancing mortgage
} else{
refinance_cost = 0.0; // Paying total debt
}
}
mmprime = mgrid[imp];
hhprime = hgrid[ihp];
ind = it*ny*nd*nh*nm*na + iy*nd*nh*nm*na + id*nh*nm*na + ihp*nm*na + imp*na + iap;
pprice = Pricing_guess[ind];
mort_received = mortg_function(mmprime, pprice, oomega, hhprime, Ph_today, repay_coeff[it]);
cons = aa + Ph_today*(1-ddelta - ddeltabar_today)*hh + q*hhprime + yy*(1-incshock[it]) - refinance_cost*(1+repay_coeff[it])*mmprime + mort_received + mortgage_subsidy - (1+repay_coeff[it])*mm - q*hhrent - Ph_today*hhprime - Pa*aaprime - lumpsum;
Vexn = 0.0;
Vexn_eq = 0.0;
if(it < T-1){
for(int idp=0; idp<nd; idp++){
if(it < Tretirement){ // Income uncertainty before retirement
for(int iyp=0; iyp<ny; iyp++){
ind1 = (it+1)*ny*nd*nh*nm*na + iyp*nd*nh*nm*na + idp*nh*nm*na + ihp*nm*na + imp*na + iap;
ind2 = (it+1)*ny*nd*nh*nm*na + iyp*nd*nh*nm*na + idp*nh*nm*na + ihp*nm*na + 0*na + iap;
Vexn = Vexn + P[iy*ny+iyp]*(1/(double)nd)*((rrho * Value_future[ind1]) +
((1-rrho) * Value_future[ind2]));
Vexn_eq = Vexn_eq + P[iy*ny+iyp]*(1/(double)nd)*((rrho * Value_equiv_future[ind1]) +
((1-rrho) * Value_equiv_future[ind2]));
}
} else{ // Certainty after retirement
ind1 = (it+1)*ny*nd*nh*nm*na + iy*nd*nh*nm*na + idp*nh*nm*na + ihp*nm*na + imp*na + iap;
ind2 = (it+1)*ny*nd*nh*nm*na + iy*nd*nh*nm*na + idp*nh*nm*na + ihp*nm*na + 0*na + iap;
Vexn = Vexn + (1/(double)nd)*((rrho * Value_future[ind1]) +
((1-rrho) * Value_future[ind2]));
Vexn_eq = Vexn_eq + (1/(double)nd)*((rrho * Value_equiv_future[ind1]) +
((1-rrho) * Value_equiv_future[ind2]));
}
}
}
VV = u(cons, hhrent, ll, ssigma, ppsi, uti, kkappa, tthetalab, eetalab) + bbeta*survival[it]*Vexn;
if(equivalent == 1){
VV_eq = u(cons*(1+multiplier), hhrent, ll, ssigma, ppsi, uti, kkappa, tthetalab, eetalab) + bbeta*survival[it]*Vexn_eq;
}
if(VV > VVn){
VVn = VV;
VVn_eq = VV_eq;
hhn = ihp;
hrn = ihre;
mmn = imp;
aan = iap;
ccn = cons;
lln = il;
}
}
}
}
}
}
// Pick the best of the three options and write value + policies.
// NOTE(review): ties between default and renewal use (VVd >= VVn) here but
// strict (VVd > VVn) in vfi() — confirm which tie-break is intended.
ind = it*ny*nd*nh*nm*na + iy*nd*nh*nm*na + id*nh*nm*na + ih*nm*na + im*na + ia;
if((VVk >= VVd) & (VVk >= VVn)){
Value[ind] = VVk;
Value_equiv[ind]= VVk_eq;
Policya[ind] = aak;
Policyh[ind] = hhk;
Policyr[ind] = hrk;
Policyl[ind] = llk;
Policym[ind] = mmk;
Policyc[ind] = cck;
Default[ind] = 0;
Renew[ind] = 0;
} else if((VVd > VVk) & (VVd >= VVn)){
Value[ind] = VVd;
Value_equiv[ind]= VVd_eq;
Policya[ind] = aad;
Policyh[ind] = hhd;
Policyr[ind] = hrd;
Policyl[ind] = lld;
Policym[ind] = mmd;
Policyc[ind] = ccd;
Default[ind] = 1;
Renew[ind] = 0;
} else{
Value[ind] = VVn;
Value_equiv[ind]= VVn_eq;
Policya[ind] = aan;
Policyh[ind] = hhn;
Policyr[ind] = hrn;
Policyl[ind] = lln;
Policym[ind] = mmn;
Policyc[ind] = ccn;
Default[ind] = 0;
Renew[ind] = 1;
}
}
14,024 | // Copies, for each of the N boundary entries, the value stored on its
// source edge into boundary_values:
//   boundary_values[k] = edge_values[3*vol_id[k] + edge_id[k]]
// (the 3*vol_id + edge_id layout implies three edges per volume, with
// edge_id selecting one of them).
// Expects a 2D grid of 2D blocks; k is the flattened global thread index.
// Fix: the edge index is kept in a `long`; the original narrowed
// 3*vol_id[k] + edge_id[k] into an `int`, which overflows once
// vol_id exceeds ~7.15e8 volumes.
__global__ void set_boundary_values_from_edges(
int N,
long * vol_id,
long * edge_id,
double * boundary_values,
double * edge_values)
{
const int k =
threadIdx.x+threadIdx.y*blockDim.x+
(blockIdx.x+blockIdx.y*gridDim.x)*blockDim.x*blockDim.y;
if ( k >= N )
return;
long id = 3*vol_id[k] + edge_id[k];
boundary_values[k] = edge_values[id];
}
|
14,025 | // Element-wise vector addition: C[i] = A[i] + B[i] for i in [0, N).
// Grid-stride loop makes the kernel correct for any launch configuration
// and adds the bounds check the original lacked.
__global__ void vecAdd_kernel(const int *A, const int *B, int *C, int N)
{
    int stride = gridDim.x * blockDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < N; idx += stride)
        C[idx] = A[idx] + B[idx];
}
// Host wrapper: launches vecAdd_kernel over N device elements.
// Fix: the original launched <<<1, N>>>, which silently fails for
// N > 1024 (the per-block thread limit) and launched an invalid
// configuration for N == 0.
void vecAdd(const int *A, const int *B, int *C, int N)
{
    const int threads = 256;
    const int blocks = (N + threads - 1) / threads;  // ceil-div
    if (blocks > 0)
        vecAdd_kernel <<< blocks, threads >>> (A, B, C, N);
}
14,026 | //#include "QC_LDPC_CSS.h"
//#include <chrono>
//// See https://arxiv.org/pdf/quant-ph/0701020.pdf for construction
//
//QC_LDPC_CSS::QC_LDPC_CSS(int J, int K, int L, int P, int sigma, int tau) :
// _numEqsX(J * P), _numEqsZ(K * P), _numVars(L * P), _P(P),
// _hHC_vec(J * P, std::vector<int>(L * P)), _hHD_vec(K * P, std::vector<int>(L * P)),
// // allocate host memory for parity check matrices
// _pcmX_h(J * P, L * P), _pcmZ_h(K * P, L * P),
// // allocate host and device memory for syndromes
// _syndromeX_h(_numEqsX,0), _syndromeX_d(_numEqsX,0),
// _syndromeZ_h(_numEqsZ,0), _syndromeZ_d(_numEqsZ,0),
// // allocate host and device memory for var node updates
// _varNodesX(_numVars,_numEqsX,0), _varNodesZ(_numVars,_numEqsZ,0),
// _varNodesX_d(_numVars,_numEqsX,0), _varNodesZ_d(_numVars,_numEqsZ,0),
// // allocate host and device memory for check node updates
// _eqNodesX(_numEqsX,_numVars,0), _eqNodesZ(_numEqsZ,_numVars,0),
// _eqNodesX_d(_numEqsX,_numVars,0), _checkNodesZ_d(_numEqsZ,_numVars,0),
// // allocate host and device memory for index matrices
// _eqNodeVarIndicesX(_numEqsX, L), _eqNodeVarIndicesZ(_numEqsZ, L),
// _eqNodeVarIndicesX_d(_numEqsX, L), _eqNodeVarIndicesZ_d(_numEqsZ, L),
// _varNodeEqIndicesX(_numVars,J), _varNodeEqIndicesZ(_numVars, K),
// _varNodeEqIndicesX_d(_numVars, J), _varNodeEqIndicesZ_d(_numVars, K),
// _errorGenerator(_numVars)
//{
// int i, j, k, l, t, p, invSigma;
//
// // index matrices for parity check matrices _pcmX_h and _pcmZ_h
// IntArray2d_h hHC(J, L);
// IntArray2d_h hHD(K, L);
//
// // construct the cyclic set from which HC and HD will be made
// cusp::array1d<int, cusp::host_memory> ZP(P - 1);
// for (i = 0; i < P - 1; ++i) ZP[i] = i + 1;
// print(ZP);
//
// // find sigma^(-1). It is the element of ZP that when multiplied by sigma = 1
// for (i = 0; ZP[i] * sigma % P != 1; ++i); // loop through ZP until the inverse element is found.
// invSigma = ZP[i];
//
// // Build parity check matrices for HC and HD on the host since this is a one shot operation.
// // Time to transfer data to the gpu will make this inefficient.
// for (j = 0; j < J; ++j)
// {
// for (l = 0; l < L; ++l)
// {
// t = 1;
// if (l < L / 2)
// {
// p = -j + l;
// // find the correct power of sigma (or inverse sigma if p is negative)
// if (p < 0) for (i = 0; i < -p; ++i) t = (t * invSigma) % P; // sigma^(-k) = (sigma^(-1))^k
// else for (i = 0; i < p; ++i) t = (t * sigma) % P;
// }
// else
// {
// p = j - 1 + l;
// // find the correct power of sigma (or inverse sigma if p is negative)
// if (p < 0) for (i = 0; i < -p; ++i) t = (t * invSigma) % P;
// else for (i = 0; i < p; ++i) t = (t * sigma) % P;
// t = P - (tau * t) % P; // -(tau*sigma^p) = P - (tau*sigma^p)
// }
// hHC(j, l) = t;
// }
// }
//
// for (k = 0; k < K; ++k)
// {
// for (l = 0; l < L; ++l)
// {
// t = 1;
// if (l < L / 2)
// {
// p = -k - 1 + l;
// // find the correct power of sigma (or inverse sigma if p is negative)
// if (p < 0) for (i = 0; i < -p; ++i) t = (t * invSigma) % P; // sigma^(-k) = (sigma^(-1))^k
// else for (i = 0; i < p; ++i) t = (t * sigma) % P;
// t = (tau * t) % P;
// }
// else
// {
// p = k + l;
// // find the correct power of sigma (or inverse sigma if p is negative)
// if (p < 0) for (i = 0; i < -p; ++i) t = (t * invSigma) % P;
// else for (i = 0; i < p; ++i) t = (t * sigma) % P;
// t = P - (t); // -(sigma^p) = P - (sigma^p)
// }
// hHD(k, l) = t;
// }
// }
//// print_matrix(hHC);
//// print_matrix(hHD);
//
// int cj, ck, cjR, ckR, cl, c, row, col;
// // Construct the parity check matrix matrix row by row.
// // The matrix is made up of JxL PxP blocks.
// // Each block is a circulant permutation matrix, I(1)^c with c given by HC calculated previously
// // see https://arxiv.org/pdf/quant-ph/0701020.pdf or https://en.wikipedia.org/wiki/Circulant_matrix
// for (row = 0; row < J * P; ++row)
// {
// cj = (int)(row / P); // the row index for HC is the integer part of j/P
// cjR = row % P; // the row within block cj is j%P. P rows per block.
// for (cl = 0; cl < L; ++cl)
// {
// c = hHC(cj, cl); //this is the power for the circulant permutation matrix, I(1)^c
// // cjR=0, c=1, block column index for non-zero entry = 1
// // cjR=1, c=1, block column index for non-zero entry = 2
// // cjR=P, c=1, block column index for non-zero entry = 0
// // block column index = (c + cjR) % P
// // offset block column index by block width P: offset = cl * P
// // column index = (c + cjR) % P + (cl * P);
// col = (c + cjR) % P + (cl * P);
// int index = row * _numVars + col;
// _hHC_vec[row][col] = 1;
// _pcmX_h.values[index] = 1; // set value of non-zero value i
// }
// }
//
// for (row = 0; row < K * P; ++row)
// {
// ck = (int)(row / P); // the row index for HD is the integer part of k/P
// ckR = row % P; // the row within block ck is k%P. P rows per block.
// for (cl = 0; cl < L; ++cl)
// {
// c = hHD(ck, cl); //this is the power for the circulant permutation matrix, I(1)^c
// col = (c + ckR) % P + (cl * P);
// int index = row * _numVars + col;
// _hHD_vec[row][col] = 1;
// _pcmZ_h.values[index] = 1; // set value of non-zero value i
// }
// }
//
// // set index arrays and device pointers
// SetIndexArrays(_eqNodeVarIndicesX, _varNodeEqIndicesX, _pcmX_h);
// thrust::copy(_eqNodeVarIndicesX.values.begin(), _eqNodeVarIndicesX.values.end(), _eqNodeVarIndicesX_d.values.begin());
// thrust::copy(_varNodeEqIndicesX.values.begin(), _varNodeEqIndicesX.values.end(), _varNodeEqIndicesX_d.values.begin());
// _eqNodeVarIndicesX_d_ptr = thrust::raw_pointer_cast(&_eqNodeVarIndicesX_d.values[0]);
// _varNodeEqIndicesX_d_ptr = thrust::raw_pointer_cast(&_varNodeEqIndicesX_d.values[0]);
//
// SetIndexArrays(_eqNodeVarIndicesZ, _varNodeEqIndicesZ, _pcmZ_h);
// thrust::copy(_eqNodeVarIndicesZ.values.begin(), _eqNodeVarIndicesZ.values.end(), _eqNodeVarIndicesZ_d.values.begin());
// thrust::copy(_varNodeEqIndicesZ.values.begin(), _varNodeEqIndicesZ.values.end(), _varNodeEqIndicesZ_d.values.begin());
// _eqNodeVarIndicesZ_d_ptr = thrust::raw_pointer_cast(&_eqNodeVarIndicesZ_d.values[0]);
// _varNodeEqIndicesZ_d_ptr = thrust::raw_pointer_cast(&_varNodeEqIndicesZ_d.values[0]);
//
// _numEqsPerVarX = _varNodeEqIndicesX.num_cols;
// _numVarsPerEqX = _eqNodeVarIndicesX.num_cols;
// _numEqsPerVarZ = _varNodeEqIndicesZ.num_cols;
// _numVarsPerEqZ = _eqNodeVarIndicesZ.num_cols;
//
// // set device memory pointers for pre-allocated device matrices
// _syndromeX_d_ptr = thrust::raw_pointer_cast(&_syndromeX_d[0]);
// _syndromeZ_d_ptr = thrust::raw_pointer_cast(&_syndromeZ_d[0]);
//
// _varNodesX_d_ptr = thrust::raw_pointer_cast(&_varNodesX_d.values[0]);
// _varNodesZ_d_ptr = thrust::raw_pointer_cast(&_varNodesZ_d.values[0]);
//
// _eqNodesX_d_ptr = thrust::raw_pointer_cast(&_eqNodesX_d.values[0]);
// _eqNodesZ_d_ptr = thrust::raw_pointer_cast(&_checkNodesZ_d.values[0]);
//
// // We now have parity check matrices hPHC and hPHD on the host. https://arxiv.org/pdf/quant-ph/0701020.pdf
// // These satisfy the constraints that the girth of their respective Tanner graphs are >= 6
// // and they have a "twisted relation", i.e. dual(D) is in C.
//}
//
//QC_LDPC_CSS::~QC_LDPC_CSS()
//{
//}
//
//void QC_LDPC_CSS::SetIndexArrays(IntArray2d_h& checkNodeVarIndices, IntArray2d_h& varNodeEqIndices, IntArray2d_h& parityCheckMatrix)
//{
// // set device index matrices for var node and check node updates
// // each equation will include L variables.
// // each variable will be involved in J equations
// // loop over all check node equations in the parity check matrix for X errors
// int numEqs = parityCheckMatrix.num_rows;
// int n = parityCheckMatrix.num_cols;
// std::vector<std::vector<int>> cnVarIndices(numEqs, std::vector<int>());
// std::vector<std::vector<int>> vnEqIndices(n, std::vector<int>());
// // loop over all equations
// for (int eqIdx = 0; eqIdx < numEqs; ++eqIdx)
// {
// // loop over all variables
// for (int varIdx = 0; varIdx < n; ++varIdx)
// {
// int pcmIdx = eqIdx * n + varIdx;
// // if the entry in the pcm is 1, this check node involves this variable. set the index entry
// if (parityCheckMatrix.values[pcmIdx])
// {
// cnVarIndices[eqIdx].push_back(varIdx);
// vnEqIndices[varIdx].push_back(eqIdx);
// }
// }
// }
// // copy data into provided array containers
// auto index = 0;
// for (auto i = 0; i<cnVarIndices.size(); ++i)
// {
// for(auto j=0; j<cnVarIndices[0].size(); ++j)
// {
// checkNodeVarIndices.values[index] = cnVarIndices[i][j];
// ++index;
// }
// }
// index = 0;
// for (auto i = 0; i<vnEqIndices.size(); ++i)
// {
// for (auto j = 0; j<vnEqIndices[0].size(); ++j)
// {
// varNodeEqIndices.values[index] = vnEqIndices[i][j];
// ++index;
// }
// }
//}
//
//void QC_LDPC_CSS::WriteToFile(IntArray2d_h vec, const char* str)
//{
// std::ofstream file;
// file.open(str, std::ios::app);
// if (file.is_open()) {
// std::cout << "Writing to file " << str << std::endl;
// for (auto i = 0; i < vec.num_rows; ++i)
// {
// for (auto j = 0; j < vec.num_cols; ++j) {
// int index = i*vec.num_cols + j;
// auto v = vec.values[index];
// file << v << " ";
// }
// file << "\n";
// }
// file << "\n\n";
// file.close();
// }
// else
// {
// std::cout << "Failed to open file " << str << std::endl;
// }
//}
//
//void QC_LDPC_CSS::WriteToFile(cusp::array2d<float, cusp::host_memory, cusp::row_major> vec, const char* str)
//{
// std::ofstream file;
// file.open(str, std::ios::app);
// if (file.is_open()) {
// std::cout << "Writing to file " << str << std::endl;
// for (auto i = 0; i < vec.num_rows; ++i)
// {
// for (auto j = 0; j < vec.num_cols; ++j) {
// int index = i*vec.num_cols + j;
// auto v = vec.values[index];
// file << std::fixed << std::setprecision(3) << v << " ";
// }
// file << "\n";
// }
// file << "\n\n";
// file.close();
// }
// else
// {
// std::cout << "Failed to open file " << str << std::endl;
// }
//}
//
//void QC_LDPC_CSS::WriteToFile(std::vector<std::vector<float>> vec, const char* str)
//{
// std::ofstream file;
// file.open(str, std::ios::app);
// if (file.is_open()) {
// std::cout << "Writing to file " << str << std::endl;
// for (auto i = 0; i < vec.size(); ++i)
// {
// for (auto j = 0; j < vec[i].size(); ++j) {
// auto v = vec[i][j];
// file << std::fixed << std::setprecision(3) << v << " ";
// }
// file << "\n";
// //file << v << ",";
// }
// file << "\n\n";
// file.close();
// }else
// {
// std::cout << "Failed to open file " << str << std::endl;
// }
//}
//
//void QC_LDPC_CSS::WriteToFile(std::vector<int> vec, const char* str)
//{
// std::ofstream file;
// file.open(str, std::ios::app);
// if (file.is_open()) {
// std::cout << "Writing to file " << str << std::endl;
// for (auto i = 0; i < vec.size(); ++i)
// {
// auto v = vec[i];
// file << std::fixed << std::setprecision(3) << v << " ";
// }
// file << "\n";
// file.close();
// }
// else
// {
// std::cout << "Failed to open file " << str << std::endl;
// }
//}
//
///*
//Given a set of x errors and z errors, this will attempt to decode the errors
//and will return a success / failure code.
//See paper for algorithm:
//We will use a Belief-propogation decoding scheme.
//*/
//QC_LDPC_CSS::ErrorCode QC_LDPC_CSS::DecodeCUDA(std::vector<int> syndromeX, std::vector<int> syndromeZ, float errorProbability,
// std::vector<int> &xErrors, std::vector<int> &zErrors, int maxIterations)
//{
// cudaEvent_t start, stop;
// cudaEventCreate(&start);
// cudaEventCreate(&stop);
//// float varUpdateKernelTime = 0;
//// float eqUpdateKernelTime = 0;
//// std::chrono::microseconds memCopyTime(0);
// std::chrono::microseconds checkConvergenceTime(0);
// std::chrono::microseconds updateTime(0);
// std::chrono::microseconds initTime(0);
// std::chrono::microseconds decodeTime(0);
// std::chrono::microseconds completeTime(0);
//
// auto begin = std::chrono::high_resolution_clock::now();
//
// // We will first decode xErrors and then zErrors
// // An NxM parity check matrix H can be viewed as a bipartite graph with
// // N symbol nodes and M parity check nodes. Each symbol node is connected
// // to ds parity-check nodes, and each parity-check node is connected to dc
// // symbol nodes.
// float p = 2.0f / 3.0f * errorProbability; // a priori probability for x/z OR y error
// float high = 0.99f;
// float low = 0.01f;
//
// // clear var node and check node arrays, and set syndrome arrays
// for (int i = 0; i < _varNodesX.num_entries; ++i) _varNodesX.values[i] = 0;
// for (int i = 0; i < _varNodesZ.num_entries; ++i) _varNodesZ.values[i] = 0;
// int numVarsPerEq = _eqNodeVarIndicesX.num_cols;
// for (int eqIdx = 0; eqIdx<_numEqsX; ++eqIdx)
// {
// for (int j = 0; j<numVarsPerEq; ++j)
// {
// int idx = eqIdx * numVarsPerEq + j;
// int varIdx = _eqNodeVarIndicesX.values[idx];
// int varNodeIdx = varIdx * _numEqsX + eqIdx;
// _varNodesX.values[varNodeIdx] = p;
// }
// }
// for (int eqIdx = 0; eqIdx<_numEqsZ; ++eqIdx)
// {
// for (int j = 0; j<_eqNodeVarIndicesZ.num_cols; ++j)
// {
// int idx = eqIdx * numVarsPerEq + j;
// int varIdx = _eqNodeVarIndicesZ.values[idx];
// int varNodeIdx = varIdx * _numEqsX + eqIdx;
// _varNodesZ.values[varNodeIdx] = p;
// }
// }
// for (int i = 0; i < _eqNodesX.num_entries; ++i) _eqNodesX.values[i] = 0.0f;
// for (int i = 0; i < _eqNodesZ.num_entries; ++i) _eqNodesZ.values[i] = 0.0f;
//
// // copy host data to device
// thrust::copy(_varNodesX.values.begin(), _varNodesX.values.end(), _varNodesX_d.values.begin());
// thrust::copy(_varNodesZ.values.begin(), _varNodesZ.values.end(), _varNodesZ_d.values.begin());
// thrust::copy(_eqNodesX.values.begin(), _eqNodesX.values.end(), _eqNodesX_d.values.begin());
// thrust::copy(_eqNodesZ.values.begin(), _eqNodesZ.values.end(), _checkNodesZ_d.values.begin());
// thrust::copy(syndromeX.begin(), syndromeX.end(), _syndromeX_d.begin());
// thrust::copy(syndromeZ.begin(), syndromeZ.end(), _syndromeZ_d.begin());
//
//
// auto N = maxIterations; // maximum number of iterations
//// bool xConverge = false;
//// bool zConverge = false;
//
// //dim3 eqNodeGridDimX(_numEqsX); // number of blocks.
// //dim3 eqNodeBlockDimX(_numEqsX,_numVarsPerEqX); // number of threads per block
//
// //dim3 eqNodeGridDimZ(_numEqsZ);
// //dim3 eqNodeBlockDimZ(_numEqsZ,_numVarsPerEqZ);
//
// //dim3 varNodeGridDimX(_numVars);
// //dim3 varNodeBlockDimX(_numEqsPerVarX);
// //auto varNodeMemSizeX = _numEqsPerVarX * sizeof(float);
//
// //dim3 varNodeGridDimZ(_numVars);
// //dim3 varNodeBlockDimZ(_numEqsPerVarZ);
// //auto varNodeMemSizeZ = _numEqsPerVarX * sizeof(float);
//
// auto finish = std::chrono::high_resolution_clock::now();
// auto duration = std::chrono::duration_cast<std::chrono::microseconds>(finish - begin);
// initTime += duration;
//
// begin = std::chrono::high_resolution_clock::now();
// // launch a single warp of 32 threads.
// beliefPropogation_kernel << <1, 1 >> > (_eqNodesX_d_ptr, _varNodesX_d_ptr, _eqNodeVarIndicesX_d_ptr, _varNodeEqIndicesX_d_ptr,
// _syndromeX_d_ptr, p, _numVars, _numEqsX, _numVarsPerEqX, _numEqsPerVarX, N);
//
// // launch a single warp of 32 threads.
// beliefPropogation_kernel << <1, 1 >> > (_eqNodesZ_d_ptr, _varNodesZ_d_ptr, _eqNodeVarIndicesZ_d_ptr, _varNodeEqIndicesZ_d_ptr,
// _syndromeZ_d_ptr, p, _numVars, _numEqsZ, _numVarsPerEqZ, _numEqsPerVarZ, N);
//
// cudaDeviceSynchronize();
//
// finish = std::chrono::high_resolution_clock::now();
// duration = std::chrono::duration_cast<std::chrono::microseconds>(finish - begin);
// decodeTime += duration;
//
// begin = std::chrono::high_resolution_clock::now();
//
// thrust::copy(_varNodesX_d.values.begin(), _varNodesX_d.values.end(), _varNodesX.values.begin());
// thrust::copy(_varNodesZ_d.values.begin(), _varNodesZ_d.values.end(), _varNodesZ.values.begin());
//
// // accumulate the error estimates into a single vector
// std::vector<int> finalEstimatesX(_varNodesX.num_rows, 0);
// std::vector<int> finalEstimatesZ(_varNodesZ.num_rows, 0);
//
// // check for correct error decoding
// ErrorCode code = SUCCESS;
// // check convergence errors
// for (auto varIdx = 0; varIdx < _varNodesX.num_rows; ++varIdx) {
// for (auto eqIdx = 0; eqIdx < _varNodesX.num_cols; ++eqIdx) {
// int index = varIdx * _varNodesX.num_cols + eqIdx;
// if (_varNodesX.values[index] >= 0.5f) // best guess of error
// {
// finalEstimatesX[varIdx] = 1;
// break;
// }
// }
// }
// for (auto varIdx = 0; varIdx < _varNodesZ.num_rows; ++varIdx) {
// for (auto eqIdx = 0; eqIdx < _varNodesZ.num_cols; ++eqIdx) {
// int index = varIdx * _varNodesZ.num_cols + eqIdx;
// if (_varNodesZ.values[index] >= 0.5f) // best guess of error
// {
// finalEstimatesZ[varIdx] = 1;
// break;
// }
// }
// }
// // check for convergence failure
// if (!CheckConvergence(_varNodesX, high, low)) {
// code = code | CONVERGENCE_FAIL_X;
// }
// if (!CheckConvergence(_varNodesZ, high, low)) {
// code = code | CONVERGENCE_FAIL_Z;
// }
// // check syndrome errors
// auto xS = GetXSyndrome(finalEstimatesX);
// if (!std::equal(syndromeX.begin(), syndromeX.end(), xS.begin())) { code = code | SYNDROME_FAIL_X; }
//
// auto zS = GetZSyndrome(finalEstimatesZ);
// if (!std::equal(syndromeZ.begin(), syndromeZ.end(), zS.begin())) { code = code | SYNDROME_FAIL_Z; }
//
// xErrors = finalEstimatesX;
// zErrors = finalEstimatesZ;
// finish = std::chrono::high_resolution_clock::now();
// duration = std::chrono::duration_cast<std::chrono::microseconds>(finish - begin);
// completeTime += duration;
//
//
//// std::cout << "VarNode update kernel execution time: " << varUpdateKernelTime * 1000 << " micro-seconds." << std::endl;
//// std::cout << "EqNode update kernel execution time: " << eqUpdateKernelTime * 1000 << " micro-seconds." << std::endl;
//// std::cout << "MemCopyTime: " << memCopyTime.count() << " micro-seconds." << std::endl;
//// std::cout << "Check convergence time: " << checkConvergenceTime.count() << " micro-seconds." << std::endl;
// std::cout << "Init time: " << initTime.count() << " micro-seconds." << std::endl;
// std::cout << "Decode time: " << decodeTime.count() << " micro-seconds." << std::endl;
// std::cout << "Complete time: " << completeTime.count() << " micro-seconds." << std::endl;
// std::cout << "Check Convergence time: " << checkConvergenceTime.count() << " micro-seconds." << std::endl;
// std::cout << "Update time: " << updateTime.count() << " micro-seconds." << std::endl;
//
//
// return code;
//}
//
///*
// Given a set of x errors and z errors, this will attempt to decode the errors
// and will return a success / failure code.
// See paper for algorithm:
// We will use a Belief-propogation decoding scheme.
//*/
//QC_LDPC_CSS::ErrorCode QC_LDPC_CSS::DecodeCPU(std::vector<int> syndromeX, std::vector<int> syndromeZ, float errorProbability,
// std::vector<int> &xErrors, std::vector<int> &zErrors, int maxIterations)
//{
// // We will first decode xErrors and then zErrors
// // An NxM parity check matrix H can be viewed as a bipartite graph with
// // N symbol nodes and M parity check nodes. Each symbol node is connected
// // to ds parity-check nodes, and each parity-check node is connected to dc
// // symbol nodes.
// float p = 2.0f / 3.0f * errorProbability; // a priori probability for x/z OR y error
// float high = 0.99f;
// float low = 0.01f;
// // array of probability estimates to send to each check node. there are _numEqsX variables, and _numVars check nodes
// /* std::vector<std::vector<float>> varNodeEstimatesX(_numEqsX, std::vector<float>(_numVars, p));
// std::vector<std::vector<float>> varNodeEstimatesZ(_numEqsZ, std::vector<float>(_numVars, p));*/
//
// // each var node has a list of estimates from each check node.
// std::vector<std::vector<float>> varNodeEstimatesX(_numVars, std::vector<float>(_numEqsX, p));
// std::vector<std::vector<float>> varNodeEstimatesZ(_numVars, std::vector<float>(_numEqsZ, p));
//
// // each check node has a list of beliefs for the value of each var node.
// std::vector<std::vector<float>> checkNodeBeliefsX(_numEqsX, std::vector<float>(_numVars));
// std::vector<std::vector<float>> checkNodeBeliefsZ(_numEqsZ, std::vector<float>(_numVars));
//
// //WriteToFile(varNodeEstimatesX, "results/xEstimates.txt");
// //WriteToFile(checkNodeBeliefsX, "results/xBeliefs.txt");
//
// auto N = maxIterations; // maximum number of iterations
// bool xConverge = false;
// bool zConverge = false;
// for (auto n = 0; n < N; n++)
// {
// if (xConverge && zConverge) break;
// if(!xConverge)
// {
// EqNodeUpdate(varNodeEstimatesX, checkNodeBeliefsX, _hHC_vec, syndromeX);
// VarNodeUpdate(varNodeEstimatesX, checkNodeBeliefsX, _hHC_vec, p, n == N - 1);
// //WriteToFile(varNodeEstimatesX, "results/xEstimates.txt");
// //WriteToFile(checkNodeBeliefsX, "results/xBeliefs.txt");
// if (n % 10 == 0)
// {
// xConverge = CheckConvergence(varNodeEstimatesX, high, low);
// }
// }
//
// if (!zConverge)
// {
// EqNodeUpdate(varNodeEstimatesZ, checkNodeBeliefsZ, _hHD_vec, syndromeZ);
// VarNodeUpdate(varNodeEstimatesZ, checkNodeBeliefsZ, _hHD_vec, p, n == N - 1);
// if (n % 10 == 0)
// {
// zConverge = CheckConvergence(varNodeEstimatesZ, high, low);
// }
// }
//
//
// }
// // accumulate the error estimates into a single vector
// std::vector<int> finalEstimatesX(varNodeEstimatesX.size(), 0);
// std::vector<int> finalEstimatesZ(varNodeEstimatesZ.size(), 0);
//
// // check for correct error decoding
// ErrorCode code = SUCCESS;
// // check convergence errors
// for (auto i = 0; i < varNodeEstimatesX.size(); ++i) {
// for (auto j = 0; j < varNodeEstimatesX[i].size(); ++j) {
// if (varNodeEstimatesX[i][j] != 0.0f) {
// if(varNodeEstimatesX[i][j] > high) finalEstimatesX[i] = 1;
// else if (varNodeEstimatesX[i][j] < low) finalEstimatesX[i] = 0;
// else {
// finalEstimatesX[i] = -1;
// code = code | CONVERGENCE_FAIL_X;
// }
// break;
// }
// }
// }
// for (auto i = 0; i < varNodeEstimatesZ.size(); ++i) {
// for (auto j = 0; j < varNodeEstimatesZ[i].size(); ++j) {
// if (varNodeEstimatesZ[i][j] != 0.0f) {
// if (varNodeEstimatesZ[i][j] > high) finalEstimatesZ[i] = 1;
// else if (varNodeEstimatesZ[i][j] < low) finalEstimatesZ[i] = 0;
// else {
// finalEstimatesZ[i] = -1;
// code = code | CONVERGENCE_FAIL_Z;
// }
// break;
// }
// }
// }
// // check syndrome errors
// if (code == SUCCESS) {
// auto xS = GetXSyndrome(finalEstimatesX);
// if (!std::equal(syndromeX.begin(), syndromeX.end(), xS.begin())) { code = code | SYNDROME_FAIL_X; }
//
// auto zS = GetZSyndrome(finalEstimatesZ);
// if (!std::equal(syndromeZ.begin(), syndromeZ.end(), zS.begin())) { code = code | SYNDROME_FAIL_Z; }
// }
//
// xErrors = finalEstimatesX;
// zErrors = finalEstimatesZ;
//
// return code;
//}
//
//QC_LDPC_CSS::ErrorCode QC_LDPC_CSS::DecodeCPU2(std::vector<int> xSyndrome, std::vector<int> zSyndrome, float errorProbability, std::vector<int>& xErrors, std::vector<int>& zErrors, int maxIterations)
//{
// // We will first decode xErrors and then zErrors
// // An NxM parity check matrix H can be viewed as a bipartite graph with
// // N symbol nodes and M parity check nodes. Each symbol node is connected
// // to ds parity-check nodes, and each parity-check node is connected to dc
// // symbol nodes.
// float p = 2.0f / 3.0f * errorProbability; // a priori probability for x/z OR y error
// float high = 0.99f;
// float low = 0.01f;
//
// // clear var node and check node arrays, and set syndrome arrays
// for (int i = 0; i < _varNodesX.num_entries; ++i) _varNodesX.values[i] = 0;
// for (int i = 0; i < _varNodesZ.num_entries; ++i) _varNodesZ.values[i] = 0;
// int numVarsPerEq = _eqNodeVarIndicesX.num_cols;
// for(int eqIdx=0; eqIdx<_numEqsX; ++eqIdx)
// {
// for(int j=0; j<numVarsPerEq; ++j)
// {
// int idx = eqIdx * numVarsPerEq + j;
// int varIdx = _eqNodeVarIndicesX.values[idx];
// int varNodeIdx = varIdx * _numEqsX + eqIdx;
// _varNodesX.values[varNodeIdx] = p;
// }
// }
// for (int eqIdx = 0; eqIdx<_numEqsZ; ++eqIdx)
// {
// for (int j = 0; j<_eqNodeVarIndicesZ.num_cols; ++j)
// {
// int idx = eqIdx * numVarsPerEq + j;
// int varIdx = _eqNodeVarIndicesZ.values[idx];
// int varNodeIdx = varIdx * _numEqsX + eqIdx;
// _varNodesZ.values[varNodeIdx] = p;
// }
// }
// for (int i = 0; i < _eqNodesX.num_entries; ++i) _eqNodesX.values[i] = 0.0f;
// for (int i = 0; i < _eqNodesZ.num_entries; ++i) _eqNodesZ.values[i] = 0.0f;
// for (int i = 0; i < xSyndrome.size(); ++i) _syndromeX_h[i] = xSyndrome[i];
// for (int i = 0; i < zSyndrome.size(); ++i) _syndromeZ_h[i] = zSyndrome[i];
//
// auto N = maxIterations; // maximum number of iterations
// bool xConverge = false;
// bool zConverge = false;
// //WriteToFile(_varNodesX, "results/varX_CPU.txt");
// //WriteToFile(_eqNodesX, "results/eqX_CPU.txt");
// for (auto n = 0; n < N; n++)
// {
// if (xConverge && zConverge) break;
// if (!xConverge)
// {
// EqNodeUpdate(_eqNodesX,_varNodesX,_eqNodeVarIndicesX, _syndromeX_h);
// VarNodeUpdate(_eqNodesX, _varNodesX, _varNodeEqIndicesX ,p, n == N - 1);
// //WriteToFile(_varNodesX, "results/varX_CPU.txt");
// //WriteToFile(_eqNodesX, "results/eqX_CPU.txt");
// if (n % 10 == 0)
// {
// xConverge = CheckConvergence(_varNodesX, high, low);
// }
// }
//
// if (!zConverge)
// {
// EqNodeUpdate(_eqNodesZ, _varNodesZ, _eqNodeVarIndicesZ, _syndromeZ_h);
// VarNodeUpdate(_eqNodesZ, _varNodesZ, _varNodeEqIndicesZ , p, n == N - 1);
// if (n % 10 == 0)
// {
// zConverge = CheckConvergence(_varNodesZ, high, low);
// }
// }
// }
// // accumulate the error estimates into a single vector
// std::vector<int> finalEstimatesX(_varNodesX.num_rows, 0);
// std::vector<int> finalEstimatesZ(_varNodesZ.num_rows, 0);
//
// // check for correct error decoding
// ErrorCode code = SUCCESS;
// // check convergence errors
// for (auto varIdx = 0; varIdx < _varNodesX.num_rows; ++varIdx) {
// for (auto eqIdx = 0; eqIdx < _varNodesX.num_cols; ++eqIdx) {
// int index = varIdx * _varNodesX.num_cols + eqIdx;
// if(_varNodesX.values[index] >= 0.5f) // best guess of error
// {
// finalEstimatesX[varIdx] = 1;
// break;
// }
// }
// }
// for (auto varIdx = 0; varIdx < _varNodesZ.num_rows; ++varIdx) {
// for (auto eqIdx = 0; eqIdx < _varNodesZ.num_cols; ++eqIdx) {
// int index = varIdx * _varNodesZ.num_cols + eqIdx;
// if (_varNodesZ.values[index] >= 0.5f) // best guess of error
// {
// finalEstimatesZ[varIdx] = 1;
// break;
// }
// }
// }
// // check for convergence failure
// if (!CheckConvergence(_varNodesX, high, low)) {
// code = code | CONVERGENCE_FAIL_X;
//// WriteToFile(_varNodesX, "results/convXCPU.txt");
// }
// if (!CheckConvergence(_varNodesZ, high, low)) code = code | CONVERGENCE_FAIL_Z;
// // check syndrome errors
// auto xS = GetXSyndrome(finalEstimatesX);
// if (!std::equal(xSyndrome.begin(), xSyndrome.end(), xS.begin())) { code = code | SYNDROME_FAIL_X; }
//
// auto zS = GetZSyndrome(finalEstimatesZ);
// if (!std::equal(zSyndrome.begin(), zSyndrome.end(), zS.begin())) { code = code | SYNDROME_FAIL_Z; }
//
// xErrors = finalEstimatesX;
// zErrors = finalEstimatesZ;
//
// return code;
//}
//
//void QC_LDPC_CSS::EqNodeUpdate(FloatArray2d_h &eqNodes, FloatArray2d_h varNodes, IntArray2d_h eqNodeVarIndices, IntArray1d_h syndrome)
//{
// // For a check node interested in variables a,b,c,d to estimate the updated probability for variable a
// // syndrome = 0: even # of errors -> pa' = pb(1-pc)(1-pd) + pc(1-pb)(1-pd) + pd(1-pb)(1-pc) + pb*pc*pd
// // = 0.5 * (1 - (1-2pb)(1-2pc)(1-2pd))
// // syndrome = 1: odd # of errors -> pa' = (1-pb)(1-pc)(1-pd) + pb*pc*(1-pd) + pb*(1-pc)*pd + (1-pb)*pc*pd
// // = 0.5 * (1 + (1-2pb)(1-2pc)(1-2pd))
// int numEqs = eqNodes.num_rows;
// int numVarsPerEq = eqNodeVarIndices.num_cols;
// int n = varNodes.num_rows;
// for (auto eqIdx = 0; eqIdx < numEqs; ++eqIdx) // loop over check nodes (parity equations)
// {
// int firstVarIdx = eqIdx*numVarsPerEq;
// // loop over variables to be updated for this check node
// for (auto i = 0; i < numVarsPerEq; ++i)
// {
// int index = firstVarIdx + i; // 1d array index to look up the variable index
// int varIdx = eqNodeVarIndices.values[index]; // variable index under investigation for this eq
// float product = 1.0f; // reset product
// // loop over all other variables in the equation, accumulate (1-2p) terms
// for (auto k = 0; k < numVarsPerEq; ++k)
// {
// if (k == i) continue; // skip the variable being updated
// int otherIndex = firstVarIdx + k; // 1d array index to look up the variable index
// int otherVarIdx = eqNodeVarIndices.values[otherIndex];
//
// // the index holding the estimate beinng used for this eq
// int varNodesIndex = otherVarIdx * numEqs + eqIdx;
// float value = varNodes.values[varNodesIndex]; // belief value for this variable and this eq
// product *= (1.0f - 2.0f*value);
// }
// int cnIdx = eqIdx * n + varIdx; // index for value within the check node array to update
// if (syndrome[eqIdx]) {
// eqNodes.values[cnIdx] = 0.5 * (1.0f + product); // syndrome = 1 -> odd parity
// }
// else {
// eqNodes.values[cnIdx] = 0.5f * (1.0f - product); // syndrome = 0 -> even parity
// }
// }
// }
// // WriteToFile(eqNodeBeliefs, "results/CheckNodeBeliefs.txt");
//}
//
//void QC_LDPC_CSS::EqNodeUpdate(std::vector<std::vector<float>>& varNodeEstimates,
// std::vector<std::vector<float>>& eqNodeBeliefs,
// std::vector<std::vector<int>> parityCheckMatrix,
// std::vector<int> syndrome)
//{
// // For a check node interested in variables a,b,c,d to estimate the updated probability for variable a
// // syndrome = 0: even # of errors -> pa' = pb(1-pc)(1-pd) + pc(1-pb)(1-pd) + pd(1-pb)(1-pc) + pb*pc*pd
// // = 0.5 * (1 - (1-2pb)(1-2pc)(1-2pd))
// // syndrome = 1: odd # of errors -> pa' = (1-pb)(1-pc)(1-pd) + pb*pc*(1-pd) + pb*(1-pc)*pd + (1-pb)*pc*pd
// // = 0.5 * (1 + (1-2pb)(1-2pc)(1-2pd))
// int numEqs = eqNodeBeliefs.size();
// int n = varNodeEstimates.size();
//
// for (auto eqIdx = 0; eqIdx < numEqs; ++eqIdx) // loop over check nodes (parity equations)
// {
// for (auto varIdx = 0; varIdx < n; ++varIdx) // loop over variables to be updated for this check node
// {
// eqNodeBeliefs[eqIdx][varIdx] = 0.0f; // not necessary, makes file output nicer.
// if (!parityCheckMatrix[eqIdx][varIdx]) continue; // if the parity check matrix is 0, the eq doesn't involve this var
// float product = 1.0f; // reset product
// for (auto otherVarIdx = 0; otherVarIdx < n; ++otherVarIdx) // loop over all other variables, accumulate (1-2p) terms
// {
// if (!parityCheckMatrix[eqIdx][otherVarIdx]) continue; // skip zeros
// if (otherVarIdx == varIdx) continue; // skip the variable being updated
// product *= (1.0f - 2.0f*varNodeEstimates[otherVarIdx][eqIdx]);
// }
// if(syndrome[eqIdx]) eqNodeBeliefs[eqIdx][varIdx] = 0.5 * (1.0f + product); // syndrome = 1 -> odd parity
// else eqNodeBeliefs[eqIdx][varIdx] = 0.5f * (1.0f - product); // syndrome = 0 -> even parity
// }
// }
//// WriteToFile(eqNodeBeliefs, "results/CheckNodeBeliefs.txt");
//}
//
//void QC_LDPC_CSS::VarNodeUpdate(FloatArray2d_h eqNodes, FloatArray2d_h& varNodes, IntArray2d_h varNodeEqIndices, float errorProbability, bool last)
//{
// // For a variable node connected to check nodes 1,2,3,4 use the following formula to send an estimate to var node 1
// // p1' = K*pch*p2*p3*p4 (pch is the channel error probability. ignore the estimate received from check node 1 unless last)
// // where K = 1/[(1-pch)(1-p2)(1-p3)(1-p4)... + pch*p2*p3*p4...]
// int numEqs = eqNodes.num_rows;
// int n = varNodes.num_rows;
// int numEqsPerVar = varNodeEqIndices.num_cols;
//
// for (auto varIdx = 0; varIdx < n; ++varIdx) // loop over all variables
// {
// int firstVarNode = varIdx * numEqs; // start of entries in VarNodes array for this variable
// int firstEqIndices = varIdx * numEqsPerVar; // starting point for first equation in the index list for this var.
// for (auto j = 0; j < numEqsPerVar; ++j) // loop over all equations for this variable
// {
// // find the index of the equation estimate being updated
// int index = firstEqIndices + j;
// int eqIdx = varNodeEqIndices.values[index];
//
// // 1d index for var nodes entry being updated
// int varNodesIdx = firstVarNode + eqIdx;
//
// // start with a priori channel error probability
// float prodP = errorProbability;
// float prodOneMinusP = 1.0f - errorProbability;
//
// // calculate the updated probability for this check node based on belief estimates of all OTHER check nodes
// for (auto k = 0; k < numEqsPerVar; ++k)
// {
// int index2 = firstEqIndices + k; // 1d index for entry in the index array
// int otherEQIdx = varNodeEqIndices.values[index2];
//
// if (otherEQIdx == eqIdx && !last) continue;
// // 1d index for check nodes belief being used
// int checkNodesIdx = otherEQIdx * n + varIdx;
// float p = eqNodes.values[checkNodesIdx];
//
// prodOneMinusP *= (1.0f - p);
// prodP *= p;
// }
// float value = prodP / (prodOneMinusP + prodP);
// varNodes.values[varNodesIdx] = value;
// }
// }
//}
//
//void QC_LDPC_CSS::VarNodeUpdate(std::vector<std::vector<float>>& varNodeEstimates,
// std::vector<std::vector<float>>& eqNodeBeliefs,
// std::vector<std::vector<int>> parityCheckMatrix,
// float errorProbability, bool last)
//{
// // For a variable node connected to check nodes 1,2,3,4 use the following formula to send an estimated probability to node 1
// // p1' = K*pch*p2*p3*p4 (pch is the channel error probability. ignore the estimate received from check node 1)
// // where K = 1/[(1-p1)(1-p2)(1-p3)... + p1*p2*p3...]
// int numEqs = eqNodeBeliefs.size();
// int n = varNodeEstimates.size();
// for (auto varIdx = 0; varIdx < n; ++varIdx) // loop over all variables
// {
// for (auto eqIdx = 0; eqIdx < numEqs; ++eqIdx) // loop over all equations
// {
// varNodeEstimates[varIdx][eqIdx] = 0.0f; // not necessary, makes output nicer
// if (!parityCheckMatrix[eqIdx][varIdx]) continue; // skip equations that this variable isn't involved in
//
// float prodP = errorProbability; // start with a priori channel error probability
// float prodOneMinusP = 1.0f - errorProbability;
// // calculate the updated probability for this check node based on belief estimates of all OTHER check nodes
// for (auto otherEqIdx = 0; otherEqIdx < numEqs; ++otherEqIdx) // loop over all equation estimates
// {
// if (otherEqIdx == eqIdx && !last) continue; // skip the belief estimate from j to update the probability sent to j
// if (!parityCheckMatrix[otherEqIdx][varIdx]) continue; // skip equations that this variable isn't involved in
// float p = eqNodeBeliefs[otherEqIdx][varIdx];
//
// prodOneMinusP *= (1.0f - p);
// prodP *= p;
// }
// float value = prodP / (prodOneMinusP + prodP);
//// std::cout << "Setting var: " << i << " eq: " << j << " value: " << value << std::endl;
// varNodeEstimates[varIdx][eqIdx] = value;
// }
// }
//// WriteToFile(varNodeEstimates, "results/VariableNodeEstimates.txt");
//}
//
//std::vector<int> QC_LDPC_CSS::GetXSyndrome(std::vector<int> xErrors)
//{
// std::vector<int> syndrome(_numEqsX);
// for (int row = 0; row < _numEqsX; ++row)
// {
// auto x = 0;
// for (int col = 0; col < _numVars; ++col)
// {
// x += _hHC_vec[row][col] * xErrors[col];
// }
// syndrome[row] = x % 2;
// }
// return syndrome;
//}
//
//std::vector<int> QC_LDPC_CSS::GetZSyndrome(std::vector<int> zErrors)
//{
// std::vector<int> syndrome(_numEqsX);
// for (int row = 0; row < _numEqsX; ++row)
// {
// auto x = 0;
// for (int col = 0; col < _numVars; ++col)
// {
// x += _hHD_vec[row][col] * zErrors[col];
// }
// syndrome[row] = x % 2;
// }
// return syndrome;
//}
//
//void QC_LDPC_CSS::InitVarNodesArray(FloatArray2d_h& varNodes_h, FloatArray2d_d& varNodes_d, const IntArray2d_h& parityCheckMatrix, const int NUM_CONCURRENT_THREADS, float errorProbability)
//{
// int n = parityCheckMatrix.num_cols;
// int numEqs = parityCheckMatrix.num_rows;
// int size = n*numEqs;
// for (int varIdx = 0; varIdx < n; ++varIdx)
// {
// for (int eqIdx = 0; eqIdx < numEqs; ++eqIdx)
// {
// int pcmIdx = eqIdx * n + varIdx;
// for (int n = 0; n<NUM_CONCURRENT_THREADS; ++n)
// {
// if (!parityCheckMatrix.values[pcmIdx]) continue;
// int varNodesIdx = n*size + varIdx*numEqs + eqIdx;
// varNodes_h.values[varNodesIdx] = errorProbability;
// }
// }
// }
// thrust::copy(varNodes_h.values.begin(), varNodes_h.values.end(), varNodes_d.values.begin());
//}
//
//void QC_LDPC_CSS::SetDeviceSyndrome(const std::vector<int>& syndrome_h, const IntArray2d_d& syndrome_d)
//{
// for(int i=0; i<syndrome_h.size(); ++i)
// {
// syndrome_d.values[i] = syndrome_h[i];
// }
//}
//
//QC_LDPC_CSS::Statistics QC_LDPC_CSS::GetStatistics(int errorWeight, int numErrors, float errorProbability, int maxIterations)
//{
// //float p = 2 / 3 * errorProbability;
// //const int NUM_CONCURRENT_THREADS = 32;
// //std::vector<int> xErrors(_numVars, 0);
// //std::vector<int> zErrors(_numVars, 0);
//
// //// set up host and device memory for calculations
// //IntArray2d_h xSyndromeArray_h(NUM_CONCURRENT_THREADS,_numEqsX);
// //IntArray2d_d xSyndromeArray_d(NUM_CONCURRENT_THREADS,_numEqsX);
// //int* xSyndrome_d_ptrs[NUM_CONCURRENT_THREADS];
// //for (int i = 0; i < NUM_CONCURRENT_THREADS; ++i)
// // xSyndrome_d_ptrs[i] = thrust::raw_pointer_cast(&xSyndromeArray_d.values[i*_numEqsX]);
//
// //IntArray2d_h zSyndromeArray_h(NUM_CONCURRENT_THREADS,_numEqsZ);
// //IntArray2d_d zSyndromeArray_d(NUM_CONCURRENT_THREADS,_numEqsZ);
// //int* zSyndrome_d_ptrs[NUM_CONCURRENT_THREADS];
// //for (int i = 0; i < NUM_CONCURRENT_THREADS; ++i)
// // xSyndrome_d_ptrs[i] = thrust::raw_pointer_cast(&xSyndromeArray_d.values[i*_numEqsX]);
//
// //int size = _numVars * _numEqsX;
// //FloatArray2d_h varNodesX_h(NUM_CONCURRENT_THREADS, size,0.0f);
// //FloatArray2d_d varNodesX_d(NUM_CONCURRENT_THREADS, size, 0.0f);
// //float* varNodesX_d_ptrs[NUM_CONCURRENT_THREADS];
// //for (int i = 0; i < NUM_CONCURRENT_THREADS; ++i)
// // varNodesX_d_ptrs[i] = thrust::raw_pointer_cast(&varNodesX_d.values[i*size]);
//
// //FloatArray2d_h eqNodesX_h(NUM_CONCURRENT_THREADS, size, 0.0f);
// //FloatArray2d_d eqNodesX_d(NUM_CONCURRENT_THREADS, size, 0.0f);
// //float* eqNodesX_d_ptrs[NUM_CONCURRENT_THREADS];
// //for (int i = 0; i < NUM_CONCURRENT_THREADS; ++i)
// // eqNodesX_d_ptrs[i] = thrust::raw_pointer_cast(&eqNodesX_d.values[i*size]);
//
// //size = _numVars * _numEqsZ;
// //FloatArray2d_h varNodesZ_h(NUM_CONCURRENT_THREADS, size,0.0f);
// //FloatArray2d_d varNodesZ_d(NUM_CONCURRENT_THREADS, size, 0.0f);
// //float* varNodesZ_d_ptrs[NUM_CONCURRENT_THREADS];
// //for (int i = 0; i < NUM_CONCURRENT_THREADS; ++i)
// // varNodesZ_d_ptrs[i] = thrust::raw_pointer_cast(&varNodesZ_d.values[i*size]);
// //
// //FloatArray2d_h eqNodesZ_h(NUM_CONCURRENT_THREADS, size, 0.0f);
// //FloatArray2d_d eqNodesZ_d(NUM_CONCURRENT_THREADS, size, 0.0f);
// //float* eqNodesZ_d_ptrs[NUM_CONCURRENT_THREADS];
// //for (int i = 0; i < NUM_CONCURRENT_THREADS; ++i)
// // eqNodesZ_d_ptrs[i] = thrust::raw_pointer_cast(&eqNodesZ_d.values[i*size]);
//
// //for (int i = 0; i < numErrors; ++i) {
// // InitVarNodesArray(varNodesX_h, varNodesX_d, _pcmX_h, NUM_CONCURRENT_THREADS, p);
// // InitVarNodesArray(varNodesZ_h, varNodesZ_d, _pcmZ_h, NUM_CONCURRENT_THREADS, p);
// // for (int j = 0; j < NUM_CONCURRENT_THREADS; ++j) {
// // _errorGenerator.GenerateError(xErrors, zErrors, errorWeight);
// // SetDeviceSyndrome(GetXSyndrome(xErrors), xSyndromeArray_d);
// // SetDeviceSyndrome(GetZSyndrome(zErrors), zSyndromeArray_d);
// // }
// //}
// return Statistics();
//}
//
//bool QC_LDPC_CSS::CheckConvergence(const std::vector<std::vector<float>>& estimates, float high, float low)
//{
// // loop over all estimates
// for (auto i = 0; i < estimates.size(); ++i) {
// for (auto j = 0; j < estimates[i].size(); ++j) {
// if (estimates[i][j] != 0.0f) {
// // if any estimate is between the bounds we have failed to converge
// if (estimates[i][j] > low && estimates[i][j] < high) return false;
// }
// }
// }
// return true;
//}
//
//bool QC_LDPC_CSS::CheckConvergence(const cusp::array2d<float,cusp::host_memory,cusp::row_major>& estimates, float high, float low)
//{
// // loop over all estimates
// for (auto i = 0; i < estimates.num_rows; ++i) {
// for (auto j = 0; j < estimates.num_cols; ++j) {
// int index = i * estimates.num_cols + j;
// if (estimates.values[index] != 0.0f) {
// // if any estimate is between the bounds we have failed to converge
// if (estimates.values[index] > low && estimates.values[index] < high) return false;
// }
// }
// }
// return true;
//} |
14,027 | #include "scheduler.cuh"
// FIFO scheduling pass: drain all newly-arrived jobs into the FIFO queue,
// then hand the job at the head of the queue to each idle processor.
// Parameters:
//   time             - current simulation tick (currently unused here)
//   processors       - the processor pool
//   jobs             - incoming jobs to enqueue; drained by this call
//   no_of_processors - number of entries in `processors`
// BUG FIX: the original loop tested `i < jobs.size()` while `jobs.pop()`
// shrank the list on every iteration, so only roughly half of the pending
// jobs were ever transferred. Capture the count once before looping.
__device__ void sched::FIFO::schedule(int time, List<Processor *> processors, List<job*> jobs, int no_of_processors)
{
const int pending = jobs.size();
for (int i = 0; i < pending; i++)
{
_queue.push(jobs.pop());
}
// Dispatch: one queued job per idle processor, in processor order.
for (int i = 0; i < no_of_processors; i++)
{
if (processors[i]->is_idle() && !_queue.isEmpty())
{
job *jb = _queue.pop();
processors[i]->load(jb);
}
}
}
// Intentionally empty: the scheduling step is driven externally via
// schedule(); the commented call below is kept as a reminder of the
// intended hook point.
__device__ void sched::FIFO::run()
{
//schedule();
}
14,028 | #include "includes.h"
// Reorganizes a (batch, c, h, w) tensor between channel and spatial
// dimensions by a factor of `stride` (the darknet-style "reorg" layer),
// gathering one output element per thread: out[in_index] = x[out_index].
// Launch: a 1D or 2D grid of 1D blocks covering N = batch*c*h*w threads;
// excess threads in the tail exit via the bounds guard.
// NOTE(review): the `forward` parameter is unused — both directions fall
// through to the same gather; confirm callers never expect a distinct
// forward/backward path.
__global__ void reorg_kernel(int N, float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
{
// flat global thread index (2D grid of 1D blocks), guarded against the tail
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= N) return;
int in_index = i;
// decompose the flat index into (b, in_c, in_h, in_w), w varying fastest
int in_w = i%w;
i = i / w;
int in_h = i%h;
i = i / h;
int in_c = i%c;
i = i / c;
int b = i%batch;
// map channel index to a (smaller channel, spatial offset) pair
int out_c = c / (stride*stride);
int c2 = in_c % out_c;
int offset = in_c / out_c;
// scatter the offset into the stride x stride spatial neighborhood
int w2 = in_w*stride + offset % stride;
int h2 = in_h*stride + offset / stride;
// source index in the (batch, out_c, h*stride, w*stride) layout
int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b));
out[in_index] = x[out_index];
}
14,029 | #include <stdio.h>
#include <cuda_runtime.h>
#include <math.h>
static double a = 1.0E-10;
void FillMatrix(double *matrixA, double *matrixB, int size);
// Computes C = A * B for square N x N row-major matrices, one thread per
// output element. Launch with at least N*N threads total; surplus threads
// in the grid tail exit through the bounds guard.
__global__ void Multiply(double* A, double* B, double* C, int N)
{
    // flat global thread index -> one cell of C
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= N * N)
        return;                       // grid-tail guard
    int row = tid / N;                // output row of C
    int col = tid % N;                // output column of C
    // dot product of row `row` of A with column `col` of B
    double acc = 0.0;
    for (int k = 0; k < N; k++)
        acc += A[row * N + k] * B[k * N + col];
    C[row * N + col] = acc;
}
// Serially accumulates all N*N entries of the device matrix C into *result.
// Contract: must be launched as AddMatrix<<<1, 1>>> (as main() does) — the
// read-modify-write of *result is unsynchronized, so any multi-thread launch
// would race. *result is NOT zeroed here; the caller initializes it first.
// The traversal order C[i + N*j] only affects floating-point rounding, not
// which elements are summed.
__global__ void AddMatrix(double* C, int N, double* result)
{
long i;
long j;
for(i = 0; i < N; i++)
for(j = 0; j < N; j++)
*result += C[i + N * j];
}
// Driver: fills two N x N matrices with the file-scope constant `a`,
// multiplies them on the GPU, sums every entry of the product with a serial
// kernel, and prints the percent error against the closed-form value
// a^2 * N^3. N is taken from argv[1].
// NOTE(review): no CUDA API return codes are checked anywhere in this
// function — allocation or launch failures will surface as a wrong result.
int main(int argc, char *argv[])
{
//Variables
int N; // Side length of the square matrices.
size_t size; // Total size in bytes of one matrix.
size_t sizeDouble; // Size in bytes of the double type.
double* h_matrixA; // Matrix A in host memory.
double* h_matrixB; // Matrix B in host memory.
double* h_matrixC; // Matrix C (result) in host memory.
double* d_matrixA; // Matrix A in GPU memory.
double* d_matrixB; // Matrix B in GPU memory.
double* d_matrixC; // Matrix C (result) in GPU memory.
double* h_result; // Sum of the matrix-product entries, on the host.
double* d_result; // Sum of the matrix-product entries, on the GPU.
int Tam; // Number of data elements handled.
int threads; // Threads per block.
int blocks; // Number of blocks needed to process the data.
double estimation;
double error;
// Check that the matrix-size argument was supplied
if (argc < 2)
{
printf("Falta el argumento del tamaño\n");
return -1;
}
// Parse the first argument as the matrix side length
sscanf(argv[1], "%d", &N);
// Total in-memory size of one matrix
size = N * sizeof(double) * N;
// Size of the double data type
sizeDouble = sizeof(double);
// Query the device thread limit and compute the block count
Tam = N * N;
cudaDeviceGetAttribute(&threads, cudaDevAttrMaxThreadsPerBlock, 0);
blocks = Tam / threads;
if(Tam % threads > 0) //If elements are left over, add one more block
blocks++;
//Host memory allocations
h_matrixA = (double*)malloc(size);
h_matrixB = (double*)malloc(size);
h_matrixC = (double*)malloc(size);
h_result = (double*)malloc(sizeDouble);
//GPU memory allocations
cudaMalloc(&d_matrixA, size);
cudaMalloc(&d_matrixB, size);
cudaMalloc(&d_matrixC, size);
cudaMalloc(&d_result, sizeDouble);
// Fill matrices h_matrixA and h_matrixB
FillMatrix(h_matrixA, h_matrixB, N);
// Copy the arrays from host memory to GPU memory
cudaMemcpy(d_matrixA, h_matrixA, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_matrixB, h_matrixB, size, cudaMemcpyHostToDevice);
// Launch the matrix multiplication.
Multiply<<<blocks, threads >>>(d_matrixA, d_matrixB, d_matrixC, N);
// Initialize the accumulator to 0.0 and copy it to GPU memory
*h_result = 0.0;
cudaMemcpy(d_result, h_result, sizeDouble, cudaMemcpyHostToDevice);
// Sum the entries of the matrix product (serial kernel; see AddMatrix)
AddMatrix<<<1, 1>>>(d_matrixC, N, d_result);
//Copy the sum of the matrix entries back to host memory (blocking copy
//also synchronizes with the preceding default-stream kernels)
cudaMemcpy(h_result, d_result, sizeDouble, cudaMemcpyDeviceToHost);
// Closed-form estimate via the formula a^2 * N^3.
estimation = pow(N, 3) * pow(a, 2);
// Compute the % error.
error = fabs(*h_result - estimation) / estimation * 100.0;
// Print the % error.
printf("Error %.15le N = %d\n", error, N);
// Free GPU memory (the original comments had host/device labels swapped)
cudaFree(d_matrixA);
cudaFree(d_matrixB);
cudaFree(d_matrixC);
cudaFree(d_result);
// Free host memory
free(h_matrixA);
free(h_matrixB);
free(h_matrixC);
free(h_result);
}
// Fill both N x N matrices with the file-scope constant `a`, so the element
// sum of their product has the closed form a^2 * N^3.
void FillMatrix(double *matrixA, double *matrixB, int N)
{
    const int total = N * N;
    for (int idx = 0; idx < total; idx++)
    {
        matrixA[idx] = a;
        matrixB[idx] = a;
    }
}
14,030 | #include <cuda.h>
#include "kernels.cu"
#include <stdio.h>
/*
 * Host wrapper for one dense forward step: copies weight (rows x columns),
 * input activations a (columns floats) and bias (rows floats) to the GPU,
 * runs the two-stage forward kernels, and copies the rows output activations
 * into a_new. Returns 0 on success, or -1..-4 identifying which device
 * allocation failed. Fix: earlier allocations are now freed on every
 * failure path instead of leaking.
 * NOTE(review): the fixed 8x8 grid of 32x32 blocks per output row matches
 * res1_D's 64 partials per row -- confirm against kernels.cu.
 */
int forward_step_wrap(float *weight, float *bias, float *a, float *a_new, unsigned int columns, unsigned int rows) {
    float *weight_D, *bias_D, *a_D, *res1_D;
    if (cudaMalloc((void **)&weight_D, columns*rows*sizeof(float)) != cudaSuccess) {
        return -1;
    }
    if (cudaMalloc((void **)&bias_D, rows*sizeof(float)) != cudaSuccess) {
        cudaFree(weight_D);   /* do not leak earlier allocations on failure */
        return -2;
    }
    /* a_D doubles as the input (columns floats) and output (rows floats),
     * so it is sized for the larger of the two. */
    if (cudaMalloc((void **)&a_D, ((columns>rows)*columns + (rows>=columns)*rows)*sizeof(float)) != cudaSuccess) {
        cudaFree(weight_D);
        cudaFree(bias_D);
        return -3;
    }
    if (cudaMalloc((void **)&res1_D, 64*rows*sizeof(float)) != cudaSuccess) {
        cudaFree(weight_D);
        cudaFree(bias_D);
        cudaFree(a_D);
        return -4;
    }
    dim3 block(32, 32, 1);
    dim3 grid(8, 8, rows);
    cudaMemcpy(weight_D, weight, columns*rows*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(a_D, a, columns*sizeof(float), cudaMemcpyHostToDevice);
    forward_step1<<<grid, block>>>(weight_D, a_D, res1_D, columns);
    cudaMemcpy(bias_D, bias, rows*sizeof(float), cudaMemcpyHostToDevice);
    /* Second stage reduces the 64 partials per row and applies the bias,
     * writing the new activations back into a_D. */
    block.x = 8;
    block.y = 8;
    grid.x = 1;
    grid.y = 1;
    forward_step2<<<grid, block>>>(res1_D, bias_D, a_D);
    cudaMemcpy(a_new, a_D, rows*sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(weight_D);
    cudaFree(bias_D);
    cudaFree(a_D);
    cudaFree(res1_D);
    return 0;
}
/*
 * Compute the output-layer error deltaL from activations aL and targets y
 * (one thread per output via grid.z). Returns 0 on success or -1..-3 for
 * the failed device allocation. Fix: earlier allocations are freed on
 * failure paths instead of leaking.
 */
int output_error_wrap(float *aL, float *y, float *deltaL, unsigned int output_size) {
    float *aL_D, *y_D, *deltaL_D;
    if (cudaMalloc((void **)&aL_D, output_size*sizeof(float)) != cudaSuccess) {
        return -1;
    }
    if (cudaMalloc((void **)&y_D, output_size*sizeof(float)) != cudaSuccess) {
        cudaFree(aL_D);
        return -2;
    }
    if (cudaMalloc((void **)&deltaL_D, output_size*sizeof(float)) != cudaSuccess) {
        cudaFree(aL_D);
        cudaFree(y_D);
        return -3;
    }
    dim3 block(1, 1, 1);
    dim3 grid(1, 1, output_size);
    cudaMemcpy(aL_D, aL, output_size*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(y_D, y, output_size*sizeof(float), cudaMemcpyHostToDevice);
    output_error<<<grid, block>>>(aL_D, y_D, deltaL_D);
    cudaMemcpy(deltaL, deltaL_D, output_size*sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(aL_D);
    cudaFree(y_D);
    cudaFree(deltaL_D);
    return 0;
}
/*
 * Back-propagate the error one layer: from the downstream delta (rows
 * floats) through weight (rows x columns) and the layer activations a
 * (columns floats), producing delta_new (columns floats). Returns 0 on
 * success or -1..-4 for the failed device allocation. Fix: earlier
 * allocations are freed on failure paths instead of leaking.
 */
int backward_step_wrap(float *weight, float *a, float *delta, float *delta_new, unsigned int columns, unsigned int rows) {
    float *weight_D, *a_D, *delta_D, *res1_D;
    if (cudaMalloc((void **)&weight_D, columns*rows*sizeof(float)) != cudaSuccess) {
        return -1;
    }
    if (cudaMalloc((void **)&a_D, columns*sizeof(float)) != cudaSuccess) {
        cudaFree(weight_D);   /* do not leak earlier allocations on failure */
        return -2;
    }
    /* delta_D holds the incoming delta (rows floats) and the outgoing one
     * (columns floats), so it is sized for the larger of the two. */
    if (cudaMalloc((void **)&delta_D, ((columns>rows)*columns + (rows>=columns)*rows)*sizeof(float)) != cudaSuccess) {
        cudaFree(weight_D);
        cudaFree(a_D);
        return -3;
    }
    if (cudaMalloc((void **)&res1_D, 64*columns*sizeof(float)) != cudaSuccess) {
        cudaFree(weight_D);
        cudaFree(a_D);
        cudaFree(delta_D);
        return -4;
    }
    dim3 block(32, 32, 1);
    dim3 grid(8, 8, columns);
    cudaMemcpy(weight_D, weight, columns*rows*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(delta_D, delta, rows*sizeof(float), cudaMemcpyHostToDevice);
    backward_step1<<<grid, block>>>(weight_D, delta_D, res1_D, columns, rows);
    cudaMemcpy(a_D, a, columns*sizeof(float), cudaMemcpyHostToDevice);
    /* Second stage reduces the partials and applies the activation term,
     * writing the upstream delta back into delta_D. */
    block.x = 8;
    block.y = 8;
    grid.x = 1;
    grid.y = 1;
    backward_step2<<<grid, block>>>(res1_D, a_D, delta_D);
    cudaMemcpy(delta_new, delta_D, columns*sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(weight_D);
    cudaFree(a_D);
    cudaFree(delta_D);
    cudaFree(res1_D);
    return 0;
}
//changed needs to be tested
/*
 * Sum per-sample weight gradients across `samples` (size values each) with a
 * two-pass 1024-way reduction; result1 receives size*gridx floats (the final
 * sums live in the first pass-2 outputs). Returns 0 on success or -1/-2 for
 * the failed device allocation. Fix: weightGs_D is freed when the second
 * allocation fails instead of leaking.
 * NOTE(review): the second launch reads and writes result_D in place --
 * confirm sum_of_1024's access pattern tolerates this aliasing.
 */
int sum_weights_wrap_test(float *weightGs, float *result1, unsigned int size, unsigned int samples) {
    float *weightGs_D, *result_D;
    unsigned int gridx = samples/1024+1;   /* blocks of 1024 samples, rounded up */
    if (cudaMalloc((void **)&weightGs_D, size*samples*sizeof(float)) != cudaSuccess) {
        return -1;
    }
    if (cudaMalloc((void **)&result_D, size*gridx*sizeof(float)) != cudaSuccess) {
        cudaFree(weightGs_D);   /* do not leak the first allocation */
        return -2;
    }
    dim3 block(32, 32, 1);
    dim3 grid(gridx, size, 1);
    cudaMemcpy(weightGs_D, weightGs, size*samples*sizeof(float), cudaMemcpyHostToDevice);
    sum_of_1024<<<grid, block>>>(weightGs_D, result_D, size, samples);
    /* Second pass folds the gridx partial sums per weight into one. */
    grid.x = 1;
    sum_of_1024<<<grid, block>>>(result_D, result_D, size, gridx);
    cudaMemcpy(result1, result_D, size*gridx*sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(weightGs_D);
    cudaFree(result_D);
    return 0;
}
/*
 * Apply one gradient-descent update in place to a weight matrix or bias
 * vector: w_or_b -= heta/samples * wG_or_bG element-wise over columns*rows
 * values (exact update rule lives in grad_desc in kernels.cu -- confirm).
 * Returns 0 on success or -1/-2 for the failed device allocation.
 * Fix: w_or_b_D is freed when the second allocation fails instead of leaking.
 */
int gradient_descent_wrap(float *w_or_b, float *wG_or_bG, unsigned int columns, unsigned int rows, unsigned int samples, float heta) {
    float *w_or_b_D, *wG_or_bG_D;
    unsigned int size = columns*rows;
    if (cudaMalloc((void **)&w_or_b_D, size*sizeof(float)) != cudaSuccess) {
        return -1;
    }
    if (cudaMalloc((void **)&wG_or_bG_D, size*sizeof(float)) != cudaSuccess) {
        cudaFree(w_or_b_D);   /* do not leak the first allocation */
        return -2;
    }
    dim3 block(32, 32, 1);
    dim3 grid(columns/32+1, rows/32+1, 1);
    cudaMemcpy(w_or_b_D, w_or_b, size*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(wG_or_bG_D, wG_or_bG, size*sizeof(float), cudaMemcpyHostToDevice);
    grad_desc<<<grid, block>>>(w_or_b_D, wG_or_bG_D, size, samples, heta);
    cudaMemcpy(w_or_b, w_or_b_D, size*sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(w_or_b_D);
    cudaFree(wG_or_bG_D);
    return 0;
}
/*
 * Compute the outer-product weight gradient weightG (columns x rows) from
 * activations a (columns floats) and deltas delta (rows floats). Returns 0
 * on success or -1..-3 for the failed device allocation. Fix: earlier
 * allocations are freed on failure paths instead of leaking.
 */
int weight_gradient_wrap(float *a, float *delta, float *weightG, /*size of a*/unsigned int columns, /*size of delta*/unsigned int rows) {
    float *a_D, *delta_D, *weightG_D;
    if (cudaMalloc((void **)&a_D, columns*sizeof(float)) != cudaSuccess) {
        return -1;
    }
    if (cudaMalloc((void **)&delta_D, rows*sizeof(float)) != cudaSuccess) {
        cudaFree(a_D);   /* do not leak earlier allocations on failure */
        return -2;
    }
    if (cudaMalloc((void **)&weightG_D, columns*rows*sizeof(float)) != cudaSuccess) {
        cudaFree(a_D);
        cudaFree(delta_D);
        return -3;
    }
    dim3 block(32, 32, 1);
    dim3 grid(columns/32+1, rows/32+1, 1);
    cudaMemcpy(a_D, a, columns*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(delta_D, delta, rows*sizeof(float), cudaMemcpyHostToDevice);
    weight_gradient<<<grid, block>>>(a_D, delta_D, weightG_D, columns, rows);
    /* Surface launch-configuration errors; execution errors would appear at
     * the synchronizing cudaMemcpy below. */
    cudaError error = cudaGetLastError();
    if (error != cudaSuccess) {
        printf("Error: %s", cudaGetErrorString(error));
    }
    cudaMemcpy(weightG, weightG_D, columns*rows*sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(a_D);
    cudaFree(delta_D);
    cudaFree(weightG_D);
    return 0;
}
14,031 | #include<iostream>
// Report the currently selected CUDA device, then ask the runtime for the
// device closest to compute capability 1.3 and make it current.
int main(void) {
    int dev;
    cudaDeviceProp prop;

    cudaGetDevice( &dev );
    std::cout << "ID of current CUDA device: " << dev << std::endl;

    // Zero the whole descriptor so only the fields we care about are set,
    // then request a match on compute capability 1.3.
    memset( &prop, 0, sizeof( cudaDeviceProp ) );
    prop.major = 1;
    prop.minor = 3;
    cudaChooseDevice( &dev, &prop );
    std::cout << "ID of CUDA device closest to revision 1.3: " << dev << std::endl;
    cudaSetDevice( dev );
    return 0;
}
|
14,032 | #include <stdint.h>
#include <unistd.h>
#include <stdio.h>
#include <assert.h>
#include <sys/time.h>
#include <time.h>
#include <stdlib.h>
#include <sys/mman.h>
// Print a readable CUDA error message with its source location, then abort.
static void HandleError( cudaError_t err, const char *file, int line ) {
if (err != cudaSuccess) {
printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line );
exit( EXIT_FAILURE );
}
}
// Wrap every CUDA runtime call so failures are reported with file/line.
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
// Guard host allocations: exits with a message when the pointer is NULL.
#define HANDLE_NULL( a ) {if (a == NULL) { \
printf( "Host memory failed in %s at line %d\n", \
__FILE__, __LINE__ ); \
exit( EXIT_FAILURE );}}
// Elapsed wall-clock time between two gettimeofday() samples, in seconds.
inline double diff_s(struct timeval start, struct timeval end)
{
    double micros  = (double)(end.tv_usec - start.tv_usec) / 1000000;
    double seconds = (double)(end.tv_sec - start.tv_sec);
    return micros + seconds;
}
// out[i] = in[i] + i for the thread's flat global index. No bounds guard:
// the launch configuration must cover the buffer exactly.
__global__ void kernel(uint64_t *in, uint64_t *out)
{
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    out[gid] = in[gid] + gid;
}
// Empty kernel; the `memory` argument is never touched. Presumably kept for
// measuring bare launch overhead -- it is not referenced in the visible code.
__global__ void nullKernel(int *memory)
{
}
// Check out[i] == in[i] + i for every element and print a one-line verdict.
// Despite the name, numBytes counts uint64_t elements, not bytes.
void verify(uint64_t *in, uint64_t *out, int numBytes)
{
    bool ok = true;
    for (int i = 0; i < numBytes; i++) {
        if (out[i] != in[i] + i)
            ok = false;
    }
    if (!ok)
        printf("ERROR in verification!\n");
    else
        printf("SUCCESS!\n");
}
// CPU reference for the GPU kernel: out[i] = in[i] + i. numBytes counts
// uint64_t elements, not bytes (name kept for interface compatibility).
void cpu_compute(uint64_t *in, uint64_t *out, int numBytes)
{
    for (int idx = numBytes; idx-- > 0; )
        out[idx] = in[idx] + (uint64_t)idx;
}
// Benchmark harness comparing host<->device data-movement strategies for the
// simple out[i] = in[i] + i kernel:
//   -b 0: explicit cudaMalloc + cudaMemcpy, copy time included in the timing
//   -b 1: pinned host memory (cudaHostAlloc) passed directly to the kernel
//   -b 2: unified memory (cudaMallocManaged) with per-iteration sync
//   -b 3: explicit copies, excluded from the timed region
// -m sets the element count (the name "numBytes" is historical), -i the
// iteration count. Each case runs the CPU reference in the same loop.
int main( int argc, char *argv[] )
{
uint64_t *in, *out, *in_d, *out_d;
int ITERATIONS = 1;
int numBytes = 1024*1024; // NOTE(review): counts uint64_t elements, not bytes.
struct timeval tv1, tv2;
int opt;
int benchmarkType = 0;
while ((opt = getopt(argc, argv, "m:b:i:")) != -1) {
switch (opt) {
case 'm':
numBytes = atoi(optarg);
//assert(numBytes%16 == 0 && numBytes<=1024);
break;
case 'b':
benchmarkType = atoi(optarg);
break;
case 'i':
ITERATIONS = atoi(optarg);
break;
default: /* '?' */
break;
}
}
// Launch shape: a single block up to 1024 elements, otherwise 1024 blocks
// with numBytes/1024 threads each. NOTE(review): this assumes numBytes is a
// multiple of 1024 and the per-block thread count stays <= the device limit
// for large -m values -- confirm.
int num_of_blocks=1;
int num_of_threads_per_block=numBytes;
if(numBytes>1024){
num_of_blocks = 1024;
num_of_threads_per_block = numBytes/1024;
}
// Force CUDA context creation now so it is not charged to the timed loops.
HANDLE_ERROR(cudaFree(0));
switch (benchmarkType) {
case 0: {// default with data copy
in = (uint64_t *)malloc(sizeof(uint64_t)*numBytes);
out = (uint64_t *)malloc(sizeof(uint64_t)*numBytes);
assert(in);
assert(out);
HANDLE_ERROR( cudaMalloc( &in_d, sizeof(uint64_t)*numBytes) );
HANDLE_ERROR( cudaMalloc( &out_d, sizeof(uint64_t)*numBytes) );
for(int k=0;k< numBytes ;k++){
in[k]=1;
}
gettimeofday(&tv1, NULL);
for(int i=0; i<ITERATIONS; i++) {
cpu_compute(in, out, numBytes);
HANDLE_ERROR( cudaMemcpy(in_d,in, sizeof(uint64_t)*numBytes,cudaMemcpyDefault) );
kernel<<<num_of_blocks,num_of_threads_per_block>>>(in_d,out_d);
// The blocking D2H copy below also synchronizes with the kernel.
HANDLE_ERROR( cudaMemcpy(out,out_d, sizeof(uint64_t)*numBytes,cudaMemcpyDefault) );
}
// NOTE(review): verify() runs before tv2 is sampled, so verification cost
// is included in the reported time (consistent with cases 1 and 2).
verify(in,out,numBytes);
gettimeofday(&tv2, NULL);
HANDLE_ERROR( cudaGetLastError());
double elapsedTimeSeconds = diff_s(tv1,tv2);
printf("Default (including copy overhead) = %f ms\n",elapsedTimeSeconds*1e3/(float)ITERATIONS);
free(in);
free(out);
cudaFree(in_d);
cudaFree(out_d);
break;
}
case 1: {// cudaHostAlloc
// Pinned host buffers are handed straight to the kernel; this relies on
// the runtime mapping pinned memory into the device address space.
HANDLE_ERROR( cudaHostAlloc( &in, sizeof(uint64_t)*numBytes,0) );
HANDLE_ERROR( cudaHostAlloc( &out, sizeof(uint64_t)*numBytes,0) );
for(int k=0;k< numBytes ;k++){
in[k]=1;
}
gettimeofday(&tv1, NULL);
for(int i=0; i<ITERATIONS; i++) {
cpu_compute(in, out, numBytes);
kernel<<<num_of_blocks,num_of_threads_per_block>>>(in,out);
// HANDLE_ERROR(cudaDeviceSynchronize());
}
// NOTE(review): with the synchronize above commented out, the kernels are
// still in flight here, so verify() may observe the CPU-written values
// rather than the GPU's -- confirm this is intentional for the benchmark.
verify(in,out,numBytes);
gettimeofday(&tv2, NULL);
HANDLE_ERROR( cudaGetLastError());
double elapsedTimeSeconds = diff_s(tv1,tv2);
printf("cudaHostAlloc = %f ms\n",elapsedTimeSeconds*1e3/(float)ITERATIONS);
cudaFreeHost(in);
cudaFreeHost(out);
break;
}
case 2: {// cudaMallocManaged
HANDLE_ERROR( cudaMallocManaged( &in, sizeof(uint64_t)*numBytes) );
HANDLE_ERROR( cudaMallocManaged( &out, sizeof(uint64_t)*numBytes) );
for(int k=0;k< numBytes ;k++){
in[k]=1;
}
gettimeofday(&tv1, NULL);
for(int i=0; i<ITERATIONS; i++) {
cpu_compute(in, out, numBytes);
kernel<<<num_of_blocks,num_of_threads_per_block>>>(in,out);
// Managed memory requires synchronization before the host touches it again.
HANDLE_ERROR(cudaDeviceSynchronize());
}
verify(in,out,numBytes);
gettimeofday(&tv2, NULL);
HANDLE_ERROR( cudaGetLastError());
double elapsedTimeSeconds = diff_s(tv1,tv2);
printf("cudaMallocManaged = %f ms\n",elapsedTimeSeconds*1e3/(float)ITERATIONS);
cudaFree(in);
cudaFree(out);
break;
}
case 3: {// ideal (discarding overhead of data copy)
in = (uint64_t *)malloc(sizeof(uint64_t)*numBytes);
out = (uint64_t *)malloc(sizeof(uint64_t)*numBytes);
assert(in);
assert(out);
for(int k=0;k< numBytes ;k++){
in[k]=1;
}
HANDLE_ERROR( cudaMalloc( &in_d, sizeof(uint64_t)*numBytes) );
HANDLE_ERROR( cudaMalloc( &out_d, sizeof(uint64_t)*numBytes) );
// One-time H2D copy outside the timed region.
HANDLE_ERROR( cudaMemcpy(in_d,in, sizeof(uint64_t)*numBytes,cudaMemcpyDefault) );
gettimeofday(&tv1, NULL);
for(int i=0; i<ITERATIONS; i++) {
cpu_compute(in, out, numBytes);
kernel<<<num_of_blocks,num_of_threads_per_block>>>(in_d,out_d);
}
gettimeofday(&tv2, NULL);
double temp_elapsedTimeSeconds = diff_s(tv1,tv2);
// D2H copy excluded from the timing; verification time is added back in.
HANDLE_ERROR( cudaMemcpy(out,out_d, sizeof(uint64_t)*numBytes,cudaMemcpyDefault) );
gettimeofday(&tv1, NULL);
verify(in,out,numBytes);
gettimeofday(&tv2, NULL);
HANDLE_ERROR( cudaGetLastError());
double elapsedTimeSeconds = temp_elapsedTimeSeconds + diff_s(tv1,tv2);
printf("Ideal (excluding copy overhead) = %f ms\n",elapsedTimeSeconds*1e3/(float)ITERATIONS);
free(in);
free(out);
cudaFree(in_d);
cudaFree(out_d);
break;
}
}
cudaDeviceReset();
return 0;
}
|
14,033 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
// Reconstruct the rank-k model tensor: Q[q][i][t] = sum_j A[i][j]*B[t][j]*C[q][j].
// Expects a 3-D launch covering exactly n x n x n elements (y -> i, x -> t,
// z -> q); there is no bounds guard, so grid*block extents must equal n.
__global__ void errorKernel(float* Q, float* A, float* B, float* C, int n, int k){
    const int i = blockIdx.y * blockDim.y + threadIdx.y;
    const int t = blockIdx.x * blockDim.x + threadIdx.x;
    const int q = blockIdx.z * blockDim.z + threadIdx.z;
    float acc = 0.0f;
    for (int j = 0; j < k; ++j)
        acc += A[i * k + j] * B[t * k + j] * C[q * k + j];
    Q[n * n * q + n * i + t] = acc;
}
// Multiplicative update of factor matrix A against target tensor T and the
// current reconstruction Q: each (i,j) entry is scaled by the ratio of
// weighted T/Q terms accumulated over all (t,q). Expects a 2-D launch of
// exactly n x k threads (y -> i, x -> j); no bounds guard.
__global__ void factorAKernel ( float *T, float *Q, float *A, float *B, float *C, float *A_n, int n, int k)
{
//printf("");
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
// Seeding both sums with 0.1f keeps the ratio finite when every weight
// vanishes (poor man's regularizer).
float sum_n = 0.1f, sum_d = 0.1f;
float temp = 0.0f;
for(int t=0; t<n; t++){
for(int q=0; q<n; q++){
temp = B[t*k+j]*C[q*k+j];
/*
if(i==0 && j==0){
printf("(t,q)=(%d %d) num=%f den=%f\n",t,q,temp*(T[n*n*q+n*i+t]/ Q[n*n*q+n*i+t]),temp);
}
*/
// ugly fix
//if(Q[n*n*q+n*i+t] < 0.00000001)
// sum_n += temp;
//else
sum_n += temp*(T[n*n*q+n*i+t]/ Q[n*n*q+n*i+t]);
sum_d += temp;
}
}
A_n[i*k+j] = A[i*k+j]*(sum_n/sum_d);
}
// Multiplicative update of factor matrix B: entry (t,j) is scaled by the
// ratio of weighted T/Q terms accumulated over all (i,q). Expects a 2-D
// launch of exactly n x k threads (y -> t, x -> j); no bounds guard.
__global__ void factorBKernel ( float *T, float *Q, float *A, float *B, float *C, float *B_n, int n, int k){
    const int t = blockIdx.y * blockDim.y + threadIdx.y;
    const int j = blockIdx.x * blockDim.x + threadIdx.x;
    // 0.1f seeds keep the ratio finite when every weight vanishes.
    float num = 0.1f;
    float den = 0.1f;
    for (int i = 0; i < n; i++) {
        for (int q = 0; q < n; q++) {
            const float w = A[i * k + j] * C[q * k + j];
            const int idx = n * n * q + n * i + t;
            num += w * (T[idx] / Q[idx]);
            den += w;
        }
    }
    B_n[t * k + j] = B[t * k + j] * (num / den);
}
// Multiplicative update of factor matrix C: entry (q,j) is scaled by the
// ratio of weighted T/Q terms accumulated over all (t,i). Expects a 2-D
// launch of exactly n x k threads (y -> q, x -> j); no bounds guard.
__global__ void factorCKernel ( float *T, float *Q, float *A, float *B, float *C, float *C_n, int n, int k){
    const int q = blockIdx.y * blockDim.y + threadIdx.y;
    const int j = blockIdx.x * blockDim.x + threadIdx.x;
    // 0.1f seeds keep the ratio finite when every weight vanishes.
    float num = 0.1f;
    float den = 0.1f;
    for (int t = 0; t < n; t++) {
        for (int i = 0; i < n; i++) {
            const float w = A[i * k + j] * B[t * k + j];
            const int idx = n * n * q + n * i + t;
            num += w * T[idx] / Q[idx];
            den += w;
        }
    }
    C_n[q * k + j] = C[q * k + j] * num / den;
}
// CPU reference: dense rank-k CP reconstruction,
// T[q][i][t] = sum_j A[i][j] * B[t][j] * C[q][j], laid out as
// T[n*n*q + n*i + t]. A, B, C are n x k row-major factors; the caller owns
// the returned n^3 buffer (delete[]).
float* buildTensor(float* A, float* B, float* C, int n, int k){
    float* T = new float[n * n * n];
    for (int q = 0; q < n; q++) {
        for (int i = 0; i < n; i++) {
            for (int t = 0; t < n; t++) {
                float acc = 0.0f;
                for (int j = 0; j < k; j++)
                    acc += A[i * k + j] * B[t * k + j] * C[q * k + j];
                T[n * n * q + n * i + t] = acc;
            }
        }
    }
    return T;
}
// Fixed 2x2x2 example target tensor built from hard-coded factor matrices;
// the caller owns the returned buffer (delete[]).
float* buildTensorExample(){
    const int n = 2, k = 2;
    float A[] = {1.0f, 2.0f, 3.0f, 4.0f};
    float B[] = {5.0f, 6.0f, 7.0f, 8.0f};
    float C[] = {9.0f, 10.0f, 11.0f, 12.0f};
    return buildTensor(A, B, C, n, k);
}
// Driver for a multiplicative-update CP tensor factorization: starts from
// all-ones factors A, B, C, reconstructs Q on the GPU, and alternately
// updates each factor against the target tensor T (buildTensorExample).
// NOTE(review): every kernel is launched with a single block (dim3(n,n) or
// dim3(n,n,n) threads), so this only works while n*k and n^3 fit inside one
// block (<= 1024 threads) -- confirm before increasing n or k.
int main ( int argc, char * argv [] )
{
int n=2, k=2;
//int n,k;
//scanf("%d",&n);
//scanf("%d",&k);
int size = n*n*n;
int numBytesT = size * sizeof ( float );
int numBytesABC = (n*k) * sizeof(float);
// Initial factors: all ones (randomized initialization left commented out).
float * A = new float [n*k];
float * B = new float [n*k];
float * C = new float [n*k];
for(int i=0;i<(n*k);i++){
A[i] = 1.0f;//(float)(rand()%10000) + 1.0f;
B[i] = 1.0f;//(float)(rand()%10000) + 1.0f;
C[i] = 1.0f;//(float)(rand()%10000) + 1.0f;
}
//float * T = new float [size];
//float *Q = new float[size];
/*
for ( int i = 0; i < size; i++ ){
T[i] = (float)(rand()%10000) +1.0f;
//Q[i] = T[i];//(float)(rand()%100);
//printf("%f ",T[i]);
}
*/
/*
float* A_n = new float[n*k];
float* B_n = new float[n*k];
float* C_n = new float[n*k];
for(int i=0;i<(n*k);i++){
A_n[i] = B_n[i] = C_n[i]= 0.0f;
}
*/
// Q: initial reconstruction from the all-ones factors; T: fixed target.
float *Q = buildTensor(A,B,C,n,k);
float *T = buildTensorExample();
float *T_c = NULL, *A_c = NULL, *B_c = NULL, *C_c = NULL, *Q_c = NULL, *A_n_c = NULL, *B_n_c = NULL, *C_n_c = NULL;
cudaMalloc ( (void**)&T_c, numBytesT );
cudaMalloc( (void**)&Q_c, numBytesT);
cudaMalloc ( (void**)&A_c, numBytesABC );
cudaMalloc ( (void**)&B_c, numBytesABC );
cudaMalloc ( (void**)&C_c, numBytesABC );
cudaMalloc ( (void**)&A_n_c, numBytesABC );
cudaMalloc ( (void**)&B_n_c, numBytesABC );
cudaMalloc ( (void**)&C_n_c, numBytesABC );
// Single-block launch shape for the n x k factor-update kernels.
dim3 threads = dim3(k, n);
dim3 blocks = dim3(1, 1);
cudaEvent_t start, stop;
float gpuTime = 0.0f;
cudaEventCreate ( &start );
cudaEventCreate ( &stop );
// NOTE(review): the timed region below includes all H2D and D2H copies,
// not just the kernels.
cudaEventRecord ( start, 0 );
cudaDeviceSynchronize();
cudaMemcpy ( T_c, T, numBytesT, cudaMemcpyHostToDevice );
cudaMemcpy ( Q_c, Q, numBytesT, cudaMemcpyHostToDevice );
cudaMemcpy ( A_c, A, numBytesABC, cudaMemcpyHostToDevice );
cudaMemcpy ( B_c, B, numBytesABC, cudaMemcpyHostToDevice );
cudaMemcpy ( C_c, C, numBytesABC, cudaMemcpyHostToDevice );
/*
cudaDeviceSynchronize();
cudaMemcpy ( A_n_c, A_n, numBytesABC, cudaMemcpyHostToDevice );
cudaMemcpy ( B_n_c, B_n, numBytesABC, cudaMemcpyHostToDevice );
cudaMemcpy ( C_n_c, C_n, numBytesABC, cudaMemcpyHostToDevice );
*/
/*
cudaDeviceSynchronize();
factorAKernel<<<blocks, threads>>>(T_c,Q_c,A_c,B_c,C_c,A_n_c,n,k);
cudaDeviceSynchronize();
factorBKernel<<<blocks, threads>>>(T_c,Q_c,A_n_c,B_c,C_c,B_n_c,n,k);
cudaDeviceSynchronize();
factorCKernel<<<blocks, threads>>>(T_c,Q_c,A_n_c,B_n_c,C_c,C_n_c,n,k);
cudaDeviceSynchronize();
cudaMemcpy ( A, A_n_c, numBytesABC, cudaMemcpyDeviceToHost );
cudaMemcpy ( B, B_n_c, numBytesABC, cudaMemcpyDeviceToHost );
cudaMemcpy ( C, C_n_c, numBytesABC, cudaMemcpyDeviceToHost );
cudaDeviceSynchronize();
*/
// One alternating-update sweep (loop bound is 1 here); `flag` ping-pongs
// which buffers hold the current vs. next factors when more iterations are
// enabled. errorKernel refreshes Q before each factor update.
bool flag = true;
for(int i=0;i<1;i++){
if(flag){
cudaDeviceSynchronize();
errorKernel<<<dim3(1,1,1),dim3(n,n,n)>>>(Q_c,A_c,B_c,C_c,n,k);
cudaDeviceSynchronize();
factorAKernel<<<blocks, threads>>>(T_c,Q_c,A_c,B_c,C_c,A_n_c,n,k);
cudaDeviceSynchronize();
errorKernel<<<dim3(1,1,1),dim3(n,n,n)>>>(Q_c,A_n_c,B_c,C_c,n,k);
cudaDeviceSynchronize();
factorBKernel<<<blocks, threads>>>(T_c,Q_c,A_n_c,B_c,C_c,B_n_c,n,k);
cudaDeviceSynchronize();
errorKernel<<<dim3(1,1,1),dim3(n,n,n)>>>(Q_c,A_n_c,B_n_c,C_c,n,k);
cudaDeviceSynchronize();
factorCKernel<<<blocks, threads>>>(T_c,Q_c,A_n_c,B_n_c,C_c,C_n_c,n,k);
cudaDeviceSynchronize();
}
else{
cudaDeviceSynchronize();
errorKernel<<<dim3(1,1,1),dim3(n,n,n)>>>(Q_c,A_n_c,B_n_c,C_n_c,n,k);
cudaDeviceSynchronize();
factorAKernel<<<blocks, threads>>>(T_c,Q_c,A_n_c,B_n_c,C_n_c,A_c,n,k);
cudaDeviceSynchronize();
cudaDeviceSynchronize();
errorKernel<<<dim3(1,1,1),dim3(n,n,n)>>>(Q_c,A_c,B_n_c,C_n_c,n,k);
factorBKernel<<<blocks, threads>>>(T_c,Q_c,A_c,B_n_c,C_n_c,B_c,n,k);
cudaDeviceSynchronize();
cudaDeviceSynchronize();
errorKernel<<<dim3(1,1,1),dim3(n,n,n)>>>(Q_c,A_c,B_c,C_n_c,n,k);
factorCKernel<<<blocks, threads>>>(T_c,Q_c,A_c,B_c,C_n_c,C_c,n,k);
cudaDeviceSynchronize();
}
flag = !flag;
}
cudaDeviceSynchronize();
// Copy back whichever buffer set holds the latest factors.
if(flag == false){
cudaMemcpy ( A, A_n_c, numBytesABC, cudaMemcpyDeviceToHost );
cudaMemcpy ( B, B_n_c, numBytesABC, cudaMemcpyDeviceToHost );
cudaMemcpy ( C, C_n_c, numBytesABC, cudaMemcpyDeviceToHost );
}
else{
cudaMemcpy ( A, A_c, numBytesABC, cudaMemcpyDeviceToHost );
cudaMemcpy ( B, B_c, numBytesABC, cudaMemcpyDeviceToHost );
cudaMemcpy ( C, C_c, numBytesABC, cudaMemcpyDeviceToHost );
}
cudaMemcpy ( Q, Q_c, numBytesT, cudaMemcpyDeviceToHost );
cudaDeviceSynchronize();
cudaEventRecord ( stop, 0 );
cudaEventSynchronize ( stop );
cudaEventElapsedTime ( &gpuTime, start, stop );
printf("\ntime spent executing by the GPU: %.2f millseconds\n", gpuTime );
printf("Matrix A\n");
for ( int i = 0; i < (n*k); i++ ) printf ( "%f ", A[i] );
printf("\n");
printf("Matrix B\n");
for ( int i = 0; i < (n*k); i++ ) printf ( "%f ", B[i] );
printf("\n");
printf("Matrix C\n");
for ( int i = 0; i < (n*k); i++ ) printf ( "%f ", C[i] );
printf("\n");
printf("Tensor Q\n");
for(int i=0;i<(n*n*n);i++) printf("%f ", Q[i]);
printf("\n");
cudaEventDestroy ( start );
cudaEventDestroy ( stop );
cudaFree(T_c);
cudaFree(Q_c);
cudaFree(A_c);
cudaFree(B_c);
cudaFree(C_c);
cudaFree(A_n_c);
cudaFree(B_n_c);
cudaFree(C_n_c);
delete[] T;
delete[] A;
delete[] B;
delete[] C;
delete[] Q;
return 0;
}
14,034 | #include <cstdio>
#include <iostream>
#include <limits>
#include <string>
using namespace std;
#define CSC(call) do { cudaError_t res = call; if (res != cudaSuccess) { fprintf(stderr, "CUDA Error in %s:%d: %s\n", __FILE__, __LINE__, cudaGetErrorString(res)); exit(0); } } while (0)
typedef long long ll;
const int BLOCK_SIZE = 512; // must be power of 2
const int GRID_SIZE = 32768;
const ll INF = numeric_limits<ll>::max();
// Exchange the values pointed to by a and b (device-side helper for the
// sorting kernels below).
__device__ void swap(ll *a, ll *b) {
    const ll held = *a;
    *a = *b;
    *b = held;
}
// Sort one BLOCK_SIZE-element chunk of arr in place with odd-even
// transposition sort in shared memory. Launch shape: BLOCK_SIZE/2 threads
// per block; each thread owns the element pair (2*tid, 2*tid+1). arr must
// be padded to a multiple of BLOCK_SIZE (main pads with INF).
__global__ void oddEvenBlockSort(ll *arr, int len) {
int arrOffset = blockIdx.x * BLOCK_SIZE;
// The grid is fixed-size; blocks past the end of the data do nothing.
if (arrOffset >= len) {
return;
}
__shared__ ll block[BLOCK_SIZE];
int idx = threadIdx.x;
int sortIndx = 2 * idx;
// Stage this block's chunk into shared memory.
for (int i = 0; i < 2; i++) {
block[sortIndx + i] = arr[arrOffset + sortIndx + i];
}
// BLOCK_SIZE/2 rounds of one even phase plus one odd phase suffice to
// fully sort BLOCK_SIZE items by odd-even transposition.
for (int k = 0; k < BLOCK_SIZE / 2; k++) {
__syncthreads();
// Even phase: compare-exchange pairs (2i, 2i+1).
if (sortIndx + 1 < BLOCK_SIZE) {
if (block[sortIndx] > block[sortIndx + 1]) {
swap(block + sortIndx, block + sortIndx + 1);
}
}
__syncthreads();
// Odd phase: compare-exchange pairs (2i+1, 2i+2).
if (sortIndx + 2 < BLOCK_SIZE) {
if (block[sortIndx + 1] > block[sortIndx + 2]) {
swap(block + sortIndx + 1, block + sortIndx + 2);
}
}
}
__syncthreads();
// Write the sorted chunk back to global memory.
for (int i = 0; i < 2; i++) {
arr[arrOffset + sortIndx + i] = block[sortIndx + i];
}
}
// One merge pass over adjacent sorted BLOCK_SIZE runs: each block loads a
// 2*BLOCK_SIZE window into shared memory, merges its two sorted halves with
// a bitonic merge network, and writes the window back. When oddPhase is set
// the windows shift by BLOCK_SIZE, so alternating passes propagate elements
// across run boundaries (odd-even merging at block granularity).
// Launch shape: BLOCK_SIZE threads per block; each thread owns two elements.
__global__ void bitonicMerge(ll *arr, int len, bool oddPhase) {
int arrOffset = blockIdx.x * BLOCK_SIZE * 2;
if (oddPhase) {
arrOffset += BLOCK_SIZE;
}
// Skip windows that would run past the end (and surplus grid blocks).
if (arrOffset + BLOCK_SIZE * 2 > len) {
return;
}
__shared__ ll block[BLOCK_SIZE * 2];
int idx = threadIdx.x;
int sortIndx = 2 * idx;
for (int i = 0; i < 2; i++) {
block[sortIndx + i] = arr[arrOffset + sortIndx + i];
}
__syncthreads();
// Mirror step: compare element idx of the first half against its mirror in
// the second half, forming two bitonic halves with all small keys in front.
if (idx < BLOCK_SIZE && block[idx] > block[BLOCK_SIZE * 2 - idx - 1]) {
swap(block + idx, block + BLOCK_SIZE * 2 - idx - 1);
}
int tmpIdx;
int step = BLOCK_SIZE / 2;
// Standard bitonic halving network: compare-exchange at shrinking strides.
while (step != 0) {
__syncthreads();
// Map the thread onto its compare-exchange slot for this stride; odd
// groups operate on the upper half of the window.
if ((idx / step) % 2 == 0) {
tmpIdx = idx;
}
else {
tmpIdx = idx - step + BLOCK_SIZE;
}
if (block[tmpIdx] > block[tmpIdx + step]) {
swap(block + tmpIdx, block + tmpIdx + step);
}
step /= 2;
}
__syncthreads();
for (int i = 0; i < 2; i++) {
arr[arrOffset + sortIndx + i] = block[sortIndx + i];
}
}
// Reads a binary stream from stdin (an int32 count n followed by n int32
// values), sorts the values ascending on the GPU, and writes the sorted
// int32s to stdout in the same binary format.
int main() {
ios_base::sync_with_stdio(false);
int n;
fread(&n, sizeof(int), 1, stdin); // NOTE(review): return value unchecked
// Pad the length to a multiple of BLOCK_SIZE so every block sorts a full chunk.
int len = n;
if (n % BLOCK_SIZE != 0) {
len += BLOCK_SIZE - n % BLOCK_SIZE;
}
ll *arr = new ll[len];
int elem;
for (int i = 0; i < n; i++) {
fread(&elem, sizeof(int), 1, stdin);
arr[i] = elem; // widen to 64-bit so INF padding compares above any input
}
for (int i = n; i < len; i++) {
arr[i] = INF; // padding sorts to the end and is dropped on output
}
ll *devArr;
CSC(cudaMalloc(&devArr, sizeof(ll) * len));
CSC(cudaMemcpy(devArr, arr, sizeof(ll) * len, cudaMemcpyHostToDevice));
// Phase 1: independently sort each BLOCK_SIZE chunk.
oddEvenBlockSort<<<GRID_SIZE, BLOCK_SIZE / 2>>>(devArr, len);
CSC(cudaGetLastError());
// Phase 2: alternating merge passes over chunk pairs until globally sorted.
// NOTE(review): GRID_SIZE is fixed at 32768 blocks, so len must not exceed
// GRID_SIZE * BLOCK_SIZE -- confirm the expected input bounds.
if (len > BLOCK_SIZE) {
for (int step = 0; step < len / BLOCK_SIZE; step++) {
bitonicMerge<<<GRID_SIZE, BLOCK_SIZE>>>(devArr, len, step & 1);
CSC(cudaGetLastError());
}
}
CSC(cudaMemcpy(arr, devArr, sizeof(ll) * len, cudaMemcpyDeviceToHost));
CSC(cudaFree(devArr));
// Emit only the first n values (the padding stays behind).
for (int i = 0; i < n; i++) {
elem = (int)arr[i];
fwrite(&elem, sizeof(int), 1, stdout);
}
delete[] arr;
return 0;
}
|
14,035 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>
#include <string.h>
#include <unistd.h>
#include <sys/time.h>
#define BLOCK_SIZE 256
#define STR_SIZE 256
#define HALO 1 // halo width along one direction when advancing to the next iteration
#define CUDA_CALL_SAFE(f) \
do \
{ \
cudaError_t _cuda_error = f; \
if (_cuda_error != cudaSuccess) \
{ \
fprintf(stderr, \
"%s, %d, CUDA ERROR: %s %s\n", \
__FILE__, \
__LINE__, \
cudaGetErrorName(_cuda_error), \
cudaGetErrorString(_cuda_error) \
); \
abort(); \
exit(EXIT_FAILURE); \
} \
} while (0)
// Elapsed wall-clock time between two gettimeofday() samples, in milliseconds.
static inline double time_diff(struct timeval tv_start, struct timeval tv_end)
{
    double ms = (double)(tv_end.tv_sec - tv_start.tv_sec) * 1000.0;
    ms += (double)(tv_end.tv_usec - tv_start.tv_usec) / 1000.0;
    return ms;
}
void run(int argc, char** argv);
// File-scope state shared between init() and run().
long rows, cols; // Grid dimensions (square: rows == cols).
int *data; // Host copy of the rows x cols input matrix.
int *result; // Host copy of the final result row (cols ints).
long pyramid_height = 1; // Rows advanced per kernel launch.
char *folder; // Input/output directory from argv.
char *filepath; // Scratch buffer for building file paths.
FILE *fp;
struct timeval tv_start, tv_end; // Reused timing samples for every stage.
double kernel_time = 0; // in ms
double writefile_time = 0; // in ms
double readfile_time = 0; // in ms
double d2h_memcpy_time = 0; // in ms
double h2d_memcpy_time = 0; // in ms
// Parse the command line (<rows/cols> <folder>), allocate the host input and
// result buffers, and load the rows x cols int matrix from <folder>/data.mem,
// accumulating the elapsed read time into readfile_time. Exits on any error.
void init(int argc, char** argv)
{
if (argc == 3)
{
cols = atol(argv[1]);
rows = cols; // Only square inputs are supported.
folder = argv[2];
}
else
{
printf("Usage: %s <rows/cols> <folder>\n", argv[0]);
exit(EXIT_FAILURE);
}
data = (int *)malloc(sizeof(int) * rows * cols);
if (!data)
{
fprintf(stderr, "Cannot allocate data.\n");
exit(EXIT_FAILURE);
}
result = (int *)malloc(sizeof(int) * cols);
if (!result)
{
fprintf(stderr, "Cannot allocate result.\n");
exit(EXIT_FAILURE);
}
// +128 leaves room for the fixed file names appended here and in run().
filepath = (char *)malloc(sizeof(char) * (strlen(folder) + 128));
if (!filepath)
{
fprintf(stderr, "Cannot allocate filepath");
exit(EXIT_FAILURE);
}
gettimeofday(&tv_start, NULL);
sprintf(filepath, "%s/data.mem", folder);
if ((fp = fopen(filepath, "rb")) == 0)
{
fprintf(stderr, "%s was not opened\n", filepath);
exit(EXIT_FAILURE);
}
// Single fread of the whole matrix; a count of 1 makes it all-or-nothing.
if (fread(data, sizeof(int) * rows * cols, 1, fp) != 1)
{
fprintf(stderr, "Cannot read from %s\n", filepath);
exit(EXIT_FAILURE);
}
fclose(fp);
gettimeofday(&tv_end, NULL);
readfile_time += time_diff(tv_start, tv_end);
}
// Helper macros for the kernel: inclusive range test, clamp, and minimum.
#define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max))
#define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x )
#define MIN(a, b) ((a)<=(b) ? (a) : (b))
// One wavefront advance of the path-cost dynamic program: each block computes
// `iteration` DP rows for its tile entirely in shared memory, using HALO
// ghost cells on each side so tiles stay independent between launches.
// Launch shape: 1-D grid of blockCols blocks, BLOCK_SIZE threads each.
// NOTE(review): `computed` is read after the loop without initialization, so
// callers must guarantee iteration >= 1 (calc_path's MIN(...) does).
__global__ void dynproc_kernel(
long iteration,
int *gpuWall,
int *gpuSrc,
int *gpuResults,
long cols,
long rows,
long startStep,
long border)
{
__shared__ int prev[BLOCK_SIZE];
__shared__ int result[BLOCK_SIZE];
long bx = (long)blockIdx.x;
long tx = (long)threadIdx.x;
// each block finally computes result for a small block
// after N iterations.
// it is the non-overlapping small blocks that cover
// all the input data
// calculate the small block size
long small_block_cols = BLOCK_SIZE-iteration*HALO*2;
// calculate the boundary for the block according to
// the boundary of its small block
long blkX = small_block_cols*bx-border;
long blkXmax = blkX+BLOCK_SIZE-1;
// calculate the global thread coordination
long xidx = blkX+tx;
// effective range within this block that falls within
// the valid range of the input data
// used to rule out computation outside the boundary.
long validXmin = (blkX < 0) ? -blkX : 0;
long validXmax = (blkXmax > cols-1) ? BLOCK_SIZE-1-(blkXmax-cols+1) : BLOCK_SIZE-1;
// West/east neighbor indices, clamped at the tile's valid edges.
long W = tx-1;
long E = tx+1;
W = (W < validXmin) ? validXmin : W;
E = (E > validXmax) ? validXmax : E;
bool isValid = IN_RANGE(tx, validXmin, validXmax);
if(IN_RANGE(xidx, 0, cols-1)){
prev[tx] = gpuSrc[xidx];
}
__syncthreads(); // [Ronny] Added sync to avoid race on prev Aug. 14 2012
bool computed;
for (long i=0; i<iteration ; i++){
computed = false;
// The computable interior shrinks by one on each side per iteration.
if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && \
isValid){
computed = true;
long left = prev[W];
long up = prev[tx];
long right = prev[E];
long shortest = MIN(left, up);
shortest = MIN(shortest, right);
long index = cols*(startStep+i)+xidx;
result[tx] = shortest + gpuWall[index];
}
__syncthreads();
if(i==iteration-1)
break;
if(computed) //Assign the computation range
prev[tx]= result[tx];
__syncthreads(); // [Ronny] Added sync to avoid race on prev Aug. 14 2012
}
// update the global memory
// after the last iteration, only threads coordinated within the
// small block perform the calculation and switch on ``computed''
if (computed){
gpuResults[xidx]=result[tx];
}
}
/*
compute N time steps
*/
/*
 * Advance the DP wavefront over all rows, pyramid_height rows per kernel
 * launch, ping-ponging between the two row buffers in gpuResult.
 * Returns the index (0 or 1) of the buffer holding the final row.
 * Fix: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is
 * the documented replacement with identical semantics here.
 */
int calc_path(int *gpuWall, int *gpuResult[2], long rows, long cols, \
long pyramid_height, long blockCols, long borderCols)
{
    dim3 dimBlock(BLOCK_SIZE);
    dim3 dimGrid(blockCols);

    int src = 1, dst = 0;
    for (long t = 0; t < rows-1; t+=pyramid_height) {
        /* Swap roles: last launch's output row becomes this launch's input. */
        int temp = src;
        src = dst;
        dst = temp;
        dynproc_kernel<<<dimGrid, dimBlock>>>(
            MIN(pyramid_height, rows-t-1),
            gpuWall, gpuResult[src], gpuResult[dst],
            cols, rows, t, borderCols);
        CUDA_CALL_SAFE(cudaDeviceSynchronize());
    }
    return dst;
}
// Entry point: all the work happens in run() (init, GPU DP, result output).
int main(int argc, char** argv)
{
run(argc,argv);
return EXIT_SUCCESS;
}
/*
 * Allocate device buffers, copy the wall matrix to the GPU, run the DP
 * (calc_path), copy the final cost row back, and write it to
 * <folder>/result.cudamemcpy.mem. Each stage's wall time is accumulated in
 * the file-scope counters and printed as CSV at the end.
 * Fix: the long-valued parameters were printed with %d, which is undefined
 * behavior on LP64 platforms; they now use %ld.
 */
void run(int argc, char** argv)
{
    init(argc, argv);

    /* --------------- pyramid parameters --------------- */
    long borderCols = (pyramid_height)*HALO;
    long smallBlockCol = BLOCK_SIZE-(pyramid_height)*HALO*2;
    long blockCols = cols/smallBlockCol+((cols%smallBlockCol==0)?0:1);

    printf("pyramidHeight: %ld\ngridSize: [%ld]\nborder:[%ld]\nblockSize: %d\nblockGrid:[%ld]\ntargetBlock:[%ld]\n",\
    pyramid_height, cols, borderCols, BLOCK_SIZE, blockCols, smallBlockCol);

    int *gpuWall, *gpuResult[2];
    long size = rows*cols;
    /* gpuResult holds two ping-pong row buffers; gpuWall holds every row but
     * the first (the first row seeds gpuResult[0]). */
    CUDA_CALL_SAFE(cudaMalloc((void**)&gpuResult[0], sizeof(int)*cols));
    CUDA_CALL_SAFE(cudaMalloc((void**)&gpuResult[1], sizeof(int)*cols));
    CUDA_CALL_SAFE(cudaMalloc((void**)&gpuWall, sizeof(int)*(size-cols)));

    gettimeofday(&tv_start, NULL);
    CUDA_CALL_SAFE(cudaMemcpy(gpuResult[0], data, sizeof(int)*cols, cudaMemcpyHostToDevice));
    CUDA_CALL_SAFE(cudaMemcpy(gpuWall, data+cols, sizeof(int)*(size-cols), cudaMemcpyHostToDevice));
    gettimeofday(&tv_end, NULL);
    h2d_memcpy_time += time_diff(tv_start, tv_end);

    gettimeofday(&tv_start, NULL);
    int final_ret = calc_path(gpuWall, gpuResult, rows, cols, pyramid_height, blockCols, borderCols);
    gettimeofday(&tv_end, NULL);
    kernel_time += time_diff(tv_start, tv_end);

    gettimeofday(&tv_start, NULL);
    CUDA_CALL_SAFE(cudaMemcpy(result, gpuResult[final_ret], sizeof(int)*cols, cudaMemcpyDeviceToHost));
    gettimeofday(&tv_end, NULL);
    d2h_memcpy_time += time_diff(tv_start, tv_end);

    gettimeofday(&tv_start, NULL);
    sprintf(filepath, "%s/result.cudamemcpy.mem", folder);
    if ((fp = fopen(filepath, "wb")) == 0)
    {
        fprintf(stderr, "%s was not opened\n", filepath);
        exit(EXIT_FAILURE);
    }
    if (fwrite(result, sizeof(int) * cols, 1, fp) != 1)
    {
        fprintf(stderr, "Cannot write to %s\n", filepath);
        exit(EXIT_FAILURE);
    }
    /* Flush and fsync so the durable-write cost is part of writefile_time. */
    fflush(fp);
    fsync(fileno(fp));
    fclose(fp);
    gettimeofday(&tv_end, NULL);
    writefile_time += time_diff(tv_start, tv_end);

    cudaFree(gpuWall);
    cudaFree(gpuResult[0]);
    cudaFree(gpuResult[1]);

    free(data);
    free(result);
    free(filepath);

    printf("==> header: kernel_time (ms),writefile_time (ms),d2h_memcpy_time (ms),readfile_time (ms),h2d_memcpy_time (ms)\n");
    printf("==> data: %f,%f,%f,%f,%f\n", kernel_time, writefile_time, d2h_memcpy_time, readfile_time, h2d_memcpy_time);
}
|
14,036 | #include <cuda.h>
#include <algorithm>
// Flat 1-D global thread index (block offset plus thread offset).
// NOTE(review): not referenced anywhere in this file's visible code.
__device__ int get_global_index(void) {
return blockIdx.x * blockDim.x + threadIdx.x;
}
// Deliberate infinite busy-loop: keeps every launched thread spinning so the
// GPU stays busy for load-generation purposes.
__global__ void kernel(void) {
while(1);
}
// GPU load generator: for each selected device, allocates ~8 GB of device
// memory and launches an infinite-loop kernel. With arguments, each argument
// is a device ordinal to burn; without arguments, every visible device is
// used. NOTE(review): cudaDeviceSynchronize() never returns while the
// kernels spin, and the allocations are intentionally never freed -- the
// process is expected to be killed externally.
int main(int argc, char **argv) {
int block_size = 128;
int grid_size = 1;
int gpu_num;
unsigned long int bytes = 8e9; // size of memory to occupy
float* data;
cudaGetDeviceCount(&gpu_num);
if (argc > 1) {
// Burn only the devices named on the command line.
for (int i = 1; i < argc; i++) {
cudaSetDevice(atoi(argv[i]));
cudaMalloc((void**)&data, bytes); // return value unchecked: may silently fail
kernel<<<grid_size, block_size>>>();
}
}
else {
// No arguments: burn every visible device.
for (int i = 0; i < gpu_num; i++) {
cudaSetDevice(i);
cudaMalloc((void**)&data, bytes);
kernel<<<grid_size, block_size>>>();
}
}
cudaDeviceSynchronize();
return 0;
}
|
14,037 | #include<stdio.h>
#define THREADS_PER_BLOCK 1024
#define MEDIUM_THREADS_PER_BLOCK 120
#define SMALL_THREADS_PER_BLOCK 8
// Element-wise vector sum c = a + b over n elements, one thread per element.
// Threads whose global index falls past the end simply return.
__global__ void add(int *a, int *b, int *c, int n) {
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= n)
        return;
    c[gid] = a[gid] + b[gid];
}
void random_ints(int *a, int l, int c){
    // Fill an l x c row-major matrix; element (i, j) gets i + j.
    // (Not actually random, despite the name -- kept for compatibility.)
    for (int row = 0; row < l; ++row) {
        int *line = a + row * c;
        for (int col = 0; col < c; ++col)
            line[col] = row + col;
    }
}
int main(void) {
    // Reads dimensions (rows, cols) from stdin, builds two l x c matrices
    // with a[i][j] = i + j, adds them on the GPU, and prints the sum of
    // all elements of the result.
    int *a, *b, *c, nl, nc;
    int *d_a, *d_b, *d_c;
    int n, blockSize, gridSize, i, size;
    long long int sum;
    scanf("%d %d", &nl, &nc);
    n = nl * nc;
    size = sizeof(int) * n;
    //allocate space for device copies of a, b, c
    cudaMalloc(&d_a, size);
    cudaMalloc(&d_b, size);
    cudaMalloc(&d_c, size);
    //allocate space for host copies of a, b, c and setup input values
    a = (int *) malloc(size); random_ints(a, nl, nc);
    b = (int *) malloc(size); random_ints(b, nl, nc);
    c = (int *) malloc(size);
    //copy inputs to device
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    // Pick a block size roughly proportional to the problem size.
    if (n < THREADS_PER_BLOCK) {
        if (n < MEDIUM_THREADS_PER_BLOCK) {
            blockSize = SMALL_THREADS_PER_BLOCK;
            if (n < blockSize) blockSize = 1;
        } else {
            blockSize = MEDIUM_THREADS_PER_BLOCK;
        }
    } else {
        blockSize = THREADS_PER_BLOCK;
    }
    gridSize = (int) ceil((float)n/blockSize);
    //Launch add() kernel on GPU; the kernel guards the grid tail itself
    add<<<gridSize, blockSize>>>(d_a, d_b, d_c, n);
    //copy result back to host (blocking cudaMemcpy synchronizes with kernel)
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    sum = 0;
    for(i=0; i < n; i++) {
        sum += c[i];
    }
    printf("%lli\n", sum);
    //cleanup -- fixed: the host buffers were previously leaked
    free(a); free(b); free(c);
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    return 0;
}
|
14,038 | #include <iostream>
#define N 500
using namespace std;
// Dot product of two length-N int vectors, computed by a SINGLE block of
// exactly N threads (launched as dot<<<1, N>>>; N must not exceed the
// device's max threads per block).
__global__ void dot( int *a, int *b, int *c )
{
// Shared memory for results of multiplication
__shared__ int temp[N];
temp[threadIdx.x] = a[threadIdx.x] * b[threadIdx.x];
// Barrier: every product must be in shared memory before thread 0 reads.
__syncthreads();
// Thread 0 sums the pairwise products
if( 0 == threadIdx.x )
{
int sum = 0;
for( int i = 0; i < N; i++ )
sum += temp[i];
*c = sum;
}
}
int main()
{
    // Dot product of (1,1,...,1) and (0,1,...,N-1) computed on the GPU;
    // prints the result, which equals N*(N-1)/2.
    int h_a[N], h_b[N], h_c;
    int *d_a, *d_b, *d_c;

    cudaMalloc( (void**) &d_a, N*sizeof(int) );
    cudaMalloc( (void**) &d_b, N*sizeof(int) );
    cudaMalloc( (void**) &d_c, sizeof(int) );

    for (int k = 0; k < N; ++k)
    {
        h_a[k] = 1;
        h_b[k] = k;
    }

    cudaMemcpy( d_a, h_a, N*sizeof(int), cudaMemcpyHostToDevice );
    cudaMemcpy( d_b, h_b, N*sizeof(int), cudaMemcpyHostToDevice );

    // Single block of N threads, as the kernel's reduction requires.
    dot<<< 1,N >>>(d_a, d_b, d_c);

    cudaMemcpy( &h_c, d_c, sizeof(int), cudaMemcpyDeviceToHost );

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);

    cout << h_c << endl;
    return 0;
} |
14,039 | #include "includes.h"
//-----------------------------------------
// Autor: Farias
// Data : January 2012
// Goal : Image treatment
//-----------------------------------------
/***************************************************************************************************
Includes
***************************************************************************************************/
/***************************************************************************************************
Defines
***************************************************************************************************/
#define ELEM(i,j,DIMX_) (i+(j)*(DIMX_))
#define BLOCK_SIZE 16
/***************************************************************************************************
Functions
***************************************************************************************************/
using namespace std;
/**************************************************************************************************/
/* 3x3 weighted smoothing filter over an interleaved 3-channel image
   (one thread per pixel; the one-pixel border is left untouched).
   Fixed: the weight table lives in __shared__ memory, so it must be
   initialized race-free and be followed by a barrier before use -- the
   original had every thread write it with no __syncthreads(). */
__global__ void filter2( int width, int height, unsigned char *src, unsigned char *dest ) {
int i = threadIdx.x + blockIdx.x*blockDim.x;
int j = threadIdx.y + blockIdx.y*blockDim.y;
int aux, idx;
__shared__ int pesos[3][3];
// One thread per block initializes the weights; the rest wait at the barrier.
if ( threadIdx.x == 0 && threadIdx.y == 0 ) {
pesos[0][0] = 0; pesos[0][1] = 2; pesos[0][2] = 0;
pesos[1][0] = 2; pesos[1][1] = 4; pesos[1][2] = 2;
pesos[2][0] = 0; pesos[2][1] = 2; pesos[2][2] = 0;
}
__syncthreads();  // reached by ALL threads, before any divergent branch
if(i > 0 && j > 0 && i < width - 1 && j < height - 1) {
for (int k = 0; k < 3; ++k)   // k = color channel
{
aux = 0;
for (int lin = 0; lin < 3; lin++)
{
for (int col = 0; col < 3; col++){
idx = 3*ELEM( i + lin - 1, j + col - 1, width );
aux += pesos[lin][col]*src[ idx+k ];
}
}
aux /= 12;  // the weights sum to 12
idx = 3*ELEM( i, j , width );
dest[ idx+k ] = (unsigned char)aux;
}
}
} |
14,040 |
// This is not really C++-code but pretty plain C code, but we compile it
// as C++ so we can integrate with CUDA seamlessly.
// If you plan on submitting your solution for the Parallel Sorting Contest,
// please keep the split into main file and kernel file, so we can easily
// insert other data.
__device__ static void exchange(int *i, int *j)
{
    // Swap the two ints in place.
    const int tmp = *i;
    *i = *j;
    *j = tmp;
}
// One compare-exchange phase of a bitonic sorting network.
// j: partner distance for this phase; k: current subsequence size, which
// also selects the sort direction via (tid & k).
// Assumes one thread per element and a power-of-two element count.
__global__ void bitonic_sort_gpu(int *data, int j, int k)
{
const int tid = threadIdx.x + blockDim.x * blockIdx.x;
int ixj = tid^j; // Calculate indexing!
// Only the lower-index thread of each pair performs the exchange.
if ((ixj) > tid)
{
if ((tid & k) == 0 && data[tid] > data[ixj])
exchange(&data[tid], &data[ixj]);
if ((tid & k) != 0 && data[tid] < data[ixj])
exchange(&data[tid], &data[ixj]);
}
}
// No, this is not GPU code yet but just a copy of the CPU code, but this
// is where I want to see your GPU code!
// Sorts N ints ascending on the GPU with a bitonic network.
// Contract: launches a SINGLE block of N threads, so N must be a power of
// two and no larger than the device's max threads per block (typ. 1024).
void bitonic_gpu(int *data, int N)
{
    int* d_data;
    cudaMalloc((void**)&d_data, sizeof(int) * N);
    cudaMemcpy(d_data, data, sizeof(int) * N, cudaMemcpyHostToDevice);
    dim3 gridDim(1, 1);
    dim3 blockSize(N, 1);
    uint j, k;
    // Outer loop, double size for each step.
    for (k = 2; k <= N; k = 2*k) {
        // Inner loop, half size for each step
        for (j = k >> 1; j > 0; j = j >> 1) {
            bitonic_sort_gpu <<<gridDim, blockSize>>>(d_data, j, k);
            // Fixed: cudaThreadSynchronize() is deprecated;
            // cudaDeviceSynchronize() is the supported equivalent.
            cudaDeviceSynchronize();
        }
    }
    cudaMemcpy(data, d_data, sizeof(int) * N, cudaMemcpyDeviceToHost);
    cudaFree(d_data);
}
|
14,041 | // nvcc can compile straight C
#include <stdio.h>
int main () {
    // Minimal host-only smoke test; nvcc compiles plain C unchanged.
    printf("Hello World!\n");
    return 0;
}
|
14,042 | #include <iostream>
#include <cstdlib>
using namespace std;
void createMatrix(int* myMat,int row,int col)
{
    // Fill a row x col row-major matrix with pseudo-random digits 0..9.
    // rand() is never seeded here, so the sequence is reproducible.
    const int total = row * col;
    for (int k = 0; k < total; k++)
        myMat[k] = rand() % 10;
}
void printMatrix(int* myMat,int row,int col)
{
    // Print the matrix row by row, each element followed by one space.
    for (int r = 0; r < row; r++)
    {
        for (int c = 0; c < col; c++)
            cout << myMat[r*col + c] << " ";
        cout << endl;
    }
}
// Naive GEMM kernel: launched as <<<1, rowA>>>, one thread per ROW of the
// result; each thread computes a full row of resultMat (rowA x colB).
// No bounds check -- relies on blockDim.x == rowA.
__global__
void multiplyMatrix(int* matA,int* matB,int* resultMat,int rowA,int rowB,int colB)
{
int i = threadIdx.x;  // result row handled by this thread
for(int j=0;j<colB;j++)
{
int sum = 0;
for(int k=0;k<rowB;k++)
{
sum+=(matA[i*rowB+k]*matB[k*colB+j]);
}
resultMat[i*colB+j] = sum;
}
}
void multiplyMatrixSerial(int* matA, int* matB, int* resultMat, int rowA, int rowB, int colB)
{
    // CPU reference GEMM: resultMat(rowA x colB) = matA(rowA x rowB) *
    // matB(rowB x colB); all matrices stored row-major.
    for (int r = 0; r < rowA; r++)
    {
        const int* aRow = matA + r * rowB;
        for (int c = 0; c < colB; c++)
        {
            int acc = 0;
            for (int k = 0; k < rowB; k++)
                acc += aRow[k] * matB[k * colB + c];
            resultMat[r * colB + c] = acc;
        }
    }
}
int main()
{
    // Multiply a 3x4 by a 4x3 random matrix on the GPU (one thread per
    // result row) and print both the GPU and CPU results for comparison.
    int rowA = 3;
    int rowB = 4;
    int colB = 3;
    //allocate memory in host
    // Fixed: `new int[count*sizeof(int)]` over-allocated 4x -- new[] takes
    // an ELEMENT count, not a byte count.
    int *matA = new int[rowA*rowB];
    int *matB = new int[rowB*colB];
    int *matC = new int[rowA*colB];
    //allocate memory in device
    int *dA, *dB, *dC;
    cudaMalloc((void**)&dA,rowA*rowB*sizeof(int));
    cudaMalloc((void**)&dB,rowB*colB*sizeof(int));
    cudaMalloc((void**)&dC,rowA*colB*sizeof(int));
    cout<<"Creating matrix..."<<endl;
    createMatrix(matA,rowA,rowB);
    createMatrix(matB,rowB,colB);
    cout<<"Creating matrix completed"<<endl;
    //copy from host to device
    cudaMemcpy(dA,matA,rowA*rowB*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(dB,matB,rowB*colB*sizeof(int),cudaMemcpyHostToDevice);
    cout<<"MatrixA: "<<endl;
    printMatrix(matA,rowA,rowB);
    cout<<"MatrixB: "<<endl;
    printMatrix(matB,rowB,colB);
    //each thread will compute a row of elements in result matrix
    multiplyMatrix <<<1,rowA>>> (dA,dB,dC,rowA,rowB,colB);
    //copy result from device to host (blocking copy syncs with the kernel)
    cudaMemcpy(matC,dC,rowA*colB*sizeof(int),cudaMemcpyDeviceToHost);
    cout<<"The parallel result matrix is: "<<endl;
    printMatrix(matC,rowA,colB);
    multiplyMatrixSerial(matA,matB,matC,rowA,rowB,colB);
    cout << "The serial result matrix is: " << endl;
    printMatrix(matC, rowA, colB);
    cudaFree(dA);
    cudaFree(dB);
    cudaFree(dC);
    delete[] matA;
    delete[] matB;
    delete[] matC;
    return 0;
} |
14,043 | #include "includes.h"
// Elementwise affine transform y = a + b*x over *size shorts; the scalars
// and the length arrive via device pointers. 64-bit index arithmetic
// avoids overflow on very large grids.
__global__ void applyLinearFunction(int *size, const short *x, short *y, short *a, short *b) {
    const long gid = threadIdx.x + blockIdx.x * (long)blockDim.x;
    if (gid < *size) {
        y[gid] = *a + *b * x[gid];
    }
} |
14,044 | #include <iostream>
#include <cstring>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>
#define size 21
using namespace std;
// One generation of Conway's Game of Life on a size x size torus.
// Launched as a single size x size thread block; updates env in place.
__global__ void jogo(bool* env) {
int x = threadIdx.x;
int y = threadIdx.y;
// wrap the borders around the analysed position (toroidal topology)
int wrapNorth = ((size + y - 1) % size) * size;
int wrapSouth = ((size + y + 1) % size) * size;
int wrapEast = (size + x + 1) % size;
int wrapWest = (size + x - 1) % size;
// count how many of the 8 neighbours are alive
int count = 0;
if (env[y * size + wrapEast]) count++;
if (env[y * size + wrapWest]) count++;
if (env[wrapNorth + wrapEast]) count ++;
if (env[wrapNorth + wrapWest]) count++;
if (env[wrapSouth + wrapEast]) count++;
if (env[wrapSouth + wrapWest]) count++;
if (env[wrapNorth + x]) count++;
if (env[wrapSouth + x]) count++;
// Barrier: every thread has READ its neighbourhood before any thread
// WRITES, making the in-place update below race-free within this block.
__syncthreads();
if(count < 2 || count > 3)
env[y * size + x] = false;
if(count == 3)
env[y * size + x] = true;
}
void print(bool* env) {
    // Render the grid: '#' for a live cell, space for a dead one.
    // Fixed off-by-one: the original emitted the newline when i % size == 0,
    // i.e. right after the FIRST cell of each row; emit it after the LAST.
    for(int i = 0; i < size * size; i++) {
        cout << (env[i] ? '#' : ' ');
        if (i % size == size - 1) cout << endl;
    }
}
int main(){
    // Run 100 generations of Life on the GPU, redrawing the terminal each step.
    int parada = 0;
    // Fixed: the board was left uninitialized, so "dead" cells held garbage.
    // Zero-initialize, then place the seed pattern.
    bool env[size * size] = {};
    env[ 5*size + 7] = true;
    env[ 6*size + 8] = true;
    env[ 8*size +8] = true;
    env[ 6*size +6] = true;
    env[ 8*size +10] = true;
    env[ 9*size +10] = true;
    env[ 8*size +11] = true;
    env[10*size +11] = true;
    env[10*size +12] = true;
    bool* dEnv;
    cudaMalloc((void**) &dEnv, size * size * sizeof(bool)); // device board
    cudaMemcpy(dEnv, env, size * size * sizeof(bool), cudaMemcpyHostToDevice);
    dim3 golThreads(size, size); // one thread per cell, single block
    while (parada < 100) { // run exactly 100 generations
        system("clear");
        jogo<<<1, golThreads>>>(dEnv); // one generation on the GPU
        cudaMemcpy(env, dEnv, size * size * sizeof(bool), cudaMemcpyDeviceToHost);
        print(env);
        usleep(100000);
        parada++;
    }
    cudaFree(dEnv); // fixed: device buffer was leaked
} |
14,045 | #include <stdio.h>
#include <math.h>
#include <cuda_runtime.h>
//TODO: BIG INT is needed
// RSA decryption M[i] = C[i]^d[i] mod N[i], one message per thread.
// Fixed: the original exponentiated the UNINITIALIZED M[i] with
// floating-point pow(), which reads garbage and overflows int for these
// exponents. Use square-and-multiply modular exponentiation on C[i],
// with 64-bit intermediates so base*base cannot overflow.
__global__ void decyrption(int *M, int *C, int *d, int *N){
    int i = threadIdx.x;
    long long base = C[i] % N[i];
    long long result = 1;
    int exp = d[i];
    while (exp > 0) {
        if (exp & 1)
            result = (result * base) % N[i];
        base = (base * base) % N[i];
        exp >>= 1;
    }
    M[i] = (int)result;
}
// Decrypts four RSA ciphertexts C with private exponent d=1019 and modulus
// N=3337, one GPU thread per message, then prints each (plain, cipher) pair.
// NOTE(review): M_GPU is handed to the kernel without any host-side
// initialization -- the kernel is expected to derive M entirely from C.
int main(){
int C[4] = {541, 795, 1479, 2753};
int M[4];
int d[4] = {1019, 1019, 1019, 1019};
int N[4] = {3337, 3337, 3337, 3337};
int *C_GPU, *M_GPU, *d_GPU, *N_GPU;
int size = 4 * sizeof(int);
cudaMalloc((void **)&C_GPU, size);
cudaMalloc((void **)&M_GPU, size);
cudaMalloc((void **)&d_GPU, size);
cudaMalloc((void **)&N_GPU, size);
cudaMemcpy(C_GPU, C, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_GPU, d, size, cudaMemcpyHostToDevice);
cudaMemcpy(N_GPU, N, size, cudaMemcpyHostToDevice);
// One block of four threads: one per ciphertext.
decyrption<<<1, 4>>>(M_GPU, C_GPU, d_GPU, N_GPU);
cudaMemcpy(M, M_GPU, size, cudaMemcpyDeviceToHost);
cudaFree(M_GPU);
cudaFree(C_GPU);
cudaFree(d_GPU);
cudaFree(N_GPU);
int i;
for (i = 0; i < 4; i++){
printf("The result is %d, %d\n", M[i], C[i]);
}
} |
14,046 | #include <stdint.h>
#include <cuda.h>
// Synthetic busy-work kernel: every thread runs the same 10*n*n iteration
// nest, repeatedly storing i + n + k into a[i]. b and c are accepted but
// never used; the stores into a[] are the only side effect.
// NOTE(review): without volatile, the compiler is free to collapse most of
// this loop nest -- confirm that is acceptable for the intended benchmark.
__global__
void loop(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t n)
{
int i, j, k;
for (k = 0; k < 10; k ++){
for (i = 0; i < n; i++) {
for (j = 0; j < n; j++) {
a[i] = i + n + k;
}
}
}
}
|
14,047 | /*
* Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <thrust/complex.h>
///////////////////////////////////////////////////////////////////////////////
// UPFIRDN1D //
///////////////////////////////////////////////////////////////////////////////
// Polyphase upsample-filter-downsample (upfirdn) along a 1-D signal.
// Grid-stride loop over output samples: for each output index `tid` the
// thread selects the filter phase (h_idx) and the input window ending at
// x_idx, then accumulates the dot product of the flipped/transposed taps
// (h_trans_flip, h_per_phase taps per phase) with the input.
// NOTE(review): `__builtin_assume( tid > 0 )` looks wrong -- tid IS 0 for
// the first thread; the intended assumption is presumably tid >= 0.
template<typename T>
__device__ void _cupy_upfirdn1D( const T *__restrict__ inp,
const T *__restrict__ h_trans_flip,
const int up,
const int down,
const int axis,
const int x_shape_a,
const int h_per_phase,
const int padded_len,
T *__restrict__ out,
const int outW ) {
const int t { static_cast<int>( blockIdx.x * blockDim.x + threadIdx.x ) };
const int stride { static_cast<int>( blockDim.x * gridDim.x ) };
for ( size_t tid = t; tid < outW; tid += stride ) {
#if ( __CUDACC_VER_MAJOR__ >= 11 ) && ( __CUDACC_VER_MINOR__ >= 2 )
__builtin_assume( padded_len > 0 );
__builtin_assume( up > 0 );
__builtin_assume( down > 0 );
__builtin_assume( tid > 0 );
#endif
// Input sample aligned with this output, and the starting tap index.
const int x_idx { static_cast<int>( ( tid * down ) / up ) % padded_len };
int h_idx { static_cast<int>( ( tid * down ) % up * h_per_phase ) };
int x_conv_idx { x_idx - h_per_phase + 1 };
// Clip the window at the left edge; skip the corresponding taps.
if ( x_conv_idx < 0 ) {
h_idx -= x_conv_idx;
x_conv_idx = 0;
}
T temp {};
// Clip the window at the right edge of the actual input (x_shape_a).
int stop = ( x_shape_a < ( x_idx + 1 ) ) ? x_shape_a : ( x_idx + 1 );
for ( int x_c = x_conv_idx; x_c < stop; x_c++ ) {
temp += inp[x_c] * h_trans_flip[h_idx];
h_idx += 1;
}
out[tid] = temp;
}
}
// float32 entry point: thin extern "C" shim over the shared 1-D template.
extern "C" __global__ void __launch_bounds__( 512 ) _cupy_upfirdn1D_float32(
    const float *__restrict__ inp, const float *__restrict__ h_trans_flip,
    const int up, const int down, const int axis, const int x_shape_a,
    const int h_per_phase, const int padded_len, float *__restrict__ out,
    const int outW ) {
    _cupy_upfirdn1D<float>( inp, h_trans_flip, up, down, axis, x_shape_a, h_per_phase, padded_len, out, outW );
}
// float64 entry point: thin extern "C" shim over the shared 1-D template.
extern "C" __global__ void __launch_bounds__( 512 ) _cupy_upfirdn1D_float64(
    const double *__restrict__ inp, const double *__restrict__ h_trans_flip,
    const int up, const int down, const int axis, const int x_shape_a,
    const int h_per_phase, const int padded_len, double *__restrict__ out,
    const int outW ) {
    _cupy_upfirdn1D<double>( inp, h_trans_flip, up, down, axis, x_shape_a, h_per_phase, padded_len, out, outW );
}
// complex64 entry point: thin extern "C" shim over the shared 1-D template.
extern "C" __global__ void __launch_bounds__( 512 ) _cupy_upfirdn1D_complex64(
    const thrust::complex<float> *__restrict__ inp,
    const thrust::complex<float> *__restrict__ h_trans_flip,
    const int up, const int down, const int axis, const int x_shape_a,
    const int h_per_phase, const int padded_len,
    thrust::complex<float> *__restrict__ out, const int outW ) {
    _cupy_upfirdn1D<thrust::complex<float>>(
        inp, h_trans_flip, up, down, axis, x_shape_a, h_per_phase, padded_len, out, outW );
}
// complex128 entry point: thin extern "C" shim over the shared 1-D template.
extern "C" __global__ void __launch_bounds__( 512 ) _cupy_upfirdn1D_complex128(
    const thrust::complex<double> *__restrict__ inp,
    const thrust::complex<double> *__restrict__ h_trans_flip,
    const int up, const int down, const int axis, const int x_shape_a,
    const int h_per_phase, const int padded_len,
    thrust::complex<double> *__restrict__ out, const int outW ) {
    _cupy_upfirdn1D<thrust::complex<double>>(
        inp, h_trans_flip, up, down, axis, x_shape_a, h_per_phase, padded_len, out, outW );
}
///////////////////////////////////////////////////////////////////////////////
// UPFIRDN2D //
///////////////////////////////////////////////////////////////////////////////
// 2-D upfirdn: applies the 1-D polyphase filter along either axis of a
// matrix. A 2-D grid-stride loop assigns each (x, y) output element to a
// thread; `axis` selects whether the convolution window runs along rows
// (axis == 1) or columns (else).
// NOTE(review): the `__builtin_assume( x > 0 )` / `( y > 0 )` hints look
// wrong for the first row/column (index 0); presumably >= 0 was intended.
template<typename T>
__device__ void _cupy_upfirdn2D( const T *__restrict__ inp,
const int inpH,
const T *__restrict__ h_trans_flip,
const int up,
const int down,
const int axis,
const int x_shape_a,
const int h_per_phase,
const int padded_len,
T *__restrict__ out,
const int outW,
const int outH ) {
const int ty { static_cast<int>( blockIdx.x * blockDim.x + threadIdx.x ) };
const int tx { static_cast<int>( blockIdx.y * blockDim.y + threadIdx.y ) };
const int stride_y { static_cast<int>( blockDim.x * gridDim.x ) };
const int stride_x { static_cast<int>( blockDim.y * gridDim.y ) };
for ( int x = tx; x < outH; x += stride_x ) {
for ( int y = ty; y < outW; y += stride_y ) {
int x_idx {};
int h_idx {};
#if ( __CUDACC_VER_MAJOR__ >= 11 ) && ( __CUDACC_VER_MINOR__ >= 2 )
__builtin_assume( padded_len > 0 );
__builtin_assume( up > 0 );
__builtin_assume( down > 0 );
#endif
// Pick filter phase and window position from the coordinate that
// runs along the filtered axis.
if ( axis == 1 ) {
#if ( __CUDACC_VER_MAJOR__ >= 11 ) && ( __CUDACC_VER_MINOR__ >= 2 )
__builtin_assume( x > 0 );
#endif
x_idx = ( static_cast<int>( x * down ) / up ) % padded_len;
h_idx = ( x * down ) % up * h_per_phase;
} else {
#if ( __CUDACC_VER_MAJOR__ >= 11 ) && ( __CUDACC_VER_MINOR__ >= 2 )
__builtin_assume( y > 0 );
#endif
x_idx = ( static_cast<int>( y * down ) / up ) % padded_len;
h_idx = ( y * down ) % up * h_per_phase;
}
int x_conv_idx { x_idx - h_per_phase + 1 };
// Clip the window at the leading edge; skip the matching taps.
if ( x_conv_idx < 0 ) {
h_idx -= x_conv_idx;
x_conv_idx = 0;
}
T temp {};
// Clip the window at the end of the real input (x_shape_a).
int stop = ( x_shape_a < ( x_idx + 1 ) ) ? x_shape_a : ( x_idx + 1 );
for ( int x_c = x_conv_idx; x_c < stop; x_c++ ) {
if ( axis == 1 ) {
temp += inp[y * inpH + x_c] * h_trans_flip[h_idx];
} else {
temp += inp[x_c * inpH + x] * h_trans_flip[h_idx];
}
h_idx += 1;
}
out[y * outH + x] = temp;
}
}
}
// float32 entry point: thin extern "C" shim over the shared 2-D template.
extern "C" __global__ void __launch_bounds__( 64 ) _cupy_upfirdn2D_float32(
    const float *__restrict__ inp, const int inpH,
    const float *__restrict__ h_trans_flip, const int up, const int down,
    const int axis, const int x_shape_a, const int h_per_phase,
    const int padded_len, float *__restrict__ out, const int outW,
    const int outH ) {
    _cupy_upfirdn2D<float>(
        inp, inpH, h_trans_flip, up, down, axis, x_shape_a, h_per_phase, padded_len, out, outW, outH );
}
// float64 entry point: thin extern "C" shim over the shared 2-D template.
extern "C" __global__ void __launch_bounds__( 64 ) _cupy_upfirdn2D_float64(
    const double *__restrict__ inp, const int inpH,
    const double *__restrict__ h_trans_flip, const int up, const int down,
    const int axis, const int x_shape_a, const int h_per_phase,
    const int padded_len, double *__restrict__ out, const int outW,
    const int outH ) {
    _cupy_upfirdn2D<double>(
        inp, inpH, h_trans_flip, up, down, axis, x_shape_a, h_per_phase, padded_len, out, outW, outH );
}
// complex64 entry point: thin extern "C" shim over the shared 2-D template.
extern "C" __global__ void __launch_bounds__( 64 ) _cupy_upfirdn2D_complex64(
    const thrust::complex<float> *__restrict__ inp, const int inpH,
    const thrust::complex<float> *__restrict__ h_trans_flip,
    const int up, const int down, const int axis, const int x_shape_a,
    const int h_per_phase, const int padded_len,
    thrust::complex<float> *__restrict__ out, const int outW,
    const int outH ) {
    _cupy_upfirdn2D<thrust::complex<float>>(
        inp, inpH, h_trans_flip, up, down, axis, x_shape_a, h_per_phase, padded_len, out, outW, outH );
}
// complex128 entry point: thin extern "C" shim over the shared 2-D template.
extern "C" __global__ void __launch_bounds__( 64 ) _cupy_upfirdn2D_complex128(
    const thrust::complex<double> *__restrict__ inp, const int inpH,
    const thrust::complex<double> *__restrict__ h_trans_flip,
    const int up, const int down, const int axis, const int x_shape_a,
    const int h_per_phase, const int padded_len,
    thrust::complex<double> *__restrict__ out, const int outW,
    const int outH ) {
    _cupy_upfirdn2D<thrust::complex<double>>(
        inp, inpH, h_trans_flip, up, down, axis, x_shape_a, h_per_phase, padded_len, out, outW, outH );
}
|
14,048 | #include <iostream>
#include <cstring>
#include <cstdlib>
#include <utility> //C++11
#include <cuda.h>
#define BLOCK 32
#define ELEM 64
#define STEP 32
using namespace std;
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if(error != cudaSuccess){ \
cerr << "Error:" << __FILE__ << endl; \
cerr << "code : "<< error << " reason : "<<cudaGetErrorString(error) << endl; \
} \
}
void checkResult(float* hostRef,float* devRef,const int N){
    // Compare two float arrays elementwise within an absolute tolerance
    // and report the first mismatch (if any) on stdout.
    const float tol = 1.0E-4;
    int firstBad = -1;
    for (int idx = 0; idx < N && firstBad < 0; idx++) {
        if (abs(hostRef[idx] - devRef[idx]) > tol)
            firstBad = idx;
    }
    if (firstBad >= 0) {
        cout << "Arrays don't match.on count of " << firstBad << " element.";
    } else {
        cout << "Arrays match.";
    }
    cout << endl;
}
void initializeData(float* A,int size){
    // Fill A with pseudo-random non-negative floats, seeding the generator
    // from the wall clock (intentionally non-deterministic across runs).
    time_t t;
    srand((unsigned int)time(&t));
    for (int k = 0; k < size; k++) {
        A[k] = (float)(rand()) / 10.0F;
    }
}
void print(float* Def,float* Src,float* Rst,const int elem){
    // Table: index | original input | CPU result | GPU result.
    for (int row = 0; row < elem; row++)
        cout << "\t" << row << " | " << Def[row] << " | " << Src[row] << " | " << Rst[row] << endl;
}
// CPU reference: STEP iterations of the 3-point stencil over ELEM floats,
// endpoints untouched. swap() exchanges only the LOCAL pointer copies, so
// with an EVEN STEP the final result lands in the caller's Src buffer
// (main() relies on that when it calls checkResult(Src, ...)).
void Host1DStencil(float* Src,float* Dst){
for(int st=0;st<STEP;st++){
for(int i=0;i<ELEM;i++){
if(i!=0 && i<ELEM-1)
Dst[i] = 0.6*Src[i] + 0.2*(Src[i-1]+Src[i+1]);
}
// Ping-pong the buffers for the next iteration.
swap(Src,Dst);
}
}
__global__ void StencilOneStep(float* Src,float* Dst,const int elem){
    // One step of the 3-point stencil; the two endpoints are left untouched.
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid > 0 && gid < elem - 1)
        Dst[gid] = 0.6*Src[gid] + 0.2*(Src[gid-1] + Src[gid+1]);
}
int main(int argc,char** argv){
    // 1-D 3-point stencil: run STEP iterations on both CPU and GPU,
    // verify they agree, and print a comparison table.
    float* Src = new float[ELEM];
    float* Dst = new float[ELEM];
    float* Rst = new float[ELEM];
    float* Def = new float[ELEM];
    // Initialize Src with random values; Dst/Def start as copies.
    initializeData(Src,ELEM);
    memcpy(Dst,Src,sizeof(float)*ELEM);
    memcpy(Def,Src,sizeof(float)*ELEM);
    // Allocate and seed device memory.
    size_t DeviceMemorySize = ELEM*sizeof(float);
    float *d_Src,*d_Dst;
    CHECK(cudaSetDevice(0));
    CHECK(cudaMalloc(&d_Src,DeviceMemorySize));
    CHECK(cudaMalloc(&d_Dst,DeviceMemorySize));
    CHECK(cudaMemcpy(d_Src,Src,DeviceMemorySize,cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(d_Dst,Src,DeviceMemorySize,cudaMemcpyHostToDevice));
    dim3 block(BLOCK);
    dim3 grid((ELEM+block.x-1)/block.x);
    cout << "block : "<< block.x << " | grid : " << grid.x << endl;
    // Ping-pong the device buffers; with an even STEP the newest data ends
    // up in d_Src (mirroring the host reference, which ends in Src).
    for(int st=0;st<STEP;st++){
        StencilOneStep<<<grid,block>>>(d_Src,d_Dst,ELEM);
        swap(d_Src,d_Dst);
    }
    CHECK(cudaGetLastError());
    CHECK(cudaMemcpy(Rst,d_Src,DeviceMemorySize,cudaMemcpyDeviceToHost));
    CHECK(cudaFree(d_Src));
    CHECK(cudaFree(d_Dst));
    Host1DStencil(Src,Dst);
    checkResult(Src,Rst,ELEM);
    print(Def,Src,Rst,ELEM);
    // Fixed: these were allocated with new[] and must be released with
    // delete[]; plain delete on a new[] pointer is undefined behavior.
    delete[] Src;
    delete[] Dst;
    delete[] Rst;
    delete[] Def;
    return 0;
} |
14,049 | #include <list>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/copy.h>
#include <cstdlib>
#include <cstdio>
#include <time.h>
#include <cassert>
#define ITERATIONS 25
#ifndef DATA_TYPE
#define DATA_TYPE float
#endif
using namespace std;
int main(int argc, char ** argv){
    // Benchmark: time copying a std::list of N random values to the GPU
    // (via a host_vector staging buffer) over ITERATIONS rounds; prints
    // the accumulated copy time in nanoseconds.
    long total_time = 0;
    struct timespec start, stop;
    list<DATA_TYPE> int_list;
    unsigned long N = 32000;
    if(argc > 1){
        N = atol(argv[1]);  // optional override of the element count
    }
    for(int i = 0; i < ITERATIONS; i++){
        for(int c = N; c > 0; c--){
            int_list.push_back((DATA_TYPE)(rand() % 500000));
        }
        std::list<DATA_TYPE>::iterator __first = int_list.begin();
        std::list<DATA_TYPE>::iterator __last = int_list.end();
        size_t __total_size = N;
        thrust::device_vector<DATA_TYPE> __device(__total_size);
        clock_gettime(CLOCK_REALTIME,&start);
        // Not provably continuous memory, so use iterators to copy
        thrust::host_vector<DATA_TYPE> __host(__total_size);
        unsigned long __idx = 0;
        for(; __first != __last; ++__first){
            __host[__idx] = *__first;
            ++__idx;
        }
        thrust::copy(__host.begin(),__host.end(),__device.begin());
        clock_gettime(CLOCK_REALTIME,&stop);
        thrust::copy(__device.begin(),__device.end(),__host.begin());
        // Fixed: __first was already advanced to __last by the upload loop
        // above, so this writeback never executed; rewind it first.
        __first = int_list.begin();
        __idx = 0;
        for(; __first != __last; ++__first){
            *__first = __host[__idx];
            ++__idx;
        }
        total_time += ((stop.tv_sec-start.tv_sec)*1000000000) + (stop.tv_nsec - start.tv_nsec);
        int_list.clear();
    }
    // NOTE(review): int_list was just cleared, so this ordering check is
    // vacuous; kept as-is for behavior compatibility.
    int val = 0;
    for(std::list<DATA_TYPE>::iterator it = int_list.begin(), e = int_list.end(); it != e; ++it){
        assert(*it >= val);
        val = *it;
    }
    printf("%lu\n", total_time);
    return 0;
}
|
14,050 | //#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
//#include <cuda.h>
//#include <fstream>
//#include <iostream>
//#include <stdio.h>
//#include <ctime>
//
//using namespace std;
//
//__global__ void matrixMultiply(float * A, float * B, float * C,
// int numARows, int numAColumns,
// int numBRows, int numBColumns,
// int numCRows, int numCColumns) {
// int tx = threadIdx.x + blockIdx.x*blockDim.x;
// int ty = threadIdx.y + blockIdx.y*blockDim.y;
//
// float p = 0;
//
// if (tx < numBColumns && ty < numARows){
// for (int k = 0; k < numAColumns; k++){
// p += A[ty*numAColumns + k]*B[k*numBColumns + tx];
// }
// C[ty*numBColumns + tx] = p;
// }
//}
//
//void calc(char *file)
//{
// float * hostA;
// float * hostB;
// float * hostC;
// float * deviceA;
// float * deviceB;
// float * deviceC;
// int numARows;
// int numAColumns;
// int numBRows;
// int numBColumns;
// int numCRows;
// int numCColumns;
//
// float * result;
//
// Charger le fichier d'entree
// char * in0 = new char();
// strcpy(in0, file);
// strcat(in0, "/input0.raw");
// ifstream fin0(in0);
// fin0 >> numARows >> numAColumns;
// hostA = (float*)malloc(numARows*numAColumns*sizeof(float));
// for (int i = 0; i < numARows*numAColumns; i++){
// fin0 >> hostA[i];
// }
// fin0.close();
//
// char * in1 = new char();
// strcpy(in1, file);
// strcat(in1, "/input1.raw");
// ifstream fin1(in1);
// fin1 >> numBRows >> numBColumns;
// hostB = (float*)malloc(numBRows*numBColumns*sizeof(float));
// for (int i = 0; i < numBRows*numBColumns; i++)
// fin1 >> hostB[i];
// fin1.close();
//
// Initialiser numCRows et numCColumns
// numCRows = numARows;
// numCColumns = numBColumns;
// Allouer hostC
// hostC = (float*)malloc(numCRows*numCColumns*sizeof(float));
//
// Afficher les informations sur la matrice
// Allouer la memoire sur GPU
// cudaMalloc((float**)&deviceA, numARows*numAColumns*sizeof(float));
// cudaMalloc((float**)&deviceB, numBRows*numBColumns*sizeof(float));
// cudaMalloc((float**)&deviceC, numCRows*numCColumns*sizeof(float));
//
// Copier la memoire sur le GPU
// cudaMemcpy(deviceA, hostA, numARows*numAColumns*sizeof(float), cudaMemcpyHostToDevice);
// cudaMemcpy(deviceB, hostB, numBRows*numBColumns*sizeof(float), cudaMemcpyHostToDevice);
//
// Initialise la grille et les dimensions de chaque bloc
// int gridX = ceil((double)numBColumns/16.);
// int gridY = ceil((double)numARows/16.);
// dim3 dimGrid(gridX, gridY, 1);
// dim3 dimBlock(16 , 16, 1);
//
// Execute le kernel
// matrixMultiply<<<dimGrid , dimBlock>>>(deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
//
// cudaThreadSynchronize();
//
// Charge le resultat en memoire CPU
// cudaMemcpy(hostC, deviceC, numCRows*numCColumns*sizeof(float), cudaMemcpyDeviceToHost);
//
// TEST
// char * out = new char();
// strcpy(out, file);
// strcat(out, "/output.raw");
// ifstream fout(out) ;
// fout >> numCRows >> numCColumns;
// result = (float*)malloc(numCRows*numCColumns*sizeof(float));
// for (int i = 0; i < numCRows*numCColumns; i++)
// fout >> result[i];
// fout.close();
//
// for (int i = 0; i < numCRows*numCColumns; i++){
// printf("%d %f \n", i, fabs(result[i]-hostC[i]));
// }
// Libere la memoire
// free(hostA);
// free(hostB);
// free(hostC);
// cudaFree(deviceA);
// cudaFree(deviceB);
// cudaFree(deviceC);
// free(result);
//
// printf("\n%d %d\n%d %d\n%d %d\n", numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
// printf("%d %d\n", gridY, gridX);
//}
//
//int main()
//{
// clock_t tbegin, tend;
// tbegin = clock();
// calc("mp2_data/5");
// system("pause");
//
// calc("ex2_data2/0");
// printf("0\n");
//
// calc("ex2_data2/1");
// printf("1\n");
//
// calc("ex2_data2/2");
// printf("2\n");
//
// calc("ex2_data2/3");
// printf("3\n");
//
// calc("ex2_data2/4");
// printf("4\n");
//
// calc("ex2_data2/5");
// printf("5\n");
//
// calc("ex2_data2/6");
// printf("6\n");
//
// calc("ex2_data2/7");
// printf("7\n");
//
// calc("ex2_data2/8");
// printf("8\n");
//
// calc("ex2_data2/9");
// printf("9\n");
// tend = clock();
//
// printf("%f\n", (float)(tend-tbegin)/CLOCKS_PER_SEC);
// system("pause");
// return 0;
//}
|
14,051 | #include "stdio.h"
#include <math.h>
#include <stdlib.h>
//#include "mpi.h"
#include <sys/time.h>
#include <stdint.h>
#include<cuda.h>
#include<math.h>
#include<cuda_runtime.h>
#include<string.h>
// Trial-division primality test, one block per candidate (launched
// <<<count, 1>>>): a[i] is an odd candidate, b[i] its precomputed sqrt.
// On exit c[i] == -1 marks a composite; any other value marks a prime.
// Fixed: c[] is raw device memory and was never written for primes, so
// the host's `is_prime[g] != -1` test read uninitialized data; mark
// primes explicitly before testing divisors.
__global__ void find_prime(int N,int* a,float* b,int* c)
{
int i = blockIdx.x;
c[i] = 1;            // assume prime until a divisor is found
for(int j=3;j<=b[i];j=j+2)   // odd divisors up to sqrt(a[i])
{
if(a[i]%j==0)
{
c[i] = -1;           // composite
break;
}
}
}
// Counts odd primes in [from, to] via GPU trial division (one block per
// odd candidate) and returns a malloc'd int[3]: {count, largest, 0}.
// r is the caller's rank, used only in trace output; caller frees result.
extern "C" int* invoke_cuda_find_prime(int from,int to,int r)
{
    printf("from %d to %d\n",from,to);
    int n = to-from+1;
    if(from ==0)
    {
        from = 3;
        n = to - from+1;
    }
    if(from%2==0)
    {
        from = from+1;
    }
    int* prime_numbers;
    float* sqrt_;
    int* is_prime;
    int index =0;
    int size_of_prime_number=0;
    prime_numbers = (int*)malloc(sizeof(int)*n);
    sqrt_ = (float*)malloc(sizeof(float)*n);
    is_prime = (int*)malloc(sizeof(int)*n);
    int* cuda_prime;
    float* cuda_sqrt;
    int*cuda_is_prime;
    int sa=0;
    printf("from %d to %d rank %d\n",from,to,r);
    for(sa=from;sa<=to;sa++)
    {
        if(sa%2!=0)
        {
            prime_numbers[index] = sa;
            sqrt_[index] = sqrt(sa);
            // Fixed: flag default must differ from the kernel's composite
            // sentinel (-1); it was -1 before, which made every candidate
            // look composite once the flags actually reached the device.
            is_prime[index] = 1;
            printf("s %d prime[%d] %d sqrt %f rank %d \n",sa,index,prime_numbers[index],sqrt(sa),r);
            index++;
            size_of_prime_number++;
        }
    }
    cudaMalloc ( (void**)&cuda_prime, size_of_prime_number * sizeof (int) );
    cudaMalloc ( (void**)&cuda_sqrt, size_of_prime_number * sizeof (float) );
    cudaMalloc ( (void**)&cuda_is_prime, size_of_prime_number * sizeof (int) );
    cudaMemcpy( cuda_prime, prime_numbers, size_of_prime_number * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy( cuda_sqrt, sqrt_, size_of_prime_number * sizeof(float), cudaMemcpyHostToDevice);
    // Fixed: upload the initialized flags so the device never holds
    // uninitialized memory for candidates the kernel leaves untouched.
    cudaMemcpy( cuda_is_prime, is_prime, size_of_prime_number * sizeof(int), cudaMemcpyHostToDevice);
    find_prime<<<size_of_prime_number,1>>>(size_of_prime_number,cuda_prime,cuda_sqrt,cuda_is_prime);
    cudaMemcpy( is_prime, cuda_is_prime , size_of_prime_number * sizeof(int), cudaMemcpyDeviceToHost);
    int largest = 0;
    int count = 0;
    for(int g=0;g<size_of_prime_number;g++)
    {
        printf("is_prime[%d] = %d number %d\n",g,is_prime[g],prime_numbers[g]);
        if(is_prime[g]!=-1)
        { printf("Prime Number %d\n",prime_numbers[g]);
            count++;
            largest = prime_numbers[g];
        }
    }
    int* b;
    b = (int*)malloc(sizeof(int)*3);
    b[0] = count;
    b[1] = largest;
    b[2] = 0;  // fixed: third slot was returned uninitialized
    printf("Count %d\n Largest %d\n",count,largest);
    cudaFree(cuda_prime);
    cudaFree(cuda_sqrt);
    cudaFree(cuda_is_prime);
    // Fixed: host scratch buffers were leaked.
    free(prime_numbers);
    free(sqrt_);
    free(is_prime);
    return b;
}
|
14,052 | #include <stdio.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <time.h>
using namespace std;
#define N 756
// kernel
// Naive GPU GEMM for N x N row-major int matrices: one thread per output
// element, with a bounds guard for the partial blocks at the grid edge.
__global__ void matrixMulGPU( int * a, int * b, int * c )
{
int val = 0;
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
if (row < N && col < N)
{
for ( int k = 0; k < N; ++k )
val += a[row * N + k] * b[k * N + col];
c[row * N + col] = val;
}
}
void matrixMulCPU( int * a, int * b, int * c )
{
    // CPU reference: c = a * b for N x N row-major int matrices.
    for( int row = 0; row < N; ++row )
        for( int col = 0; col < N; ++col )
        {
            int acc = 0;
            for ( int k = 0; k < N; ++k )
                acc += a[row * N + k] * b[k * N + col];
            c[row * N + col] = acc;
        }
}
// Multiplies two N x N matrices on GPU and CPU (unified memory), times
// both, and verifies the results agree element-for-element.
// NOTE(review): the CUDA events are never destroyed and API results are
// unchecked; cudaEventElapsedTime is safe here only because
// cudaDeviceSynchronize() runs before it.
int main()
{
int *a, *b, *c_cpu, *c_gpu;
// Number of bytes of an N x N matrix
int size = N * N * sizeof (int);
// Allocate (unified) memory reachable from both host and device
cudaMallocManaged (&a, size);
cudaMallocManaged (&b, size);
cudaMallocManaged (&c_cpu, size);
cudaMallocManaged (&c_gpu, size);
// Initialize memory
for( int row = 0; row < N; ++row )
for( int col = 0; col < N; ++col )
{
a[row * N + col] = row;
b[row * N + col] = col+2;
c_cpu[row * N + col] = 0;
c_gpu[row * N + col] = 0;
}
// 16 x 16 thread block; grid rounded up to cover all of N
dim3 threads_per_block (16, 16, 1);
dim3 number_of_blocks ((N / threads_per_block.x) + 1, (N / threads_per_block.y) + 1, 1);
// Two CUDA events bracket the kernel for GPU timing
cudaEvent_t start, end;
// Create the events
cudaEventCreate(&start);
cudaEventCreate(&end);
// Record the first event
cudaEventRecord(start);
// Kernel launch
matrixMulGPU <<< number_of_blocks, threads_per_block >>> ( a, b, c_gpu );
// Record the second event
cudaEventRecord(end);
// Wait for the GPU to finish its work
cudaDeviceSynchronize();
// Compute the elapsed GPU time
float elapsed;
cudaEventElapsedTime(&elapsed, start, end);
cout << "Tempo de processamento na GPU igual a " << elapsed << " msec (aproximadamente 0.01108 segundos)" << endl;
clock_t start1, end1;
double cpu_time_used;
start1 = clock();
// Run the CPU version to check our work
matrixMulCPU( a, b, c_cpu );
// Compute the elapsed CPU time
end1 = clock();
cpu_time_used = ((double) (end1 - start1)) / CLOCKS_PER_SEC;
cout << "Tempo de processamento na CPU igual a " << cpu_time_used << " sec" << endl;
// Compare the two answers to make sure they are identical
bool error = false;
for( int row = 0; row < N && !error; ++row )
for( int col = 0; col < N && !error; ++col )
if (c_cpu[row * N + col] != c_gpu[row * N + col])
{
printf("FOUND ERROR at c[%d][%d]\n", row, col);
error = true;
break;
}
if (!error)
printf("Successo! As duas matrizes são iguais, sendo executadas na CPU e na GPU!\n");
// Free the (unified) memory
cudaFree(a);
cudaFree(b);
cudaFree( c_cpu );
cudaFree( c_gpu );
} |
14,053 | //
// CUDA code to compute minimu distance between n points
//
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#define MAX_POINTS 1048576
#define BLOCK_SIZE 1024
int blocks; //log2(num_points);
int threads; //num_points/blocks;
int num_points;
//__device__ float mins[BLOCK_SIZE];
// ----------------------------------------------------------------------------
// Kernel Function to compute distance between all pairs of points
// Input:
// X: X[i] = x-coordinate of the ith point
// Y: Y[i] = y-coordinate of the ith point
// n: number of points
//
// Output:
// D: D[0] = minimum distance
//
// Computes the minimum pairwise Euclidean distance over n points into D[0].
// Each thread scans all pairs (i, j>i) for its own i, then the block reduces
// its per-thread minima in shared memory and thread/global id 0 folds the
// block result into D[0].
// Launch requirements: dynamic shared memory of blockDim.x * sizeof(float)
// (the array below is `extern __shared__`), and D[0] should be pre-seeded.
// NOTE(review): D[0] is initialized by global thread 0 while other blocks may
// already be reading/updating it — there is no grid-wide ordering, so this is
// a race; seed D[0] from the host instead.
// NOTE(review): the reduction pairs mins[index] with mins[index + s - 1]
// (for s == 1 that is mins[index] itself) — the conventional pairing is
// mins[index + s]; confirm the intended tree shape.
// NOTE(review): only block 0 satisfies `i < blockDim.x`, so other blocks
// never publish their minima to D[0].
__global__ void minimum_distance(float * X, float * Y, float * D, int n) {
//CUDA version of the code
//D[0] = (float)blockDim.x;
//return;
int i = blockIdx.x * blockDim.x + threadIdx.x;
//int j = blockIdx.x * blockDim.x + threadIdx.x +1;
//int i = threadIdx.x;
//int j = threadIdx.x + 1;
if (i == 0) {
D[0] = 999;
}
extern __shared__ float mins[];
__threadfence();
// 999 acts as "+infinity": points lie in the unit square, so every real
// distance is smaller.
float distance = 999;
//D[0] = distance;
//printf((int)distance);
if (i < n) {
for (int j = i + 1; j < n; j++) {
float dx = X[j] - X[i];
float dy = Y[j] - Y[i];
float Dij = sqrtf(dx * dx + dy * dy);
if (distance > Dij) {
distance = Dij;
}
}
//mins local to the block
mins[threadIdx.x] = distance;
}
//return;
__syncthreads();
if (i < blockDim.x) {
int maxValue = blockDim.x;
if (maxValue > n) {
maxValue = n;
}
int index = threadIdx.x;
// Tree reduction of the block's per-thread minima (see review notes above).
for (unsigned int s = maxValue / 2; s > 0; s >>= 1) {
if (index < s) {
if (mins[index] > mins[index + s - 1]
&& mins[index + s - 1] != 0)
mins[index] = mins[index + s - 1];
}
__syncthreads();
}
if (i == 0) {
if (D[0] > mins[0] && mins[0] != 0) {
D[0] = mins[0];
}
}
}
// return;
//now update global minimum
}
// ----------------------------------------------------------------------------
// Main program - initializes points and computes minimum distance
// between the points
//
// Generates random points in the unit square, runs the GPU minimum-distance
// kernel, then verifies against a CPU O(n^2) scan and reports timings.
// Fixes: the kernel is launched with the computed grid geometry and the
// dynamic shared-memory size its `extern __shared__` array requires (the old
// hard-coded <<<4, 128>>> launch provided no shared memory); the device
// result is seeded from the host; the D2H copy transfers sizeof(float)
// instead of `size` bytes from a one-float allocation.
int main(int argc, char* argv[]) {
    // Host data
    float * hVx;        // host x-coordinate array
    float * hVy;        // host y-coordinate array
    float * hmin_dist;  // minimum value on host
    // Device data
    float * dVx;        // device x-coordinate array
    float * dVy;        // device y-coordinate array
    float * dmin_dist;  // minimum value on device
    int i, j, size;
    float dx, dy, Dij, distance;
    unsigned int seed = 0;
    cudaEvent_t start, stop;             // GPU timing variables
    struct timeval cpu_start, cpu_stop;  // CPU timing variables
    float time_array[10];
    // Timing initializations
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Check input
    if (argc != 2) {
        printf("Use: %s <number of points>\n", argv[0]);
        exit(0);
    }
    if ((num_points = atoi(argv[argc - 1])) > MAX_POINTS) {
        printf("Maximum number of points allowed: %d\n", MAX_POINTS);
        exit(0);
    }
    // Allocate host coordinate arrays
    size = num_points * sizeof(float);
    hVx = (float *) malloc(size);
    hVy = (float *) malloc(size);
    hmin_dist = (float *) malloc(sizeof(float));  // only one float comes back
    // Initialize points (deterministic seed for reproducibility)
    for (i = 0; i < num_points; i++) {
        hVx[i] = (float) (rand_r(&seed)) / (float) (RAND_MAX);
        hVy[i] = (float) (rand_r(&seed)) / (float) (RAND_MAX);
    }
    // Allocate device coordinate arrays
    cudaMalloc(&dVx, size);
    cudaMalloc(&dVy, size);
    cudaMalloc(&dmin_dist, sizeof(float));
    // fixed: seed the device-side minimum with a sentinel larger than any
    // distance in the unit square, so the kernel never reads garbage
    float sentinel = 999.0f;
    cudaMemcpy(dmin_dist, &sentinel, sizeof(float), cudaMemcpyHostToDevice);
    // Copy coordinate arrays from host memory to device memory (timed)
    cudaEventRecord(start, 0);
    cudaMemcpy(dVx, hVx, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dVy, hVy, size, cudaMemcpyHostToDevice);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&(time_array[0]), start, stop);
    // Invoke kernel (timed)
    cudaEventRecord(start, 0);
    threads = 256;
    blocks = (num_points + threads - 1) / threads;  // ceil-div coverage
    // fixed: pass the computed geometry and one float of dynamic shared
    // memory per thread (the kernel declares `extern __shared__ float mins[]`)
    minimum_distance<<<blocks, threads, threads * sizeof(float)>>>(dVx, dVy, dmin_dist, num_points);
    cudaError_t kerr = cudaGetLastError();
    if (kerr != cudaSuccess)
        printf("Kernel launch failed: %s\n", cudaGetErrorString(kerr));
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&(time_array[1]), start, stop);
    // Copy result from device memory to host memory (timed)
    cudaEventRecord(start, 0);
    // fixed: dmin_dist holds exactly one float; the old copy of `size` bytes
    // read far past the end of the allocation
    cudaMemcpy(hmin_dist, dmin_dist, sizeof(float), cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&(time_array[2]), start, stop);
    printf("Number of Points = %d\n", num_points);
    printf("GPU Host-to-device = %f ms \n", time_array[0]);
    printf("GPU execution time = %f ms \n", time_array[1]);
    printf("GPU Device-to-host = %f ms \n", time_array[2]);
    printf("Minimum distance (GPU) = %e\n", hmin_dist[0]);
    // Compute minimum distance on host to check device computation
    gettimeofday(&cpu_start, NULL);
    dx = hVx[1] - hVx[0];
    dy = hVy[1] - hVy[0];
    distance = sqrtf(dx * dx + dy * dy);
    for (i = 0; i < num_points; i++) {
        for (j = i + 1; j < num_points; j++) {
            dx = hVx[j] - hVx[i];
            dy = hVy[j] - hVy[i];
            Dij = sqrtf(dx * dx + dy * dy);
            if (distance > Dij)
                distance = Dij;
        }
    }
    gettimeofday(&cpu_stop, NULL);
    time_array[3] = 1000 * (cpu_stop.tv_sec - cpu_start.tv_sec)
                  + 0.000001 * (cpu_stop.tv_usec - cpu_start.tv_usec);
    printf("CPU execution time = %f ms\n", time_array[3]);
    printf("Minimum distance (CPU) = %e\n", distance);
    // Free device memory
    cudaFree(dVx);
    cudaFree(dVy);
    cudaFree(dmin_dist);
    // Free host memory
    free(hVx);
    free(hVy);
    free(hmin_dist);
    return 0;
}
|
14,054 | #include "cuda_part.cuh"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#ifdef __CUDACC__
#define KERNEL_ARGS2(grid,block) <<< grid , block >>>
#else
#define KERNEL_ARGS2(grid, block)
#endif
// Adds 1.0f to every element of a pitched 5x5 float array and logs each
// element's before/after values. One thread per element; threads with
// id >= 25 do nothing.
__global__
void cuda_run(void *arr, size_t pitch) {
    int id = (int)blockIdx.x * (int)blockDim.x + (int)threadIdx.x;
    if (id >= 25)
        return;
    int col = id / 5;
    int row = id % 5;
    // Pitched addressing: rows are `pitch` bytes apart, not 5*sizeof(float).
    float *rowPtr = (float *)((char *)arr + row * pitch);
    float before = rowPtr[col];
    rowPtr[col] += 1;
    printf("bye %d %f %f\n", id, before, rowPtr[col]);
}
// Copies the caller's 5x5 float array to a pitched device allocation, runs
// cuda_run over it (adds 1.0f to every element), and copies the result back
// into *h_arr.
// Fixes: a failed cudaMallocPitch no longer falls through into copies that
// would use an invalid pointer/pitch, and kernel launch/execution errors are
// now checked (launches return no status by themselves).
void setup(float **h_arr) {
    void *d_arr;
    size_t pitch;
    cudaError_t e;
    e = cudaMallocPitch(&d_arr, &pitch, 5 * sizeof(float), 5 );
    if (e != cudaSuccess) {
        std::cout << "Memalloc fail\n";
        return;  // fixed: do not use d_arr/pitch after a failed allocation
    }
    std::cout << pitch << "\n";
    e = cudaMemcpy2D(d_arr, pitch, *h_arr, 5 * sizeof(float), 5 * sizeof(float) , 5 , cudaMemcpyHostToDevice);
    if (e != cudaSuccess) {
        std::cout << "Memcpy fail\n";
    }
    cuda_run KERNEL_ARGS2(1, 64) (d_arr,pitch);
    // fixed: check for launch errors, then for asynchronous execution errors
    e = cudaGetLastError();
    if (e == cudaSuccess) {
        e = cudaDeviceSynchronize();
    }
    if (e != cudaSuccess) {
        std::cout << "Kernel fail\n";
    }
    e = cudaMemcpy2D(*h_arr, 5 * sizeof(float), d_arr, pitch, 5 * sizeof(float), 5 , cudaMemcpyDeviceToHost);
    if (e != cudaSuccess) {
        std::cout << "2nd Memcpy fail\n";
    }
    cudaFree(d_arr);
} |
14,055 | #include <unistd.h>
#include <stdio.h>
#include <assert.h>
#include <sys/time.h>
#include <time.h>
static void HandleError( cudaError_t err, const char *file, int line ) {
if (err != cudaSuccess) {
printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#define HANDLE_NULL( a ) {if (a == NULL) { \
printf( "Host memory failed in %s at line %d\n", \
__FILE__, __LINE__ ); \
exit( EXIT_FAILURE );}}
inline double diff_s(struct timeval start, struct timeval end)
{
return ((double) (end.tv_usec - start.tv_usec) / 1000000 + (double) (end.tv_sec - start.tv_sec));
}
// Read-latency probe kernel: each thread loads one int from memoryToRead.
// The branch on the loaded value gives the load a visible use — presumably
// to keep the compiler from eliding it; TODO confirm.
// NOTE(review): the benchmark driver initializes all inputs to 5, so the
// divergent __syncthreads() below is never executed; if a zero ever
// appeared, a barrier inside a data-dependent branch would be undefined
// behavior for a diverged block.
__global__ void readKernel(int *memory, int *memoryToRead)
{
int tid = threadIdx.x + blockIdx.x*blockDim.x;
//memory[tid]=memoryToRead[tid];
//__shared__ int temp;
int temp = memoryToRead[tid];
if(!temp)
__syncthreads();
}
// Write-latency probe kernel: each thread stores the constant 5 into its
// own element of `memory`.
__global__ void writeKernel(int *memory)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    memory[tid] = 5;
}
// Empty kernel used to measure bare launch overhead; the argument is unused.
__global__ void nullKernel(int *memory)
{
    (void)memory;
}
// Serially fills the first N*N/256 ints of `memory` with 5 using a single
// thread, so the read benchmarks see initialized device data.
__global__ void initCudaMallocd(int *memory, int N)
{
    if (threadIdx.x != 0)
        return;
    int count = N * N / (16 * 16);
    for (int k = 0; k < count; k++)
        memory[k] = 5;
}
// Host-side check that the first N*N/256 ints of `memory` all equal 5;
// prints the verification outcome.
void verify(int* memory, int N)
{
    bool ok = true;
    for (int i = 0; i < N * N / (16 * 16); i++) {
        if (memory[i] != 5) {
            ok = false;
            break;
        }
    }
    if (ok)
        printf("verified SUCCESS\n");
    else
        printf("error in verification\n");
}
// Device-side twin of verify(): thread 0 scans the first N*N/256 ints for
// the value 5 and prints the outcome from the GPU.
__global__ void verifyCudaMallocd(int* memory, int N)
{
    if (threadIdx.x != 0)
        return;
    bool ok = true;
    for (int i = 0; i < N * N / (16 * 16); i++) {
        if (memory[i] != 5) {
            ok = false;
            break;
        }
    }
    if (ok)
        printf("verified SUCCESS\n");
    else
        printf("error in verification\n");
}
// Streams one store per thread over `junk` (launched as 512 blocks x 64
// threads) to push benchmark data out of the L2 cache between timed
// iterations.
// NOTE(review): `5 & 0xDEADBEEF` is just the constant 5 — perhaps
// `tid & 0xDEADBEEF` was intended; confirm against the benchmark's intent.
__global__ void pollute_L2_cache(int *junk)
{
int tid=threadIdx.x+blockDim.x*blockIdx.x;
junk[tid]= 5 & 0xDEADBEEF;
}
// Memory-latency microbenchmark driver.
//   -m <n>   size parameter (buffers hold n*n/256 ints; kernels launch
//            (n/16) blocks x (n/16) threads, i.e. one thread per element)
//   -b <t>   0 = pinned host memory, 1 = device memory,
//            2 = null-kernel launch overhead, 3 = plain CPU memory
//   -i <k>   iteration count   -r <0|1>  write/read   -p  pollute L2
//
// Fixes over the original:
//  * the init loop wrote numBytes entries into buffers holding only
//    numBytes*numBytes/256 ints (heap overflow for numBytes < 256),
//  * cudaMallocd held numBytes ints while the kernels write
//    (numBytes/16)^2 ints (device overflow for numBytes > 256) — all
//    allocations now use the same element count as the launch geometry,
//  * case 2 fell through into case 3, silently running a second benchmark.
int
main( int argc, char *argv[] )
{
    int *hostAllocd, *cudaMallocd, *cpuMallocd;
    int ITERATIONS = 100000;
    int numBytes = 1024;
    struct timeval tv1, tv2;
    int opt;
    int read = 0;            // read benchmark? or write?
    int benchmarkType = 0;
    int pollute = 0;
    while ((opt = getopt(argc, argv, "m:b:i:r:p")) != -1) {
        switch (opt) {
        case 'm':
            numBytes = atoi(optarg);
            break;
        case 'b':
            benchmarkType = atoi(optarg);
            break;
        case 'i':
            ITERATIONS = atoi(optarg);
            break;
        case 'r':
            read = atoi(optarg);
            break;
        case 'p':
            pollute = 1;
            break;
        default: /* '?' */
            break;
        }
    }
    // One element per launched thread: (numBytes/16) blocks x (numBytes/16)
    // threads.
    int numElems = numBytes * numBytes / (16 * 16);
    cpuMallocd = (int *)malloc(sizeof(int) * numElems);
    assert(cpuMallocd);
    HANDLE_ERROR( cudaHostAlloc( &hostAllocd, sizeof(int) * numElems, 0 ) );
    int *junk;
    HANDLE_ERROR( cudaHostAlloc( &junk, sizeof(int)*512*64, 0 ) );
    // fixed: initialize exactly the allocated element count
    for (int k = 0; k < numElems; k++) {
        cpuMallocd[k] = 1;
        hostAllocd[k] = 1;
    }
    // fixed: sized to match the kernel launch geometry
    HANDLE_ERROR( cudaMalloc( &cudaMallocd, sizeof(int) * numElems) );
    HANDLE_ERROR( cudaMemcpy( cudaMallocd, hostAllocd, sizeof(int) * numElems, cudaMemcpyDefault) );
    int num_of_blocks, num_of_threads_per_block;
    num_of_blocks = numBytes / 16;
    num_of_threads_per_block = numBytes / 16;
    // Establish the CUDA context up front so it isn't charged to the first
    // timed iteration.
    HANDLE_ERROR(cudaFree(0));
    switch (benchmarkType) {
    case 0: { // read/write pinned (cudaHostAlloc'd) memory
        if (read)
        {
            int *memoryToRead;
            HANDLE_ERROR( cudaHostAlloc( &memoryToRead, sizeof(int) * numElems, 0 ) );
            for (int k = 0; k < numElems; k++)
                memoryToRead[k] = 5;
            gettimeofday(&tv1, NULL);
            for (int i = 0; i < ITERATIONS; i++) {
                if (pollute) {
                    pollute_L2_cache<<<512,64>>>(junk);
                    HANDLE_ERROR( cudaDeviceSynchronize());
                }
                readKernel<<<num_of_blocks,num_of_threads_per_block>>>(hostAllocd, memoryToRead);
                HANDLE_ERROR( cudaDeviceSynchronize());
            }
            gettimeofday(&tv2, NULL);
            cudaFreeHost(memoryToRead);
        }
        else
        {
            gettimeofday(&tv1, NULL);
            for (int i = 0; i < ITERATIONS; i++) {
                if (pollute) {
                    pollute_L2_cache<<<512,64>>>(junk);
                    HANDLE_ERROR( cudaDeviceSynchronize());
                }
                writeKernel<<<num_of_blocks,num_of_threads_per_block>>>(hostAllocd);
                HANDLE_ERROR( cudaDeviceSynchronize());
            }
            gettimeofday(&tv2, NULL);
        }
        HANDLE_ERROR( cudaGetLastError());
        double elapsedTimeSeconds = diff_s(tv1, tv2);
        printf("[%s] Latency including kernel launch overhead = %f us\n",(read==1)?"read":"write",elapsedTimeSeconds*1e6/(float)ITERATIONS);
        break;
    }
    case 1: { // read/write device (cudaMalloc'd) memory
        if (read)
        {
            int *memoryToRead;
            HANDLE_ERROR( cudaMalloc( &memoryToRead, sizeof(int) * numElems ) );
            initCudaMallocd<<<1,1>>>(memoryToRead, numBytes);
            HANDLE_ERROR( cudaDeviceSynchronize());
            gettimeofday(&tv1, NULL);
            for (int i = 0; i < ITERATIONS; i++) {
                if (pollute) {
                    pollute_L2_cache<<<512,64>>>(junk);
                    HANDLE_ERROR( cudaDeviceSynchronize());
                }
                readKernel<<<num_of_blocks,num_of_threads_per_block>>>(cudaMallocd, memoryToRead);
                HANDLE_ERROR( cudaDeviceSynchronize());
            }
            gettimeofday(&tv2, NULL);
            cudaFree(memoryToRead);
        }
        else
        {
            gettimeofday(&tv1, NULL);
            for (int i = 0; i < ITERATIONS; i++) {
                if (pollute) {
                    pollute_L2_cache<<<512,64>>>(junk);
                    HANDLE_ERROR( cudaDeviceSynchronize());
                }
                writeKernel<<<num_of_blocks,num_of_threads_per_block>>>(cudaMallocd);
                HANDLE_ERROR( cudaDeviceSynchronize());
            }
            gettimeofday(&tv2, NULL);
        }
        HANDLE_ERROR( cudaGetLastError());
        double elapsedTimeSeconds = diff_s(tv1, tv2);
        printf("[%s] Latency including kernel launch overhead = %f us\n",(read==1)?"read":"write",elapsedTimeSeconds*1e6/(float)ITERATIONS);
        break;
    }
    case 2:
    { // bare kernel-launch overhead
        gettimeofday(&tv1, NULL);
        for (int i = 0; i < ITERATIONS; i++) {
            nullKernel<<<num_of_blocks,num_of_threads_per_block>>>(0);
            HANDLE_ERROR( cudaDeviceSynchronize());
        }
        gettimeofday(&tv2, NULL);
        HANDLE_ERROR( cudaGetLastError());
        double elapsedTimeSeconds = diff_s(tv1, tv2);
        printf("null kernel launch overhead = %f us\n",elapsedTimeSeconds*1e6/(float)ITERATIONS);
        break;  // fixed: previously fell through into case 3
    }
    case 3: { // read/write plain CPU memory (no GPU involved)
        if (read)
        {
            int temp;
            int *memoryToRead = (int *)malloc(sizeof(int) * numElems);
            assert(memoryToRead);
            for (int k = 0; k < numElems; k++)
                memoryToRead[k] = 5;
            gettimeofday(&tv1, NULL);
            for (int i = 0; i < ITERATIONS; i++) {
                for (int j = 0; j < numElems; j++) {
                    temp = memoryToRead[j];
                    if (!temp)          // never true; keeps the read live
                        cpuMallocd[j] = temp;
                }
            }
            gettimeofday(&tv2, NULL);
            free(memoryToRead);
        }
        else
        {
            gettimeofday(&tv1, NULL);
            for (int i = 0; i < ITERATIONS; i++) {
                for (int k = 0; k < numElems; k++)
                    cpuMallocd[k] = 5;
            }
            gettimeofday(&tv2, NULL);
            verify(cpuMallocd, numBytes);
        }
        double elapsedTimeSeconds = diff_s(tv1, tv2);
        printf("[%s] Latency including kernel launch overhead = %f us\n",(read==1)?"read":"write",elapsedTimeSeconds*1e6/(float)ITERATIONS);
        break;
    }
    }
    free(cpuMallocd);
    cudaFree(cudaMallocd);
    cudaFreeHost(hostAllocd);
    cudaFreeHost(junk);
    cudaDeviceReset();
    return 0;
}
|
14,056 | #include "SafeQ.cuh"
// Enqueue a filename. The parameter is an rvalue reference, but a named
// rvalue reference is an lvalue, so the old `emplace(str)` copied the
// string; the explicit cast restores the intended move.
void praj::fileQueue::push(std::string &&str) {
    _queue.emplace(static_cast<std::string&&>(str));
}
// Dequeue and return the oldest filename. The front element is about to be
// destroyed by pop(), so move out of it instead of copying.
// NOTE(review): not synchronized — the commented-out lock in the original
// suggests concurrent use was once intended; confirm single-consumer usage.
std::string praj::fileQueue::pop() {
    std::string str(static_cast<std::string&&>(_queue.front()));
    _queue.pop();
    return str;
}
// True when no filenames are queued.
bool praj::fileQueue::empty() {
    return _queue.size() == 0;
}
|
14,057 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <vector>
#include <iostream>
#include <algorithm>
#include<chrono>
#include <thread>
using namespace std;
// Hello-world kernel: every launched thread prints one greeting line.
__global__ void myfirstkernel(void) {
printf("Hello CUDA \n");
}
// Adds two ints passed by value and stores the sum through d_c.
__global__ void gpuAdd(int d_a, int d_b, int* d_c) {
    int sum = d_a + d_b;
    *d_c = sum;
}
// Candy-distribution pass (LeetCode 135 flavour): every child gets at least
// one candy and a child rated higher than a neighbour gets more than that
// neighbour. Prints the per-child counts. For fewer than two ratings it
// returns ratings.size() converted to bool (false only for empty input).
bool candy(std::vector<int>& ratings) {
    const int count = static_cast<int>(ratings.size());
    if (count < 2)
    {
        return count;
    }
    std::vector<int> num(count, 1);
    // Left-to-right: enforce the "more than the left neighbour" rule.
    for (int i = 1; i < count; ++i)
        if (ratings[i] > ratings[i - 1])
            num[i] = num[i - 1] + 1;
    // Right-to-left: enforce the "more than the right neighbour" rule.
    for (int i = count - 1; i > 0; --i)
        if (ratings[i] < ratings[i - 1])
            num[i - 1] = std::max(num[i - 1], num[i] + 1);
    for (int c : num)
        std::cout << c << std::endl;
    return true;
}
// Smoke test for candy() on a small fixed rating list.
bool testcandey() {
    std::vector<int> rating{ 1, 0, 2, 3, 4 };
    candy(rating);
    return true;
}
// Two-pointer scan of an ascending-sorted vector for a pair summing to
// `target`. Returns the pair of 0-based indices; if no pair matches, the
// indices where the pointers met are returned.
vector<int> twosum(vector<int>& nums, int target) {
    int lo = 0;
    int hi = static_cast<int>(nums.size()) - 1;
    while (lo < hi)
    {
        int s = nums[lo] + nums[hi];
        if (s == target)
            break;
        if (s < target)
            ++lo;   // need a bigger sum
        else
            --hi;   // need a smaller sum
    }
    return vector<int>{lo, hi};
}
// Smoke test for twosum on the classic {2,7,11,15}/9 example.
bool testtwosum() {
    vector<int> numbers{ 2,7,11,15 };
    for (int idx : twosum(numbers, 9))
        cout << "the two sum is " << idx << endl;
    return true;
}
// LeetCode 435: minimum number of intervals to remove so the remainder do
// not overlap. Greedy: sort by right endpoint, keep the earliest-ending
// interval, and count every later interval that starts before the kept end.
// Fixed: an empty input now returns 0 (nothing to erase) instead of 1; the
// unused local `n` was removed; the comparator takes const references
// instead of copying both vectors per comparison.
int eraseOverlapIntervals(vector<vector<int>>& intervals) {
    if (intervals.empty()) {
        return 0;
    }
    sort(intervals.begin(), intervals.end(),
         [](const vector<int>& a, const vector<int>& b) { return a[1] < b[1]; });
    int total = 0;
    int prev = intervals[0][1];
    for (size_t i = 1; i < intervals.size(); ++i)
    {
        if (intervals[i][0] < prev) {
            ++total;                 // overlaps the kept interval: erase it
        }
        else
        {
            prev = intervals[i][1];  // keep it; new right frontier
        }
    }
    return total;
}
// Smoke test for eraseOverlapIntervals on four intervals.
bool testeraseOverlapIntervals(){
    std::vector<vector<int>> intervals{ {1,3}, {2,3}, {2,4}, {4,6} };
    cout << "the total is " << eraseOverlapIntervals(intervals) << endl;
    return true;
}
// Merges two individually ascending-sorted vectors into one vector in
// DESCENDING order by walking both inputs from the back.
// m and n are the element counts of nums1 and nums2 actually in use.
vector<int> merge(vector<int>& nums1, int m, vector<int>& nums2, int n) {
    vector<int> out;
    out.reserve(m + n - 1);  // matches the original reservation
    int i = m - 1;
    int j = n - 1;
    // Take the larger tail element first.
    while (i >= 0 && j >= 0)
        out.push_back(nums1[i] > nums2[j] ? nums1[i--] : nums2[j--]);
    // Drain whichever input still has elements.
    while (i >= 0)
        out.push_back(nums1[i--]);
    while (j >= 0)
        out.push_back(nums2[j--]);
    return out;
}
// Smoke test for merge on two small sorted vectors.
void testmerge() {
    vector<int> nums1{ 1,2,3 };
    vector<int> nums2{ 2,5,6 };
    for (int value : merge(nums1, nums1.size(), nums2, nums2.size()))
        cout << "the merge is " << value << endl;
}
// Minimal singly linked list node used by detectCircle/testdetectCircle.
struct Node
{
int val;      // payload
Node* next;   // next node in the list, nullptr at the tail
Node(int x) :val(x), next(nullptr) {
}
};
// Floyd cycle detection: returns the node where the list's cycle begins, or
// nullptr if the list is acyclic. Phase 1: fast (2x) and slow (1x) pointers
// meet inside the cycle. Phase 2: restarting fast at the head and stepping
// both one node at a time makes them meet exactly at the cycle entry.
Node* detectCircle(Node* head) {
    Node* fast = head;
    Node* slow = head;
    do {
        if (fast == nullptr || fast->next == nullptr)
            return nullptr;  // ran off the end: no cycle
        fast = fast->next->next;
        slow = slow->next;
    } while (fast != slow);
    for (fast = head; fast != slow; fast = fast->next)
        slow = slow->next;
    return fast;
}
void testdetectCircle() {
Node* head = new Node(22);
Node* p=head;
for (int i = 0; i < 5; i++) {
Node* temp = new Node(i);
p->next = temp;
p = temp;
}
p->next = head->next->next;
p = head;
Node* pjoin = detectCircle(head);
cout << pjoin->val << endl;
//while (p)
//{
// cout << p->val << endl;
// p = p->next;
//}
}
// Minimum window substring (LeetCode 76): returns the smallest substring of
// S containing every character of T with multiplicity, or "" if none exists.
// NOTE(review): uses 128-slot tables, so it assumes 7-bit ASCII input.
string minWindow(string S, string T) {
    vector<int> need(128, 0);
    vector<bool> inT(128, false);
    // Tally the characters required by T.
    for (char ch : T) {
        inT[ch] = true;
        ++need[ch];
    }
    int covered = 0;
    int left = 0, best_left = 0;
    int best_size = (int)S.size() + 1;
    for (int right = 0; right < (int)S.size(); ++right) {
        // Extend the window; count a character only while it is still owed.
        if (inT[S[right]] && --need[S[right]] >= 0)
            ++covered;
        // Once T is fully covered, shrink from the left as far as possible
        // without losing coverage, recording the best window seen.
        while (covered == (int)T.size()) {
            if (right - left + 1 < best_size) {
                best_left = left;
                best_size = right - left + 1;
            }
            if (inT[S[left]] && ++need[S[left]] > 0)
                --covered;
            ++left;
        }
    }
    return best_size > (int)S.size() ? "" : S.substr(best_left, best_size);
}
// Smoke test for minWindow on the classic "ADOBECODEBANC"/"ABC" pair.
void testminwindow() {
    string s = "ADOBECODEBANC";
    string t = "ABC";
    cout << "the result is " << minWindow(s, t) << endl;
}
// Integer square root: returns floor(sqrt(a)) for a >= 0.
// Fixed: the old binary search divided by zero for a == 1 (mid became 0)
// and could loop forever (setting l = mid made no progress once r == l + 1,
// e.g. a == 8). This version searches [1, a/2] with inclusive bounds and
// always shrinks the interval.
int mysqrt(int a) {
    if (a < 2)
        return a;            // 0 -> 0, 1 -> 1
    int l = 1, r = a / 2;    // floor(sqrt(a)) <= a/2 for a >= 2
    int ans = 1;             // best mid with mid*mid <= a found so far
    while (l <= r)
    {
        int mid = l + (r - l) / 2;
        int q = a / mid;     // mid >= 1: no division by zero
        if (q == mid)
            return mid;
        if (q > mid) {       // mid*mid <= a: candidate, search higher
            ans = mid;
            l = mid + 1;
        } else {             // mid*mid > a: search lower
            r = mid - 1;
        }
    }
    return ans;
}
// Smoke test for mysqrt(100).
void testsqrt()
{
    cout << "the result is " << mysqrt(100) << endl;
}
//区间查找
// First index i with nums[i] >= target in an ascending-sorted vector;
// returns nums.size() when every element is smaller.
int lower_bound(vector<int>& nums, int target) {
    int lo = 0, hi = (int)nums.size();
    while (lo < hi)
    {
        int mid = lo + (hi - lo) / 2;
        if (nums[mid] >= target)
            hi = mid;        // mid could be the answer; keep it in range
        else
            lo = mid + 1;    // everything up to mid is too small
    }
    return lo;
}
// First index i with nums[i] > target in an ascending-sorted vector;
// returns nums.size() when no element is larger.
int upper_bound(vector<int>& nums, int target) {
    int lo = 0, hi = (int)nums.size();
    while (lo < hi)
    {
        int mid = lo + (hi - lo) / 2;
        if (nums[mid] > target)
            hi = mid;        // mid could be the answer; keep it in range
        else
            lo = mid + 1;    // elements <= target are excluded
    }
    return lo;
}
// Returns the [first, last] index range of target in a sorted vector, or
// {-1, -1} when absent. Built on the custom lower_bound/upper_bound above.
vector<int> searchRange(vector<int>& nums, int target) {
    if (nums.empty())
        return vector<int>{-1, -1};
    int lo = lower_bound(nums, target);
    if (lo == (int)nums.size() || nums[lo] != target) {
        return vector<int>({ -1, -1 });
    }
    int hi = upper_bound(nums, target) - 1;
    return vector<int>{lo, hi};
}
// Smoke test for searchRange on the classic {5,7,7,8,8,10}/8 example.
void testserachRange() {
    vector<int> nums{ 5,7,7,8,8,10 };
    for (int bound : searchRange(nums, 8))
        cout << "the bound is " << bound << endl;
}
//有序数组旋转二分查找
// Search in a rotated sorted array that may contain duplicates
// (LeetCode 81). Returns true iff target is present.
// Fixed: the old loop used `l < r`, so a single-element array (and the final
// l == r position generally) was never examined; the strict `target >
// nums[l]` / `target < nums[r]` comparisons also skipped targets equal to a
// boundary element. This version uses inclusive bounds and decides which
// half is sorted, shrinking both ends when duplicates make that ambiguous.
bool rotatevectorsearch(vector<int>& nums, int target) {
    int l = 0, r = (int)nums.size() - 1;
    while (l <= r)
    {
        int mid = l + (r - l) / 2;
        if (nums[mid] == target) {
            return true;
        }
        if (nums[l] == nums[mid] && nums[mid] == nums[r]) {
            // Duplicates hide which half is sorted; shrink both ends.
            ++l;
            --r;
        }
        else if (nums[l] <= nums[mid])
        {
            // Left half [l, mid] is sorted.
            if (nums[l] <= target && target < nums[mid])
                r = mid - 1;
            else
                l = mid + 1;
        }
        else
        {
            // Right half [mid, r] is sorted.
            if (nums[mid] < target && target <= nums[r])
                l = mid + 1;
            else
                r = mid - 1;
        }
    }
    return false;
}
// Smoke test: search a rotated array for a value that is absent.
void testRotateVector() {
    vector<int> nums{ 2,5,6,0,0,1,2 };
    cout << "the result is " << rotatevectorsearch(nums, 10) << endl;
}
// Returns the sum of x and y (used as a callback target below).
int add(int x, int y) {
    int result = x + y;
    return result;
}
// Invokes the supplied binary-int function pointer on (x, y) and returns
// its result.
int callback(int x, int y, int (*func)(int, int)) {
    return func(x, y);
}
;
// Repacks interleaved xyz float triples (src, array-of-structures) into a
// planar layout (dst, structure-of-arrays): dst[0..nums) holds x values,
// dst[nums..2*nums) holds y, dst[2*nums..3*nums) holds z.
__global__ void normkernel(float* src, float* dst, int nums) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= nums)
        return;
    const float* triple = src + 3 * idx;
    dst[idx] = triple[0];
    dst[idx + nums] = triple[1];
    dst[idx + 2 * nums] = triple[2];
    printf("the dst valuse is %f \n", dst[idx]);
}
// Host driver for normkernel: builds a 20x20x3 interleaved float buffer,
// de-interleaves it on the GPU, and prints the planar result.
// Fixed: every cudaError_t was assigned to `err` but never inspected, and
// the kernel launch itself (which returns no status) was never checked.
void testnormkernel() {
    cudaError_t err = cudaSuccess;
    int numElements = 20*20*3;
    size_t size = numElements * sizeof(float);
    printf("[Vector addition of %d elements]\n", numElements);
    // Host input (interleaved) and output (planar) buffers
    float* h_A = (float*)malloc(size);
    float* h_B = (float*)malloc(size);
    for (int i = 0; i < numElements; ++i) {
        h_A[i] = float(i);
    }
    float* d_A = NULL;
    err = cudaMalloc((void**)&d_A, size);
    if (err != cudaSuccess) printf("cudaMalloc d_A failed: %s\n", cudaGetErrorString(err));
    float* d_B = NULL;
    err = cudaMalloc((void**)&d_B, size);
    if (err != cudaSuccess) printf("cudaMalloc d_B failed: %s\n", cudaGetErrorString(err));
    err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess) printf("H2D copy failed: %s\n", cudaGetErrorString(err));
    int threadsPerBlock = 256;
    int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
    printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
    // numElements/3 xyz triples to repack
    normkernel <<<blocksPerGrid, threadsPerBlock >>> (d_A, d_B, numElements/3);
    err = cudaGetLastError();
    if (err != cudaSuccess) printf("Kernel launch failed: %s\n", cudaGetErrorString(err));
    // Blocking D2H copy also synchronizes with the kernel
    err = cudaMemcpy(h_B, d_B, size, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) printf("D2H copy failed: %s\n", cudaGetErrorString(err));
    for (int i = 0; i < numElements; ++i) {
        cout<<h_B[i]<<endl;
    }
    err = cudaFree(d_A);
    err = cudaFree(d_B);
    free(h_A);
    free(h_B);
}
// Stores a + b into c. `thread` is an unused thread-id parameter kept for
// the std::thread experiments sketched in main().
void add1(int a, int b, int& c, int thread) {
    (void)thread;
    c = a + b;
}
// Entry point: runs the GPU repacking demo, waits for a keypress, then runs
// the CPU algorithm smoke tests. The commented-out sections are earlier
// std::thread and CUDA experiments kept for reference.
int main(void) {
//static const int threads_nums = 10;
//std::thread threads[threads_nums];
//int c[threads_nums];
//for (int i = 0; i < threads_nums; i++)
// threads[i] = std::thread(add1, i, i, c[i], i);
//for (int i = 0; i < threads_nums; i++)
// threads[i].join();
// GPU demo: de-interleave xyz triples on the device and print the result.
testnormkernel();
// Pause so the kernel output can be inspected before the CPU tests scroll.
cin.get();
//myfirstkernel<<<4, 4>>>();
////printf("Hello CUDA \n");
//int h_c;
//int* d_c;
//cudaMalloc((void**)&d_c, sizeof(int));
//gpuAdd << <4, 4 >> > (1,4,d_c);
//cudaMemcpy(&h_c, d_c, sizeof(int), cudaMemcpyDeviceToHost);
//printf("the hc is %d", h_c);
//cudaFree(d_c);
// CPU algorithm smoke tests (each prints its own results).
testcandey();
testeraseOverlapIntervals();
testtwosum();
testmerge();
testdetectCircle();
testminwindow();
testsqrt();
testserachRange();
testRotateVector();
return 0;
} |
14,058 |
namespace nscv01{} |
14,059 | #include <math.h>
#include <float.h>
#include <cuda.h>
// One Jacobi relaxation step on an N x N grid: each interior cell of g
// becomes the average of its four neighbours in h. Border cells and threads
// outside the grid are left untouched. Expects a 2D launch (x = column,
// y = row).
__global__ void gpu_Heat (float *h, float *g, int N) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    bool interior = (row > 0) && (row < N - 1) && (col > 0) && (col < N - 1);
    if (interior) {
        float up    = h[(row - 1) * N + col];
        float down  = h[(row + 1) * N + col];
        float left  = h[row * N + (col - 1)];
        float right = h[row * N + (col + 1)];
        g[row * N + col] = 0.25f * (up + down + left + right);
    }
}
|
14,060 | #include "includes.h"
// Sums the first `nrows` ints of idata into *single using one thread block.
// Launch with a single block, a power-of-two blockDim.x, and dynamic shared
// memory of blockDim.x * sizeof(int).
//
// Fixes over the original:
//  * __syncthreads() added between the per-thread accumulation and the
//    reduction (the old code raced, reducing shared memory before all
//    threads had written it),
//  * the tree reduction is bounded by blockDim.x instead of hard-coding
//    1024 threads (the old fixed offsets 512/256/... read past the shared
//    allocation for smaller blocks),
//  * partners are paired as tid and tid + s (the old tid + s - 1 pairing
//    double-counted some slots and dropped others), so thread 0 ends up
//    holding the full sum and no serial tail loop is needed.
__global__ void reduceSingle(int *idata, int *single, int nrows)
{
    extern __shared__ int parts[];
    int tid = threadIdx.x;
    // Each thread accumulates a strided share of the input.
    int acc = 0;
    for (int i = tid; i < nrows; i += blockDim.x)
        acc += idata[i];
    parts[tid] = acc;
    __syncthreads();
    // Standard shared-memory tree reduction (blockDim.x must be a power of 2).
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < (int)s)
            parts[tid] += parts[tid + s];
        __syncthreads();
    }
    if (tid == 0)
        *single = parts[0];
} |
14,061 | //xfail:BOOGIE_ERROR
//--blockDim=1024 --gridDim=1 --no-inline
//error: possible null pointer access
#include <stdio.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#define N 2//8
#define tid (blockIdx.x * blockDim.x + threadIdx.x)
// Returns twice the element at v[index].
__device__ float multiplyByTwo(float *v, unsigned int index)
{
    float value = v[index];
    return value * 2.0f;
}
// Returns half the element at v[index] (multiplication by 0.5f).
__device__ float divideByTwo(float *v, unsigned int index)
{
    float value = v[index];
    return value * 0.5f;
}
typedef float(*funcType)(float*, unsigned int);
// Kernel under test for the verification harness: casting the integer 3 to
// a function-pointer type and calling through it is INTENTIONAL — the file
// header marks this test xfail ("possible null pointer access"), so this
// defect is the point of the test and must not be fixed.
__global__ void foo(float *v)
{
funcType f = (funcType)3; // it's a null pointer
v[tid] = (*f)(v, tid);
}
// Host harness for the xfail kernel above: initializes w = {0, 1, ...},
// launches foo (which calls through a bogus function pointer), copies back
// and prints whatever survived. Errors from the deliberately broken kernel
// are intentionally not checked — the external verifier flags them.
int main(){
float* w;
float* dev_w;
int size = N*sizeof(float);
w =(float*) malloc(size);
for (int i = 0; i < N; ++i){
w[i] = i;
}
cudaMalloc((void**)&dev_w, size);
cudaMemcpy(dev_w,w, size,cudaMemcpyHostToDevice);
foo <<<1,N>>>(dev_w);
//ESBMC_verify_kernel_f(foo, 1, N, dev_w);
cudaMemcpy(w,dev_w,size,cudaMemcpyDeviceToHost);
printf("\nw:");
for (int i = 0; i < N; ++i){
printf(" %f ", w[i]);
// assert(!(w[i] == i));
}
//printf ("\n (float) functype: %f", divideByTwo)//3.5;
free(w);
cudaFree(dev_w);
return 0;
}
|
14,062 | //#include <hayai/hayai.hpp>
//
//#include "fixed-HAMT.cuh"
//
//#include "concurrent-xfasttrie-fixture.cu"
//
//using HAMT5 = HAMT<key_type, mapped_type, 5>;
//using HAMT5InsertionFixture = XTrieInsertionFixture<HAMT5, Structure::XFASTTRIE>;
//using HAMT5GetWarpFixture = XTrieGetWarpFixture<HAMT5, Structure::XFASTTRIE>;
//using HAMT5PredecessorFixture = XTriePredecessorFixture<HAMT5, Structure::XFASTTRIE, true>;
//
//BENCHMARK_F(HAMT5InsertionFixture, HAMT5, NUMBER_OF_RUNS, NUMBER_OF_ITERATIONS)
//{
// insert();
//}
///*
//BENCHMARK_F(HAMT5GetWarpFixture, GetWarpHAMT5, NUMBER_OF_RUNS, NUMBER_OF_ITERATIONS)
//{
// get_warp();
//}
//
//BENCHMARK_F(HAMT5PredecessorFixture, PredecessorHAMT5, NUMBER_OF_RUNS, NUMBER_OF_ITERATIONS)
//{
// predecessor();
//}*/
//
//using HAMT6 = HAMT<key_type, mapped_type, 6>;
//using HAMT6InsertionFixture = XTrieInsertionFixture<HAMT6, Structure::XFASTTRIE>;
//using HAMT6GetWarpFixture = XTrieGetWarpFixture<HAMT6, Structure::XFASTTRIE>;
//using HAMT6PredecessorFixture = XTriePredecessorFixture<HAMT6, Structure::XFASTTRIE, true>;
//
//BENCHMARK_F(HAMT6InsertionFixture, HAMT6, NUMBER_OF_RUNS, NUMBER_OF_ITERATIONS)
//{
// insert();
//}
///*
//BENCHMARK_F(HAMT6GetWarpFixture, GetWarpHAMT6, NUMBER_OF_RUNS, NUMBER_OF_ITERATIONS)
//{
// get_warp();
//}
//
//BENCHMARK_F(HAMT6PredecessorFixture, PredecessorHAMT6, NUMBER_OF_RUNS, NUMBER_OF_ITERATIONS)
//{
// predecessor();
//}*/
|
14,063 | #include "includes.h"
__device__ int position; //index of the largest value
__device__ int largest; //value of the largest value
int lenString = 593;
int maxNumStrings = 1000000;
int threshold = 2;
// One halving step of an in-place pairwise max reduction: thread t replaces
// db[t] with max(db[2t], db[2t+1]) for t < size.
// NOTE(review): threads read db[2t]/db[2t+1] while other threads overwrite
// lower slots of db in the same pass — correct only if every read of a slot
// happens before any write to it, which warp/block scheduling does not
// guarantee; confirm the intended synchronization.
__device__ void cuda_select(int *db, int size) {
int my_id = blockDim.x * blockIdx.x + threadIdx.x;
if(my_id < size) {
if(db[2 * my_id] > db[2 * my_id + 1])
db[my_id] = db[2 * my_id];
else
db[my_id] = db[2 * my_id + 1];
}
}
// Tournament-style maximum search over db[0..size): halves the array
// ceil(log2(size)) times via cuda_select, then publishes db[0] into the
// device global `largest`.
// NOTE(review): there is no synchronization between halving rounds — no
// __syncthreads() within a block and no grid-wide barrier — so later rounds
// can read values a lagging thread has not yet produced. Every thread also
// writes `largest`; that is benign only if the reduction already converged.
__global__ void select(int *db, int size) {
int height = (int)ceil(log2((double)size));
int i = 0;
for(i = 0; i < height; i++) {
// Each round shrinks the active prefix to ceil(size/2) elements.
size = (int)ceil((double) size/2);
cuda_select(db, size);
}
largest = db[0];
} |
14,064 | #include "includes.h"
// Samples a spiral-like parametric curve at ZSIZE points spaced dx apart
// starting at ZMIN, writing interleaved xyz triples into `out`; t animates
// the curve over time.
// Fixed: uses single-precision math (cosf/sinf) and float literals — the
// old cos/sin/0.1 silently promoted every operation to double inside a
// float kernel.
__global__ void generateCurve(float t, float dx, float* out, const float ZMIN, const size_t ZSIZE) {
    int offset = blockIdx.x * blockDim.x + threadIdx.x;
    if (offset < ZSIZE) {
        float z = ZMIN + offset * dx;
        float phase = z * t + t;
        out[3 * offset]     = cosf(phase) / z;
        out[3 * offset + 1] = sinf(phase) / z;
        out[3 * offset + 2] = z + 0.1f * sinf(t);
    }
} |
14,065 | #include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#include <math.h>
#include <cuda.h>
// Per-pixel Euclidean distance (micrometres) from the pinhole to each pixel
// on the sensor plane; pixel pitch 6.45 um, optical axis through the image
// centre. Expects a 2D launch.
// Fixes: added a bounds guard so the launch grid need not tile the image
// exactly; cast to signed int before subtracting `width/2` / `height/2`
// (the old int-minus-unsigned expression wrapped to a huge unsigned value
// for every pixel left of / above the centre); removed the duplicate offset
// computation and used a float literal for the pixel size.
__global__ void gpudistancekernel(float *gpudistance, unsigned int width, unsigned int height, float pinholedist){
    unsigned int xcoord = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int ycoord = blockIdx.y * blockDim.y + threadIdx.y;
    if (xcoord >= width || ycoord >= height)
        return;
    const float pixelsize = 6.45f;
    float xcontrib = ((int)xcoord - (int)(width / 2)) * pixelsize;
    float ycontrib = ((int)ycoord - (int)(height / 2)) * pixelsize;
    unsigned int offset = ycoord * width + xcoord;
    gpudistance[offset] = sqrtf( (xcontrib * xcontrib) + (ycontrib * ycontrib) + (pinholedist * pinholedist) );
}
// Fills dist2pinhole (width x height floats, row-major) with per-pixel
// distances to the pinhole by running gpudistancekernel on the GPU.
// Returns 0 on success, 1 on a bad launch configuration or CUDA failure.
// Fixes: guards against width < 87 or height < 65 (the old code divided by
// a zero block dimension); cudaMalloc and the kernel launch are now checked.
extern "C" int gpudistancegrid(float *dist2pinhole,float pinholedist, unsigned int width, unsigned int height){
    dim3 gridinblocks, blockinthreads;
    unsigned int totalthreadsperblock;
    // Historical tiling: blocks of (width/87) x (height/65) threads.
    blockinthreads.x = width/87;
    blockinthreads.y = height/65;
    blockinthreads.z = 1;
    // fixed: small images make a block dimension zero, which would divide
    // by zero below and request an invalid launch geometry
    if (blockinthreads.x == 0 || blockinthreads.y == 0) {
        printf("Error in your thread config.\n");
        return(1);
    }
    totalthreadsperblock = blockinthreads.x * blockinthreads.y * blockinthreads.z;
    if(totalthreadsperblock > 1024){
        printf("Error in your thread config.\n");
        return(1);
    }
    gridinblocks.x = (width / blockinthreads.x);
    gridinblocks.y = (height / blockinthreads.y);
    float *gpudistance;
    cudaError_t err = cudaMalloc(&gpudistance, sizeof(float) * width * height);
    if (err != cudaSuccess) {
        printf("cudaMalloc failed: %s\n", cudaGetErrorString(err));
        return(1);
    }
    gpudistancekernel<<<gridinblocks, blockinthreads>>>(gpudistance, width, height, pinholedist);
    err = cudaGetLastError();
    if (err == cudaSuccess)
        err = cudaDeviceSynchronize();
    if (err != cudaSuccess) {
        printf("Kernel failed: %s\n", cudaGetErrorString(err));
        cudaFree(gpudistance);
        return(1);
    }
    cudaMemcpy(dist2pinhole, gpudistance, sizeof(float) * width * height, cudaMemcpyDeviceToHost);
    cudaFree(gpudistance);
    // NOTE(review): cudaDeviceReset() tears down the whole CUDA context;
    // kept from the original, but remove it if the caller makes further
    // CUDA calls after this function.
    cudaDeviceReset();
    return(0);
}
|
14,066 | #include "includes.h"
using namespace std;
const int MAX = 100;
// In-place Cholesky-style factorization of an n x n double matrix using one
// block of n threads; thread j owns column j. Shared layout: localMatrix
// holds the n*n matrix followed by one extra row of n per-column
// accumulators, so launch with dynamic shared memory of
// (n*n + n) * sizeof(double).
// NOTE(review): each i-iteration reads localMatrix[k*n+i] values written by
// thread i in earlier iterations without a __syncthreads() between
// iterations — confirm the intended cross-thread ordering.
__global__ void Cholesky_GPU(double *matrix, int n){
//n threads running in parallel
//int x = blockIdx.x;
int y = threadIdx.x;
//int i = x;
int j = y;
extern __device__ __shared__ double localMatrix[];
// extern __device__ __shared__ double sum[];
//matrix2d[x][y] = matrix1d[x*n+y]
//Copy to shared mem
for(int i=0; i<n; i++)
localMatrix[i*n+j] = matrix[i*n+j];
localMatrix[n*n+j] = 0; // sum column
__syncthreads();
//Do the calc;
#pragma unroll
for(int i=0; i<n; i++){
if(j<i){
// Below-diagonal entries of row i are zeroed.
localMatrix[i*n+j] = 0;
}
if(j>=i) {
localMatrix[n*n+j]=0;//initialize sum to 0
for(int k=0; k<i; k++)
localMatrix[n*n+j] +=localMatrix[k*n+i]*localMatrix[k*n+j]; // sums
//if(j<i){
// localMatrix[i*n+j]=0;
//}
if(i == j){
// Diagonal: sqrt of the Schur complement for this column.
localMatrix[i*n+j] = sqrt(localMatrix[i*n+j] - localMatrix[n*n+j]);
}if(j > i){
// Off-diagonal: scaled by the just-computed diagonal entry.
localMatrix[i*n+j] = (localMatrix[i*n+j] - localMatrix[n*n+j])/localMatrix[i*n+i];
}
}
}
__syncthreads();
for(int i=0; i<n; i++)
matrix[i*n+j] = localMatrix[i*n+j];
//Copy back
} |
14,067 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
//M and N number of threads (grid and block)
void secuential(const int a[] ,const int b[], unsigned long int c[], const int sqrt_dim);
// For each flattened index of a sqrt_dim x sqrt_dim matrix, computes
// c[idx] = b[idx] + a[transposed idx] + sum_i a[row(idx), i] * b[col(idx)*sqrt_dim + i]
// (i.e. c = b, then c += a^T, then a row-by-row accumulation — the inline
// comments label it "c += a*b^t"). Work distribution: with at least dim
// threads, one element per thread; otherwise each thread handles a
// contiguous chunk of dim/thread_number elements and the last thread also
// takes the remainder.
// NOTE(review): index_i is treated as the column (index % sqrt_dim) and
// index_j as the row; confirm against the caller's layout.
__global__ void multiply( const int a[] ,const int b[], unsigned long int c[] , const int sqrt_dim,const int thread_number)
{
// Flatten the (possibly 3D) grid/block coordinates to one global id.
unsigned long int blockId = blockIdx.x + blockIdx.y * gridDim.x
+ gridDim.x * gridDim.y * blockIdx.z;
unsigned long int index = blockId * (blockDim.x * blockDim.y * blockDim.z)
+ (threadIdx.z * (blockDim.x * blockDim.y))
+ (threadIdx.y * blockDim.x) + threadIdx.x;
//convert global index to column and row (index_i and index_j) of matrix
unsigned long int index_i = index < sqrt_dim ? index : (int)index%sqrt_dim;
unsigned long int index_j = (index-index_i)/sqrt_dim;
unsigned int dim=sqrt_dim*sqrt_dim;
//printf("index= %i \t", index);
if(index<dim){
c[index]=0;
if(dim<=thread_number){ //if more threads than array size
//printf("Thread %i; Modifying value of index %i\n ", index, index);
c[index]= b[index]; //c= b
c[index]+= a[index_j+ index_i * sqrt_dim]; //c+= a^t
for(int i=0; i<sqrt_dim;i++){ //row of first matrix
c[index]+=a[i+index_j * sqrt_dim ]*b[i + index_i*sqrt_dim]; //c+= a*b^t
}
}
else{ //if less threads than array size
if(index!=thread_number-1){//if not last thread deal with size_array/thread_nb array entries
for(unsigned long int i=index*(int)(dim/thread_number); i< index*(int)(dim/thread_number)+(int)(dim/thread_number); i++){
//printf("Thread %i; Modifying value of index %i \n", index, i);
index_i = (int)i%sqrt_dim;
index_j = (i-index_i)/sqrt_dim;
c[i]= b[i]; //c= b
c[i]+= a[index_j+ index_i * sqrt_dim]; //c+= a^t
for(unsigned long int j=0; j<sqrt_dim;j++){ //row of first matrix
c[i]+=a[j+index_j * sqrt_dim ]*b[j+ index_i*sqrt_dim]; //c+= a*b^t
}
}
}
else{ //if last thread deal with all remaining array entries
for(unsigned long int i=index*(int)(dim/thread_number); i< dim; i++){
//printf("Thread %i; Modifying value of index %i\n",index, i );
index_i = (int)i%sqrt_dim;
index_j = (i-index_i)/sqrt_dim;
c[i]= b[i]; //c= b
c[i]+= a[index_j+ index_i * sqrt_dim]; //c+= a^t
for(unsigned long int j=0;j<sqrt_dim;j++){ //row of first matrix
c[i]+=a[j+index_j * sqrt_dim ]*b[j + index_i*sqrt_dim]; //c+= a*b^t
}
}
}
}
}
}
// Driver: builds two all-ones matrices, computes C = B + A^T + A*B^T on
// the GPU with thread_number = N*N logical threads, repeats the
// computation on the CPU (secuential) and compares the two results.
// Usage: prog [matrix_side] [N] [v]   (v enables verbose output).
int main(int argc, char *argv[]){
//Measure time
clock_t time_begin;
// pointers to host & device arrays
int *d_array1 = 0,*d_array2 = 0; unsigned long int *d_array3 = 0;
int *h_array1 = 0,*h_array2 = 0;unsigned long int*h_array3 = 0;
unsigned long int *h_array_sec= 0;
unsigned int size_array=191*191; //here, size_array =L has to be a square
bool verbose=false;
int N=1;
if(argc == 3){
size_array=atoi(argv[1]) * atoi(argv[1]) ;
N=atoi(argv[2]);
}
else if(argc==4){
size_array=atoi(argv[1]) * atoi(argv[1]) ;
N=atoi(argv[2]);
verbose=(argv[3][0]=='v');
}
// malloc columns of host arrays
h_array1 = (int*)malloc( size_array * sizeof(int));
h_array_sec= (unsigned long int*)malloc( size_array * sizeof(unsigned long int));
h_array2 = (int*)malloc( size_array * sizeof(int));
h_array3 = (unsigned long int*)malloc( size_array * sizeof(unsigned long int));
//printf("Array A:\n");
for(unsigned long int i=0; i<size_array; i++){
h_array1[i]=1;//rand()%10;
// printf("%i\t", h_array1[i]);
//if((i+1)%(int)sqrt((float)size_array)==0)
// printf("\n");
}
//printf("\n");
//printf("Array B:\n");
for(unsigned int i=0; i<size_array; i++){
h_array2[i]=1;//rand()%10;
//printf("%i\t", h_array2[i]);
//if((i+1)%(int)sqrt((float)size_array)==0)
// printf("\n");
}
//printf("\n");
// cudaMalloc a device array
cudaMalloc(&d_array1,size_array * sizeof(int));
cudaMalloc(&d_array2,size_array * sizeof(int));
cudaMalloc(&d_array3,size_array * sizeof(unsigned long int));
// upload the input matrices to the device:
cudaMemcpy(d_array1, h_array1, sizeof(int)*size_array, cudaMemcpyHostToDevice);
cudaMemcpy(d_array2, h_array2, sizeof(int)*size_array, cudaMemcpyHostToDevice);
dim3 bloque(N,N); //Bloque bidimensional de N*N hilos (max 512 threads in a block)
dim3 grid(1,1); //Grid bidimensional de M*M bloques
int thread_number= N*N;
// NOTE(review): a 512x512 block is 262144 threads, far beyond the
// per-block hardware limit, so this launch configuration fails whenever
// N*N > 512; the grid should grow instead of the block.  Confirm.
if (N*N > 512){
bloque.x = 512;
bloque.y = 512;
grid.x = ceil(double(N)/double(bloque.x));
grid.y = ceil(double(N)/double(bloque.y));
}
printf("%i threads, %ix%i matrix\n", thread_number, (int)sqrt((float)size_array), (int)sqrt((float)size_array));
time_begin=clock();
multiply<<<grid, bloque>>>(d_array1, d_array2 , d_array3,sqrt((float)size_array), thread_number);
cudaThreadSynchronize(); // deprecated alias of cudaDeviceSynchronize()
// download and inspect the result on the host:
cudaMemcpy(h_array3, d_array3, sizeof(unsigned long int)*size_array, cudaMemcpyDeviceToHost);
//printf("GPU time: %f seconds\n", clock() - time_begin);
//windows time
// NOTE(review): dividing clock() ticks by 1e6 assumes CLOCKS_PER_SEC ==
// 1000000; CLOCKS_PER_SEC would be the portable divisor.
printf("GPU time, %i threads: %f seconds\n", thread_number,(((float)clock() - (float)time_begin) / 1000000.0F ) * 1000 ); //1.18s
if(verbose){
printf("Array C=B + AB^t + A^t :\n");
for(int i=0; i<size_array; i++){
printf("%i\t", h_array3[i]);
if((i+1)%(int)(sqrt((float)size_array))==0)
printf("\n");
}
printf("\n");
}
time_begin=clock();
secuential(h_array1, h_array2, h_array_sec, sqrt((float)size_array));
//printf("CPU time: %f seconds\n", clock() - time_begin);
//windows time
printf("CPU time: %f seconds\n", (((float)clock() - (float)time_begin) / 1000000.0F ) * 1000 ); //1.18s
// deallocate memory
bool b=true;
// Element-wise comparison of the GPU and CPU results.
for(int i=0; i<size_array; i++){
if(h_array_sec[i] != h_array3[i]){
printf("GPU and CPU have different results (at least) at position %i\n", i);
b=false;
break;
}
}
if(b)
printf("GPU and CPU have the same results\n");
free(h_array3); free(h_array2); free(h_array1); free(h_array_sec);
cudaFree(d_array3);cudaFree(d_array2);cudaFree(d_array1);
system("pause");
}
// CPU reference implementation of C = B + A^T + A*B^T for row-major
// sqrt_dim x sqrt_dim matrices stored in flat arrays.  Used to validate
// the GPU kernel's output element by element.
void secuential(const int a[] ,const int b[], unsigned long int c[], const int sqrt_dim){
	const int dim = sqrt_dim * sqrt_dim;
	for(int idx = 0; idx < dim; idx++){
		// Recover the matrix coordinates of this flat index.
		const int row = idx % sqrt_dim;
		const int col = (idx - row) / sqrt_dim;
		unsigned long int acc = b[idx];                 // C  = B
		acc += a[col + row * sqrt_dim];                 // C += A^T
		for(int k = 0; k < sqrt_dim; k++){              // C += A * B^T
			acc += a[k + col * sqrt_dim] * b[k + row * sqrt_dim];
		}
		c[idx] = acc;
	}
}
|
14,068 | #include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <iostream>
#include <ctype.h>
#include <vector>
#include <string>
typedef std::vector<double> double_vec;
// Reads lines of two whitespace-separated prices (AAPL MSFT) from stdin
// until EOF, an empty line, or a line not starting with a digit, then
// uses Thrust on the device to compute the mean per-sample difference
// AAPL - MSFT and prints it.
int main()
{
double_vec aapl, msft;
std::string values;
std::string delimiter = " ";
while (true)
{
// Check the stream state and guard the empty string before indexing
// values[0]; cast to unsigned char so isdigit() is well-defined for
// non-ASCII bytes (the original indexed an unchecked getline result).
if (!std::getline(std::cin, values) || values.empty() ||
    !isdigit(static_cast<unsigned char>(values[0])))
{
break;
}
std::string first_val = values.substr(0, values.find(delimiter));
aapl.push_back(std::stod(first_val));
std::string second_val = values.erase(0, values.find(delimiter) + delimiter.length());
msft.push_back(std::stod(second_val));
}
// Stage the samples in host vectors, then mirror them on the device.
thrust::host_vector<double> host_aapl(int(aapl.size())), host_msft(int(msft.size()));
host_aapl = aapl;
host_msft = msft;
thrust::device_vector<double> dev_aapl(host_aapl), dev_msft(host_msft), dev(int(aapl.size()), 0);
// Element-wise difference: dev[i] = aapl[i] - msft[i].
thrust::transform(
dev_aapl.begin(),
dev_aapl.end(),
dev_msft.begin(),
dev.begin(),
thrust::minus<double>());
// Device-side sum of the differences, then the arithmetic mean.
double sumDiffs = thrust::reduce(dev.begin(), dev.end(), 0.0, thrust::plus<double>());
double avgDiffs = sumDiffs / int(aapl.size());
std::cout << "Diferença média entre AAPL e MSFT nos últimos 10 anos: US$ " << avgDiffs << std::endl;
}
14,069 | #include <iostream>
#include <cuda.h>
#include <stdio.h>
using std::cout;
using std::endl;
// Demo kernel: prints a greeting, the thread's index within its block,
// and the float argument forwarded from the host.
__global__ void my_kernel(float mypi)
{
printf("Printf hello from the kernel!!\n");
printf("I'm in thread %i \n", threadIdx.x);
printf("Someone sent me %f \n", mypi);
}
// Host entry point: launches my_kernel with a single thread (and an
// explicit 0 bytes of dynamic shared memory), then waits so the device
// printf buffer is flushed before the process exits.
int main(int argc, char *argv[])
{
cout << "Hello world!! I will call a CUDA kernel now!!" << endl;
my_kernel<<<1,1,0>>>(3.1415f);
cudaDeviceSynchronize();
return 0;
}
|
14,070 | #include "includes.h"
// Cooperatively zero-fills a short vector: each thread of the block
// clears indices threadIdx.x, threadIdx.x + blockDim.x, ...
__device__ void init_vectors(short *vec, const int vec_length) {
  const int stride = blockDim.x;
  int idx = threadIdx.x;
  while (idx < vec_length) {
    vec[idx] = 0;
    idx += stride;
  }
}
// Kernel overload for int vectors: the block's threads jointly
// zero-fill vec with a stride of blockDim.x per thread.
__global__ void init_vectors(int *vec, const int vec_length) {
  const int stride = blockDim.x;
  int idx = threadIdx.x;
  while (idx < vec_length) {
    vec[idx] = 0;
    idx += stride;
  }
}
14,071 | /* -*- mode: C; tab-width: 2; indent-tabs-mode: nil; -*- */
/*
* This code has been contributed by the DARPA HPCS program. Contact
* David Koester <dkoester@mitre.org> or Bob Lucas <rflucas@isi.edu>
* if you have questions.
*
*
* GUPS (Giga UPdates per Second) is a measurement that profiles the memory
* architecture of a system and is a measure of performance similar to MFLOPS.
* The HPCS HPCchallenge RandomAccess benchmark is intended to exercise the
* GUPS capability of a system, much like the LINPACK benchmark is intended to
* exercise the MFLOPS capability of a computer. In each case, we would
* expect these benchmarks to achieve close to the "peak" capability of the
* memory system. The extent of the similarities between RandomAccess and
* LINPACK are limited to both benchmarks attempting to calculate a peak system
* capability.
*
* GUPS is calculated by identifying the number of memory locations that can be
* randomly updated in one second, divided by 1 billion (1e9). The term "randomly"
* means that there is little relationship between one address to be updated and
* the next, except that they occur in the space of one half the total system
* memory. An update is a read-modify-write operation on a table of 64-bit words.
* An address is generated, the value at that address read from memory, modified
* by an integer operation (add, and, or, xor) with a literal value, and that
* new value is written back to memory.
*
* We are interested in knowing the GUPS performance of both entire systems and
* system subcomponents --- e.g., the GUPS rating of a distributed memory
* multiprocessor the GUPS rating of an SMP node, and the GUPS rating of a
* single processor. While there is typically a scaling of FLOPS with processor
* count, a similar phenomenon may not always occur for GUPS.
*
* Select the memory size to be the power of two such that 2^n <= 1/2 of the
* total memory. Each CPU operates on its own address stream, and the single
* table may be distributed among nodes. The distribution of memory to nodes
* is left to the implementer. A uniform data distribution may help balance
* the workload, while non-uniform data distributions may simplify the
* calculations that identify processor location by eliminating the requirement
* for integer divides. A small (less than 1%) percentage of missed updates
* are permitted.
*
* When implementing a benchmark that measures GUPS on a distributed memory
* multiprocessor system, it may be required to define constraints as to how
* far in the random address stream each node is permitted to "look ahead".
* Likewise, it may be required to define a constraint as to the number of
* update messages that can be stored before processing to permit multi-level
* parallelism for those systems that support such a paradigm. The limits on
* "look ahead" and "stored updates" are being implemented to assure that the
* benchmark meets the intent to profile memory architecture and not induce
* significant artificial data locality. For the purpose of measuring GUPS,
* we will stipulate that each thread is permitted to look ahead no more than
* 1024 random address stream samples with the same number of update messages
* stored before processing.
*
* The supplied MPI-1 code generates the input stream {A} on all processors
* and the global table has been distributed as uniformly as possible to
* balance the workload and minimize any Amdahl fraction. This code does not
* exploit "look-ahead". Addresses are sent to the appropriate processor
* where the table entry resides as soon as each address is calculated.
* Updates are performed as addresses are received. Each message is limited
* to a single 64 bit long integer containing element ai from {A}.
* Local offsets for T[ ] are extracted by the destination processor.
*
* If the number of processors is equal to a power of two, then the global
* table can be distributed equally over the processors. In addition, the
* processor number can be determined from that portion of the input stream
* that identifies the address into the global table by masking off log2(p)
* bits in the address.
*
* If the number of processors is not equal to a power of two, then the global
* table cannot be equally distributed between processors. In the MPI-1
* implementation provided, there has been an attempt to minimize the differences
* in workloads and the largest difference in elements of T[ ] is one. The
* number of values in the input stream generated by each processor will be
* related to the number of global table entries on each processor.
*
* The MPI-1 version of RandomAccess treats the potential instance where the
* number of processors is a power of two as a special case, because of the
* significant simplifications possible because processor location and local
* offset can be determined by applying masks to the input stream values.
* The non power of two case uses an integer division to determine the processor
* location. The integer division will be more costly in terms of machine
* cycles to perform than the bit masking operations
*
* For additional information on the GUPS metric, the HPCchallenge RandomAccess
* Benchmark,and the rules to run RandomAccess or modify it to optimize
* performance -- see http://icl.cs.utk.edu/hpcc/
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#define DEFAULT_LOGN 20
#define POLY 0x0000000000000007ULL
union benchtype {
uint64_t u64;
uint2 u32;
};
static __constant__ uint64_t c_m2[64];
static __device__ uint32_t d_error[1];
/* Grid-stride initialization: table entry i receives the value i,
 * which d_check later uses to verify the benchmark's XOR updates. */
static __global__ void
d_init(size_t n, benchtype *t)
{
	ptrdiff_t i = blockIdx.x * blockDim.x + threadIdx.x;
	const ptrdiff_t step = (ptrdiff_t)gridDim.x * blockDim.x;
	while (i < n) {
		t[i].u64 = i;
		i += step;
	}
}
/*
 * Returns the n-th value of the RandomAccess pseudo-random stream (a
 * 64-bit LFSR with feedback polynomial POLY) in O(log n) rather than by
 * stepping n times.  It walks the bits of n from most- to
 * least-significant, using the constant-memory table c_m2 (built by the
 * host-side starts()) to jump the state ahead, plus one explicit LFSR
 * step whenever the current bit of n is set.
 */
static __device__ uint64_t
d_starts(size_t n)
{
if (n == 0) {
return 1;
}
// Position of the highest set bit of n.
int i = 63 - __clzll(n);
uint64_t ran = 2;
while (i > 0) {
// Advance the state through the precomputed GF(2) basis c_m2.
uint64_t temp = 0;
for (int j = 0; j < 64; j++) {
if ((ran >> j) & 1) {
temp ^= c_m2[j];
}
}
ran = temp;
i -= 1;
// If this bit of n is set, take one extra LFSR step.
if ((n >> i) & 1) {
ran = (ran << 1) ^ ((int64_t) ran < 0 ? POLY : 0);
}
}
return ran;
}
// Strategy used by d_bench for the 64-bit XOR read-modify-write.
enum atomictype_t {
ATOMICTYPE_CAS,
ATOMICTYPE_XOR,
};
/*
 * GUPS update kernel: the 4*n-long random stream is split evenly across
 * all threads; each thread seeds its slice exactly with d_starts(start)
 * and XORs every generated value into t[ran & (n-1)] -- which requires
 * n to be a power of two.
 *   ATOMICTYPE_CAS: full 64-bit atomic compare-and-swap loop (no lost
 *                   updates).
 *   ATOMICTYPE_XOR: two independent 32-bit atomic XORs on the halves;
 *                   halves can interleave, producing the small error
 *                   rate the benchmark tolerates.
 */
template<atomictype_t ATOMICTYPE>
__global__ void
d_bench(size_t n, benchtype *t)
{
size_t num_threads = gridDim.x * blockDim.x;
size_t thread_num = blockIdx.x * blockDim.x + threadIdx.x;
// This thread owns the half-open stream range [start, end).
size_t start = thread_num * 4 * n / num_threads;
size_t end = (thread_num + 1) * 4 * n / num_threads;
benchtype ran;
ran.u64 = d_starts(start);
for (ptrdiff_t i = start; i < end; ++i) {
// One LFSR step of the random stream.
ran.u64 = (ran.u64 << 1) ^ ((int64_t) ran.u64 < 0 ? POLY : 0);
switch (ATOMICTYPE) {
case ATOMICTYPE_CAS:
unsigned long long int *address, old, assumed;
address = (unsigned long long int *)&t[ran.u64 & (n - 1)].u64;
old = *address;
do {
assumed = old;
old = atomicCAS(address, assumed, assumed ^ ran.u64);
} while (assumed != old);
break;
case ATOMICTYPE_XOR:
atomicXor(&t[ran.u64 & (n - 1)].u32.x, ran.u32.x);
atomicXor(&t[ran.u64 & (n - 1)].u32.y, ran.u32.y);
break;
}
}
}
/* Verification pass: counts (into d_error) every table entry that no
 * longer holds its own index after the updates have been undone. */
static __global__ void
d_check(size_t n, benchtype *t)
{
	const size_t step = (size_t)gridDim.x * blockDim.x;
	for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += step) {
		if (t[i].u64 != i) {
			atomicAdd(d_error, 1);
		}
	}
}
/*
 * Builds the 64-entry GF(2) skip-ahead table used by d_starts() and
 * uploads it to constant memory c_m2.  m2[0] = 1 and each subsequent
 * entry is the previous one advanced by two LFSR steps of POLY.
 */
static void
starts()
{
uint64_t m2[64];
uint64_t temp = 1;
for (ptrdiff_t i = 0; i < 64; i++) {
m2[i] = temp;
// Two LFSR steps per table entry.
temp = (temp << 1) ^ ((int64_t) temp < 0 ? POLY : 0);
temp = (temp << 1) ^ ((int64_t) temp < 0 ? POLY : 0);
}
cudaMemcpyToSymbol(c_m2, m2, sizeof(m2));
}
/*
 * Driver: sizes the table (2^argv[1], default 2^DEFAULT_LOGN), selects
 * a GPU (argv[2]), times the CAS-based GUPS kernel, then runs the SAME
 * kernel a second time -- XOR-ing the identical update stream again
 * restores every entry to its initial value -- before verifying the
 * table with d_check.
 */
int
main(int argc, char *argv[])
{
size_t n = 0;
if (argc > 1) {
int logn = atoi(argv[1]);
if (logn >= 0) {
n = (size_t) 1 << logn;
}
}
if (n <= 0) {
n = (size_t) 1 << DEFAULT_LOGN;
}
/* NOTE(review): %llu with size_t is only correct where size_t is
 * unsigned long long; %zu would be portable. */
printf("Total table size = %llu (%llu bytes.)\n",
n, n * sizeof(uint64_t));
starts();
int ndev;
cudaGetDeviceCount(&ndev);
int dev = 0;
if (argc > 2) {
dev = atoi(argv[2]);
}
if (dev < 0 || dev >= ndev) {
dev = 0;
}
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, dev);
cudaSetDevice(dev);
printf("Using GPU %d of %d GPUs.\n", dev, ndev);
printf("Warp size = %d.\n", prop.warpSize);
printf("Multi-processor count = %d.\n", prop.multiProcessorCount);
printf("Max threads per multi-processor = %d.\n",
prop.maxThreadsPerMultiProcessor);
benchtype *d_t;
if (cudaMalloc((void **)&d_t, n * sizeof(benchtype)) != cudaSuccess) {
fprintf(stderr, "Memory allocation failed!\n");
exit(-1);
}
/* Warp-sized blocks, enough of them to fill every SM's thread slots. */
dim3 grid(prop.multiProcessorCount *
(prop.maxThreadsPerMultiProcessor / prop.warpSize));
dim3 thread(prop.warpSize);
cudaEvent_t begin, end;
cudaEventCreate(&begin);
cudaEventCreate(&end);
d_init<<<grid, thread>>>(n, d_t);
cudaEventRecord(begin);
cudaEventSynchronize(begin);
d_bench<ATOMICTYPE_CAS><<<grid, thread>>>(n, d_t);
cudaEventRecord(end);
cudaEventSynchronize(end);
float ms;
cudaEventElapsedTime(&ms, begin, end);
cudaEventDestroy(end);
cudaEventDestroy(begin);
double time = ms * 1.0e-3;
printf("Elapsed time = %.6f seconds.\n", time);
/* 4*n updates performed in `ms` milliseconds: /1e6 more gives GUP/s. */
double gups = 4 * n / (double) ms * 1.0e-6;
printf("Giga Updates per second = %.6f GUP/s.\n", gups);
/* Second, untimed pass: re-XOR the same stream to undo the updates. */
d_bench<ATOMICTYPE_CAS><<<grid, thread>>>(n, d_t);
void *p_error;
cudaGetSymbolAddress(&p_error, d_error);
/* NOTE(review): cudaMemset is given the __device__ symbol rather than
 * the resolved pointer p_error obtained above -- confirm this clears
 * the counter on all toolchains. */
cudaMemset(d_error, 0, sizeof(uint32_t));
d_check<<<grid, thread>>>(n, d_t);
uint32_t h_error;
cudaMemcpy(&h_error, p_error, sizeof(uint32_t), cudaMemcpyDeviceToHost);
printf("Verification: Found %u errors.\n", h_error);
cudaFree(d_t);
return 0;
}
|
14,072 | #include <cuda.h>
#include <cuda_runtime.h>
extern "C" {
// Reports the device's total and currently free memory in bytes.
// Uses the runtime API cudaMemGetInfo(), which initializes a context
// implicitly on first use; the original called the driver-API
// cuMemGetInfo() without any cuInit()/context setup, which returns
// CUDA_ERROR_NOT_INITIALIZED (leaving total/free unset) when no context
// exists yet.
void get_dev_mem(size_t& total, size_t& free)
{
cudaMemGetInfo(&free, &total);
}
}
|
14,073 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Prints each thread's threadIdx.x and the input element at that index.
// Only meaningful for single-block launches: every block would index
// the same first blockDim.x elements.
__global__ void unique_idx_calc_threadIdx(int *input) {
  int tid = threadIdx.x;
  printf("threadIdx: %d, value: %d, \n", tid, input[tid]);
}
// Computes a globally unique 1-D index (block offset + thread index)
// and prints the corresponding input element, demonstrating the
// standard gid = blockIdx.x * blockDim.x + threadIdx.x formula.
__global__ void unique_gid_calculation(int *input) {
  int tid = threadIdx.x;
  int offset = blockIdx.x * blockDim.x;
  int gid = tid + offset;
  printf("blockIdx.x: %d, threadIdx.x: %d, gid: %d, value : %d \n", blockIdx.x,
         tid, gid, input[gid]);
}
// Demo driver: copies a 16-element array to the device and launches a
// 4-block x 4-thread print kernel showing each thread's global index.
int main() {
  int array_size = 16;
  int array_byte_size = sizeof(int) * array_size;
  int h_data[] = {23, 9, 4, 53, 65, 12, 1, 33, 87, 45, 23, 12, 342, 56, 44, 99};
  for (int i = 0; i < array_size; i++) {
    printf("%d ", h_data[i]);
  }
  printf("\n \n");
  int *d_data;
  cudaMalloc((void **)&d_data, array_byte_size);
  cudaMemcpy(d_data, h_data, array_byte_size, cudaMemcpyHostToDevice);
  dim3 block(4);
  dim3 grid(4);
  // unique_idx_calc_threadIdx<<<grid, block>>>(d_data);
  unique_gid_calculation<<<grid, block>>>(d_data);
  // Flush the device printf buffer before tearing the context down.
  cudaDeviceSynchronize();
  // NOTE(review): d_data is never cudaFree'd; cudaDeviceReset() reclaims
  // it here, but an explicit free would be cleaner.
  cudaDeviceReset();
  return 0;
}
14,074 | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
// Writes each element's threadIdx.x (not the global id) into x, so the
// pattern 0..blockDim.x-1 repeats once per block.
__global__ void my_first_kernel(float *x) {
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  x[tid] = float(threadIdx.x);
}
// Launches my_first_kernel on 2 blocks x 8 threads and prints the 16
// resulting floats (expected: the sequence 0..7, twice).
int main() {
  float *h_ptr, *d_ptr;
  int blocks, threads, size, n;
  blocks = 2;
  threads = 8;
  size = blocks * threads;
  h_ptr = (float*)malloc(sizeof(float) * size);
  cudaMalloc((void**)&d_ptr, size * sizeof(float));
  my_first_kernel<<<blocks, threads>>>(d_ptr);
  cudaDeviceSynchronize();
  // The blocking memcpy also orders the read after kernel completion.
  cudaMemcpy(h_ptr, d_ptr, size * sizeof(float), cudaMemcpyDeviceToHost);
  for (n = 0; n < size; n++) {
    printf("%d %f\n", n, h_ptr[n]);
  }
  free(h_ptr);
  cudaFree(d_ptr);
  return 0;
}
|
14,075 | #include <stdio.h>
// Enumerates every CUDA device and prints its general, memory and
// multiprocessor properties.
int main()
{
cudaDeviceProp prop;
int count;
int driver_version;
int runtime_version;
cudaGetDeviceCount(&count);
for(int i = 0; i < count; i++)
{
cudaGetDeviceProperties(&prop, i);
cudaDriverGetVersion(&driver_version);
cudaRuntimeGetVersion(&runtime_version);
printf("\n------------------------Device ID: %d (general info)------------------------\n", i);
printf("Device name: %s\n", prop.name);
printf("Driver version: %d.%d\n", driver_version/1000, (driver_version%100)/10);
printf("Runtime version: %d.%d\n", runtime_version/1000, (runtime_version%100)/10);
printf("Compute capability version: %d.%d\n", prop.major, prop.minor);
printf("Clock rate: %.0f MHz\n", prop.clockRate * 1e-3f); // clockRate is in kHz
printf("Concurrent kernels: %s\n", prop.concurrentKernels? "Yes":"No");
#if CUDART_VERSION >= 5000
printf("Concurrent copy and kernel execution: %s, with: %d copy engines\n", (prop.deviceOverlap? "Yes":"No"), prop.asyncEngineCount);
#endif
printf("Kernel execution timeout: %s\n", prop.kernelExecTimeoutEnabled? "Yes":"No");
printf("Integrated GPU sharing host memory: %s\n", prop.integrated? "Yes":"No");
printf("Support host page locked memory mapping: %s\n", prop.canMapHostMemory? "Yes":"No");
printf("\n------------------------Device ID: %d (memory info)-------------------------\n", i);
#if CUDART_VERSION >= 5000
// memoryClockRate is reported in kHz, so kHz -> MHz is *1e-3.  (The
// original multiplied by 10e-7, printing GHz-scale numbers under an
// "Mhz" label.)
printf("Memory clock rate: %f Mhz\n", prop.memoryClockRate*1e-3);
printf("Memory bus width: %d-bit\n", prop.memoryBusWidth);
#endif
printf("Total global memory: %lf Mbytes\n", prop.totalGlobalMem/1048576.0);
printf("Total constant memory: %ld bytes\n", prop.totalConstMem);
printf("Max memory pitch: %ld bytes\n", prop.memPitch);
printf("\n------------------------Device ID: %d (MP info)-----------------------------\n", i);
printf("Multiprocessor count: %d\n", prop.multiProcessorCount);
printf("Shared memory per block: %ld bytes\n", prop.sharedMemPerBlock);
printf("Registers per block: %d\n", prop.regsPerBlock);
printf("Threads per warp: %d\n", prop.warpSize);
#if CUDART_VERSION >= 5000
printf("Max threads per multiprocessor: %d\n", prop.maxThreadsPerMultiProcessor);
#endif
printf("Max threads per block: %d\n", prop.maxThreadsPerBlock);
printf("Max thread dimensions: (%d, %d, %d)\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
// Bug fix: grid limits come from maxGridSize, not maxThreadsDim.
printf("Max grid dimensions: (%d, %d, %d)\n\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
}
return 0; // the original returned 1, signalling failure to the shell
}
|
14,076 | // nvcc backGroundSubtraction.cu -o temp.exe -lm
#include <math.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
// size of vector
#define M 7 // Number of frames
#define N 10 // Number of pixels per frame
#define BLOCK 128 // Size of blocks, best if it is a power of 2.
// Globals
int *BlockOfFrames_CPU, *BlockOfFrames_GPU;
float *MeanFrame_CPU, *MeanFrame_GPU;
float *BlockOfLogNormalFrames_CPU, *BlockOfLogNormalFrames_GPU;
float *MeanLogNormalFrame_CPU, *MeanLogNormalFrame_GPU;
float *MedianLogNormalFrame_CPU, *MedianLogNormalFrame_GPU;
float *StdvLogNormalFrame_CPU, *StdvLogNormalFrame_GPU;
dim3 dimBlock, dimGrid;
// Allocates host (malloc) and device (cudaMalloc) buffers: the M input
// frames of N pixels each, the log-normal frame block, and the four
// N-pixel result frames (mean, log-normal mean/median/stdv).
void AllocateMemory()
{
// This are the set of frames that will be used to generate the log normal frame
// and the standard deviation frame
BlockOfFrames_CPU = (int *)malloc(N*M*sizeof(int));
BlockOfLogNormalFrames_CPU = (float *)malloc(N*M*sizeof(float)); //Can remove after debug
cudaMalloc((void**)&BlockOfFrames_GPU,N*M*sizeof(int));
cudaMalloc((void**)&BlockOfLogNormalFrames_GPU,N*M*sizeof(float));
// Will hold the log normal frame and the standard deviation of the frames minus the log normal
MeanFrame_CPU = (float *)malloc(N*sizeof(float));
MeanLogNormalFrame_CPU = (float *)malloc(N*sizeof(float));
MedianLogNormalFrame_CPU = (float *)malloc(N*sizeof(float));
StdvLogNormalFrame_CPU = (float *)malloc(N*sizeof(float));
cudaMalloc((void**)&MeanFrame_GPU, N*sizeof(float));
cudaMalloc((void**)&MeanLogNormalFrame_GPU, N*sizeof(float));
cudaMalloc((void**)&MedianLogNormalFrame_GPU, N*sizeof(float));
cudaMalloc((void**)&StdvLogNormalFrame_GPU, N*sizeof(float));
}
/*
However you get 300,000 by 80 pixels loaded in here then CUDA will do the rest.
This is loading the big vector from 1st 300,000 then from 2nd 300,000 and so on until frame 80.
It may be faster to load the pixels the other way 80 first pixels then 80 second pixels and so on 300000 times.
Test it and see.
I just load (below) some small values to check that everything is working.
M is the number of frames and N is the number of pixels per frame
*/
// Fills the frame block with synthetic test data (pixel value = frame
// index + pixel index) and poisons all four result frames with -1.0 so
// untouched outputs are easy to spot during debugging.
void loadPixels()
{
	for(int frame = 0; frame < M; frame++)
	{
		for(int pixel = 0; pixel < N; pixel++)
		{
			BlockOfFrames_CPU[pixel + frame*N] = pixel + frame;
		}
	}
	for(int pixel = 0; pixel < N; pixel++)
	{
		MeanFrame_CPU[pixel] = -1.0;
		MeanLogNormalFrame_CPU[pixel] = -1.0;
		MedianLogNormalFrame_CPU[pixel] = -1.0;
		StdvLogNormalFrame_CPU[pixel] = -1.0;
	}
}
// One-dimensional launch configuration: BLOCK threads per block and
// enough blocks to cover all N pixels (ceiling division).
void SetUpCudaDevices()
{
	dimBlock = dim3(BLOCK, 1, 1);
	dimGrid = dim3(((N - 1)/BLOCK) + 1, 1, 1);
}
// Copies the raw frame block host -> device.
// NOTE(review): cudaMemcpyAsync on pageable (malloc'd) host memory does
// not overlap; pinned memory (cudaMallocHost) would be needed for a
// truly asynchronous transfer.
void copyFramessUp()
{
cudaMemcpyAsync(BlockOfFrames_GPU, BlockOfFrames_CPU, N*M*sizeof(int), cudaMemcpyHostToDevice);
}
// One thread per pixel: averages that pixel across all frames.
// Accumulates in double to limit rounding error, stores as float.
__global__ void creatingMeanPixelFrame(float *meanFrame, int *allFrames, int pixelsPerFrame, int frames)
{
	int pixel = threadIdx.x + blockIdx.x*blockDim.x;
	if(pixel >= pixelsPerFrame) return;  // guard the grid tail
	double total = 0.0;
	for(int f = 0; f < frames; f++)
	{
		total += allFrames[pixel + pixelsPerFrame*f];
	}
	meanFrame[pixel] = total/(float)frames;
}
// For every pixel, converts each frame sample into log(|sample - mean|),
// nudging exact zeros to 1e-6 first because log(0) is undefined.
__global__ void creatingLogNormalFrames(float *meanFrame, int *allFrames, float *allFramesLogNormal, int pixelsPerFrame, int frames)
{
int id;
int pixel = threadIdx.x + blockIdx.x*blockDim.x;
if(pixel < pixelsPerFrame)
{
for(int i = 0; i < frames; i++)
{
//Same screen location (pixel) but moving through frames (i).
id = pixel + pixelsPerFrame*i;
allFramesLogNormal[id] = (float)allFrames[id] - meanFrame[pixel];
allFramesLogNormal[id] = abs(allFramesLogNormal[id]);
//Can't take log of zero so to be safe check and move it off zero.
if(allFramesLogNormal[id] == 0.0f)
{
allFramesLogNormal[id] = 0.000001f;
}
allFramesLogNormal[id] = logf(allFramesLogNormal[id]);
//allFramesLogNormal[id] = (float)allFrames[id]; //Will make the logNormal be the same as the original for testing.
}
}
}
// One thread per pixel: mean of that pixel's log-normal samples across
// all frames, accumulated in double for accuracy.
__global__ void creatingMeanLogNormalFrame(float *meanlogNormalFrame, float *allFramesLogNormal, int pixelsPerFrame, int frames)
{
	int pixel = threadIdx.x + blockIdx.x*blockDim.x;
	if(pixel >= pixelsPerFrame) return;  // guard the grid tail
	double total = 0.0;
	for(int f = 0; f < frames; f++)
	{
		total += allFramesLogNormal[pixel + pixelsPerFrame*f];
	}
	meanlogNormalFrame[pixel] = total/(float)frames;
}
// Population standard deviation (divides by `frames`, not frames-1) of
// each pixel's log-normal samples around that pixel's log-normal mean.
__global__ void creatingStdvLogNormalFrame(float *stdvLogNormalFrame, float *meanLogNormalFrame, float *allFramesLogNormal, int pixelsPerFrame, int frames)
{
int pixel = threadIdx.x + blockIdx.x*blockDim.x;
float temp;
if(pixel < pixelsPerFrame)
{
double sum = 0.0;
for(int i = 0; i < frames; i++)
{
temp = allFramesLogNormal[pixel + pixelsPerFrame*i] - meanLogNormalFrame[pixel];
sum += temp*temp;
}
stdvLogNormalFrame[pixel] = sqrtf((sum)/(float)(frames)); //frames - 1.0 Which formula for STDV
}
}
// Per-pixel median of the M log-normal samples, found by repeated
// selection of the smallest unused sample (O(frames^2) per pixel --
// acceptable for small M).  Even frame counts average the two middle
// values; odd counts take the single middle value.
// NOTE(review): the printf of "middle" in the odd branch looks like
// leftover debug output -- every in-range thread prints it.
__global__ void creatingMedianLogNormalFrame(float *medianlogNormalFrame, float *allFramesLogNormal, int pixelsPerFrame, int frames)
{
int pixel = threadIdx.x + blockIdx.x*blockDim.x;
// used[] marks samples already consumed by the selection passes.
int used[M], index, count;
float median = 0.0;
float small;
if(pixel < pixelsPerFrame)
{
for(int i = 0; i < frames; i++)
{
used[i] = 0;
}
if(frames%2 == 0)
{
// Even count: average the two middle order statistics.
int middle2 = frames/2;
int middle1 = middle2 - 1;
index = -1;
count = 0;
while(count <= middle2)
{
small = 10000000.0f; //Needs to be a number larger than anything you would get in a log of a pixel.
for(int i = 0; i < frames; i++)
{
if(allFramesLogNormal[pixel + pixelsPerFrame*i] < small && used[i] == 0)
{
small = allFramesLogNormal[pixel + pixelsPerFrame*i];
index = i;
}
}
if(index == -1) printf("\nError no index found\n");
used[index] = 1;
if(count == middle1 || count == middle2)
{
median += allFramesLogNormal[pixel + pixelsPerFrame*index];
}
count++;
}
median /=2.0f;
}
else
{
// Odd count: select up to the single middle order statistic.
int middle = frames/2;
printf("\n middle = %d\n", middle);
index = -1;
count = 0;
while(count <= middle)
{
small = 10000000.0f; //Needs to be a number larger than anything you would get in a log of a pixel.
for(int i = 0; i < frames; i++)
{
if(allFramesLogNormal[pixel + pixelsPerFrame*i] < small)
{
if(used[i] == 0)
{
small = allFramesLogNormal[pixel + pixelsPerFrame*i];
index = i;
}
}
}
if(index == -1) printf("\nError no index found\n");
used[index] = 1;
if(count == middle)
{
median += allFramesLogNormal[pixel + pixelsPerFrame*index];
}
count++;
}
}
medianlogNormalFrame[pixel] = median;
}
}
// Aborts the program if the most recent CUDA API call or kernel launch
// failed; `message` identifies the call site in the diagnostic.
void errorCheck(const char *message)
{
cudaError_t error;
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("\n CUDA ERROR: %s = %s\n", message, cudaGetErrorString(error));
// The original exit(0) signalled success to the shell even on a CUDA
// failure; report a nonzero status so scripts/CI can detect it.
exit(EXIT_FAILURE);
}
}
// Copies every result frame (plus the log-normal block, kept for
// debugging) device -> host, checking the error state after each copy.
// Results are consumed only after the cudaDeviceSynchronize() in main.
void copyFramesDown()
{
cudaMemcpyAsync(MeanFrame_CPU, MeanFrame_GPU, N*sizeof(float), cudaMemcpyDeviceToHost);
errorCheck("copy Mean frame down");
cudaMemcpyAsync(MeanLogNormalFrame_CPU, MeanLogNormalFrame_GPU, N*sizeof(float), cudaMemcpyDeviceToHost);
errorCheck("copy MeanLogNormal frame down");
cudaMemcpyAsync(MedianLogNormalFrame_CPU, MedianLogNormalFrame_GPU, N*sizeof(float), cudaMemcpyDeviceToHost);
errorCheck("copy MedianLogNormal frame down");
cudaMemcpyAsync(StdvLogNormalFrame_CPU, StdvLogNormalFrame_GPU, N*sizeof(float), cudaMemcpyDeviceToHost);
errorCheck("copy StdvLogNormal frame down");
cudaMemcpyAsync(BlockOfLogNormalFrames_CPU, BlockOfLogNormalFrames_GPU, M*N*sizeof(float), cudaMemcpyDeviceToHost); //Can remove after debug
errorCheck("copy BlockOfLogNormalFrames frame down");
}
// Prints the raw frame block, the log-normal block, and the four
// per-pixel statistic frames for manual inspection.
void stats()
{
printf("\n\n");
// Raw input frames, one row per frame.
for(int i = 0; i < M; i++)
{
for(int j = 0; j < N; j++)
{
printf("%d ", BlockOfFrames_CPU[j +i*N]);
}
printf("\n");
}
printf("\n\n");
// Log-normal transformed frames, one row per frame.
for(int i = 0; i < M; i++)
{
for(int j = 0; j < N; j++)
{
printf("%f ", BlockOfLogNormalFrames_CPU[j +i*N]);
}
printf("\n");
}
printf("\n\n");
// Per-pixel statistics, one line per pixel.
for(int i = 0; i < N; i++)
{
printf("MeanFrame[%d] = %f MeanLogNormalFrame[%d] = %f MedianLogNormalFrame[%d] = %f StdvLogNormalFrame[%d] = %f \n", i, MeanFrame_CPU[i], i, MeanLogNormalFrame_CPU[i], i, MedianLogNormalFrame_CPU[i], i, StdvLogNormalFrame_CPU[i]);
}
}
// Releases every host and device allocation made in AllocateMemory().
void cleanUp()
{
free(BlockOfFrames_CPU);
free(MeanFrame_CPU);
free(MeanLogNormalFrame_CPU);
free(MedianLogNormalFrame_CPU);
free(StdvLogNormalFrame_CPU);
free(BlockOfLogNormalFrames_CPU); //Can remove after debug
cudaFree(BlockOfFrames_GPU);
cudaFree(BlockOfLogNormalFrames_GPU);
cudaFree(MeanFrame_GPU);
cudaFree(MeanLogNormalFrame_GPU);
cudaFree(MedianLogNormalFrame_GPU);
cudaFree(StdvLogNormalFrame_GPU);
}
// Pipeline: allocate, generate test frames, upload, then compute the
// per-pixel mean, log-normal transform, log-normal mean, median and
// standard deviation on the GPU; finally copy everything back and print.
int main()
{
AllocateMemory();
SetUpCudaDevices();
loadPixels();
copyFramessUp();
errorCheck("copyFramessUp");
cudaDeviceSynchronize();
creatingMeanPixelFrame<<<dimGrid,dimBlock>>>(MeanFrame_GPU, BlockOfFrames_GPU, N, M);
errorCheck("creatingMeanPixelFrame");
creatingLogNormalFrames<<<dimGrid,dimBlock>>>(MeanFrame_GPU, BlockOfFrames_GPU, BlockOfLogNormalFrames_GPU, N, M);
errorCheck("creatingLogNormalFrames");
creatingMeanLogNormalFrame<<<dimGrid,dimBlock>>>(MeanLogNormalFrame_GPU, BlockOfLogNormalFrames_GPU, N, M);
errorCheck("creatingMeanLogNormalFrame");
creatingMedianLogNormalFrame<<<dimGrid,dimBlock>>>(MedianLogNormalFrame_GPU, BlockOfLogNormalFrames_GPU, N, M);
errorCheck("creatingMedianLogNormalFrame");
creatingStdvLogNormalFrame<<<dimGrid,dimBlock>>>(StdvLogNormalFrame_GPU, MeanLogNormalFrame_GPU, BlockOfLogNormalFrames_GPU, N, M);
errorCheck("creatingStdvLogNormalFrame");
copyFramesDown();
errorCheck("copyFramesDown");
// Ensure all async copies have landed before reading host buffers.
cudaDeviceSynchronize();
stats();
cleanUp();
printf("\n DONE \n");
}
|
14,077 | #include <stdio.h>
#define MSIZE 12*8*21
#define BLOCK_SIZE 256
#define WARP_SIZE 32
//__constant__ int c_row[64*1024/4];
//__constant__ int c_row[MSIZE*BLOCK_SIZE/WARP_SIZE*4];
// Prints the value MSIZE*BLOCK_SIZE/WARP_SIZE*4 -- presumably the
// byte footprint of the commented-out constant-memory c_row buffer
// above (TODO confirm the intent).
int main()
{
	const int value = MSIZE*BLOCK_SIZE/WARP_SIZE*4;
	printf ("%d\n", value);
	return 0;
}
|
14,078 | /**
03/10/2011
Jaroslaw Wojtasik
noCuda
errors.c
**/
#include "errors.cuh"
#include <stdio.h>
//==============================================
//== Globals
static ErrorCode gCurrError;
//==============================================
//== Functions
/*
* Sets Error code to be retrived as the last one.
*/
/*
 * Stores `error` as the most recent error code and returns it, so the
 * call can be chained inside return statements.
 */
ErrorCode SetLastErrorCode( ErrorCode error ) {
gCurrError = error;
return gCurrError;
}
/*
* Returns the last set error code.
*/
/*
 * Returns the error code most recently recorded by SetLastErrorCode().
 */
ErrorCode GetLastErrorCode( void ) {
return gCurrError;
}
/*
 * Maps an ErrorCode to a human-readable, statically allocated string.
 * Unrecognized codes yield "Unknown Error"; callers must not free the
 * returned pointer.
 */
char * ErrorDesc( ErrorCode errorType ) {
	switch ( errorType ) {
		case errOk:           return "No Error";
		case errGeneral:      return "General Error";
		case errFileNotFound: return "File Not Found";
		case errFileCorupted: return "File Corupted";
		case errFileWrite:    return "File Write Error";
		case errFileRead:     return "File Read Error";
		case errNoMemory:     return "Out Of Memory";
		case errDataNotReady: return "Data Not Ready";
		case errNoData:       return "No Data";
		default:              return "Unknown Error";
	} // switch
}
|
14,079 | #include "includes.h"
/*
 * Accumulates the 27 unique scalars of the point-to-plane normal equations
 * (21 for the symmetric 6x6 system matrix A, 6 for the right-hand side b)
 * used for disparity-based rigid pose refinement across multiple cameras.
 * blockIdx.y selects the camera; each thread sums over a strided slice of
 * that camera's compacted disparity samples, comparing the 3D point
 * reconstructed from measured disparity against the point reconstructed
 * from the rendered Z-buffer along the same pixel ray, weighted by the
 * surface normal.  Per-thread partial sums are written to d_CD laid out as
 * 27 planes of n_val_accum floats per camera (a later reduction sums each
 * plane).  Launch contract (implied by out_ind): gridDim.x * blockDim.x
 * == n_val_accum for every camera in the grid's y-dimension.
 */
__global__ void normal_eqs_disparity_multicam_GPU( float *d_CD, float *d_disparity_compact, float4 *d_Zbuffer_normals_compact, int *d_ind_disparity_Zbuffer, const float *d_focal_length, const float *d_nodal_point_x, const float *d_nodal_point_y, const float *d_baseline, const int *d_n_cols, const int *d_n_values_disparity, const int *d_start_ind_disparity, const int *d_pixel_ind_offset) {
int n_val_accum = gridDim.x * blockDim.x; // _MAX_N_VAL_ACCUM may not be
// multiple of blocksize
// number of samples this camera contributes, and how many strided passes
// each thread needs to cover them all
int n_disparity = d_n_values_disparity[blockIdx.y];
int n_accum = (int)ceilf((float)n_disparity / (float)n_val_accum);
int start_ind = d_start_ind_disparity[blockIdx.y];
// per-camera intrinsics / stereo parameters
float f = d_focal_length[blockIdx.y];
float ox = d_nodal_point_x[blockIdx.y];
float oy = d_nodal_point_y[blockIdx.y];
float b = d_baseline[blockIdx.y];
int n_cols = d_n_cols[blockIdx.y];
int pixel_ind_offset = d_pixel_ind_offset[blockIdx.y];
// initialize accumulators
float A0 = 0.0f, A1 = 0.0f, A2 = 0.0f, A3 = 0.0f, A4 = 0.0f, A5 = 0.0f,
A6 = 0.0f, A7 = 0.0f, A8 = 0.0f, A9 = 0.0f, A10 = 0.0f, A11 = 0.0f,
A12 = 0.0f, A13 = 0.0f, A14 = 0.0f, A15 = 0.0f, A16 = 0.0f, A17 = 0.0f,
A18 = 0.0f, A19 = 0.0f, A20 = 0.0f, A21 = 0.0f, A22 = 0.0f, A23 = 0.0f,
A24 = 0.0f, A25 = 0.0f, A26 = 0.0f;
// strided accumulation loop: each thread visits n_accum samples
for (int in_ind = blockDim.x * blockIdx.x * n_accum + threadIdx.x;
in_ind < blockDim.x * (blockIdx.x + 1) * n_accum; in_ind += blockDim.x) {
if (in_ind < n_disparity) { // is this a valid sample?
// fetch disparity, Zbuffer and normal from global memory
float disp = d_disparity_compact[in_ind + start_ind];
float4 tmp = d_Zbuffer_normals_compact[in_ind + start_ind];
float Zbuffer = tmp.x;
float nx = tmp.y;
float ny = tmp.z;
float nz = tmp.w;
// compute coordinates
int pixel_ind =
d_ind_disparity_Zbuffer[in_ind + start_ind] - pixel_ind_offset;
float y = floorf(__fdividef((float)pixel_ind, n_cols));
float x = (float)pixel_ind - y * n_cols;
// normalize pixel coordinates to the camera ray (note the y-axis flip)
x = __fdividef((x - ox), f);
y = -__fdividef((y - oy), f);
// reconstruct 3D point from disparity
float Zd = -(f * b) / disp;
float Xd = x * Zd;
float Yd = y * Zd;
// reconstruct 3D point from model
float Zm = Zbuffer;
float Xm = x * Zm;
float Ym = y * Zm;
/************************/
/* evaluate constraints */
/************************/
// unique values A-matrix
A0 += nx * nx;
A1 += nx * ny;
A2 += nx * nz;
A3 += Ym * nx * nz - Zm * nx * ny;
A4 += Zm * (nx * nx) - Xm * nx * nz;
A5 += -Ym * (nx * nx) + Xm * nx * ny;
A6 += ny * ny;
A7 += ny * nz;
A8 += -Zm * (ny * ny) + Ym * ny * nz;
A9 += -Xm * ny * nz + Zm * nx * ny;
A10 += Xm * (ny * ny) - Ym * nx * ny;
A11 += nz * nz;
A12 += Ym * (nz * nz) - Zm * ny * nz;
A13 += -Xm * (nz * nz) + Zm * nx * nz;
A14 += Xm * ny * nz - Ym * nx * nz;
A15 += (Ym * Ym) * (nz * nz) + (Zm * Zm) * (ny * ny) -
Ym * Zm * ny * nz * 2.0f;
A16 += -Xm * Ym * (nz * nz) - (Zm * Zm) * nx * ny + Xm * Zm * ny * nz +
Ym * Zm * nx * nz;
A17 += -Xm * Zm * (ny * ny) - (Ym * Ym) * nx * nz + Xm * Ym * ny * nz +
Ym * Zm * nx * ny;
A18 += (Xm * Xm) * (nz * nz) + (Zm * Zm) * (nx * nx) -
Xm * Zm * nx * nz * 2.0f;
A19 += -Ym * Zm * (nx * nx) - (Xm * Xm) * ny * nz + Xm * Ym * nx * nz +
Xm * Zm * nx * ny;
A20 += (Xm * Xm) * (ny * ny) + (Ym * Ym) * (nx * nx) -
Xm * Ym * nx * ny * 2.0f;
// B-vector
A21 += Xd * (nx * nx) - Xm * (nx * nx) + Yd * nx * ny - Ym * nx * ny +
Zd * nx * nz - Zm * nx * nz;
A22 += Yd * (ny * ny) - Ym * (ny * ny) + Xd * nx * ny - Xm * nx * ny +
Zd * ny * nz - Zm * ny * nz;
A23 += Zd * (nz * nz) - Zm * (nz * nz) + Xd * nx * nz - Xm * nx * nz +
Yd * ny * nz - Ym * ny * nz;
A24 += -Yd * Zm * (ny * ny) + Ym * Zd * (nz * nz) + Ym * Zm * (ny * ny) -
Ym * Zm * (nz * nz) - (Ym * Ym) * ny * nz + (Zm * Zm) * ny * nz +
Xd * Ym * nx * nz - Xm * Ym * nx * nz - Xd * Zm * nx * ny +
Yd * Ym * ny * nz + Xm * Zm * nx * ny - Zd * Zm * ny * nz;
A25 += Xd * Zm * (nx * nx) - Xm * Zd * (nz * nz) - Xm * Zm * (nx * nx) +
Xm * Zm * (nz * nz) + (Xm * Xm) * nx * nz - (Zm * Zm) * nx * nz -
Xd * Xm * nx * nz - Xm * Yd * ny * nz + Xm * Ym * ny * nz +
Yd * Zm * nx * ny - Ym * Zm * nx * ny + Zd * Zm * nx * nz;
A26 += -Xd * Ym * (nx * nx) + Xm * Yd * (ny * ny) + Xm * Ym * (nx * nx) -
Xm * Ym * (ny * ny) - (Xm * Xm) * nx * ny + (Ym * Ym) * nx * ny +
Xd * Xm * nx * ny - Yd * Ym * nx * ny + Xm * Zd * ny * nz -
Xm * Zm * ny * nz - Ym * Zd * nx * nz + Ym * Zm * nx * nz;
}
}
/**************************/
/* write out accumulators */
/**************************/
// plane-major layout: camera block of 27*n_val_accum floats, one plane of
// n_val_accum partials per accumulator
int out_ind =
27 * n_val_accum * blockIdx.y + blockDim.x * blockIdx.x + threadIdx.x;
d_CD[out_ind] = A0;
d_CD[out_ind + n_val_accum] = A1;
d_CD[out_ind + 2 * n_val_accum] = A2;
d_CD[out_ind + 3 * n_val_accum] = A3;
d_CD[out_ind + 4 * n_val_accum] = A4;
d_CD[out_ind + 5 * n_val_accum] = A5;
d_CD[out_ind + 6 * n_val_accum] = A6;
d_CD[out_ind + 7 * n_val_accum] = A7;
d_CD[out_ind + 8 * n_val_accum] = A8;
d_CD[out_ind + 9 * n_val_accum] = A9;
d_CD[out_ind + 10 * n_val_accum] = A10;
d_CD[out_ind + 11 * n_val_accum] = A11;
d_CD[out_ind + 12 * n_val_accum] = A12;
d_CD[out_ind + 13 * n_val_accum] = A13;
d_CD[out_ind + 14 * n_val_accum] = A14;
d_CD[out_ind + 15 * n_val_accum] = A15;
d_CD[out_ind + 16 * n_val_accum] = A16;
d_CD[out_ind + 17 * n_val_accum] = A17;
d_CD[out_ind + 18 * n_val_accum] = A18;
d_CD[out_ind + 19 * n_val_accum] = A19;
d_CD[out_ind + 20 * n_val_accum] = A20;
d_CD[out_ind + 21 * n_val_accum] = A21;
d_CD[out_ind + 22 * n_val_accum] = A22;
d_CD[out_ind + 23 * n_val_accum] = A23;
d_CD[out_ind + 24 * n_val_accum] = A24;
d_CD[out_ind + 25 * n_val_accum] = A25;
d_CD[out_ind + 26 * n_val_accum] = A26;
}
14,080 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define m 10
#define n 5
// Element-wise matrix addition C = A + B over a fil x col matrix stored
// row-major in flat arrays.  Intended launch: <<<fil, col>>> (one block per
// row, one thread per column), i.e. blockDim.x == col.
__global__ void matrix_sum(int A[], int B[], int C[], int fil, int col)
{
int my_ij = blockDim.x * blockIdx.x + threadIdx.x;
// Guard on the flat index itself (fix: the original guard
// `blockIdx.x < fil && threadIdx.x < col` only stays in bounds when
// blockDim.x == col; this form is safe for any launch configuration).
if (my_ij < fil * col && threadIdx.x < col)
C[my_ij] = A[my_ij] + B[my_ij];
}
// Fills a fil x col row-major matrix with pseudo-random values in [0, 98].
// Fix: index with the `col` parameter instead of the file-scope macro `n`,
// so the function is correct for any column count (it previously only
// worked when col == n).
void fill_matrix(int A[], int fil, int col) {
int i, j;
for (i = 0; i < fil; i++) {
for (j = 0; j < col; j++)
A[i*col+j] = rand()%99;
}
}
// Prints a fil x col row-major matrix to stdout, one row per line.
// Fix: index with the `col` parameter instead of the file-scope macro `n`
// (the original printed garbage rows whenever col != n).
void print_matrix(int A[], int fil, int col) {
int i, j;
for (i = 0; i < fil; i++) {
for (j = 0; j < col; j++)
printf("%d ", A[i*col+j]);
printf("\n");
}
}
// Driver: builds two random m x n matrices on the host, adds them on the
// GPU with one block per row and one thread per column, and prints both
// operands and the result.
int main(int argc, char* argv[]) {
	size_t bytes = m * n * sizeof(int);
	// host buffers
	int *h_A = (int*) malloc(bytes);
	int *h_B = (int*) malloc(bytes);
	int *h_C = (int*) malloc(bytes);
	fill_matrix(h_A, m, n);
	fill_matrix(h_B, m, n);
	print_matrix(h_A, m, n);
	printf("\n");
	print_matrix(h_B, m, n);
	printf("\n");
	// device buffers
	int *d_A, *d_B, *d_C;
	cudaMalloc((void **)&d_A, bytes);
	cudaMalloc((void **)&d_B, bytes);
	cudaMalloc((void **)&d_C, bytes);
	cudaMemcpy(d_A, h_A, bytes, cudaMemcpyHostToDevice);
	cudaMemcpy(d_B, h_B, bytes, cudaMemcpyHostToDevice);
	matrix_sum<<<m, n>>>(d_A, d_B, d_C, m, n);
	// blocking copy-back also synchronizes with the kernel
	cudaMemcpy(h_C, d_C, bytes, cudaMemcpyDeviceToHost);
	print_matrix(h_C, m, n);
	cudaFree(d_A);
	cudaFree(d_B);
	cudaFree(d_C);
	free(h_A);
	free(h_B);
	free(h_C);
	return 0;
}
|
14,081 | #include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
using namespace std;
const int DIMBLOCKX=32;
//DEVICE
// Sums one tam-element contiguous slice of `matrix` per block and stores
// the total in array[blockIdx.x].  Each thread serially adds a `step`-sized
// sub-range into shared memory; thread 0 then folds the per-thread partials
// sequentially.
// Assumptions (not checked): tam is divisible by blockDim.x, and
// blockDim.x <= DIMBLOCKX (the shared buffer's capacity).
// NOTE(review): slice b starts at matrix[b*tam] — for a row-major matrix
// this sums rows, despite the "Column" in the name; with the column-major
// interpretation implied by the caller it sums columns.  Confirm intent.
__global__ void kernelSum_Column_Matrix(float* matrix, float* array, int tam){
__shared__ float shareMatrix[DIMBLOCKX];
float value=0;
int col=blockIdx.x;
int step= tam/blockDim.x;
int posIni= col*tam+threadIdx.x*step;
// each thread accumulates its own contiguous sub-range
for(int i=0;i<step;i++){
value=value+matrix[posIni+i];
}
shareMatrix[threadIdx.x]=value;
__syncthreads();
// serial reduction of the per-thread partials by a single thread
if(threadIdx.x==0){
for(int j=1;j<blockDim.x;j++){
shareMatrix[0]=shareMatrix[0]+shareMatrix[j];
}
array[blockIdx.x]=shareMatrix[0];
}
}
//HOST
// Builds a 512x512 matrix whose element (i,j) equals j, sums each
// contiguous 512-element slice on the GPU (one block per slice), and
// prints the 512 partial sums.
int main(){
int row=512;
int col=512;
float* matrix= (float*) malloc(sizeof(float)*row*col);
float* matrix_DEVICE= NULL;
float* array_DEVICE= NULL;
float* array=new float[col];
for(int i=0;i<row;i++){
for(int j=0; j<col;j++){
matrix[i*col+j]=j;
}
}
cudaMalloc((void**)&matrix_DEVICE,sizeof(float)*row*col);
cudaMalloc((void**)&array_DEVICE, col*sizeof(float));
cudaMemcpy(matrix_DEVICE,matrix,sizeof(float)*row*col,cudaMemcpyHostToDevice);
dim3 dimGrid(col,1);
dim3 dimBlock(row/DIMBLOCKX,1);
kernelSum_Column_Matrix<<< dimGrid , dimBlock >>>(matrix_DEVICE,array_DEVICE,col);
cudaMemcpy(array,array_DEVICE,sizeof(float)*col,cudaMemcpyDeviceToHost);
for( int index = 0; index<col ; index++){
cout<<array[index]<<" ";
}
cudaFree(matrix_DEVICE);
cudaFree(array_DEVICE);
delete[] array;
// Fix: `matrix` was allocated with malloc() but released with delete[],
// which is undefined behaviour; free() is the matching deallocator.
free(matrix);
}
14,082 | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#define NTYPES 3
#define MAXVARS 16
// Array counts by type
static int dcount=0;
static int fcount=0;
static int icount=0;
// CPU memory
double *dvec[MAXVARS];
float *fvec[MAXVARS];
int *ivec[MAXVARS];
// Host memory
double* devdVec[MAXVARS];
float* devfVec[MAXVARS];
int* deviVec[MAXVARS];
static dim3 ngrids;
static dim3 nblocks;
static dim3 nthreads;
// Releases every host buffer and its device twin that init_kernel()
// allocated, in type order (int, float, double).
// Fixes vs. original: the loops ran `i <= count` (one slot past the end),
// the bodies indexed with `count` instead of `i` (freeing the same slot
// repeatedly and leaking the rest), and device pointers obtained from
// cudaMalloc were released with free() instead of cudaFree().
void freeAllmem(){
int i;
printf("Freeing integer variables\n");
for(i=0;i<icount;i++){
if(ivec[i])free(ivec[i]);
if(deviVec[i])cudaFree(deviVec[i]);
}
printf("Freeing float variables\n");
for(i=0;i<fcount;i++){
if(fvec[i])free(fvec[i]);
if(devfVec[i])cudaFree(devfVec[i]);
}
printf("Freeing double variables\n");
for(i=0;i<dcount;i++){
if(dvec[i])free(dvec[i]);
if(devdVec[i])cudaFree(devdVec[i]);
}
printf("Done freeing memory...\n\n\n");
return;
}
/*
 * Variadic allocator: argc is the number of variadic arguments, passed as
 * (type, size) pairs where type is 0 = double, 1 = float, 2 = int.
 * For each pair this allocates a zeroed host array, a matching device
 * array, and copies the zeroed host data down; the pointers land in the
 * file-scope dvec/fvec/ivec and devdVec/devfVec/deviVec tables and the
 * per-type counters advance.  Any CUDA failure frees everything allocated
 * so far and exits the process.
 * NOTE(review): the per-type counters are not bounds-checked against
 * MAXVARS, and the `|` in the error checks is a bitwise OR of the two
 * (status != cudaSuccess) comparisons — correct due to precedence, but
 * `||` would state the intent more clearly.
 */
void init_kernel(int argc,...){
int i;  // NOTE(review): unused
//Get device information
int count;
cudaGetDeviceCount(&count);
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop,0);
printf("\n\nCompute capability: %d.%d\n",prop.major,prop.minor);
cudaError_t cudastatus0,cudastatus1;
int arraysize;
// element sizes indexed by the type tag (0/1/2)
int typesizes[NTYPES]={sizeof(double),sizeof(float),sizeof(int)};
int tnum=-1;
// stdarg list vars
int vindex;
va_list varlist;
va_start(varlist,argc);
// each iteration consumes one (type, size) pair
for(vindex=0;vindex<argc/2;vindex++){
tnum=va_arg(varlist,int);
if(tnum==0){
printf("......found a double.\n");
arraysize=va_arg(varlist,int);
dvec[dcount] = (double*) calloc(arraysize, typesizes[tnum]);
cudastatus0=cudaMalloc((void**)&devdVec[dcount],arraysize*typesizes[tnum]);
cudastatus1=cudaMemcpy(devdVec[dcount],dvec[dcount],arraysize*typesizes[tnum],
cudaMemcpyHostToDevice);
// Error check
if(cudastatus0!=cudaSuccess|cudastatus1!=cudaSuccess){
printf("Error in devdVec memory allocation:\nstatus0: %s\nstatus1: %s\n",
cudaGetErrorString(cudastatus0),
cudaGetErrorString(cudastatus1));
freeAllmem();
exit(EXIT_FAILURE);
}
// update double vector count
dcount++;
printf("Allocated double variable of size: %d\n",arraysize);
}else if(tnum==1){
printf("......found a float.\n");
arraysize=va_arg(varlist,int);
fvec[fcount] = (float*) calloc(arraysize, typesizes[tnum]);
cudastatus0=cudaMalloc((void**)&devfVec[fcount],arraysize*typesizes[tnum]);
cudastatus1=cudaMemcpy(devfVec[fcount],fvec[fcount],arraysize*typesizes[tnum],
cudaMemcpyHostToDevice);
// Error check
if(cudastatus0!=cudaSuccess|cudastatus1!=cudaSuccess){
printf("Error in devfVec memory allocation:\nstatus0: %s\nstatus1: %s\n",
cudaGetErrorString(cudastatus0),
cudaGetErrorString(cudastatus1));
freeAllmem();
exit(EXIT_FAILURE);
}
// update float vector count
fcount++;
printf("Allocated float variable of size: %d\n",arraysize);
}else if(tnum==2){
printf("......found an int.\n");
arraysize=va_arg(varlist,int);
ivec[icount] = (int*) calloc(arraysize, typesizes[tnum]);
cudastatus0=cudaMalloc((void**)&deviVec[icount],arraysize*typesizes[tnum]);
cudastatus1=cudaMemcpy(deviVec[icount],ivec[icount],arraysize*typesizes[tnum],
cudaMemcpyHostToDevice);
// Error check
if(cudastatus0!=cudaSuccess|cudastatus1!=cudaSuccess){
printf("Error in deviVec memory allocation:\nstatus0: %s\nstatus1: %s\n",
cudaGetErrorString(cudastatus0),
cudaGetErrorString(cudastatus1));
freeAllmem();
exit(EXIT_FAILURE);
}
// update integer vector count
icount++;
printf("Allocated integer variable of size: %d\n",arraysize);
}else{
printf("Error, unknown datatype: %d.\n", tnum);
freeAllmem();
printf("Exiting on failure...\n\n\n");
exit(EXIT_FAILURE);
}
}//end for-loop
va_end(varlist);
return;
}
|
14,083 | #include<stdio.h>
#include<stdlib.h>
#define N 2048
#define BLOCK_SIZE 32
// Naive transpose of an N x N matrix: output[x*N + y] = input[y*N + x].
// The global load is coalesced; the stride-N store is not (the commented
// variant would move the discoalesced access to the load side instead).
__global__ void matrix_transpose_naive(int *input, int *output) {
	int gx = threadIdx.x + blockIdx.x * blockDim.x;
	int gy = threadIdx.y + blockIdx.y * blockDim.y;
	// this has discoalesced global memory store
	output[gx * N + gy] = input[gy * N + gx];
	// this has discoalesced global memore load
	// output[gy * N + gx] = input[gx * N + gy];
}
// Tiled transpose of an N x N matrix using shared memory so that both the
// global load and the global store are coalesced.
// Fix: the tile's inner dimension is padded by one element so the
// column-wise accesses sharedMemory[x][y] / [y][x] spread across the 32
// shared-memory banks instead of serializing on a single bank.
__global__ void matrix_transpose_shared(int *input, int *output) {
__shared__ int sharedMemory [BLOCK_SIZE] [BLOCK_SIZE + 1];
// global index
int indexX = threadIdx.x + blockIdx.x * blockDim.x;
int indexY = threadIdx.y + blockIdx.y * blockDim.y;
// transposed global memory index (block coordinates swapped)
int tindexX = threadIdx.x + blockIdx.y * blockDim.x;
int tindexY = threadIdx.y + blockIdx.x * blockDim.y;
// local index
int localIndexX = threadIdx.x;
int localIndexY = threadIdx.y;
int index = indexY * N + indexX;
int transposedIndex = tindexY * N + tindexX;
// reading from global memory in coalesed manner and performing tanspose in shared memory
sharedMemory[localIndexX][localIndexY] = input[index];
// the whole tile must be populated before any thread reads across it
__syncthreads();
// writing into global memory in coalesed fashion via transposed data in shared memory
output[transposedIndex] = sharedMemory[localIndexY][localIndexX];
}
//basically just fills the array with index.
void fill_array(int *data) {
for(int idx=0;idx<(N*N);idx++)
data[idx] = idx;
}
// Dumps matrix `a` and its transpose `b` to stdout, starting a new line at
// every row boundary (output bytes identical to the index-modulo version).
void print_output(int *a, int *b) {
	printf("\n Original Matrix::\n");
	for (int r = 0; r < N; ++r) {
		printf("\n");
		for (int c = 0; c < N; ++c)
			printf(" %d ", a[r * N + c]);
	}
	printf("\n Transposed Matrix::\n");
	for (int r = 0; r < N; ++r) {
		printf("\n");
		for (int c = 0; c < N; ++c)
			printf(" %d ", b[r * N + c]);
	}
}
// Demo driver: fills an N x N matrix, runs the naive transpose and then the
// shared-memory transpose over the same buffers, copies the final result
// back, and releases everything.  The verification prints stay disabled.
int main(void) {
	const int bytes = N * N * sizeof(int);
	// host buffers: src is initialized, dst receives the device result
	int *src = (int *)malloc(bytes);
	fill_array(src);
	int *dst = (int *)malloc(bytes);
	// device buffers
	int *d_src, *d_dst;
	cudaMalloc((void **)&d_src, bytes);
	cudaMalloc((void **)&d_dst, bytes);
	// Copy inputs to device
	cudaMemcpy(d_src, src, bytes, cudaMemcpyHostToDevice);
	cudaMemcpy(d_dst, dst, bytes, cudaMemcpyHostToDevice);
	dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE, 1);
	dim3 blocksPerGrid(N / BLOCK_SIZE, N / BLOCK_SIZE, 1);
	matrix_transpose_naive<<<blocksPerGrid, threadsPerBlock>>>(d_src, d_dst);
	// cudaMemcpy(dst, d_dst, bytes, cudaMemcpyDeviceToHost);
	// print_output(src, dst);
	matrix_transpose_shared<<<blocksPerGrid, threadsPerBlock>>>(d_src, d_dst);
	// Copy result back to host
	cudaMemcpy(dst, d_dst, bytes, cudaMemcpyDeviceToHost);
	// print_output(src, dst);
	free(src);
	free(dst);
	cudaFree(d_src);
	cudaFree(d_dst);
	return 0;
}
|
14,084 | // Author: Amandeep Kaur (2018014)
// As part of assignment 1 in CSE:560 GPU computing course
// Code adapted from given code file ahe_cpu.cpp provided by Prof. Ojaswa Sharma
#include <stdio.h>
#include <iostream>
#include <time.h>
#define TILE_SIZE_X 1024
#define TILE_SIZE_Y 1024
__constant__ unsigned char const_mappings[65536];
/*
 * Pass 1 of adaptive histogram equalization: accumulates a 256-bin
 * histogram (pdf) per TILE_SIZE_X x TILE_SIZE_Y image tile via global
 * atomics, then the thread sitting at each tile's bottom-right pixel
 * integrates the histogram into a CDF and derives that tile's 8-bit
 * equalization mapping.
 * NOTE(review): a tile (1024x1024) spans many 32x32 thread blocks, but
 * __syncthreads() only synchronizes within one block — the thread that
 * builds the CDF may run before other blocks have finished their
 * atomicAdds into the same tile's pdf.  Splitting the histogram and CDF
 * stages into two kernel launches would remove this race; confirm against
 * observed output.  The barrier also sits inside the boundary `if`, which
 * is undefined behaviour when width/height are not multiples of the block
 * size.
 */
__global__ void findEqualizationMappings(unsigned char* img_in, int width, int height, unsigned char *mappings, int *pdf, int *cdf)
{
int ntiles_x = (width / TILE_SIZE_X);
int ty = threadIdx.y;
int tx = threadIdx.x;
int by = blockIdx.y;
int bx = blockIdx.x;
int row = by * blockDim.y + ty;
int col = bx * blockDim.x + tx;
if (row<height && col<width){
int tile_i = (col)/TILE_SIZE_X; //0-indexed
int tile_j = (row)/TILE_SIZE_Y;
// base of this tile's 256-entry histogram / CDF / mapping slabs
int offset = 256*(tile_i + tile_j * ntiles_x);
atomicAdd(&pdf[offset + img_in[col+row*width]],1);
__syncthreads();
if ((row+1)%TILE_SIZE_Y==0 && (col + 1)%TILE_SIZE_X==0){ //one thread per tile (bottom-right pixel)
int cdf_min = TILE_SIZE_X*TILE_SIZE_Y+1; // minimum non-zero value
cdf[offset]=pdf[offset];
// running-sum integration of the histogram
for(int i=1; i< 256; i++)
cdf[offset+i] = cdf[offset+i-1] + pdf[offset+i];
// first non-zero CDF entry anchors the remapping
for(int i=0; i<256; i++)
if(cdf[offset+i] != 0) {cdf_min = cdf[offset+i]; break;}
// classic histogram-equalization formula, rounded into [0, 255]
for (int i=0;i<256;i++){
mappings[i + offset] = (unsigned char)round(255.0 * float(cdf[offset+i] - cdf_min)/float(TILE_SIZE_X*TILE_SIZE_Y - cdf_min));
}
}
}
}
/*
 * Pass 2 of adaptive histogram equalization: remaps every pixel through
 * the per-tile lookup tables staged in constant memory (const_mappings),
 * bilinearly interpolating between the four surrounding tile centers so
 * tile seams are not visible.  Pixels near the image border clamp to the
 * nearest valid tile.
 */
__global__ void performAdaptiveEqualization(unsigned char* img_in, unsigned char* img_out, int width, int height){
int ntiles_x = (width / TILE_SIZE_X);
int ntiles_y = (height / TILE_SIZE_Y);
int ty = threadIdx.y;
int tx = threadIdx.x;
int by = blockIdx.y;
int bx = blockIdx.x;
int row = by * blockDim.y + ty;
int col = bx * blockDim.x + tx;
if (row<height && col<width){
// FINDING TILE CENTERS FOR INTERPOLATION
// (i0,j0) is the tile whose center lies up-left of this pixel,
// (i1,j1) the one down-right; both clamped to the tile grid
int tile_i0, tile_j0, tile_i1, tile_j1;
tile_i0 = (col - TILE_SIZE_X/2) / TILE_SIZE_X;
if(tile_i0 < 0) tile_i0 = 0;
tile_j0 = (row - TILE_SIZE_Y/2) / TILE_SIZE_Y;
if(tile_j0 < 0) tile_j0 = 0;
tile_i1 = (col + TILE_SIZE_X/2) / TILE_SIZE_X;
if(tile_i1 >= ntiles_x) tile_i1 = ntiles_x - 1;
tile_j1 = (row + TILE_SIZE_Y/2) / TILE_SIZE_Y;
if(tile_j1 >= ntiles_y) tile_j1 = ntiles_y - 1;
// OFFSETS IN INTERMEDIATE ARRAYS CORRESPONDING TO TILE CENTERS
int offset00 = 256*(tile_i0 + tile_j0*ntiles_x);
int offset01 = 256*(tile_i0 + tile_j1*ntiles_x);
int offset10 = 256*(tile_i1 + tile_j0*ntiles_x);
int offset11 = 256*(tile_i1 + tile_j1*ntiles_x);
// the same input intensity remapped through each of the four tiles
unsigned char v00, v01, v10, v11;
v00 = const_mappings[img_in[col+row*width] + offset00];
v01 = const_mappings[img_in[col+row*width] + offset01];
v10 = const_mappings[img_in[col+row*width] + offset10];
v11 = const_mappings[img_in[col+row*width] + offset11];
// fractional position of the pixel between the two tile centers
float x_frac = float(col - tile_i0*TILE_SIZE_X - TILE_SIZE_X/2)/float(TILE_SIZE_X);
float y_frac = float(row - tile_j0*TILE_SIZE_Y - TILE_SIZE_Y/2)/float(TILE_SIZE_Y);
//PERFORMING BILINEAR INTERPOLATION
float v0 = v00*(1 - x_frac) + v10*x_frac;
float v1 = v01*(1 - x_frac) + v11*x_frac;
float v= v0*(1 - y_frac) + v1*y_frac;
// clamp to the valid 8-bit range before the narrowing cast
if (v < 0) v = 0;
if (v > 255) v = 255;
img_out[col+row*width] = (unsigned char)(v);
}
}
// Host orchestration for two-pass adaptive histogram equalization of an
// 8-bit grayscale image: pass 1 builds per-tile remapping LUTs, which are
// staged through constant memory for the interpolating pass 2.
// Fixes vs. original: the host staging buffer is typed unsigned char* (it
// was declared int* while being sized in bytes), it is now free()d after
// use, and the invalid cudaFree() of the __constant__ symbol
// const_mappings was removed — constant-memory symbols are not heap
// allocations and must not be passed to cudaFree.
extern "C" void run_sampleKernel(unsigned char* img_in, unsigned char* img_out, int width, int height)
{
int ntiles_x = (width / TILE_SIZE_X);
int ntiles_y = (height / TILE_SIZE_Y);
int ntiles = (ntiles_x * ntiles_y);
// INITIALIZING REQUIREMENTS (per-tile histogram and CDF scratch)
int *dpdf;
int *dcdf;
cudaMalloc((void**)&dpdf, 256*ntiles*sizeof(int));
cudaMalloc((void**)&dcdf, 256*ntiles*sizeof(int));
cudaMemset(dpdf, 0, 256*ntiles*sizeof(int));
unsigned char *dmappings;
cudaMalloc((void**)&dmappings, 256*ntiles*sizeof(unsigned char));
// WRITING INPUT IMAGE TO DEVICE MEMORY
unsigned char * dimg_in;
cudaMalloc((void**)&dimg_in, height*width*sizeof(unsigned char));
cudaMemcpy(dimg_in,img_in, height*width*sizeof(unsigned char),cudaMemcpyHostToDevice);
// INITIALIZING OUTPUT IMAGE SPACE IN DEVICE MEMORY
unsigned char * dimg_out;
cudaMalloc((void**)&dimg_out, height*width*sizeof(unsigned char));
// SETTING UP LAUNCH CONFIGURATION
// NOTE(review): this assumes height*width is a perfect square of a
// multiple of 32 (grid taken as sqrt of the block count) — confirm for
// non-square images.
dim3 grid,block;
block.x = 32;
block.y = 32;
int req = (height*width)/(32*32);
grid.x = pow(req,0.5);
grid.y = pow(req,0.5);
// TIMER
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
//STEP 1
findEqualizationMappings<<<grid,block>>>(dimg_in, width, height, dmappings, dpdf, dcdf);
// cudaDeviceSynchronize();
// COPYING MAPPINGS TO CONST_MAPPINGS (TO USE CONSTANT MEMORY)
unsigned char *mappings;
mappings = (unsigned char *)malloc(256*ntiles*sizeof(unsigned char));
cudaMemcpy(mappings, dmappings, 256*ntiles*sizeof(unsigned char),cudaMemcpyDeviceToHost);
cudaMemcpyToSymbol(const_mappings, mappings, ntiles*256*sizeof(unsigned char));
free(mappings);
//STEP 2
performAdaptiveEqualization<<<grid,block>>>(dimg_in, dimg_out, width, height);
cudaDeviceSynchronize();
cudaEventRecord(stop);
// WRITING OUTPUT IMAGE TO HOST MEMORY
cudaMemcpy(img_out, dimg_out, height*width*sizeof(unsigned char),cudaMemcpyDeviceToHost);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
std::cout << "Time taken : " << milliseconds << std::endl;
// CLEANUP
cudaFree(dpdf);
cudaFree(dcdf);
cudaFree(dmappings);
cudaFree(dimg_out);
cudaFree(dimg_in);
}
|
14,085 | #include "includes.h"
// Final-warp reduction: folds S[tx+32 .. tx+1] into S[tx], assuming it is
// invoked only by threads 0..31 of the block (the caller guards on
// tx < WARP_SIZE) and that S holds at least 64 valid partials.
// Fix: the original used __syncthreads() here, which is undefined
// behaviour inside divergent control flow (only one warp reaches it).
// __syncwarp() is the correct warp-scope barrier and also orders the
// shared-memory accesses between the steps (Volta+ independent thread
// scheduling makes the barrier mandatory).
__device__ void warp_reduce(float* S,int tx){
S[tx] += S[tx + 32]; __syncwarp();
S[tx] += S[tx + 16]; __syncwarp();
S[tx] += S[tx + 8]; __syncwarp();
S[tx] += S[tx + 4]; __syncwarp();
S[tx] += S[tx + 2]; __syncwarp();
S[tx] += S[tx + 1]; __syncwarp();
}
/*
 * Block-level sum reduction (the classic "first add during load" variant).
 * Each block consumes 2*BX consecutive input elements: every thread adds a
 * pair on entry, the compile-time-unrolled tree halves the active thread
 * count per step, and the last 32 partials are folded by warp_reduce().
 * Thread 0 writes the block's sum to out[blockIdx.x].
 * Assumptions (not checked): blockDim.x == BX, and the launch covers
 * exactly n elements — the `n` parameter is otherwise unused, so a
 * partial final block would read past the end of `in`.
 */
__global__ void reduce_v5(float* in,float* out, int n){
int tx = threadIdx.x;
int bx = blockIdx.x;
// each block starts 2*BX elements into the input
int i = bx*(BX*2)+tx;
__shared__ float S[BX]; //Want to have only BX amount of shared mem which is THREAD_MAX in previous
S[tx] = in[i] + in[i+BX]; //Increased part thread activity at start and start only half the threads
__syncthreads();
// BX is a compile-time constant, so these branches are uniform across the
// block and the barriers are reached by all threads
if(BX >= 1024){ // Max threads for block in my gpu is 1024
if(tx < 512)
S[tx] += S[tx+512];
__syncthreads();
}
if(BX >= 512){
if(tx < 256)
S[tx] += S[tx+256];
__syncthreads();
}
if(BX >= 256){
if(tx < 128)
S[tx] += S[tx+128];
__syncthreads();
}
if(BX >= 128){
if(tx < 64)
S[tx] += S[tx+64];
__syncthreads();
}
if(tx < WARP_SIZE) { //WARP_SIZE is 32
warp_reduce(S,tx); //Unroaling the last warp
}
if(tx==0)
out[bx] = S[0];
}
14,086 | // Filename: csort.cu
// nvcc -c -arch sm_13 csort.cu
#include <thrust/device_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
extern "C" {
//Sort for integer arrays
//Sorts a device-resident int array of length N ascending, in place.
void sort_int_wrapper( int *data, int N)
{
	// Adopt the raw device pointer into Thrust's typed pointer wrapper
	thrust::device_ptr<int> begin = thrust::device_pointer_cast(data);
	// Hand the [begin, begin+N) range to Thrust's sort
	thrust::sort(begin, begin + N);
}
//Sort for float arrays
//Sorts a device-resident float array of length N ascending, in place.
void sort_float_wrapper( float *data, int N)
{
	thrust::device_ptr<float> begin = thrust::device_pointer_cast(data);
	thrust::sort(begin, begin + N);
}
//Sort for double arrays
//Sorts a device-resident double array of length N ascending, in place.
void sort_double_wrapper( double *data, int N)
{
	thrust::device_ptr<double> begin = thrust::device_pointer_cast(data);
	thrust::sort(begin, begin + N);
}
}
|
14,087 | #include<bits/stdc++.h>
using namespace std;
#define pi (2.0*acos(0.0))
#define eps 1e-6
#define ll long long
#define inf (1<<29)
#define vi vector<int>
#define vll vector<ll>
#define sc(x) scanf("%d",&x)
#define scl(x) scanf("%lld",&x)
#define all(v) v.begin() , v.end()
#define me(a,val) memset( a , val ,sizeof(a) )
#define pb(x) push_back(x)
#define pii pair<int,int>
#define mp(a,b) make_pair(a,b)
#define Q(x) (x) * (x)
#define L(x) ((x<<1) + 1)
#define R(x) ((x<<1) + 2)
#define M(x,y) ((x+y)>>1)
#define fi first
#define se second
#define MOD 1000000007
#define ios ios::sync_with_stdio(0)
#define N 1024
#define BL 32
// C = A * B for N x N row-major integer matrices; one thread per output
// element.  No bounds guard: the launch grid must cover N x N exactly.
__global__ void MatrixMultiplication(int *A,int *B,int *C){
	int row = threadIdx.y + blockIdx.y * blockDim.y;
	int col = threadIdx.x + blockIdx.x * blockDim.x;
	int acc = 0;
	for(int t = 0 ; t < N ; t++)
		acc += A[ row * N + t ] * B[ t * N + col ];
	C[ row * N + col ] = acc;
}
// Driver: builds two N x N identity matrices, multiplies them on the GPU,
// and prints the top-left 10x10 corner of one operand and of the result
// (for identity inputs the product is again the identity).
int main(){
int *a , *b , *c;
size_t size = N * N * sizeof(int) ;
a = (int *)malloc( size );
b = (int *)malloc( size );
c = (int *)malloc( size );
// identity matrices: 1 on the diagonal, 0 elsewhere
for(int i = 0 ; i < N ; i++)
for(int j = 0 ; j < N ; j++)
a[ i * N + j ] = (i==j) , b[ i * N + j ] = (i==j);
int *A , *B , *C;
cudaMalloc( &A , size );
cudaMalloc( &B , size );
cudaMalloc( &C , size );
dim3 block( BL , BL );
dim3 grid( N / block.x , N / block.y );
cout<<"BL: "<<BL<<" other: "<<N / block.y<<endl;
// preview of the first operand
for(int i = 0 ; i < 10 ; i++){
for(int j = 0 ; j < 10 ; j++)
cout << a[ i * N + j ] << " ";
cout << "\n";
}
cudaMemcpy( A , a , size , cudaMemcpyHostToDevice );
cudaMemcpy( B , b , size , cudaMemcpyHostToDevice );
MatrixMultiplication<<< grid , block >>>( A , B , C );
// blocking copy-back also synchronizes with the kernel
cudaMemcpy( c , C , size , cudaMemcpyDeviceToHost );
// preview of the result
for(int i = 0 ; i < 10 ; i++){
for(int j = 0 ; j < 10 ; j++)
cout << c[ i * N + j ] << " ";
cout << "\n";
}
cudaFree(A);
cudaFree(B);
cudaFree(C);
// Fix: the host buffers were never released (memory leak).
free(a);
free(b);
free(c);
return 0;
}
|
14,088 | #include <iostream>
#include <cstdio>
// typedef float pfx_dtype ;
// Rounds v up to the next power of two using the classic 32-bit
// bit-smearing trick (identity for exact powers of two; nextpow2(0) == 0).
int nextpow2(int v)
{
    v--;
    // propagate the highest set bit into every lower position
    for (int shift = 1; shift < 32; shift <<= 1)
        v |= v >> shift;
    return v + 1;
}
// Device-side twin of nextpow2(): rounds v up to the next power of two
// (identity for exact powers of two; dnextpow2(0) == 0).
__device__ int dnextpow2(int v)
{
    v--;
    // propagate the highest set bit into every lower position
    for (int shift = 1; shift < 32; shift <<= 1)
        v |= v >> shift;
    return v + 1;
}
typedef int pfx_dtype ;
/*
 * Single-block Blelloch scan producing an INCLUSIVE prefix sum: the
 * exclusive scan is built in shared memory (up-sweep then down-sweep),
 * and inArr[i] is added back when writing outArr[i].
 * n is rounded up to the next power of two inside the kernel; the padding
 * slots are zero-filled so they do not affect the sums.
 * Launch contract: one block, at least ceil(n/2) threads (each thread owns
 * elements 2*tid and 2*tid+1), and dynamic shared memory of at least
 * dnextpow2(n) * sizeof(pfx_dtype).  The printf calls are debug output.
 */
__global__ void prescan(pfx_dtype *outArr, pfx_dtype *inArr, int n)
{
extern __shared__ pfx_dtype temp[];
int n_original = n;
// round the working size up to a power of two for the balanced tree
n = (n & (n - 1)) == 0? n: dnextpow2(n);
int thread_id = threadIdx.x;
int offset = 1;
// load the pair owned by this thread, zero-padding past n_original
if(2*thread_id < n_original)
temp[2*thread_id] = inArr[2*thread_id];
else
temp[2*thread_id] =0;
if(2*thread_id+1 <n_original)
temp[2*thread_id+1] = inArr[2*thread_id+1];
else
temp[2*thread_id+1] =0;
// up-sweep (reduce) phase: build partial sums up the tree
for (int d = n>>1; d > 0; d >>= 1)
{
__syncthreads();
if (thread_id < d)
{
int ai = offset*(2*thread_id+1)-1;
int bi = offset*(2*thread_id+2)-1;
temp[bi] += temp[ai];
}
offset *= 2;
}
// clear the root so the down-sweep yields an exclusive scan
if (thread_id == 0) { temp[n - 1] = 0; }
// down-sweep phase: push prefixes back down the tree
for (int d = 1; d < n; d *= 2)
{
offset >>= 1;
__syncthreads();
if (thread_id < d)
{
int ai = offset*(2*thread_id+1)-1;
int bi = offset*(2*thread_id+2)-1;
pfx_dtype t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
// exclusive + input = inclusive scan
if(2*thread_id < n_original)
outArr[2*thread_id] = temp[2*thread_id]+ inArr[2*thread_id]; // write results to device memory
if(2*thread_id+1 < n_original)
outArr[2*thread_id+1] = temp[2*thread_id+1]+ inArr[2*thread_id+1];
__syncthreads();
// debug output of the final scan values
if(2*thread_id < n_original)
printf("xA[%d] = %d \n",2*thread_id , outArr[2*thread_id]);
if(2*thread_id+1 < n_original)
printf("xA[%d] = %d \n",2*thread_id+1 , outArr[2*thread_id+1]);
__syncthreads();
}
#define SELF_TEST
#ifdef SELF_TEST
#include <iostream>
#include "cub/cub.cuh"
#define THREAD_BLOCK_SIZE 8
// __global__
// void cub_scan_test(int N)
// {
// int thread_id = threadIdx.x;
// typedef cub::BlockScan<int, THREAD_BLOCK_SIZE > BlockScan; /*1D int data type*/
// __shared__ typename BlockScan::TempStorage temp_storage; /*storage temp*/
// extern __shared__ int* IndirectJ1;
// extern __shared__ int* IndirectJ2= IndirectJ1+ N*sizeof(int);
// if (thread_id < N)
// {
// IndirectJ1[thread_id] = 2*thread_id +1;
// }
// __syncthreads();
// if (thread_id < THREAD_BLOCK_SIZE)
// BlockScan(temp_storage).InclusiveSum (IndirectJ1[thread_id], IndirectJ2[thread_id]);
// if (thread_id < THREAD_BLOCK_SIZE)
// printf("%d %d\n", thread_id, IndirectJ2[thread_id]);
// }
// extern __shared__
// #define THREAD_BLOCK_SIZE 7
// Fills A[0..n-1] with the odd numbers 2*i+1, one thread per element, and
// echoes each stored value for debugging.
// Fix: the debug printf is now inside the bounds guard — the original
// printed A[threadId] for every thread in the block, reading past the end
// of A whenever blockDim.x > n.
__global__ void initData(pfx_dtype* A, int n)
{
int threadId = threadIdx.x;
if(threadId<n)
{
A[threadId] = 2*threadId+1;
printf("A[%d] = %d \n",threadId,A[threadId]);
}
}
// Self-test driver: takes the element count from argv[1], initializes an
// n-element device array of odd numbers, and runs the single-block
// inclusive-scan kernel over it.
int main(int argc, char* argv[])
{
if(argc<2)
{
std::cout<<"Error with number of arguments\n";
return -1;
}
int N = atoi(argv[1]);
int N2=N;
if((N & (N - 1)) == 0)
{
std::cout<<"Power of Two\n";
}
else
{
std::cout<<"Not a power of Two\n";
N2 = nextpow2(N);
std::cout<<"Using "<<N2<<"\n";
}
pfx_dtype *A, *xA;
cudaMalloc(&A, sizeof(pfx_dtype)*N);
cudaMalloc(&xA, sizeof(pfx_dtype)*N);
initData<<< 1,THREAD_BLOCK_SIZE >>> (A,N);
if(cudaDeviceSynchronize() != cudaSuccess)
std::cout<<"Error- 0\n";
// NOTE(review): the scan is launched twice over the same buffers; the
// second launch simply recomputes and overwrites the same result.  This
// looks like leftover experimentation — confirm whether both are needed.
prescan<<< 1,(N+1)/2,2*N*sizeof(pfx_dtype) >>> (xA, A, N);
prescan<<< 1,N2,2*N*sizeof(pfx_dtype) >>> (xA, A, N);
if(cudaDeviceSynchronize() != cudaSuccess)
std::cout<<".....EXITING\n";
else
std::cout<<"No errors reported\n";
// Fix: release the device buffers (previously leaked).
cudaFree(A);
cudaFree(xA);
return 0;
}
#endif |
14,089 | #include"stdio.h"
#include <string.h>
#include <stdlib.h>
#include <time.h>
#include"math.h"
#include <ctype.h>
/* Euclidean distance calculation */
/* Euclidean distance lookup: returns the precomputed distance between
 * cities x and y from the row-major N x N table dt. */
__host__ __device__ long distD(int x,int y,int N,long*dt)
{
return dt[x * N + y];
}
/*A kenel function that finds a minimal weighted neighbor using TPN mapping strategy*/
__global__ void tsp(int *rt,long cost,unsigned long long *dst_tid,long cit,long *dt,long sol)
{
long i,j;
long change=0;
long id=threadIdx.x+blockIdx.x*blockDim.x;
if(id<sol)
{
i=cit-2-floorf(((int)__dsqrt_rn(8*(sol-id-1)+1)-1)/2);
j=id-i*(cit-1)+(i*(i+1)/2)+1;
change=distD(rt[i],rt[j],cit,dt)+distD(rt[(i+1)%cit],rt[(j+1)%cit],cit,dt)
-distD(rt[i],rt[(i+1)%cit],cit,dt)-distD(rt[j],rt[(j+1)%cit],cit,dt);
cost+=change;
if(change < 0)
atomicMin(dst_tid, ((unsigned long long)cost << 32) | id);
}
}
/* At each IHC steps, XY coordinates are arranged using next initial solution's order*/
void twoOpt(int x,int y,int *route,int city)
{
int *tmp_r;
int i,j;
tmp_r=(int*)malloc(sizeof(int)*(y-x));
for(j=0,i=y;i>x;i--,j++)
{
tmp_r[j]=route[i];
}
for(j=0,i=x+1;i<=y;i++,j++)
{
route[i]=tmp_r[j];
}
free(tmp_r);
}
/*
 * Iterated-hill-climbing 2-opt TSP solver.  Reads a TSPLIB-style file
 * (argv[1]), precomputes the full distance matrix, builds an initial tour
 * with nearest-neighbour, then repeatedly launches the tsp<<<>>> kernel to
 * find the best improving 2-opt move and applies it on the host until no
 * move improves the tour.
 */
int main(int argc, char *argv[])
{
int ch, cnt, in1;
float in2, in3;
FILE *f;
float *posx, *posy;
char str[256];
long dst,d,tid,x,y, cities;
unsigned long long *d_dst_tid;
int blk,thrd;
clock_t start,end;
long sol;
int *r,i,j;
// ---- parse the TSPLIB header: skip 3 lines, read DIMENSION after ':' ----
f = fopen(argv[1], "r");
if (f == NULL) {fprintf(stderr, "could not open file \n"); exit(-1);}
ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f);
ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f);
ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f);
ch = getc(f); while ((ch != EOF) && (ch != ':')) ch = getc(f);
fscanf(f, "%s\n", str);
cities = atoi(str);
/* NOTE(review): `cities` is long but several fprintf/printf below use %d
 * for it — works on LP32/ILP32, undefined on LP64; should be %ld. */
if (cities <= 2) {fprintf(stderr, "only %d cities\n", cities); exit(-1);}
// number of distinct 2-opt moves = number of city pairs
sol=cities*(cities-1)/2;
posx = (float *)malloc(sizeof(float) * cities); if (posx == NULL) {fprintf(stderr, "cannot allocate posx\n"); exit(-1);}
posy = (float *)malloc(sizeof(float) * cities); if (posy == NULL) {fprintf(stderr, "cannot allocate posy\n"); exit(-1);}
/* NOTE(review): copy-paste — this checks posy instead of r for NULL. */
r = (int *)malloc(sizeof(int) * cities); if (posy == NULL) {fprintf(stderr, "cannot allocate posy\n"); exit(-1);}
ch = getc(f); while ((ch != EOF) && (ch != '\n')) ch = getc(f);
fscanf(f, "%s\n", str);
if (strcmp(str, "NODE_COORD_SECTION") != 0) {fprintf(stderr, "wrong file format\n"); exit(-1);}
// ---- read the "index x y" coordinate lines ----
cnt = 0;
while (fscanf(f, "%d %f %f\n", &in1, &in2, &in3))
{
posx[cnt] = in2;
posy[cnt] = in3;
cnt++;
if (cnt > cities) {fprintf(stderr, "input too long\n"); exit(-1);}
if (cnt != in1) {fprintf(stderr, "input line mismatch: expected %d instead of %d\n", cnt, in1); exit(-1);}
}
if (cnt != cities) {fprintf(stderr, "read %d instead of %d cities\n", cnt, cities); exit(-1);}
fscanf(f, "%s", str);
if (strcmp(str, "EOF") != 0) {fprintf(stderr, "didn't see 'EOF' at end of file\n"); exit(-1);}
fflush(f);
fclose(f);
/*Distance matrix */
// full (symmetric) matrix of rounded Euclidean distances
long *dist_mat;
dist_mat = (long *)malloc(sizeof(long) * (cities*cities));
for (int i = 0; i < cities; ++i)
{
for (int j = 0; j < cities; ++j)
{
dist_mat[i*cities+j] = sqrtf(pow(posx[i] - posx[j], 2)
+powf(posy[i] - posy[j], 2));
}
}
/*CUDA threads and blocks configuration*/
// one thread per candidate 2-opt move
if(sol < 1024)
{
blk = 1;
thrd = cities;
}
else
{
blk=(sol-1)/1024+1;
thrd=1024;
}
/*Initial solution construction using NN approach*/
// greedy nearest-neighbour tour starting at city 0; v[] marks visits
r[0]=0;
int k=1;i=0;float min;int minj,mini,count=1,flag=0;dst=0;
int *v=(int*)calloc(cities,sizeof(int));
v[0]=1;
while(count!=cities)
{
flag=0;
// seed `min` with the first unvisited candidate...
for(j=1;j<cities;j++)
{
if(i!=j && !v[j])
{
int id;
if(i>j)
{id=j*cities+i;}
else{id=i*cities+j;}
min=dist_mat[id];
minj=j;
break;
}
}
// ...then scan the rest for anything closer
for(j=minj+1;j<cities;j++)
{
if( !v[j])
{
int id;
if(i>j)
id=j*cities+i;
else
id=i*cities+j;
if(min>dist_mat[id])
{
min=dist_mat[id];
mini=j;
flag=1;
}
}
}
if(flag==0)
i=minj;
else
i=mini;
dst+=min;
r[k++]=i;v[i]=1;
count++;
}
free(v);
/* NOTE(review): the closing leg should be the distance from the last city
 * back to the start, i.e. dist_mat[r[cities-1]*cities + r[0]]; indexing
 * with r[cities-1] alone reads row 0's entry for that city instead. */
dst+=dist_mat[r[cities-1]];
count=1;
start = clock();
/* NOTE(review): strt/stp are created but cudaEventRecord is never called,
 * so the cudaEventElapsedTime below cannot yield a valid measurement. */
cudaEvent_t strt, stp;
cudaEventCreate(&strt);
cudaEventCreate(&stp);
// packed best-move key: (cost+1 in the high 32 bits) | all-ones id,
// so any improving move's key compares smaller under atomicMin
unsigned long long dst_tid = (((long)dst+1) << 32) -1;
unsigned long long dtid;
int *d_r;
long *d_mt;
printf("cities : %ld\ninitial cost : %ld\n",cities,dst);
if(cudaSuccess!=cudaMalloc((void**)&d_dst_tid,sizeof(unsigned long long)))printf("\nAllocating memory for dst_tid on GPU");
if(cudaSuccess!=cudaMemcpy(d_dst_tid,&dst_tid,sizeof(unsigned long long),cudaMemcpyHostToDevice))printf("\ntransfer on GPU");
if(cudaSuccess!=cudaMalloc((void**)&d_mt,sizeof(long)*(cities*cities)))printf("\nAllocating memory for thread id on GPU");
if(cudaSuccess!=cudaMalloc((void**)&d_r,sizeof(int)*cities))printf("\nAllocating memory for thread id on GPU");
if(cudaSuccess!=cudaMemcpy(d_mt,dist_mat,sizeof(long)*(cities*cities),cudaMemcpyHostToDevice))printf("\ntransfer on GPU 1");
if(cudaSuccess!=cudaMemcpy(d_r,r,sizeof(int)*cities,cudaMemcpyHostToDevice))printf("\ntransfer on GPU 1");
// first evaluation of all 2-opt moves
tsp<<<blk,thrd>>>(d_r,dst,d_dst_tid,cities,d_mt,sol);
if(cudaSuccess!=cudaMemcpy(&dtid,d_dst_tid,sizeof(unsigned long long),cudaMemcpyDeviceToHost))
printf("\nCan't transfer minimal cost back to CPU");
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, strt, stp);
// unpack the winning cost from the high 32 bits
d = dtid >> 32;
printf("\nfirst cost found %ld",d);
// hill-climbing loop: apply the winning move, re-evaluate, repeat while
// the best neighbour improves the tour
while( d < dst )
{
dst=d;
tid = dtid & ((1ull<<32)-1);
// unrank the winning thread id back into the (x, y) segment bounds
x=cities-2-floor((sqrt(8*(sol-tid-1)+1)-1)/2);
y=tid-x*(cities-1)+(x*(x+1)/2)+1;
twoOpt(x,y,r,cities);
unsigned long long dst_tid = (((long)dst+1) << 32) -1;
if(cudaSuccess!=cudaMemcpy(d_r,r,sizeof(int)*cities,cudaMemcpyHostToDevice))printf("\ntransfer on GPU 1");
if(cudaSuccess!=cudaMemcpy(d_dst_tid,&dst_tid,sizeof(unsigned long long),cudaMemcpyHostToDevice))
printf("\ntransfer on GPU");
tsp<<<blk,thrd>>>(d_r,dst,d_dst_tid,cities,d_mt,sol);
if(cudaSuccess!=cudaMemcpy(&dtid,d_dst_tid,sizeof(unsigned long long),cudaMemcpyDeviceToHost))
printf("\nCan't transfer minimal cost back to CPU");
d = dtid >> 32;
count++;
}
printf("\nMinimal Distance : %ld\n",d);
printf("\nnumber of time climbed %d\n",count);
end = clock();
double t=((double) (end - start)) / CLOCKS_PER_SEC;
printf("\ntime : %f\n",t);
cudaFree(d_r);
cudaFree(d_mt);
cudaFree(d_dst_tid);
free(posx);
free(posy);
free(r);
return 0;
}
|
14,090 | #include "includes.h"
// Naive dense n x n matrix multiply: C = A * B (row-major, square matrices).
// Launch with a 2D grid/block so blockIdx*blockDim+threadIdx covers n x n;
// out-of-range threads exit via the bounds check.
__global__ void matrixMul(int *A, int *B, int *C, int n)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < n && col < n)
    {
        // Accumulate in 64-bit; cast one operand BEFORE the multiply so the
        // product itself is computed in 64-bit (the original multiplied two
        // ints and could overflow before being widened to long).
        long Cvalue = 0;
        for (int i = 0; i < n; i++)
        {
            Cvalue += (long)A[row * n + i] * B[i * n + col];
        }
        // Result is truncated back to int on store (C is an int matrix).
        C[row * n + col] = (int)Cvalue;
    }
}
14,091 | #include <cuda.h>
#include <cuda_runtime.h>
#include <curand.h>
#include <curand_kernel.h>
#include <device_launch_parameters.h>
#include <device_functions.h>
#include <time.h>
#include <stdio.h>
#include <float.h>
extern "C"
{
struct Point {
float x, y;
int id;
__device__ bool equal(Point &p) {
return p.x==x && p.y==y;
}
};
struct Line {
Point p1;
Point p2;
};
struct Polygon {
Line *lines;
int lines_count;
};
enum {LEFT, RIGHT, BEYOND, BEHIND, BETWEEN, ORIGIN, DESTINATION};
enum { INSIDE, OUTSIDE, BOUNDARY };
enum { TOUCHING, CROSSING, INESSENTIAL };
__device__ float gr_viz[100000000];//матрица смежности графа видимости размером point_len x point_len
__device__ float gr_viz1[100000000];
__device__ float distance[10000];
__device__ bool visited[10000];
__device__ bool mVisited[10000];
__device__ float uDistance[10000];
__device__ float cDistance[10000];
__device__ Point *points;
__device__ Polygon *polygons;
__device__ int points_count;
__device__ int polygons_count;
__device__ int dimension;
__device__ int k;
__device__ __constant__ float MAX_VALUE = 1000000;
__device__ __inline__ int pointInPolygon(Point &a);
__device__ __inline__ bool isInsidePolygons2(float x1, float y1);
__device__ __inline__ bool isInsidePolygons(Point &p);
__device__ __inline__ bool isVisibleLine3(Point &p1, Point &p2);
__device__ __inline__ bool isVisibleLine2(float &x1, float &y1, float &x2, float &y2);
__device__ __inline__ bool isVisibleLine(Point &p1, Point &p2);
__device__ __inline__ float evklid2(float x1, float y1, float x2, float y2);
__device__ __inline__ float evklid(Point &p1, Point &p2);
__device__ __inline__ bool intersect2(float &x1, float &y1, float &x2, float &y2,
float &x3, float &y3, float &x4, float &y4);
__device__ bool intersect(Point &a, Point &b, Point &c, Point &d);
// Frees the device-side buffers allocated by init().
// Fix: init() allocates one `lines` array per polygon, but the original
// freed only polygons[0].lines and leaked the rest; free all of them.
__global__ void deinit() {
    for (int i = 0; i < polygons_count; i++)
        delete []polygons[i].lines;
    delete []polygons;
    delete []points;
}
// Device-side initialization: builds the global point and polygon tables
// from flat host-provided arrays and resets all per-vertex search state.
// No thread indexing is used, so this is meant to run as a single thread.
//   new_x/new_y : point coordinates, count[0] entries
//   count       : number of points
//   p           : cumulative end index of each polygon's point range
//   p_len       : number of polygons
//   d           : grid dimension used by the spm kernels
__global__ void init(float *new_x, float *new_y, int *count,
int *p, int *p_len, int *d) {
k = 0;
dimension = d[0];
points_count = count[0];
points = new Point[points_count];
polygons_count = p_len[0];
polygons = new Polygon[polygons_count];
// Copy coordinates and reset distances/flags for every vertex.
for (int i = 0; i < points_count; i++) {
points[i].x = new_x[i];
points[i].y = new_y[i];
points[i].id = i;
distance[i] = MAX_VALUE;
uDistance[i] = MAX_VALUE;
cDistance[i] = MAX_VALUE;
visited[i] = false;
mVisited[i] = false;
}
// The last point is the source vertex for the distance computations
// (see dijkstra(), which also starts from points_count-1).
uDistance[points_count-1] = 0;
cDistance[points_count-1] = 0;
mVisited[points_count-1] = true;
// Build each polygon's edge list; p[] holds cumulative point counts, so
// polygon i owns points [prev, p[i]) and its edges wrap around via % size.
int prev = 0;
for (int i = 0; i < p_len[0]; i++) {
int cur = p[i];
int size = cur - prev;
Line *lines = new Line[size];
for (int j = 0; j < size; j++) {
lines[j].p1 = points[prev + j];
lines[j].p2 = points[prev + (j + 1) % size];
//printf(" line %d, %d\n", prev + j, prev + (j + 1) % size);
}
polygons[i].lines = lines;
polygons[i].lines_count = size;
prev = p[i];
}
}
// Copies the visibility matrix gr_viz into a host-visible buffer `graph`.
// Sequential copy (no thread indexing is used).
__global__ void get_graph_viz(float *graph)
{
    int total = points_count * points_count;
    for (int idx = 0; idx < total; ++idx)
        graph[idx] = gr_viz[idx];
}
// Fills one cell (and its transpose) of the visibility-graph adjacency
// matrices gr_viz/gr_viz1. 2D launch: thread (i,j) handles vertex pair
// (i,j); only the i >= j half computes and writes both (i,j) and (j,i).
// Entries hold the Euclidean distance when the pair is mutually visible
// (or adjacent by id), and -1.0 when blocked.
__global__ void graph_viz()
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int j = blockIdx.y * blockDim.y + threadIdx.y;
int ind = i*points_count + j;
int ind1 = j*points_count + i;
if (i<j||ind >= points_count*points_count) {
//printf("return %d, %d\n", i, j);
return;
}
Point p1 = points[i];
Point p2 = points[j];
// Midpoint of the pair; computed but not used below (left over from the
// commented-out visibility condition at the end of this kernel).
Point p;// = new Point();
p.x = (p2.x + p1.x) / 2.0;
p.y = (p2.y + p1.y) / 2.0;
if (abs(p1.id - p2.id) <= 1) {
// Consecutive ids (polygon edge neighbours) are always connected.
gr_viz[ind1] = gr_viz1[ind1] =
gr_viz[ind] = gr_viz1[ind] = evklid(p1, p2);
} else if (isVisibleLine3(p1, p2)) {
gr_viz[ind1] = gr_viz1[ind1] =
gr_viz[ind] = gr_viz1[ind] = evklid(p1, p2);
} else {
// -1.0 marks "no edge" here (graph_viz1 uses MAX_VALUE instead).
gr_viz[ind1] = gr_viz1[ind1] =
gr_viz[ind] = gr_viz1[ind] = -1.0;
}
//&& isInsidePolygons2((p2.x + p1.x) / 2.0, (p2.y + p1.y) / 2.0) && isInsidePolygons2((p2.x + 0.2*p1.x) / (1.0+0.2), (p2.y + 0.2*p1.y) / (1+0.2))
//delete p;
}
// Sequential (single-thread) variant of graph_viz: fills the whole
// visibility matrix in one kernel. Unreachable pairs are stored as
// MAX_VALUE here rather than the -1.0 used by graph_viz.
__global__ void graph_viz1()
{
for (int i = 0; i < points_count; i++)
for (int j = 0; j < points_count; j++) {
int ind = i*points_count + j;
int ind1 = j*points_count + i;
if (i<j||ind >= points_count*points_count) {
//printf("return %d, %d\n", i, j);
continue;
}
Point p1 = points[i];
Point p2 = points[j];
// Midpoint; computed but unused (mirrors graph_viz).
Point p;// = new Point();
p.x = (p2.x + p1.x) / 2.0;
p.y = (p2.y + p1.y) / 2.0;
if (abs(p1.id - p2.id) <= 1) {
gr_viz[ind1] = gr_viz1[ind1] =
gr_viz[ind] = gr_viz1[ind] = evklid(p1, p2);
} else if (isVisibleLine3(p1, p2)) {
gr_viz[ind1] = gr_viz1[ind1] =
gr_viz[ind] = gr_viz1[ind] = evklid(p1, p2);
} else {
gr_viz[ind1] = gr_viz1[ind1] =
gr_viz[ind] = gr_viz1[ind] = MAX_VALUE;
}
}
//&& isInsidePolygons2((p2.x + p1.x) / 2.0, (p2.y + p1.y) / 2.0) && isInsidePolygons2((p2.x + 0.2*p1.x) / (1.0+0.2), (p2.y + 0.2*p1.y) / (1+0.2))
//delete p;
}
// One relaxation sweep of the parallel shortest-path iteration: each thread
// owns vertex i and, if i was marked updated in the previous sweep
// (mVisited[i]), tries to improve its tentative bound uDistance[i] via every
// neighbour j reachable in the visibility graph (gr_viz[i][j] > 0).
// 1D launch covering at least points_count threads.
__global__ void dijkstra1() {
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= points_count)
        return;
    if (mVisited[i]) {
        mVisited[i] = false;
        for (int j = 0; j < points_count; j++) {
            // (the original re-checked j < points_count inside the loop;
            // that test is implied by the loop condition and was removed)
            int index = i*points_count+j;
            if (gr_viz[index] > 0.0) {
                if (uDistance[i] > cDistance[j] + gr_viz[index])
                    uDistance[i] = cDistance[j] + gr_viz[index];
            }
        }
    }
}
// Second phase of a relaxation sweep: commits the freshly relaxed bound
// uDistance into the current distance cDistance and marks vertices whose
// distance improved (mVisited) so dijkstra1() revisits their neighbours.
__global__ void dijkstra2() {
    unsigned int ind = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard against excess threads in the last block (the original indexed
    // the per-vertex arrays unconditionally).
    if (ind >= (unsigned int)points_count)
        return;
    if (cDistance[ind] >= uDistance[ind]) {
        cDistance[ind] = uDistance[ind];
        mVisited[ind] = true;
    }
    uDistance[ind] = cDistance[ind];
}
// Convergence probe: sets check[0] to 1 if any vertex still has its relaxed
// bound uDistance above the reference distance (iteration not finished),
// 0 otherwise. Sequential scan (no thread indexing is used).
__global__ void isEmpty(float *check) {
    check[0] = 0;
    int i = 0;
    while (i < points_count) {
        if (uDistance[i] > distance[i]) {
            check[0] = 1;
            return;
        }
        ++i;
    }
}
// Debug dump: for every vertex, prints the difference between the relaxed
// bound and the reference distance from the source (points_count-1).
__global__ void dPrint() {
    printf("\nСтоимость пути из начальной вершины до остальных: \n");
    int src = points_count - 1;
    for (int idx = 0; idx < points_count; ++idx) {
        printf("%d > %d = %f\n", src, idx, uDistance[idx] - distance[idx]);
    }
}
// Classic sequential Dijkstra from source st = points_count-1 over the
// visibility graph gr_viz (entries <= 0.1 are treated as absent edges).
// Fills index_from[] with each vertex's predecessor and distances[] /
// distance[] with shortest-path costs, then prints the results.
// Single-thread kernel (no thread indexing is used).
__global__ void dijkstra(int *index_from, float *distances) {
    int st = points_count - 1, index, u;
    float min;
    index_from[st] = st;
    distances[st] = distance[st] = 0;
    for (int i = 0; i < points_count - 1; i++) {
        // Pick the unvisited vertex with the smallest tentative distance.
        min = MAX_VALUE;
        for (int j = 0; j < points_count; j++)
            if (!visited[j] && distance[j] <= min) {
                min = distance[j];
                index = j;
            }
        u = index;
        visited[u] = true;
        // Relax every edge leaving u.
        for (int j = 0; j < points_count; j++)
            if (!visited[j] && gr_viz[u*points_count+j]>0.1 && distance[u]<MAX_VALUE && (distance[u]+gr_viz[u*points_count+j] < distance[j])) {
                index_from[j] = u;
                distances[j] = distance[j] = distance[u] + gr_viz[u*points_count+j];
            }
    }
    printf("\nСтоимость пути из начальной вершины до остальных: \n");
    for (int i=0; i<points_count; i++)
        // Bug fix: the original tested `>= 0 || < MAX_VALUE`, which is always
        // true, so the "unreachable route" branch below could never print.
        if (distance[i] >= 0 && distance[i] < MAX_VALUE)
            printf("%d > %d = %f\n", st, i, distance[i]);
        else
            printf("%d > %d = маршрут не доступен\n", st, i);
}
// Sequential shortest-path-map builder: for every grid cell (i,j) that lies
// inside the polygon set, stores the id of the visible vertex k minimizing
// distance[k] + |cell - point_k|. Cells outside the polygons get 0.
// NOTE(review): a cell that is inside but sees no vertex leaves its
// indexes[] entry unwritten — confirm the buffer is pre-initialized.
__global__ void spm1(float *indexes) {
for (int i = 0; i < dimension; i++) {
for (int j = 0; j < dimension; j++) {
unsigned int index = i*dimension+j;
float _x = (float)i;
float _y = (float)j;
float min_dis = MAX_VALUE;
float value_dis;
if(!isInsidePolygons2(_x, _y)) {
indexes[index] = 0;
continue;
}
// Scan all vertices; keep the visible one with minimal total cost.
for (int k = 0; k < points_count; k++) {
value_dis = distance[k] + evklid2(points[k].x, points[k].y, _x, _y);
if (value_dis < min_dis && isVisibleLine2(_x, _y, points[k].x, points[k].y)) {
min_dis = value_dis;
indexes[index] = (float)points[k].id;
}
}
}
}
}
// Parallel shortest-path map: thread (i,j) handles grid cell (i,j), storing
// the id of the visible vertex minimizing distance[k] + |cell - point_k|.
// Cells outside the polygon set get 0.
// NOTE(review): as in spm1, an inside cell that sees no vertex leaves its
// indexes[] entry unwritten — confirm the buffer is pre-initialized.
__global__ void spm(float *indexes) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int j = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int index = i*dimension+j;
if (index >= dimension*dimension) {
//printf("return %d, %d\n", i, j);
return;
}
float _x = (float)i;
float _y = (float)j;
if(!isInsidePolygons2(_x, _y)) {
indexes[i*dimension+j] = 0;
return;
}
float min_dis = MAX_VALUE;
float value_dis;
// Scan all vertices; keep the visible one with minimal total cost.
for (int k = 0; k < points_count; k++) {
value_dis = distance[k] + evklid2(points[k].x, points[k].y, _x, _y);
if (value_dis < min_dis && isVisibleLine2(_x, _y, points[k].x, points[k].y)) {
min_dis = value_dis;
indexes[index] = (float)points[k].id;
}
}
}
// One Floyd–Warshall relaxation step on the gr_viz1 copy for the current
// intermediate vertex k (device global): dist(i,j) = min(dist(i,j),
// dist(i,k)+dist(k,j)), where a negative entry means "no path yet".
// 2D launch: thread (i,j) owns one matrix cell.
// NOTE(review): k must be advanced between launches; the original ended
// with unreachable code after `return` that tried to increment k in-kernel
// (racy and never executed) — that dead code has been removed.
__global__ void floyd2() {
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i*points_count+j >= points_count*points_count)
        return;
    float ij=gr_viz1[i*points_count+j],
          ik=gr_viz1[i*points_count+k],
          kj=gr_viz1[k*points_count+j];
    // No relaxation possible through a missing edge.
    if (ik < 0 || kj < 0)
        return;
    float result;
    if (ij < 0)
        result = ik+kj;
    else
        result = min( ij, ik+kj );
    gr_viz1[i*points_count+j] = result;
}
// Copies the Floyd–Warshall working matrix gr_viz1 into a host-visible
// buffer. Sequential copy (no thread indexing is used).
__global__ void get_floyd2_result(float *matrix) {
    int total = points_count * points_count;
    for (int idx = 0; idx < total; ++idx)
        matrix[idx] = gr_viz1[idx];
}
// Tiled Floyd–Warshall on gr_viz: each thread owns an `index` x `index`
// tile of the matrix and all threads advance the intermediate vertex k
// together, separated by __syncthreads().
// NOTE(review): __syncthreads() synchronizes only one block, so this kernel
// is only correct when a single block covers the whole matrix — confirm the
// launch configuration at the call site.
__global__ void floyd() {
// Tile side length so that 32x32 threads (or fewer) cover the matrix.
int index = (int)(((float)points_count / (float)min(32,points_count)) + 1);
unsigned int index_x = index * threadIdx.x;
unsigned int index_y = index * threadIdx.y;
for (int k = 0; k < points_count; k++) {
for (int i = index_x; i < index_x + index && i < points_count; i++) {
for (int j = index_y; j < index_y + index && j < points_count; j++) {
if (i*points_count+j >= points_count*points_count)
continue;
// Negative entries mean "no path yet"; skip relaxations through them.
float ij=gr_viz[i*points_count+j],
ik=gr_viz[i*points_count+k],
kj=gr_viz[k*points_count+j];
//matrix[i*points_count+j]=ij;
float result = 0;
if (ik < 0 || kj < 0)
continue;
if (ij < 0)
result = ik+kj;
else
result = min( ij, ik+kj );
//matrix[i*points_count+j]=
gr_viz[i*points_count+j] = result;
}
}
__syncthreads();
}
}
// Copies the gr_viz distance matrix into a host-visible buffer.
// Sequential copy (no thread indexing is used).
__global__ void get_floyd_result(float *matrix) {
    int total = points_count * points_count;
    for (int idx = 0; idx < total; ++idx)
        matrix[idx] = gr_viz[idx];
}
// Fully sequential Floyd–Warshall on the gr_viz1 copy, mirroring each
// update into the host-visible `matrix`. Negative entries mean "no path".
// Fix: one read used gr_viz where every sibling read in this function uses
// gr_viz1 — the relaxation now reads gr_viz1 consistently.
__global__ void floyd1(float *matrix) {
    for (int k = 0; k < points_count; k++)
        for (int i = 0; i < points_count; i++)
            for (int j = 0; j < points_count; j++) {
                float result = 0;
                if (gr_viz1[i*points_count+k] < 0 || gr_viz1[k*points_count+j] < 0)
                    continue;
                if (gr_viz1[i*points_count+j] < 0)
                    result = gr_viz1[i*points_count+k]+gr_viz1[k*points_count+j];
                else
                    result = min( gr_viz1[i*points_count+j], gr_viz1[i*points_count+k]+gr_viz1[k*points_count+j]);
                gr_viz1[i*points_count+j] = matrix[i*points_count+j] = result;
            }
}
// Returns true when point t lies inside the axis-aligned bounding box of
// segment (p1,p2). Callers use it after a crossing test, so the box check
// decides on-segment membership for (near-)collinear points.
// Fix: the original computed bounds as (p1.x+p2.x)/p1.x — not a coordinate
// bound, and a division by zero for points on the axes; replaced with the
// standard min/max box test.
__device__ __inline__ bool pointInSegment(Point &t, Point &p1, Point &p2) {
    if (t.x < min(p1.x, p2.x) || t.x > max(p1.x, p2.x))
        return false;
    if (t.y < min(p1.y, p2.y) || t.y > max(p1.y, p2.y))
        return false;
    return true;
}
// Coordinate-based variant of pointInSegment: true when (x3,y3) lies inside
// the axis-aligned bounding box of segment (x1,y1)-(x2,y2).
// Fix: same broken (a+b)/a bound formula as pointInSegment, replaced with
// the min/max box test.
__device__ __inline__ bool pointInSegment2(float x3, float y3, float x1, float y1, float x2, float y2) {
    if (x3 < min(x1, x2) || x3 > max(x1, x2))
        return false;
    if (y3 < min(y1, y2) || y3 > max(y1, y2))
        return false;
    return true;
}
// Ray-casting point-in-polygons test on raw coordinates: casts a horizontal
// segment from (0, y1) to (x1, y1) and toggles a parity counter on each
// polygon-edge crossing; odd parity means inside. Points caught on an edge
// by pointInSegment2 (when the crossing test fails) count as inside.
__device__ __inline__ bool isInsidePolygons2(float x1, float y1) {
int inter_count = 0;
// Ray origin: same y, x = 0.
float x2 = 0;
float y2 = y1;
for (int k = 0; k < polygons_count; k++) {
Polygon polygon = polygons[k];
inter_count = 0;
for(int l = 0; l < polygon.lines_count; l++) {
Line line = polygon.lines[l];
if (intersect2(x2, y2, x1, y1, line.p1.x, line.p1.y, line.p2.x, line.p2.y) )
inter_count = 1 - inter_count;
else if (pointInSegment2(x1, y1, line.p1.x, line.p1.y, line.p2.x, line.p2.y))
return true;
}
if (inter_count == 1) {
return true;
}
}
return inter_count == 1;
}
// Point-in-polygons test by ray casting: shoots a horizontal segment from
// (0, p1.y) to p1 and counts crossings with every polygon edge; an odd
// count means p1 is inside. Points lying on an edge report true as well.
// Fix: the original allocated the ray origin with `new` and leaked it on
// the early `return true` path; a stack Point removes the leak entirely.
__device__ __inline__ bool isInsidePolygons(Point &p1) {
    int inter_count = 0;
    Point p2;
    p2.x = 0;
    p2.y = p1.y;
    for (int k = 0; k < polygons_count; k++) {
        Polygon polygon = polygons[k];
        inter_count = 0;
        for(int l = 0; l < polygon.lines_count; l++) {
            Line line = polygon.lines[l];
            if (intersect(p2, p1, line.p1, line.p2))
                inter_count = 1 - inter_count;
            else if (pointInSegment(p1, line.p1, line.p2))
                return true;
        }
        if (inter_count == 1) {
            return true;
        }
    }
    return inter_count == 1;
}
// Visibility test between two polygon vertices: p1 and p2 see each other
// when the segment p1-p2 crosses no polygon edge (edges sharing an endpoint
// id with p1/p2 are ignored). Pairs of non-adjacent vertices that belong to
// the same polygon are rejected outright as polygon diagonals.
__device__ __inline__ bool isVisibleLine(Point &p1, Point &p2) {
int diff_id = abs(p1.id - p2.id);
for (int k = 0; k < polygons_count; k++) {
Polygon polygon = polygons[k];
if (diff_id > 1 && diff_id != polygon.lines_count - 1) {
// Both ids fall inside this polygon's contiguous id range, so the
// segment would be a diagonal of the polygon — treated as blocked.
int id1 = p1.id - polygon.lines[0].p1.id;
int id2 = p2.id - polygon.lines[0].p1.id;
if ( id1 >= 0 && id2 >= 0 && id1 < polygon.lines_count && id2 < polygon.lines_count) {
return false;
}
}
for(int l = 0; l < polygon.lines_count; l++) {
Line line = polygon.lines[l];
// Skip edges incident to either endpoint; any other crossing blocks.
if (p1.id!=line.p1.id && p2.id!=line.p1.id && p1.id!=line.p2.id && p2.id!=line.p2.id &&
intersect(p1, p2, line.p1, line.p2)) {
return false;
}
}
}
return true;
}
// Visibility test between an arbitrary point (x1,y1) and a target (x2,y2):
// blocked when the segment crosses any polygon edge, except edges whose
// endpoint coincides exactly with the target point.
__device__ __inline__ bool isVisibleLine2(float &x1, float &y1, float &x2, float &y2) {
for (int k = 0; k < polygons_count; k++) {
Polygon polygon = polygons[k];
for(int l = 0; l < polygon.lines_count; l++) {
Line line = polygon.lines[l];
Point p3 = line.p1;
Point p4 = line.p2;
if (intersect2(x1, y1, x2, y2, p3.x, p3.y, p4.x, p4.y) &&
!((x2 == p3.x && y2 == p3.y) || (x2 == p4.x && y2 == p4.y))) {
return false;
}
}
}
return true;
}
// Orientation test: true when p2 lies strictly left of the directed line
// p0 -> p1 (positive signed cross product).
__device__ __inline__ bool leftSide(Point &p2, Point &p0, Point &p1)
{
    float cross = (p1.x - p0.x) * (p2.y - p0.y) - (p2.x - p0.x) * (p1.y - p0.y);
    return cross > 0.0;
}
// Orientation test: non-zero when p2 is right of, or collinear with, the
// directed line p0 -> p1. Returns int (0/1) to match the original signature.
__device__ __inline__ int rightSide(Point &p2, Point &p0, Point &p1)
{
    float cross = (p1.x - p0.x) * (p2.y - p0.y) - (p1.y - p0.y) * (p2.x - p0.x);
    return cross <= 0.0;
}
// Visibility test used while building the graph: rejects pairs whose
// segment crosses any polygon edge (edges sharing an endpoint id are
// skipped). When both endpoints touch incident edges on their left side,
// two interior points of the segment are additionally sampled and must lie
// inside the polygon set.
// Fix: removed the unused locals `Line l1, l2, l3, l4;` from the original.
__device__ __inline__ bool isVisibleLine3( Point &p1, Point &p2 ) {
    int firstPoint = 0;
    int secondPoint = 0;
    for (int k = 0; k < polygons_count; k++) {
        Polygon polygon = polygons[k];
        for(int l = 0; l < polygon.lines_count; l++) {
            Line line = polygon.lines[l];
            Point p3 = line.p1;
            Point p4 = line.p2;
            if (intersect2(p1.x, p1.y, p2.x, p2.y, p3.x, p3.y, p4.x, p4.y) &&
                !((p2.id == p3.id) || (p2.id == p4.id) || (p1.id == p3.id) || (p1.id == p4.id))) {
                return false;
            }
            // Count edges incident to p1/p2 whose opposite endpoint lies
            // strictly left of the edge.
            if ((p1.id == p3.id || p1.id == p4.id) && leftSide(p2, p3, p4)) {
                firstPoint++;
            }
            if ((p2.id == p3.id || p2.id == p4.id) && leftSide(p1, p3, p4)) {
                secondPoint++;
            }
        }
    }
    if (firstPoint + secondPoint < 2)
        return true;
    // Sample two points on the segment (near p2 and near p1) and require
    // both to be inside the polygon set.
    float ratio = 11.0;
    float ratio1 = 0.1;
    return isInsidePolygons2((p2.x + ratio*p1.x) / (1.0+ratio), (p2.y + ratio*p1.y) / (1.0+ratio))
        && isInsidePolygons2((p2.x + ratio1*p1.x) / (1.0+ratio1), (p2.y + ratio1*p1.y) / (1.0+ratio1));
}
// Euclidean distance between raw coordinates.
__device__ __inline__ float evklid2(float x1, float y1, float x2, float y2) {
    float m1 = x1 - x2;
    float m2 = y1 - y2;
    return sqrt(m1*m1 + m2*m2);
}
// Euclidean distance between two points. The original squared the terms
// with pow(, 2), which can promote to the double-precision pow on the
// device; delegate to evklid2's plain multiplies for consistency and speed.
__device__ __inline__ float evklid(Point &p1, Point &p2) {
    return evklid2(p1.x, p1.y, p2.x, p2.y);
}
// Exchanges two floats in place.
__device__ __inline__ void swap(float &a, float &b) {
    float tmp = a;
    a = b;
    b = tmp;
}
// Signed doubled area of triangle (x1,y1),(x2,y2),(x3,y3): positive when
// the third point lies left of the directed line first -> second.
__inline__ __device__ float area2 (float &x1, float &y1, float &x2, float &y2, float &x3, float &y3) {
    return (x2 - x1) * (y3 - y1) - (y2 - y1) * (x3 - x1);
}
// Same signed doubled area, expressed on Point operands.
__inline__ __device__ float area (Point &a, Point &b, Point &c) {
    return area2(a.x, a.y, b.x, b.y, c.x, c.y);
}
// 1D interval overlap: true when [min(a,b),max(a,b)] and [min(c,d),max(c,d)]
// intersect.
__inline__ __device__ bool intersect_1 (float a, float b, float c, float d) {
    if (a > b) swap (a, b);
    if (c > d) swap (c, d);
    return max(a,c) <= min(b,d);
}
// Segment intersection on raw coordinates: mutual straddle tests via signed
// areas, plus the 1D bounding-interval checks to handle collinear cases.
__device__ __inline__ bool intersect2(float &x1, float &y1, float &x2, float &y2,
float &x3, float &y3, float &x4, float &y4) {
    bool straddle12 = area2(x1, y1, x2, y2, x3, y3) * area2(x1, y1, x2, y2, x4, y4) <= 0;
    bool straddle34 = area2(x3, y3, x4, y4, x1, y1) * area2(x3, y3, x4, y4, x2, y2) <= 0;
    return straddle12 && straddle34
        && intersect_1 (x1, x2, x3, x4)
        && intersect_1 (y1, y2, y3, y4);
}
// Segment intersection on Point operands (same predicate as intersect2,
// with the interval checks performed first as cheap rejections).
__device__ __inline__ bool intersect (Point &a, Point &b, Point &c, Point &d) {
    if (!intersect_1 (a.x, b.x, c.x, d.x)) return false;
    if (!intersect_1 (a.y, b.y, c.y, d.y)) return false;
    return area(a,b,c) * area(a,b,d) <= 0
        && area(c,d,a) * area(c,d,b) <= 0;
}
// Classifies p2 relative to the directed segment p0 -> p1 using the signed
// area: LEFT/RIGHT for off-line points, and BEHIND/BEYOND/ORIGIN/
// DESTINATION/BETWEEN for (near-)collinear ones.
// NOTE(review): the BEYOND test feeds evklid2 with each difference vector
// compared against itself (distance is always 0), so `0 < 0` never holds
// and BEYOND can never be returned — confirm the intended comparison.
__device__ __inline__ int classify(Point &p2, Point &p0, Point &p1)
{
float sa = (p1.x - p0.x)*(p2.y - p0.y) - (p2.x - p0.x) * (p1.y - p0.y);
if (sa > 0.0)
return LEFT;
if (sa < 0.0)
return RIGHT;
// Collinear cases from here on.
if (((p1.x - p0.x) * (p2.x - p0.x)< 0.0) || ((p1.y - p0.y) * (p2.y - p0.y) < 0.0))
return BEHIND;
if (evklid2(p1.x - p0.x, p1.y - p0.y, p1.x - p0.x, p1.y - p0.y) < evklid2(p2.x - p0.x, p2.y - p0.y, p2.x - p0.x, p2.y - p0.y))
return BEYOND;
// Coincidence with the segment endpoints (small epsilon tolerance).
if (fabs(p0.x - p2.x) < 0.01 && fabs(p0.y - p2.y) < 0.01)
return ORIGIN;
if (fabs(p1.x - p2.x) < 0.01 && fabs(p1.y - p2.y) < 0.01)
return DESTINATION;
return BETWEEN;
}
// Classifies polygon edge e with respect to a horizontal ray through point
// a, for crossing-count point-in-polygon tests: CROSSING when the edge
// straddles a.y on the matching side, TOUCHING when a lies on the edge,
// INESSENTIAL otherwise.
__device__ __inline__ int edgeType(Point &a, Line &e)
{
Point v = e.p1;
Point w = e.p2;
switch (classify(a, e.p1, e.p2)) {
case LEFT:
return ((v.y<a.y)&&(a.y<=w.y)) ? CROSSING : INESSENTIAL;
case RIGHT:
return ((w.y<a.y)&&(a.y<=v.y)) ? CROSSING : INESSENTIAL;
case BETWEEN:
case ORIGIN:
case DESTINATION:
return TOUCHING;
default:
return INESSENTIAL;
}
}
// Crossing-count point-in-polygon test against polygons[0] ONLY (any other
// polygons are ignored). Returns INSIDE, OUTSIDE, or BOUNDARY when the
// point touches an edge.
__device__ __inline__ int pointInPolygon(Point &a)
{
int parity = 0;
Polygon polygon = polygons[0];
for(int l = 0; l < polygon.lines_count; l++) {
Line line = polygon.lines[l];
switch (edgeType(a, line)) {
case TOUCHING:
return BOUNDARY;
case CROSSING:
parity = 1 - parity;
}
}
return (parity ? INSIDE : OUTSIDE);
}
}
|
14,092 | #include "includes.h"
// One BFS level expansion with a shared-memory block queue that spills to
// the global queue when full.
//   frontier / frontier2   : current / next level vertex queues (global)
//   frontier_len           : number of vertices in `frontier`
//   cost                   : per-vertex BFS depth, relaxed with atomicMin
//   visited                : 0/1 flag marking membership in the next frontier
//   edgeArray/edgeArrayAux : CSR row offsets / adjacency targets
//   frontier_length        : global counter of the next frontier size
//   max_local_mem          : capacity (entries) of the dynamic shared queue
// Requires dynamic shared memory of max_local_mem * sizeof(unsigned int).
// NOTE(review): the final copy writes one shared-queue entry per thread
// (lid < b_q_length[0]); if b_q_length can exceed blockDim.x, entries past
// blockDim.x would be dropped — confirm sizing at the call site.
__global__ void BFS_kernel_multi_block_spill( volatile unsigned int *frontier, volatile unsigned int *frontier2, unsigned int frontier_len, volatile unsigned int *cost, volatile int *visited, unsigned int *edgeArray, unsigned int *edgeArrayAux, unsigned int numVertices, unsigned int numEdges, volatile unsigned int *frontier_length, const unsigned int max_local_mem)
{
extern volatile __shared__ unsigned int b_q[];
volatile __shared__ unsigned int b_q_length[1];
volatile __shared__ unsigned int b_offset[1];
//get the threadId
unsigned int tid=threadIdx.x + blockDim.x * blockIdx.x;
unsigned int lid=threadIdx.x;
//initialize the block queue length and block queue offset
if (lid == 0 )
{
b_q_length[0]=0;
b_offset[0]=0;
}
__syncthreads();
// Each in-range thread expands one frontier vertex.
if(tid<frontier_len)
{
//get the nodes to traverse from block queue
unsigned int node_to_process=frontier[tid];
visited[node_to_process]=0;
//get the offsets of the vertex in the edge list
unsigned int offset=edgeArray[node_to_process];
unsigned int next=edgeArray[node_to_process+1];
//Iterate through the neighbors of the vertex
while(offset<next)
{
//get neighbor
unsigned int nid=edgeArrayAux[offset];
//relax its cost; atomicMin returns the previous value
unsigned int v=atomicMin((unsigned int *)&cost[nid],
cost[node_to_process]+1);
//if cost is less than previously set add to frontier
if(v>cost[node_to_process]+1)
{
int is_in_frontier=atomicExch((int *)&visited[nid],1);
//if node already in frontier do nothing
if(is_in_frontier==0)
{
//reserve a slot in the shared block queue
unsigned int t=atomicAdd((unsigned int *)&b_q_length[0],
1);
if(t<max_local_mem)
{
b_q[t]=nid;
}
//write to global memory if shared memory full
else
{
int off=atomicAdd((unsigned int *)frontier_length,
1);
frontier2[off]=nid;
}
}
}
offset++;
}
}
__syncthreads();
//get block queue offset in global queue
if(lid==0)
{
// Clamp: slots past max_local_mem were already spilled to global memory.
if(b_q_length[0] > max_local_mem)
{
b_q_length[0]=max_local_mem;
}
b_offset[0]=atomicAdd((unsigned int *)frontier_length,b_q_length[0]);
}
__syncthreads();
//copy block queue to frontier
if(lid < b_q_length[0])
frontier2[lid+b_offset[0]]=b_q[lid];
}
14,093 | #include <curand_kernel.h>
#include <curand.h>
#include <chrono>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
#include <time.h>
#define PRECISION double
// Monte Carlo pi kernel: each thread draws num_iter uniform points in the
// unit square and writes the fraction that landed inside the unit circle
// to counts[idx]. The curand state is seeded with the thread index, so the
// result is deterministic for a fixed launch configuration.
__global__ void monteCuda(PRECISION *counts, int num_iter, curandState *states)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    curand_init(idx, idx, 0, &states[idx]);
    int count = 0;
    for (int iter = 0; iter < num_iter; iter++)
    {
        // Generate a random (x, y) point in the unit square.
        PRECISION x = curand_uniform(&states[idx]);
        PRECISION y = curand_uniform(&states[idx]);
        // Inside the unit circle iff x^2 + y^2 <= 1; comparing the squared
        // radius drops the sqrt of the original (sqrt is monotonic, so the
        // predicate is unchanged).
        if (x * x + y * y <= 1.0)
        {
            count++;
        }
    }
    counts[idx] = ((PRECISION)count / (PRECISION)num_iter);
}
// Parses <block size> <num threads> <iterations per thread> from argv into
// the three out-parameters. Returns false (after printing usage) when too
// few arguments were given, true otherwise.
bool setValuesFromArgs(int argc, char **argv, unsigned int *block_size, unsigned int *num_threads, unsigned int *num_iter)
{
    if (argc < 4) {
        printf("Incorrect parameters!\nUsage: %s <block size> <num threads> <iterations per thread>\n", *argv);
        return false;
    }
    char *end;
    *block_size  = strtoul(argv[1], &end, 10);
    *num_threads = strtoul(argv[2], &end, 10);
    *num_iter    = strtoul(argv[3], &end, 10);
    return true;
}
// Estimates pi with a Monte Carlo kernel: launches one thread per sample
// batch, averages the per-thread in-circle ratios, and prints the result
// (plus elapsed time when a 5th argument requests benchmark output).
int main(int argc, char* argv[])
{
    unsigned int block_size, num_threads, num_iter;
    if(!setValuesFromArgs(argc, argv, &block_size, &num_threads, &num_iter)) return 0;
    bool bench = argc == 5;
    auto start = std::chrono::system_clock::now();
    // Round num_threads up to a multiple of block_size so every launched
    // thread maps to a counts[] slot.
    num_threads = ((num_threads + block_size - 1) / block_size) * block_size;
    PRECISION count = 0.0;
    PRECISION pi;
    PRECISION *counts = (PRECISION*)malloc(num_threads * sizeof(PRECISION));
    curandState *dev_random;
    cudaMalloc(&dev_random, num_threads*sizeof(curandState));
    PRECISION *p_counts = 0;
    cudaMalloc(&p_counts, num_threads * sizeof(PRECISION));
    monteCuda<<<(num_threads + block_size - 1) / block_size, block_size>>>(p_counts, num_iter, dev_random);
    cudaDeviceSynchronize();
    cudaMemcpy(counts, p_counts, num_threads * sizeof(PRECISION), cudaMemcpyDeviceToHost);
    // Average the per-thread ratios and scale by 4 (area ratio of quarter
    // circle to unit square).
    for(unsigned int i = 0; i < num_threads; i++) {
        count += counts[i];
    }
    pi = (count / (PRECISION)num_threads) * 4.0;
    // Release device and host buffers (the original leaked all three).
    cudaFree(p_counts);
    cudaFree(dev_random);
    free(counts);
    auto end = std::chrono::system_clock::now();
    std::chrono::duration<double> time = end-start;
    if(bench) {
        printf("%f %f\n", pi, time.count());
    }
    else printf("The result is %f\n", pi);
    return 0;
}
|
14,094 | /*
* EDDL Library - European Distributed Deep Learning Library.
* Version: 1.1
* copyright (c) 2022, Universitat Politècnica de València (UPV), PRHLT Research Centre
* Date: March 2022
* Author: PRHLT Research Centre, UPV, (rparedes@prhlt.upv.es), (jon@prhlt.upv.es)
* All rights reserved
*/
#include <string.h>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <cuda.h>
// Element-wise floating-point classification kernels: each thread inspects
// one element of A and stores the boolean result (0.0f / 1.0f) into B.
// 1D launch; threads beyond `size` exit immediately.

// B[i] = 1 when A[i] is neither NaN nor infinite.
__global__ void gpu_isfinite(float *A, float *B, long int size){
    long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    B[i] = isfinite(A[i]);
}
// B[i] = 1 when A[i] is +inf or -inf.
__global__ void gpu_isinf(float *A, float *B, long int size){
    long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    B[i] = isinf(A[i]);
}
// B[i] = 1 when A[i] is NaN.
__global__ void gpu_isnan(float *A, float *B, long int size){
    long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    B[i] = isnan(A[i]);
}
// B[i] = 1 when A[i] is negative infinity.
__global__ void gpu_isneginf(float *A, float *B, long int size){
    long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    B[i] = isinf(A[i]) && A[i] < 0.0f;
}
// B[i] = 1 when A[i] is positive infinity.
__global__ void gpu_isposinf(float *A, float *B, long int size){
    long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    B[i] = isinf(A[i]) && A[i] > 0.0f;
}
// Element-wise boolean algebra on float tensors: operands are interpreted
// as booleans (non-zero == true) and results stored as 0.0f / 1.0f.
// 1D launch; threads beyond `size` exit immediately.

__global__ void gpu_logical_and(float *A, float *B, float *C, long int size){
    long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    C[i] = (bool)A[i] & (bool)B[i];
}
__global__ void gpu_logical_or(float *A, float *B, float *C, long int size){
    long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    C[i] = (bool)A[i] | (bool)B[i];
}
__global__ void gpu_logical_not(float *A, float *B, long int size){
    long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    B[i] = !((bool)A[i]);
}
__global__ void gpu_logical_xor(float *A, float *B, float *C, long int size){
    long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    C[i] = (bool)A[i] ^ (bool)B[i];
}
// Reduces "are A and B element-wise close?" into the single flag
// `allclose`: each in-range thread clears it when its pair violates
// |A[i]-B[i]| <= atol + rtol*|B[i]|. The `equal_nan` parameter is accepted
// but not used here.
// NOTE(review): `bool &allclose` is a reference parameter on a __global__
// kernel — the caller must ensure it refers to device-accessible memory
// (e.g. managed memory); a plain host bool would make the kernel write to
// an invalid address. The unsynchronized write is benign since every writer
// stores the same value (false).
__global__ void gpu_allclose(float *A, float *B, float rtol, float atol, bool equal_nan, long int size, bool &allclose){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
// if(!allclose) return; // Abort if there is a result
if (thread_id_x < size && allclose){
bool close = fabsf(A[thread_id_x] - B[thread_id_x]) <= (atol + rtol * fabsf(B[thread_id_x]));
if (!close){
allclose = false;
// return;
}
}
}
// C[i] = 1 when |A[i]-B[i]| <= atol + rtol*|B[i]| (equal_nan is accepted
// but not applied). 1D launch; excess threads exit immediately.
__global__ void gpu_isclose(float *A, float *B, float *C, float rtol, float atol, bool equal_nan, long int size){
    long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    float diff = fabsf(A[i] - B[i]);
    float tol = atol + rtol * fabsf(B[i]);
    C[i] = diff <= tol;
}
// Element-wise comparison kernels. Scalar forms write B[i] = (A[i] OP v);
// tensor forms write C[i] = (A[i] OP B[i]). Results are stored as
// 0.0f / 1.0f. 1D launch; threads beyond `size` exit immediately.

__global__ void gpu_greater(float *A, float *B, float v, long int size){
    long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    B[i] = A[i] > v;
}
__global__ void gpu_greater(float *A, float *B, float *C, long int size){
    long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    C[i] = A[i] > B[i];
}
__global__ void gpu_greater_equal(float *A, float *B, float v, long int size){
    long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    B[i] = A[i] >= v;
}
__global__ void gpu_greater_equal(float *A, float *B, float *C, long int size){
    long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    C[i] = A[i] >= B[i];
}
__global__ void gpu_less(float *A, float *B, float v, long int size){
    long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    B[i] = A[i] < v;
}
__global__ void gpu_less(float *A, float *B, float *C, long int size){
    long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    C[i] = A[i] < B[i];
}
__global__ void gpu_less_equal(float *A, float *B, float v, long int size){
    long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    B[i] = A[i] <= v;
}
__global__ void gpu_less_equal(float *A, float *B, float *C, long int size){
    long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    C[i] = A[i] <= B[i];
}
__global__ void gpu_equal(float *A, float *B, float v, long int size){
    long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    B[i] = A[i] == v;
}
__global__ void gpu_equal(float *A, float *B, float *C, long int size){
    long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    C[i] = A[i] == B[i];
}
__global__ void gpu_not_equal(float *A, float *B, float v, long int size){
    long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    B[i] = A[i] != v;
}
__global__ void gpu_not_equal(float *A, float *B, float *C, long int size){
    long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    C[i] = A[i] != B[i];
}
|
14,095 | /*
* @Author: jose
* @Date: 2020-08-24 00:00:00
* @Last Modified by: jose
* @Last Modified time: 2020-08-24 00:00:00
*/
// local libs
#include "interface.cuh"
// ===========================================
// Check Errors
// ===========================================
#define imart_assert_cuda(status, msg) \
imart_assert_cuda_error((status), __FILE__, __LINE__, msg);
// Reports a CUDA error (caller message, code, description, source location)
// on stderr and, when `abort` is true (the default), terminates the process
// with the error code. Invoked through the imart_assert_cuda macro, which
// injects __FILE__/__LINE__.
void imart_assert_cuda_error(cudaError_t code, const char *file, int line, const char* msg, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"\n******* CUDA Error *******"\
"\n[Error] Information:\t%s"\
"\n[Error] Error code:\t%i"\
"\n[Error] Description:\t%s"\
"\n[Error] File:\t\t%s"\
"\n[Error] Line:\t\t%d\n",
msg, code, cudaGetErrorString(code), file, line);
if (abort) exit(code);
};
};
// ===========================================
// Kernels
// ===========================================
// __global__ void kernel_print(const char * msg)
// Minimal device-side smoke test: prints a greeting from the GPU.
__global__ void kernel_print()
{
    printf("[GPU] Hello!\n");
};
// ===========================================
// Functions
// ===========================================
// Enumerates the visible CUDA devices and prints each device's name.
// Fix: the original left both CUDA API calls unchecked, unlike the rest of
// this file which routes every call through imart_assert_cuda.
void cuda_check_gpu()
{
    int devicesCount = 0;
    imart_assert_cuda( cudaGetDeviceCount(&devicesCount), "Fail to count CUDA devices" );
    for(int deviceIndex = 0; deviceIndex < devicesCount; ++deviceIndex)
    {
        cudaDeviceProp deviceProperties;
        imart_assert_cuda( cudaGetDeviceProperties(&deviceProperties, deviceIndex), "Fail to query device properties" );
        std::cout << "CUDA Device:\t" << deviceProperties.name << std::endl;
    }
};
// Launches the hello-world kernel on one thread and verifies both the
// launch (cudaPeekAtLastError) and its completion (cudaDeviceSynchronize).
void cuda_print()
{
kernel_print<<<1, 1>>>();
imart_assert_cuda( cudaPeekAtLastError(), "Fail to run kernel print" );
imart_assert_cuda( cudaDeviceSynchronize(), "Fail to sync kernel print");
};
// Allocates device memory for `size` elements of `type`; aborts via the
// CUDA assert helper on failure. `x` receives the device pointer.
template <typename type>
void cuda_create_memory(type * & x, int size)
{
imart_assert_cuda ( cudaMalloc(&x, size*sizeof(type)), "Memory allocation" );
};
// Releases a device allocation obtained from cuda_create_memory.
template <typename type>
void cuda_clean_memory(type * & x)
{
imart_assert_cuda( cudaFree(x), "Memory free" );
};
// Copies `size` elements from host buffer `data` to device buffer `x`.
// NOTE(review): `offset` is accepted but ignored — the commented-out line
// suggests the copy was meant to target x + offset; confirm callers before
// relying on a non-zero offset.
template <typename type>
void cuda_push_memory(type * x, type * data, int size, int offset)
{
// printf("vector in:\n");
// for(int i = 0; i < size; i++)
// printf("%f ",data[i]);
imart_assert_cuda( cudaMemcpy(x, data, size*sizeof(type), cudaMemcpyHostToDevice), "Memory copy host to device" );
// cudaMemcpy(x + offset, data, size*sizeof(type), cudaMemcpyHostToDevice);
};
// Const-source overload of the host-to-device copy; `offset` is likewise
// unused here.
template <typename type>
void cuda_push_memory(type * x, const type * data, int size, int offset)
{
imart_assert_cuda( cudaMemcpy(x, data, size*sizeof(type), cudaMemcpyHostToDevice), "Memory copy host to device" );
};
// Copies `size` elements from device buffer `x`, starting at element
// `offset`, back into host buffer `data`.
template <typename type>
void cuda_pull_memory(type * x, type * data, int size, int offset)
{
// printf("pull\n");
// imart_assert_cuda( cudaMemcpy(data, x, size*sizeof(type), cudaMemcpyDeviceToHost), "Memory copy device to host" );
imart_assert_cuda( cudaMemcpy(data, x + offset, size*sizeof(type), cudaMemcpyDeviceToHost), "Memory copy device to host" );
// cudaMemcpy(data, x + offset, size*sizeof(type), cudaMemcpyDeviceToHost);
};
// ===========================================
// Explicit instantiation
// ===========================================
// One macro invocation per element type emits the full set of explicit
// instantiations (create / clean / push / push-const / pull).
#define IMART_INSTANTIATE_CUDA_MEMORY(T)                                            \
    template void cuda_create_memory<T>(T * & x, int size);                         \
    template void cuda_clean_memory<T>(T * & x);                                    \
    template void cuda_push_memory<T>(T * x, T * data, int size, int offset);       \
    template void cuda_push_memory<T>(T * x, const T * data, int size, int offset); \
    template void cuda_pull_memory<T>(T * x, T * data, int size, int offset);

IMART_INSTANTIATE_CUDA_MEMORY(float)
IMART_INSTANTIATE_CUDA_MEMORY(double)
IMART_INSTANTIATE_CUDA_MEMORY(unsigned int)
IMART_INSTANTIATE_CUDA_MEMORY(int)
IMART_INSTANTIATE_CUDA_MEMORY(unsigned short)
IMART_INSTANTIATE_CUDA_MEMORY(short)
IMART_INSTANTIATE_CUDA_MEMORY(unsigned char)
IMART_INSTANTIATE_CUDA_MEMORY(char)

#undef IMART_INSTANTIATE_CUDA_MEMORY
|
14,096 | #include <stdio.h>
#include <stdlib.h>
#include <iostream>
// The spmv kernel from nvidia document, with bug fix and modification
// https://www.nvidia.com/docs/IO/66889/nvr-2008-004.pdf
//
// Launch configuration: one block per matrix row, 32 threads per block.
// Each thread accumulates every 32nd nonzero of its row; the per-thread
// partials are then combined with a tree reduction in shared memory.
__global__ void
spmv_csr_vector_kernel(const int num_rows,
                       const int *ptr,
                       const int *indices,
                       const float *data,
                       cudaTextureObject_t b_tex,
                       float *y)
{
    // 33 entries instead of 32 (original note: avoid memory bank conflict).
    __shared__ double vals[33];
    // Vector b is read through a texture object for cached reuse.
    int row = blockIdx.x;
    if (row < num_rows)
    {
        int lo = ptr[row];
        int hi = ptr[row + 1];
        // Running sum per thread, striding through the row's nonzeros.
        vals[threadIdx.x] = 0;
        for (int jj = lo + threadIdx.x; jj < hi; jj += 32)
            vals[threadIdx.x] += data[jj] * tex1Dfetch<float>(b_tex, indices[jj]);
        __syncthreads();
        // Tree reduction in shared memory: halves of 16, 8, 4, 2, 1,
        // with a barrier after each stage (same schedule as the unrolled
        // original).
        for (int half = 16; half > 0; half >>= 1)
        {
            if (threadIdx.x < half)
                vals[threadIdx.x] += vals[threadIdx.x + half];
            __syncthreads();
        }
        // First thread accumulates the row result into y.
        if (threadIdx.x == 0)
            y[row] += vals[0];
    }
}
// Driver: read a sparse matrix in coordinate format, build CSR arrays in
// unified memory, compute t = A*b on the CPU, then on the GPU (with b
// cached in a texture object), and validate that the results agree.
int main(int argc, char **argv)
{
    FILE *fp;
    char line[1024];
    int *ptr, *indices;
    float *data, *b, *t, *t_gpu;
    int i, j;
    int n;  // number of nonzero elements in data
    int nr; // number of rows in matrix
    int nc; // number of columns in matrix
    // Open input file and read to end of comments
    if (argc != 2)
        abort();
    if ((fp = fopen(argv[1], "r")) == NULL)
    {
        abort();
    }
    fgets(line, 128, fp);
    while (line[0] == '%')
    {
        fgets(line, 128, fp);
    }
    // Read number of rows (nr), number of columns (nc) and
    // number of elements and allocate memory for ptr, indices, data, b and t.
    sscanf(line, "%d %d %d\n", &nr, &nc, &n);
    // Unified Memory Allocation
    cudaMallocManaged(&ptr, (nr + 1) * sizeof(int));
    cudaMallocManaged(&indices, n * sizeof(int));
    cudaMallocManaged(&data, n * sizeof(float));
    cudaMallocManaged(&b, nc * sizeof(float));
    cudaMallocManaged(&t_gpu, nr * sizeof(float));
    t = (float *) malloc(nr * sizeof(float));
    // Read data in coordinate format and initialize sparse matrix
    int lastr = 0;
    for (i = 0; i < n; i++)
    {
        int r;
        fscanf(fp, "%d %d %f\n", &r, &(indices[i]), &(data[i]));
        indices[i]--; // start numbering at 0
        if (r != lastr)
        {
            ptr[r - 1] = i;
            lastr = r;
        }
    }
    ptr[nr] = n;
    fclose(fp); // BUG FIX: input file was never closed
    // initialize t and t_gpu to 0 and b with random data.
    // BUG FIX: t_gpu was never initialized, but the kernel accumulates into
    // it with `y[row] += ...`, so the GPU result started from garbage.
    for (i = 0; i < nr; i++)
    {
        t[i] = 0.0;
        t_gpu[i] = 0.0f;
    }
    for (i = 0; i < nc; i++)
    {
        b[i] = (float)rand() / 1111111111;
    }
    // MAIN COMPUTATION, SEQUENTIAL VERSION
    for (i = 0; i < nr; i++)
    {
        for (j = ptr[i]; j < ptr[i + 1]; j++)
        {
            t[i] = t[i] + data[j] * b[indices[j]];
        }
    }
    // Compute result on GPU and compare output
    int device = -1;
    cudaGetDevice(&device);
    cudaMemPrefetchAsync(ptr, (nr + 1) * sizeof(int), device, NULL);
    cudaMemPrefetchAsync(indices, n * sizeof(int), device, NULL);
    cudaMemPrefetchAsync(data, n * sizeof(float), device, NULL);
    cudaMemPrefetchAsync(t_gpu, nr * sizeof(float), device, NULL);
    // Use read only texture object to cache vector b, saving ~100 cycle on read
    // refered to https://devblogs.nvidia.com/cuda-pro-tip-kepler-texture-objects-improve-performance-and-flexibility/
    cudaResourceDesc resDesc;
    memset(&resDesc, 0, sizeof(resDesc));
    resDesc.resType = cudaResourceTypeLinear;
    resDesc.res.linear.devPtr = b;
    resDesc.res.linear.desc.f = cudaChannelFormatKindFloat;
    resDesc.res.linear.desc.x = 32; // bits per channel
    // BUG FIX: was nr*sizeof(float), but b holds nc floats (one per matrix
    // column); when nc > nr the kernel read past the end of the texture.
    resDesc.res.linear.sizeInBytes = nc * sizeof(float);
    cudaTextureDesc texDesc;
    memset(&texDesc, 0, sizeof(texDesc));
    texDesc.readMode = cudaReadModeElementType;
    cudaTextureObject_t tex = 0;
    cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
    // Call Kernel: one 32-thread block per row (see kernel header comment).
    dim3 dimGrid(nr, 1, 1);
    dim3 dimBlock(32, 1, 1);
    spmv_csr_vector_kernel<<<dimGrid, dimBlock>>>(nr, ptr, indices, data, tex, t_gpu);
    // Sync before reading managed memory on the host
    cudaDeviceSynchronize();
    // Result Validation
    for (i = 0; i < nr; i++) {
        if (fabs(t[i] - t_gpu[i]) > 1e-5) {
            std::cout << "Not equal at " << i << "\n";
            std::cout << t[i] << " " << t_gpu[i] << " " << "\n";
            abort();
        }
    }
    // Garbage collection (BUG FIX: t was previously leaked)
    cudaDestroyTextureObject(tex);
    cudaFree(ptr);
    cudaFree(indices);
    cudaFree(data);
    cudaFree(b);
    cudaFree(t_gpu);
    free(t);
    return 0;
}
|
14,097 | #include <stdio.h>
//Compiler version gcc 6.3.0
// Single-thread kernel: store the sum of the two device scalars *a and *b
// into *c. Intended to be launched as sum<<<1,1>>>.
__global__ void sum(int *a, int *b, int *c)
{
    c[0] = a[0] + b[0];
}
// Add 10 + 20 on the GPU via the sum kernel and print the result.
// NOTE(review): no CUDA calls are error-checked; failures would surface
// only as a wrong printed value.
int main(void)
{
    int a = 10, b = 20, c;      // host copies of a, b, c
    int *dev_a, *dev_b, *dev_c; // device copies of a, b, c
    const int size = sizeof(int);

    // Allocate space for device copies of a, b, c
    cudaMalloc((void **)&dev_a, size);
    cudaMalloc((void **)&dev_b, size);
    cudaMalloc((void **)&dev_c, size);

    // Copy inputs to device
    cudaMemcpy(dev_a, &a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, &b, size, cudaMemcpyHostToDevice);

    // Launch the sum() kernel on the GPU
    sum<<<1,1>>>(dev_a, dev_b, dev_c);

    // Copy result back to host (cudaMemcpy blocks until the kernel is done)
    cudaMemcpy(&c, dev_c, size, cudaMemcpyDeviceToHost);

    // Cleanup
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);

    printf("%d",c);
    return 0;
}
|
14,098 | #include "includes.h"
// Element-wise vector add: C[i] = A[i] + B[i], one element per thread.
// NOTE(review): there is no bounds guard, so the launch configuration must
// cover exactly the array length (gridDim.x * blockDim.x == N); any excess
// threads would access out of bounds — confirm callers guarantee this.
__global__ void VecAdd(float * A, float * B, float * C)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    C[i] = A[i] + B[i];
}
14,099 | #include <iostream>
#include "cuda.h"
using namespace std;
#define N 64
// Each thread computes its flattened block-local id (x fastest, then y,
// then z), records it in array[tid], and atomically adds it into *sum.
__global__ void atomicKernel(int* sum, int* array)
{
    const int tid = threadIdx.x
                  + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);
    array[tid] = tid;
    atomicAdd(sum, tid);
}
// Launch one 2x8x4 block (64 threads); each thread contributes its flat id
// to a device-side atomic sum. The GPU sum is then compared against a CPU
// sum over the ids the kernel wrote into dArray.
int main()
{
    dim3 grid(1,1,1), block(2,8,4);
    int* hSum = (int*)malloc(sizeof(int));
    int* dSum;
    cudaMalloc((void**)&dSum, sizeof(int));
    // BUG FIX: dSum was read-modify-written by atomicAdd without ever being
    // initialized, so the GPU sum started from garbage. Zero it first.
    cudaMemset(dSum, 0, sizeof(int));
    int* hArray = (int*)malloc(N*sizeof(int));
    int* dArray;
    cudaMalloc((void**)&dArray, N*sizeof(int));
    atomicKernel<<<grid,block>>>(dSum, dArray);
    cudaDeviceSynchronize();
    cudaMemcpy(hSum, dSum, sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(hArray, dArray, N*sizeof(int), cudaMemcpyDeviceToHost);
    int cpuSum = 0;
    for(int i = 0; i < N; i++)
        cpuSum += hArray[i];
    cout<< "CPU Sum: " << cpuSum << endl;
    cout<< "GPU Sum: " << *hSum << endl;
    // BUG FIX: release device and host buffers (previously leaked).
    cudaFree(dSum);
    cudaFree(dArray);
    free(hSum);
    free(hArray);
    return 0;
}
|
14,100 | //pass
//--blockDim=32 --gridDim=2
#include <cuda.h>
// GPUVerify fixture helper: store temp into A[idx].
// Part of a verification test case (see the //pass annotation above);
// do not "fix" the access pattern — it is the object under test.
__device__ void write(int* A, int idx, int temp)
{
    A[idx] = temp;
}
// GPUVerify fixture helper: load A[idx + 1].
// NOTE(review): thread t reads A[t+1] while thread t+1 later writes A[t+1]
// (via write in the race kernel), which looks racy; the //pass annotation
// suggests the verifier is nevertheless expected to accept this test —
// confirm the intended verdict before altering it.
__device__ int read(int* A, int idx)
{
    return A[idx + 1];
}
// GPUVerify test kernel: each thread reads A[idx + 1] (via read) and then
// writes that value to A[idx] (via write), idx being the thread's global
// index. The overlapping read/write pattern across neighboring threads is
// the deliberate subject of the verification test; leave it as-is.
__global__ void race (int* A)
{
    int tid = threadIdx.x;
    int bid = blockIdx.x;
    int idx = blockDim.x * bid + tid;
    int temp = read(A, idx);
    write(A, idx, temp);
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.