serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
12,301 | #include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
//#include <helper_cuda.h>
#include <time.h>
#define BLOCK_SIZE 1024
#define NUMBER_OF_ELEMENTS 1024
// Element-wise vector addition: C[i] = A[i] + B[i] for i in [0, numElements).
// One thread per element; the guard protects against a grid that overruns
// the data when numElements is not a multiple of the block size.
__global__ void vectorAdd(const float *A, const float *B, float *C, int numElements)
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= numElements)
        return;
    C[idx] = A[idx] + B[idx];
}
// Per-block sum reduction: each block reduces a slice of up to 2*BLOCK_SIZE
// input elements into output[blockIdx.x]. Must be launched with BLOCK_SIZE
// threads per block; out-of-range lanes are padded with 0 (the additive
// identity), so `len` may be smaller than the slice.
__global__ void total(float *input, float *output, int len) {
//@@ Load a segment of the input vector into shared memory
// Each thread stages two elements: one in the lower half and one in the
// upper half of the 2*BLOCK_SIZE shared buffer.
__shared__ float partialSum[2 * BLOCK_SIZE];
unsigned int t = threadIdx.x, start = 2* blockIdx.x * BLOCK_SIZE;
if (start + t < len)
partialSum[t] = input[start + t];
else
partialSum[t] = 0;
if (start + BLOCK_SIZE + t < len)
partialSum[BLOCK_SIZE + t] = input[start + BLOCK_SIZE + t];
else
partialSum[BLOCK_SIZE + t] = 0;
//@@ Traverse the reduction tree
// Halve the stride each round; the barrier sits before the add so the
// previous round's writes are visible block-wide. Note __syncthreads()
// is outside the `if`, so every thread reaches it (no divergent barrier).
for (unsigned int stride = BLOCK_SIZE; stride >= 1; stride >>= 1) {
__syncthreads();
if (t < stride)
partialSum[t] += partialSum[t + stride];
}
//@@ Write the computed sum of the block to the output vector at the
//@@ correct index
if (t == 0)
output[blockIdx.x] = partialSum[0];
}
int main(void)
{
    // Host driver: fills a vector with random values, sums it on the GPU in
    // chunks of up to 2*BLOCK_SIZE elements via the `total` reduction kernel,
    // and validates the result against a sequential CPU sum.
    cudaError_t err = cudaSuccess;
    int numElements = NUMBER_OF_ELEMENTS;
    int memorySizeIn = numElements * sizeof(float);
    int memorySizeOut = sizeof(float); // only output[0] is read back per chunk
    printf("Calculating the sum of %d elements.\n", numElements);
    printf("Allocating host vectors...\n");
    // Allocate the host input vector
    float *h_input = (float *)malloc(memorySizeIn);
    // Allocate the host output vector
    float *h_output = (float *)malloc(memorySizeIn);
    // Verify that allocations succeeded
    if (h_input == NULL || h_output == NULL)
    {
        fprintf(stderr, "Failed to allocate host vectors!\n");
        exit(EXIT_FAILURE);
    }
    else
        printf("Success.\n");
    // Initialize the host input vector with random values in [0, 1]
    for (int i = 0; i < numElements; ++i)
    {
        h_input[i] = (float)rand() / (float)RAND_MAX;
    }
    printf("Allocating device vectors... \n");
    float *d_input = NULL;
    err = cudaMallocManaged((void **)&d_input, memorySizeIn);
    if (err != cudaSuccess)
    {
        // BUG FIX: the format string was missing the %s conversion, so the
        // cudaGetErrorString(err) argument was silently ignored.
        fprintf(stderr, "Failed to allocate device input vector (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    else
        printf("Device input vector allocated.\n");
    float *d_output = NULL;
    err = cudaMallocManaged((void **)&d_output, memorySizeIn);
    if (err != cudaSuccess)
    {
        // BUG FIX: same missing %s conversion as above.
        fprintf(stderr, "Failed to allocate device output vector (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    else
        printf("Device output vector allocated.\n");
    printf("Copying input vector from the host memory to the CUDA device...\n");
    err = cudaMemcpy(d_input, h_input, memorySizeIn, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy input vector from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    else
        printf("Success.\n");
    // Launch configuration: `total` reduces up to 2*BLOCK_SIZE elements per block.
    int threadsPerBlock = BLOCK_SIZE;
    int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
    printf("CUDA kernel launching with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
    float sumGPU = 0;
    int k = 0;    // element offset of the current chunk in d_input
    int iter = 0; // chunk counter (for error messages)
    clock_t start = clock();
    int inputElementsLeft = numElements;
    while (inputElementsLeft > 0)
    {
        // Each pass reduces at most 2*BLOCK_SIZE elements into output[0].
        int currNumElements = inputElementsLeft;
        if (inputElementsLeft > BLOCK_SIZE * 2)
            currNumElements = BLOCK_SIZE * 2;
        total<<<blocksPerGrid, threadsPerBlock>>>(d_input + k, d_output, currNumElements);
        // Launches return no status; query the launch error explicitly.
        err = cudaGetLastError();
        if (err != cudaSuccess)
        {
            fprintf(stderr, "Failed to launch the kernel in %d iteration (error code %s)!\n", iter, cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }
        // Copy this chunk's partial sum back; the blocking cudaMemcpy also
        // synchronizes with the asynchronous kernel launch above.
        err = cudaMemcpy(h_output, d_output, memorySizeOut, cudaMemcpyDeviceToHost);
        if (err != cudaSuccess)
        {
            fprintf(stderr, "Failed to copy output vector from device to host (error code %s)!\n", cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }
        float sum = h_output[0];
        sumGPU += sum;
        k += currNumElements;
        iter += 1;
        inputElementsLeft -= currNumElements;
    }
    cudaDeviceSynchronize();
    clock_t end = clock();
    double time_elapsed_gpu = ((double)(end - start)) * 1000 / CLOCKS_PER_SEC;
    // Reference: sequential sum on the CPU
    float sumCPU = 0;
    for (int i = 0; i < numElements; i++) {
        sumCPU += h_input[i];
    }
    printf("GPU sum is %f and CPU sum is %f.\n", sumGPU, sumCPU);
    printf("Time elapsed for GPU computations: %lf ms.\n", time_elapsed_gpu);
    // Free device memory
    err = cudaFree(d_input);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free input vector (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaFree(d_output);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free output vector (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Free host memory
    free(h_input);
    free(h_output);
    return 0;
}
|
12,302 | #include "includes.h"
// Alpha-blends a planar RGBA texture onto a planar RGB target at offset
// (inputX, inputY). Layouts are planar (channel-major): the indexing
// `targetPixels * channel + ...` and the alpha read at `3 * texturePixels`
// show each channel stored as a contiguous plane. The texture is BGR-ordered;
// channels are swapped to RGB before writing.
// Launch layout (per the id computation below): 2D grid of 2D blocks;
// blockDim.x spans texture width, blockIdx.x*blockDim.y+threadIdx.y spans
// texture height, and blockIdx.y selects the channel plane.
__global__ void DrawRgbaTextureKernel2DBlock(float *target, int targetWidth, int targetHeight, int inputX, int inputY, float *texture, int textureWidth, int textureHeight)
{
int id = blockDim.x * blockDim.y * (blockIdx.y * gridDim.x + blockIdx.x)
+ blockDim.x * threadIdx.y
+ threadIdx.x; // 2D grid of 2D blocks; block dimension x = texture width;
// grid dimension x + block dimension y = texture height
int targetPixels = targetWidth * targetHeight;
int texturePixels = textureWidth * textureHeight;
int idTextureRgb = blockIdx.y;          // channel plane handled by this block
int idTexturePixel = (id - idTextureRgb * texturePixels); // pixel index within the plane
int idTextureY = blockIdx.x * blockDim.y + threadIdx.y;
int idTextureX = threadIdx.x;
if (idTextureRgb < 3) // 3 channels that we will write to
{
// the texture is in BGR format, we want RGB: swap channels 0 and 2
switch (idTextureRgb)
{
case 0: // R
idTextureRgb = 2; // B
break;
case 2: // B
idTextureRgb = 0; // R
break;
}
// if the texture pixel offset by inputX, inputY, lies inside the target
if (idTextureX + inputX < targetWidth &&
idTextureX + inputX >= 0 &&
idTextureY + inputY < targetHeight &&
idTextureY + inputY >= 0)
{
// Destination index in the (possibly swapped) target channel plane.
int tIndex = targetPixels * idTextureRgb + targetWidth * (idTextureY + inputY) + (idTextureX + inputX);
int aIndex = idTexturePixel + 3 * texturePixels; // the A component of the texture
float a = texture[aIndex];
// Standard source-over alpha blend.
target[tIndex] = target[tIndex] * (1.0f - a) + a * texture[id];
}
}
}
12,303 | #include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <math.h>
using namespace std;
#define BlockSize 128
// -------------------------------------------------------------------------- //
// One thread per element of C. For thread tid, iterates over the Bsize
// entries [istart, iend) of its data block and assigns
// C[tid] = A[j]*tid +/- B[j] depending on the parity of ceil(A[j]*tid).
// NOTE(review): every j iteration overwrites C[tid], so only j == iend-1
// survives; this mirrors the CPU reference loop in main, so presumably
// intentional — confirm.
__global__ void calculaC( const float * A, const float * B, float * C, const int NBlocks, const int Bsize ) {
int tid = threadIdx.x + blockDim.x * blockIdx.x;
// Guard: the grid may cover more threads than the NBlocks*Bsize elements.
if( tid < NBlocks * Bsize ) {
int k = tid / Bsize;        // data block this element belongs to
int istart = k * Bsize;     // first index of that block
int iend = istart + Bsize;  // one past the last index
for( int j = istart; j < iend; j++ ) {
float a = A[j] * tid;
if( (int) ceil(a) % 2 == 0 )
C[tid] = a + B[j];
else
C[tid] = a - B[j];
}
}
}
// -------------------------------------------------------------------------- //
// Per-block sum reduction of C into D[blockIdx.x].
// Requires blockDim.x to be a power of two (the halving loop drops odd
// remainders) and blockDim.x * sizeof(float) bytes of dynamic shared memory.
__global__ void calculaD( const float * C, float * D, const int NBlocks, const int Bsize ) {
extern __shared__ float sdata[];
int tid = threadIdx.x;
int i = blockIdx.x * blockDim.x + threadIdx.x;
// Stage one element per thread; pad out-of-range lanes with the additive
// identity 0.
sdata[tid] = ( (i < NBlocks * Bsize) ? C[i] : 0.0f );
__syncthreads();
// Tree reduction: halve the active range each round, summing pairs.
// The barrier is outside the `if`, so all threads reach it.
for( int s = blockDim.x / 2; s > 0; s >>= 1 ) {
if( tid < s ) {
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// Thread 0 publishes the block's partial sum.
if( tid == 0 )
D[blockIdx.x] = sdata[0];
}
// -------------------------------------------------------------------------- //
// Per-block max reduction over C: writes the maximum of this block's slice
// to max[blockIdx.x]. Requires blockDim.x to be a power of two and
// blockDim.x * sizeof(float) bytes of dynamic shared memory.
__global__ void calculaMaxC( const float * C, float * max, const int NBlocks, const int Bsize ) {
extern __shared__ float sdata[];
int tid = threadIdx.x;
int i = blockIdx.x * blockDim.x + threadIdx.x;
// BUG FIX: out-of-range lanes were padded with 0.0f, which corrupts the
// maximum whenever all valid values are negative. Pad with the most
// negative finite float (the identity element for max) instead.
sdata[tid] = ( (i < NBlocks * Bsize) ? C[i] : -3.402823466e+38f );
__syncthreads();
// Tree reduction keeping the larger of each pair; barrier outside the
// divergent branch so every thread reaches it.
for( int s = blockDim.x / 2; s > 0; s >>= 1 ) {
if( tid < s )
if( sdata[tid] < sdata[tid + s] )
sdata[tid] = sdata[tid + s];
__syncthreads();
}
// Thread 0 publishes the block's maximum.
if( tid == 0 )
max[blockIdx.x] = sdata[0];
}
// -------------------------------------------------------------------------- //
int main( int argc, char *argv[] ) {
    // Driver: computes C from A and B, the per-block sums D, and the maximum
    // of C, on both CPU and GPU, then reports timings and the max error.
    int Bsize, NBlocks;
    if( argc != 3 ) {
        cout << "Uso: " << argv[0] << " <Num_bloques> <Tam_bloque>" << endl;
        return(0);
    }
    NBlocks = atoi( argv[1] );
    Bsize = atoi( argv[2] );
    const int N = Bsize * NBlocks;
    const int size = N * sizeof(float);
    const int dsize = NBlocks * sizeof(float);
    // Host buffers
    float *A, *B, *Ccpu, *Dcpu, *Cgpu, *Dgpu, *mxgpu;
    A = new float[N];
    B = new float[N];
    Ccpu = new float[N];
    Dcpu = new float[NBlocks];
    Cgpu = new float[N];
    Dgpu = new float[NBlocks];
    float mxcpu;
    mxgpu = new float[N];
    // Initialize input vectors A and B
    for( int i = 0; i < N; i++ ) {
        A[i] = 5;
        B[i] = 5;
    }
    // ------------------------------- CPU PHASE ------------------------------ //
    double t1cpu = clock();
    for( int k = 0; k < NBlocks; k++ ) {
        int istart = k * Bsize;
        int iend = istart + Bsize;
        Dcpu[k] = 0.0;
        for( int i = istart; i < iend; i++ ) {
            Ccpu[i] = 0.0;
            for( int j = istart; j < iend; j++ ) {
                float a = A[j] * i;
                if( (int) ceil(a) % 2 == 0 )
                    Ccpu[i] = a + B[j];
                else
                    Ccpu[i] = a - B[j];
            }
            Dcpu[k] += Ccpu[i];
            // BUG FIX: the running maximum was seeded only when i == 1, so
            // the very first iteration (i == 0) read mxcpu uninitialized.
            // Seed from the first element instead.
            mxcpu = ( i == 0 ) ? Ccpu[i] : max( Ccpu[i], mxcpu );
        }
    }
    double t2cpu = clock();
    double tcpu = ( t2cpu - t1cpu ) / CLOCKS_PER_SEC;
    // ------------------------------- GPU PHASE ------------------------------ //
    // Device buffers
    float *a_d, *b_d, *c_d, *d_d, *max_d;
    cudaMalloc( (void **) &a_d, size );
    cudaMalloc( (void **) &b_d, size );
    cudaMalloc( (void **) &c_d, size );
    cudaMalloc( (void **) &d_d, dsize );
    cudaMalloc( (void **) &max_d, size );
    // Copy inputs host -> device
    cudaMemcpy( a_d, A, size, cudaMemcpyHostToDevice );
    cudaMemcpy( b_d, B, size, cudaMemcpyHostToDevice );
    // One CUDA block per data block, Bsize threads each. The reduction
    // kernels assume Bsize is a power of two.
    dim3 threadsPerBlockC( Bsize, 1 );
    dim3 numBlocksC( NBlocks, 1 );
    double t1gpu = clock();
    calculaC<<<numBlocksC, threadsPerBlockC>>>( a_d, b_d, c_d, NBlocks, Bsize );
    int smemSize = Bsize * sizeof(float);
    calculaD<<<numBlocksC, threadsPerBlockC, smemSize>>>( c_d, d_d, NBlocks, Bsize );
    calculaMaxC<<<numBlocksC, threadsPerBlockC, smemSize>>>( c_d, max_d, NBlocks, Bsize );
    // BUG FIX: kernel launches are asynchronous; without this sync the stop
    // clock measured only launch overhead, not kernel execution.
    cudaDeviceSynchronize();
    double t2gpu = clock();
    // Copy results device -> host
    cudaMemcpy( Cgpu, c_d, size, cudaMemcpyDeviceToHost );
    cudaMemcpy( Dgpu, d_d, dsize, cudaMemcpyDeviceToHost );
    cudaMemcpy( mxgpu, max_d, size, cudaMemcpyDeviceToHost );
    // Final reduction of the per-block maxima on the host
    float mx2 = 0.0f;
    for( int i = 0; i < NBlocks; i++ )
        if( mx2 < mxgpu[i] )
            mx2 = mxgpu[i];
    double tgpu = ( t2gpu - t1gpu ) / CLOCKS_PER_SEC;
    // ------------------------------ RESULTS --------------------------------- //
    cout << endl << "///// RESULTADOS EN CPU /////" << endl;
    cout << "------------------------------------" << endl;
    cout << "------------------------------------" << endl;
    cout << "------------------------------------" << endl;
    cout << endl << "El valor máximo en C es: " << mxcpu << endl;
    cout << endl << "Tiempo gastado en CPU: " << tcpu << endl << endl;
    cout << endl << "///// RESULTADOS EN GPU /////" << endl;
    cout << "------------------------------------" << endl;
    cout << "------------------------------------" << endl;
    cout << "------------------------------------" << endl;
    cout << endl << "El valor máximo en C es: " << mx2 << endl;
    cout << "Tiempo gastado en GPU: " << tgpu << endl << endl;
    cout << "Ganancia = " << tcpu / tgpu << endl;
    cout << "Error en el máximo = " << mxcpu - mx2 << endl << endl;
    // BUG FIX: release device and host memory (previously leaked).
    cudaFree( a_d );
    cudaFree( b_d );
    cudaFree( c_d );
    cudaFree( d_d );
    cudaFree( max_d );
    delete[] A;
    delete[] B;
    delete[] Ccpu;
    delete[] Dcpu;
    delete[] Cgpu;
    delete[] Dgpu;
    delete[] mxgpu;
}
|
12,304 | #include <stdio.h>
// Device helper: returns the sum of the two operands.
__device__ int addition (int a, int b)
{
    return a + b;
}
// Demo kernel: prints greetings from the device and the result of a
// device-side helper call. Intended for a <<<1,1>>> launch.
__global__ void device_greetings(void)
{
    printf("Hello, world from the device!\n");
    char n[] = "big!\n";
    // BUG FIX (hygiene): never pass a non-literal buffer as the format
    // string — use an explicit "%s" conversion instead.
    printf("%s", n);
    int z;
    z = addition(5,3);
    printf("The result is %d\n", z);
}
int main(void)
{
    // greet from the host
    printf("Hello, world from the host!\n");
    // launch a kernel with a single thread to greet from the device
    device_greetings<<<1,1>>>();
    // BUG FIX: kernel launches return no status directly; query the launch
    // error, then synchronize so device printf output is flushed and any
    // execution error is surfaced before exiting.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "Kernel launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    err = cudaDeviceSynchronize();
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    return 0;
}
12,305 | // Kernel code for Gaussian Mixture Model Expectation Maximization.
//
// Andrew Harp (andrew.harp@gmail.com)
// http://andrewharp.com/gmmcuda
// This effects the maximum dimensionality of your data.
// Has to be hardcoded because it affects memory allocation.
// Due to the way I clear out the lower triangle of the cholesky (and
// possibly other) places MAX_DIM * MAX_DIM needs to be less than
// BLOCK_SIZE.
#define MAX_DIM 16
// You can change this, but only to a power of 2.
#define BLOCK_SIZE 256
// Estep-normalize is broken down into an arbitrary number of chunks.
#define NUM_CHUNKS 32
#include "cuda_runtime.h"
///////////////////////////////////////////////////////////////////////////
// In-place Cholesky factorization of the row-major ndim x ndim matrix `el`
// (in shared memory at the call site). Thread 0 performs the factorization
// serially; afterwards the whole block cooperates to zero the entries above
// the diagonal, leaving a clean lower-triangular factor L with A = L*L^T.
// Requires blockDim.x >= ndim*ndim (see the MAX_DIM note at the top of the
// file) and no check that the input is positive-definite (the failure
// branch is commented out).
__device__ void cholesky(float* el, const unsigned int ndim) {
const unsigned int tid = threadIdx.x;
// Dunno how to parallelize this part... (serial Crout-style factorization)
if (tid == 0) {
float sum = 0;
int i, j, k;
//if (el.ncols() != n) throw("need square matrix");
for (i=0; i<ndim; i++) {
for (j=i; j<ndim; j++) {
sum = el[__umul24(i, ndim)+j];
for (k=i-1; k >= 0; k--) {
sum -= el[__umul24(i, ndim)+k] * el[__umul24(j, ndim)+k];
}
if (i == j) {
//if (sum <= 0.0)
// throw("Cholesky failed");
el[__umul24(i, ndim)+i] = sqrt(sum);
} else {
el[__umul24(j, ndim)+i] = sum/el[__umul24(i, ndim)+i];
}
}
}
}
__syncthreads();
// Zero the part ABOVE the diagonal: tid maps to (row = tid/ndim,
// col = tid%ndim) and the condition row < col selects the upper triangle.
if ((tid/ndim) < (tid%ndim)) {
el[__umul24((tid/ndim), ndim) + (tid%ndim)] = 0.0f;
}
}
///////////////////////////////////////////////////////////////////////////
// Log-determinant from a Cholesky factor: det(A) = prod(L_ii)^2, so
// log det(A) = 2 * sum(log L_ii). Uses the fast __logf intrinsic.
__device__ float logdet(float* el, const unsigned int ndim) {
  float sum = 0.0f;
  for (unsigned int i=0; i<ndim; ++i) {
    sum += __logf(el[(i*ndim)+i]);
  }
  // BUG FIX: `2.*sum` promoted the expression to double on the device;
  // use a float literal to keep the math in single precision.
  return 2.0f*sum;
}
///////////////////////////////////////////////////////////////////////////
// Block-wide butterfly (XOR-pairing) sum over the shared array `data`.
// ndata MUST be a power of 2 and all ndata threads must call this together
// (the barriers are block-wide). Unlike a tree reduction, every thread
// returns the full total — no broadcast step is needed afterwards.
__device__ float parallelSum(float* data, const unsigned int ndata) {
const unsigned int tid = threadIdx.x;
float t;
__syncthreads();
// Butterfly sum. ndata MUST be a power of 2.
// Two barriers per round: one after the read (so no lane overwrites a
// value its partner has not yet read) and one after the write.
for(unsigned int bit = ndata >> 1; bit > 0; bit >>= 1) {
t = data[tid] + data[tid^bit]; __syncthreads();
data[tid] = t; __syncthreads();
}
return data[tid];
}
///////////////////////////////////////////////////////////////////////////
// Block-cooperative copy of ndata floats from fromArr to toArr: the threads
// stride through the array together, each moving one element per pass.
__device__ void copyArray(float* fromArr, float* toArr, unsigned int ndata=BLOCK_SIZE) {
  for (unsigned int base = 0; base < ndata; base += blockDim.x) {
    const unsigned int idx = base + threadIdx.x;
    if (idx < ndata) {
      toArr[idx] = fromArr[idx];
    }
  }
}
///////////////////////////////////////////////////////////////////////////
// Parallel reduction, for when all you want is the sum of a certain
// quantity computed for every 1 to N. CODE should be something in terms
// of n. The resulting sum will be placed in RESULT.
// tmp_buff, base_off, RESULT, and n must be previously defined, however
// they will be overwritten during the execution of the macro.
// REDUCE(N, CODE, RESULT): block-cooperative sum of CODE evaluated for every
// n in [0, N). Works in BLOCK_SIZE-sized slices: each slice is staged into
// the shared tmp_buff and folded in with parallelSum (so all BLOCK_SIZE
// threads must execute the macro). The expansion site must declare tid, n,
// base_off and __shared__ float tmp_buff[BLOCK_SIZE]; n, base_off and
// tmp_buff are clobbered. (No // comments inside the macro body — they
// would swallow the backslash line continuations.)
#define REDUCE(N, CODE, RESULT) \
base_off = 0; \
RESULT = 0.0f; \
while (base_off + BLOCK_SIZE < N) { \
n = base_off + tid; \
tmp_buff[tid] = CODE; \
RESULT += parallelSum(tmp_buff, BLOCK_SIZE); \
base_off += BLOCK_SIZE; \
} \
n = base_off + tid; \
if (n < N) {tmp_buff[tid] = CODE;} \
else {tmp_buff[tid] = 0.0f;} \
RESULT += parallelSum(tmp_buff, BLOCK_SIZE);
///////////////////////////////////////////////////////////////////////////
// This function computes for a single cluster k.
// GMM E-step for one cluster k (= blockIdx.x) of one restart try
// (= blockIdx.y): factorizes the cluster's covariance, then writes the
// UNNORMALIZED log-likelihood of every data point under cluster k into
// _resp_. Data is laid out dimension-major (_data_[dim*num_data + n]).
// Launch with BLOCK_SIZE threads; requires num_dims <= MAX_DIM.
__global__ void estep_kernel(float* _resp_, float* _frac_,
float* _data_, float* _means_,
float* _sig_, float* _lndets_,
const unsigned int num_clusts,
const unsigned int num_dims,
const unsigned int num_data) {
const unsigned int try_num = blockIdx.y;
const unsigned int tid = threadIdx.x;
const unsigned int k = blockIdx.x;
const unsigned int sigsize = __umul24(num_dims, num_dims);
// Base offsets of this try (and cluster) within each flat array.
const unsigned int rb = __umul24(try_num, num_clusts) * num_data + (k*num_data);
const unsigned int mb = __umul24(try_num, num_clusts) * num_dims;
const unsigned int sb = __umul24(try_num, num_clusts) * sigsize;
const unsigned int fb = __umul24(try_num, num_clusts);
const unsigned int lndb = __umul24(try_num, num_clusts);
unsigned int n, base_off;
float tmp, sum;
float v[MAX_DIM];
__shared__ float lndet_k;  // log-determinant of this cluster's covariance
__shared__ float frac_k;   // this cluster's mixing fraction (prior)
__shared__ float chol[MAX_DIM * MAX_DIM]; // Cholesky factor of the covariance
__syncthreads();
// Stage the covariance into shared memory and factorize it in place.
copyArray(_sig_ + sb + __umul24(k, sigsize), chol, sigsize);
cholesky(chol, num_dims);
__syncthreads();
if (tid == 0) {
frac_k = _frac_[fb + k];
lndet_k = logdet(chol, num_dims);
_lndets_[lndb + k] = lndet_k;
}
__syncthreads();
// Loop through data, BLOCK_SIZE points at a time.
for (base_off=0; base_off < num_data; base_off += BLOCK_SIZE) {
n = base_off + tid;
sum=0.0f;
//if (b.size() != n || y.size() != n) throw("bad lengths");
if (n < num_data) {
// Forward-substitution solve v = L^-1 (x - mu); then sum = v.v is the
// squared Mahalanobis distance of point n from the cluster mean.
for (unsigned int i=0; i<num_dims; ++i) {
tmp = _data_[__umul24(i, num_data) + n] - _means_[mb + __umul24(k, num_dims)+i];
for (unsigned int j=0; j<i; j++) {
tmp -= chol[__umul24(i, num_dims)+j] * v[j];
}
v[i] = tmp/chol[__umul24(i, num_dims)+i];
sum += v[i] * v[i];
}
// Assign likelihood of this data being in this cluster (log-domain,
// normalization constant omitted — handled in the normalize kernel).
_resp_[rb + n] = -0.5f*(sum + lndet_k) + __logf(frac_k);
}
} // (n < num_data)
}
///////////////////////////////////////////////////////////////////////////
// Loop through data again, normalizing probabilities.
// We are looping across clusters here as well as data, since every data
// point needs to know its potential parents.
// E-step normalization: converts the per-cluster log-likelihoods in _resp_
// into posterior responsibilities using the log-sum-exp trick (max is
// subtracted before exponentiation for numerical stability), and
// accumulates each chunk's contribution to the total log-likelihood into
// _loglike_[chunk]. Each block handles one chunk of the data.
__global__ void estep_normalize_kernel(float* _resp_, float* _frac_,
float* _data_, float* _means_,
float* _sig_, float* _lndets_,
float* _loglike_,
const unsigned int num_clusts,
const unsigned int num_dims,
const unsigned int num_data) {
const unsigned int try_num = blockIdx.y;
const unsigned int num_chunks = gridDim.x;
const unsigned int chunk_num = blockIdx.x;
const unsigned int tid = threadIdx.x;
// We're only handling so many data points per block in this kernel, since
// data is independant of other data here.
const unsigned int n_per_block = ceil((float)num_data / (float)num_chunks);
const unsigned int start_off = __umul24(n_per_block, chunk_num);
const unsigned int end_off = min(start_off + n_per_block, num_data);
// Base offsets for this try.
const unsigned int rb = __umul24(try_num, num_clusts) * num_data;
const unsigned int lb = __umul24(try_num, num_chunks);
unsigned int n, base_off, k;
float sum, max, tmp;
// Per-thread partial log-likelihood accumulators.
__shared__ float loglike[BLOCK_SIZE];
loglike[tid] = 0.0f;
__syncthreads();
// Loop through this chunk's data, BLOCK_SIZE points at a time.
for (base_off = start_off; base_off < end_off; base_off += BLOCK_SIZE) {
n = base_off + tid;
if (n < end_off) {
max = -99.9e30f;
// Find cluster with maximum likelihood for this data point.
for (k=0; k<num_clusts; ++k) {
tmp = _resp_[rb + (k*num_data) + n];
if (tmp > max) {
max = tmp;
}
}
// Sum marginal probabilities (shifted by max to avoid underflow).
sum = 0.0f;
for (k=0; k<num_clusts; ++k) {
sum += __expf(_resp_[rb + (k*num_data) + n] - max);
}
// Assign probabilities of point belonging to each cluster.
// tmp = log of the marginal likelihood of point n.
tmp = max + __logf(sum);
for (k = 0; k < num_clusts; ++k) {
_resp_[rb + (k*num_data) + n] =
__expf(_resp_[rb + (k*num_data) + n] - tmp);
}
loglike[tid] += tmp;
}
}
// Fold the per-thread partials into one value; all threads participate.
tmp = parallelSum(loglike, BLOCK_SIZE);
if (tid == 0) {
_loglike_[lb + chunk_num] = tmp;
}
}
///////////////////////////////////////////////////////////////////////////
// GMM M-step (fractions and means): each block handles one (try, cluster)
// pair via blockIdx.x and one dimension m via blockIdx.y. Recomputes the
// cluster's total responsibility weight, its mixing fraction, and the m-th
// component of its mean. Uses the REDUCE macro, so tid/n/base_off/tmp_buff
// are declared here for it. Launch with BLOCK_SIZE threads.
__global__ void mstep_kernel(float* _resp_, float* _frac_,
float* _data_, float* _means_,
float* _sig_,
const unsigned int num_clusts,
const unsigned int num_dims,
const unsigned int num_data) {
const unsigned int try_num = blockIdx.x / num_clusts;
const unsigned int tid = threadIdx.x;
// Every block is mapped to cluster and dimension.
const unsigned int k = blockIdx.x % num_clusts;
const unsigned int m = blockIdx.y;
// Base offsets for this try (and cluster).
const unsigned int rb = __umul24(try_num, num_clusts) * num_data + (k*num_data);
const unsigned int mb = __umul24(try_num, num_clusts) * num_dims;
const unsigned int fb = __umul24(try_num, num_clusts);
unsigned int n, base_off;
float wgt_k, sum_k_m;
__shared__ float tmp_buff[BLOCK_SIZE];
// Sum all weight assigned to cluster k.
REDUCE(num_data, _resp_[rb + n], wgt_k);
__syncthreads();
// Update fractional prior. Only one block per cluster (m == 0) writes it
// to avoid redundant stores from the other dimension blocks.
if (tid == 0 && m == 0) {
_frac_[fb + k] = wgt_k / (float)num_data;
}
__syncthreads();
// Only concerned about dimension m in this block.
// Sum will become the sum of movement in that direction for this cluster.
REDUCE(num_data, _resp_[rb + n] * _data_[(m*num_data)+n], sum_k_m);
__syncthreads();
// New mean component: responsibility-weighted average along dimension m.
if (tid == 0) {
_means_[mb + __umul24(k, num_dims) + m] = sum_k_m / wgt_k;
}
}
///////////////////////////////////////////////////////////////////////////
// GMM M-step (covariances): each block computes one entry (m, j) of one
// cluster's covariance matrix, for one try. Uses the REDUCE macro, hence
// the tid/n/base_off/tmp_buff declarations. Launch with BLOCK_SIZE threads.
// NOTE(review): the reduction uses raw _data_[db_j + n] without subtracting
// the j-th mean. This is algebraically equivalent to the full centered
// product ONLY when the means are the exact responsibility-weighted
// averages (the cross term then vanishes) — confirm that assumption holds
// at every call site.
__global__ void mstep_sigma_kernel(float* _resp_, float* _frac_,
float* _data_, float* _means_,
float* _sig_,
const unsigned int num_clusts,
const unsigned int num_dims,
const unsigned int num_data) {
// Every block is mapped to cluster and dimension pair.
const unsigned int try_num = blockIdx.x / num_clusts;
const unsigned int k = blockIdx.x % num_clusts;
const unsigned int m = blockIdx.y / num_dims;
const unsigned int j = blockIdx.y % num_dims;
const unsigned int tid = threadIdx.x;
const unsigned int sigsize = __umul24(num_dims, num_dims);
// Base offsets for this try (and cluster).
const unsigned int rb = __umul24(try_num, num_clusts) * num_data + (k*num_data);
const unsigned int mb = __umul24(try_num, num_clusts) * num_dims;
const unsigned int sb = __umul24(try_num, num_clusts) * sigsize;
const unsigned int fb = __umul24(try_num, num_clusts);
const unsigned int db_m = (m*num_data); // start of dimension m's data plane
const unsigned int db_j = (j*num_data); // start of dimension j's data plane
unsigned int n, base_off;
__shared__ float tmp_buff[BLOCK_SIZE];
__shared__ float wgt_k;     // total responsibility weight of cluster k
__shared__ float mean_k_m;  // m-th component of cluster k's mean
if (tid == 0) {
wgt_k = _frac_[fb + k] * num_data;
mean_k_m = _means_[mb + __umul24(k, num_dims) + m];
}
__syncthreads();
float sum;
// Responsibility-weighted sum of (x_m - mu_m) * x_j over all data points.
REDUCE(num_data,
_resp_[rb + n] *
(_data_[db_m + n] - mean_k_m) *
(_data_[db_j + n]),
sum);
// Set this block's Sigma val.
if (tid == 0) {
_sig_[sb +
(__umul24(k, sigsize)) +
(__umul24(m, num_dims)) + j] = sum / wgt_k;
}
}
|
12,306 | #include <stdio.h>
#include <iostream>
#include <assert.h>
#include <cuda.h>
#include <time.h>
#define TILE_WIDTH 32
using namespace std;
// Multiplicacion con shared mem
// Tiled matrix multiply using shared memory:
// P (height1 x width2) = M (height1 x width1) * N (width1 x width2).
// Must be launched with blockDim == (TILE_WIDTH, TILE_WIDTH), since each
// thread stages exactly one element of each shared tile.
__global__ void matrixMulKernelTiled(float *d_M, float *d_N, float *d_P, int width1, int height1, int width2) {
    __shared__ float ds_M[TILE_WIDTH][TILE_WIDTH];
    __shared__ float ds_N[TILE_WIDTH][TILE_WIDTH];
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int row = by * TILE_WIDTH + ty;
    int col = bx * TILE_WIDTH + tx;
    float Pvalue = 0;
    // BUG FIX: ceil-divide so the final partial tile is processed when
    // width1 is not a multiple of TILE_WIDTH. The old `width1 / TILE_WIDTH`
    // integer division skipped it entirely (e.g. width1 = 10 ran zero
    // iterations, leaving P all zeros).
    int numTiles = (width1 + TILE_WIDTH - 1) / TILE_WIDTH;
    for (int p = 0; p < numTiles; p++) {
        // Stage one tile of M; zero-fill out-of-range elements.
        if (row < height1 and (p * TILE_WIDTH + tx) < width1) {
            ds_M[ty][tx] = d_M[row * width1 + p * TILE_WIDTH + tx];
        } else {
            ds_M[ty][tx] = 0.0;
        }
        // Stage one tile of N; zero-fill out-of-range elements.
        // BUG FIX: the column bound for N is width2, not width1.
        if ((p * TILE_WIDTH + ty) < width1 and col < width2) {
            ds_N[ty][tx] = d_N[(p * TILE_WIDTH + ty) * width2 + col];
        } else {
            ds_N[ty][tx] = 0.0;
        }
        __syncthreads();
        // Accumulate the partial dot product over this tile pair.
        if (row < height1 and col < width2)
            for (int k = 0; k < TILE_WIDTH; k++) {
                Pvalue += ds_M[ty][k] * ds_N[k][tx];
            }
        // Barrier before the next iteration overwrites the tiles; reached
        // by all threads (it is outside the divergent guard).
        __syncthreads();
    }
    // Write the result.
    if (row < height1 and col < width2)
        d_P[row * width2 + col] = Pvalue;
}
//Multiplicacion secuencial de referencia en CPU:
// Sequential CPU reference multiply:
// P (height1 x width2) = M (height1 x width1) * N (width1 x width2).
void MatrixMulCPU(float *M, float *N, float *P, int width1, int height1, int width2) {
    // BUG FIX: the accumulator was declared `int`, truncating every
    // fractional product; accumulate in float to match the GPU kernel.
    float sum = 0.0f;
    for (int i = 0; i < height1; i++) {
        for (int j = 0; j < width2; j++) {
            sum = 0.0f;
            // Dot product of row i of M with column j of N.
            for (int k = 0; k < width1; k++)
                sum += M[i * width1 + k] * N[k * width2 + j];
            P[i * width2 + j] = sum;
        }
    }
}
//Inicializa las matrices a multiplicar.
// Fills a width x heigth matrix with ones; always returns 0.
int initValues(float *data, int width, int heigth){
    const int total = width * heigth;
    for (int idx = 0; idx < total; ++idx)
        data[idx] = 1.0;
    return 0;
}
int main()
{
    // Benchmarks CPU vs tiled-GPU matrix multiplication over 5 rounds.
    clock_t start, end;
    float *h_M, *h_N, *h_P, *h_P_d; // host matrices
    float *d_M, *d_N, *d_P;         // device matrices
    // Matrix shapes: M is heigth1 x width1, N is heigth2 x width2.
    int heigth1 = 10;
    int width1 = 10;
    int heigth2 = 10;
    int width2 = 15;
    cudaError_t error = cudaSuccess;
    int size1 = width1 * heigth1 * sizeof(float); // bytes of matrix 1
    int size2 = width2 * heigth2 * sizeof(float); // bytes of matrix 2
    int size3 = width2 * heigth1 * sizeof(float); // bytes of the result
    // Allocate host matrices
    h_M = (float*)malloc(size1);
    h_N = (float*)malloc(size2);
    h_P = (float*)malloc(size3);
    h_P_d = (float*)malloc(size3);
    // BUG FIX: check every host allocation, not only h_P_d.
    if (h_M == NULL || h_N == NULL || h_P == NULL || h_P_d == NULL)
        return 0;
    // Initialize inputs
    initValues(h_M, width1, heigth1);
    initValues(h_N, width2, heigth2);
    // Allocate device matrices
    error = cudaMalloc((void**)&d_M, size1);
    if(error != cudaSuccess){
        printf("Error reservando memoria para d_M");
        exit(0);
    }
    error = cudaMalloc((void**)&d_N, size2);
    if(error != cudaSuccess){
        printf("Error reservando memoria para d_N");
        exit(0);
    }
    error = cudaMalloc((void**)&d_P, size3);
    if(error != cudaSuccess){
        printf("Error reservando memoria para d_P");
        exit(0);
    }
    // Copy inputs host -> device
    error = cudaMemcpy(d_M, h_M, size1, cudaMemcpyHostToDevice);
    if(error != cudaSuccess){
        printf("Error copiando datos a d_M");
        exit(0);
    }
    error = cudaMemcpy(d_N, h_N, size2, cudaMemcpyHostToDevice);
    if(error != cudaSuccess){
        printf("Error copiando datos a d_N");
        exit(0);
    }
    // BUG FIX: the tiled kernel stages TILE_WIDTH x TILE_WIDTH shared tiles
    // (one element per thread) and therefore requires blockDim ==
    // (TILE_WIDTH, TILE_WIDTH); the old blockSize of 1 left almost the
    // whole tile uninitialized, producing garbage results.
    int blockSize = TILE_WIDTH;
    dim3 dimBlock(blockSize, blockSize, 1);
    dim3 dimGrid(ceil(width2 / float(blockSize)), ceil(heigth1 / float(blockSize)), 1);
    // Timing loop
    for(int x = 1; x <= 5; x++)
    {
        printf ("Ciclo numero %d\n", x);
        // CPU multiplication
        start = clock();
        MatrixMulCPU(h_M, h_N, h_P, width1, heigth1, width2);
        end = clock();
        double cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
        printf("Tiempo en CPU: %.10f\n", cpu_time_used);
        // GPU multiplication. The blocking cudaMemcpy also synchronizes
        // with the asynchronous kernel launch, so the timing is valid.
        start = clock();
        matrixMulKernelTiled<<<dimGrid, dimBlock>>>(d_M, d_N, d_P, width1, heigth1, width2);
        cudaMemcpy(h_P_d, d_P, size3, cudaMemcpyDeviceToHost);
        end = clock();
        double gpu_time_used = double(end - start) / CLOCKS_PER_SEC;
        printf("Tiempo en GPU: %.10f\n", gpu_time_used);
    }
    // Release device memory
    cudaFree(d_M);
    cudaFree(d_N);
    cudaFree(d_P);
    // BUG FIX: release host buffers (previously leaked).
    free(h_M);
    free(h_N);
    free(h_P);
    free(h_P_d);
    return 0;
}
|
12,307 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// CUDA .
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
//, GPU
// Element-wise vector addition on the device: c[i] = a[i] + b[i].
// Intended for a single-block launch where blockDim.x equals the array
// length, so the thread index alone selects the element.
__global__ void addKernel(int *c, const int *a, const int *b)
{
    const int idx = threadIdx.x;
    c[idx] = a[idx] + b[idx];
}
int main()
{
// Fixed-size demo inputs for the GPU vector addition.
const int arraySize = 5; // number of elements (and device threads)
const int a[arraySize] = { 1, 2, 3, 4, 5 };    // first operand
const int b[arraySize] = { 10, 20, 30, 40, 50 }; // second operand
int c[arraySize] = { 0 }; // result, filled by the device
// Run the addition on the GPU via the helper, which owns all device
// allocation, transfer, launch, and cleanup.
cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
// Print the computed result.
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// Reset the device before exiting so profiling/tracing tools can flush
// their data.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
// CUDA .
// Helper: adds two integer vectors of `size` elements on the GPU and writes
// the result into `c`. Handles device selection, allocation, host<->device
// transfers, the kernel launch, and cleanup. Returns the first CUDA error
// encountered (cudaSuccess on success); on any failure it jumps to the
// shared Error label so device buffers are always freed.
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
cudaError_t cudaStatus;
// Step 0: select the GPU to run on.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Step 1: allocate device buffers (output first, then the two inputs).
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Device buffer for input a.
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Device buffer for input b.
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Step 2: copy the input vectors from host to device.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Input b.
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Step 3: launch the kernel — one block of `size` threads, one thread
// per element.
addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
// Launches return no status directly; query the launch error explicitly.
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// Step 4: wait for the kernel to finish and surface any execution error.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Step 5: copy the result vector from device back to host.
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error: // shared cleanup path (reached on success and on every failure)
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
|
12,308 | /**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// System includes
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#define BLOCK_SIZE 16
#define MATRIX_DIM 256
#define POS(i, j) (((i) * MATRIX_DIM) + j)
// CUDA API error checking macro
// Aborts with a source-location diagnostic when a CUDA API call reports
// failure; no-op on cudaSuccess. Invoked via the cudaCheck() macro, which
// supplies __FILE__ and __LINE__.
static void handleError(cudaError_t err,
                        const char *file,
                        int line ) {
  if (err == cudaSuccess)
    return;
  printf("%s in %s at line %d\n", cudaGetErrorString(err),
         file, line );
  exit(EXIT_FAILURE);
}
#define cudaCheck( err ) (handleError(err, __FILE__, __LINE__ ))
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
// One thread per output element of C = A * B (all MATRIX_DIM x MATRIX_DIM,
// indexed through the POS macro). No bounds guard: the launch in main sizes
// the grid as MATRIX_DIM / BLOCK_SIZE per axis, which covers the matrix
// exactly since MATRIX_DIM (256) is a multiple of BLOCK_SIZE (16).
// NOTE(review): A[POS(posx, i)] makes adjacent threads (posx varies with
// threadIdx.x) stride MATRIX_DIM floats apart — uncoalesced; acceptable for
// a demo, but a tiled shared-memory version would be much faster.
__global__ void matrixMulCUDA(float *C, float *A, float *B)
{
int posx = blockIdx.x * BLOCK_SIZE + threadIdx.x;
int posy = blockIdx.y * BLOCK_SIZE + threadIdx.y;
// Accumulate the dot product of row posx of A with column posy of B.
float res = 0.0;
for (int i = 0; i < MATRIX_DIM; ++i) {
res += A[POS(posx, i)] * B[POS(i, posy)];
}
// Write the block sub-matrix to device memory;
// each thread writes one element
C[POS(posx, posy)] = res;
}
// Fills the buffer with pseudo-random values in [0, 1] (rand() scaled by
// RAND_MAX). Uses whatever seed the C library currently holds.
void randomInit(float *data, int size)
{
    for (int idx = 0; idx < size; ++idx)
        data[idx] = ((double)rand()/(double)RAND_MAX);
}
// CPU reference for one output element: the dot product of row `row` of A
// with column `column` of B, accumulated in double for a more accurate
// check value than the float GPU result.
double computeMatrixMulPos(int row, int column, float* A, float* B) {
    double acc = 0.0;
    for (int k = 0; k < MATRIX_DIM; k++)
        acc += A[POS(row, k)] * B[POS(k, column)];
    return acc;
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
int main()
{
    // Multiplies two random MATRIX_DIM x MATRIX_DIM matrices on the GPU and
    // verifies every element against a double-precision CPU reference.
    // Allocate host memory for matrices A, B and C
    size_t matrix_mem_size = sizeof(float) * MATRIX_DIM * MATRIX_DIM;
    float *h_A = (float *)malloc(matrix_mem_size);
    float *h_B = (float *)malloc(matrix_mem_size);
    float *h_C = (float *) malloc(matrix_mem_size);
    if (h_A == NULL || h_B == NULL || h_C == NULL)
    {
        fprintf(stderr, "Failed to allocate host matrices!\n");
        exit(EXIT_FAILURE);
    }
    // Initialize host memory with uniform random values
    randomInit(h_A, MATRIX_DIM * MATRIX_DIM);
    randomInit(h_B, MATRIX_DIM * MATRIX_DIM);
    // Allocate device memory
    float *d_A, *d_B, *d_C;
    cudaCheck(cudaMalloc((void **) &d_A, matrix_mem_size));
    cudaCheck(cudaMalloc((void **) &d_B, matrix_mem_size));
    cudaCheck(cudaMalloc((void **) &d_C, matrix_mem_size));
    // copy host memory to device
    cudaCheck(cudaMemcpy(d_A, h_A, matrix_mem_size, cudaMemcpyHostToDevice));
    cudaCheck(cudaMemcpy(d_B, h_B, matrix_mem_size, cudaMemcpyHostToDevice));
    // Setup execution parameters (MATRIX_DIM is a multiple of BLOCK_SIZE,
    // so the grid covers the matrix exactly)
    dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
    dim3 grid(MATRIX_DIM / threads.x, MATRIX_DIM / threads.y);
    printf("Computing result using CUDA Kernel...\n");
    matrixMulCUDA<<< grid, threads >>>(d_C, d_A, d_B);
    cudaCheck(cudaPeekAtLastError());
    // Copy result from device to host (the blocking copy also synchronizes
    // with the asynchronous launch above)
    cudaCheck(cudaMemcpy(h_C, d_C, matrix_mem_size, cudaMemcpyDeviceToHost));
    printf("Checking computed result for correctness: ");
    bool correct = true;
    double eps = 1.e-3; // tolerance for float-vs-double accumulation differences
    for (int row = 0; row < MATRIX_DIM; row++)
        for (int column = 0; column < MATRIX_DIM; column++) {
            double expected = computeMatrixMulPos(row, column, h_A, h_B);
            // BUG FIX: the old check called abs(), which with only
            // <stdlib.h> included resolves to the integer abs() and
            // truncates any error below 1.0 to zero — the test could never
            // fail. Compare the signed difference against +/- eps instead
            // (no extra header needed).
            double diff = (double)h_C[POS(row, column)] - expected;
            if (diff > eps || diff < -eps) {
                printf("ERROR: position (%d, %d) %f != %f\n", row,
                       column, h_C[POS(row, column)], expected);
                correct = false;
            }
        }
    printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
    // Clean up memory
    free(h_A);
    free(h_B);
    free(h_C);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    if (correct) {
        return EXIT_SUCCESS;
    } else {
        return EXIT_FAILURE;
    }
}
|
12,309 | #include "includes.h"
// Final CFL pass, executed by a single thread: takes the minimum of the
// per-ring candidate timesteps in newDT[1..nrad) and DT1D[0..nrad-1), caps
// it at DeltaT, and stores ceil(DeltaT / min) into CFL[0].
// Note: DT2D, Vmoy, invRmed and nsec are unused here but kept so the
// signature matches the other kernels in the family.
__global__ void ConditionCFLKernel2D3 (double *newDT, double *DT2D, double *DT1D, double *Vmoy, double *invRmed, int *CFL, int nsec, int nrad, double DeltaT)
{
    int j = threadIdx.x + blockDim.x*blockIdx.x;
    if (j != 0)
        return;              // only the first thread does the scan
    double newdt = newDT[1];
    for (int i = 2; i < nrad; i++) {
        if (newDT[i] < newdt)
            newdt = newDT[i];
    }
    for (int i = 0; i < nrad-1; i++) {
        if (DT1D[i] < newdt)
            newdt = DT1D[i];
    }
    if (DeltaT < newdt)
        newdt = DeltaT;
    CFL[0] = (int)(ceil(DeltaT/newdt));
}
12,310 | #include "includes.h"
extern "C" {
}
// Serial final reduction: *y = sum of x[0..len).
// Fix: the original let every launched thread execute the whole loop,
// racing on the global *y (zeroing it and doing read-modify-write adds);
// now exactly one thread accumulates locally and performs a single store.
__global__ void reduce_sum_final(const float* x, float* y, unsigned int len) {
    if (blockIdx.x == 0 && threadIdx.x == 0) {
        float acc = 0.0f;
        for (unsigned int i = 0; i < len; ++i) {
            acc += x[i];
        }
        *y = acc;
    }
}
12,311 |
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#if defined(__unix) || defined(__linux)
#include <sys/time.h>
#endif
// includes, project
#include <cuda.h>
#include <cuda_runtime.h>
#include <cufft.h>
#if defined(__unix) || defined(__linux)
/* Wall-clock time in seconds (unix only). */
static double nowSeconds(void)
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return tv.tv_sec + tv.tv_usec / 1000000.0;
}
#endif

/* If timing is enabled, log how long the phase that began at *phaseStart
 * took and restart the clock. No-op on non-unix platforms. */
static void logPhase(int time, const char *label, double *phaseStart)
{
#if defined(__unix) || defined(__linux)
    if (time) {
        double end = nowSeconds();
        fprintf(stderr, "\n %s took %.6f seconds.\n", label, end - *phaseStart);
        *phaseStart = nowSeconds();
    }
#else
    (void)time; (void)label; (void)phaseStart;
#endif
}

/* Smallest power of two >= v (v > 0). */
static unsigned int nextPow2(unsigned int v)
{
    v -= 1;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    return v + 1;
}

/**
 * 3-D FFT of `input` via cuFFT.
 *
 * input     - interleaved (re, im) float pairs for dim[0]*dim[1]*dim[2]
 *             elements; overwritten with the transform result.
 * dim       - x/y/z extents of the volume.
 * forward   - 1 = forward transform, otherwise inverse. Inverse results are
 *             scaled by 1/size because cuFFT does not normalize on its own.
 * doComplex - 1 = complex-to-complex; otherwise real-to-complex (forward)
 *             or complex-to-real (inverse).
 * time      - nonzero = log per-phase timings to stderr (unix only).
 *
 * Fixes vs. the original: the C2C copy-out loop was accidentally inside a
 * unix-only #if (non-unix callers never received results), and both
 * cudaMemset calls cleared only pad_length*2 BYTES instead of the whole
 * padded allocation.
 */
extern "C" void NewCUDAFFT(float *input, int dim[3], int forward, int doComplex, int time)
{
    double wholeStart = 0.0, phaseStart = 0.0;
#if defined(__unix) || defined(__linux)
    if (time) {
        wholeStart = nowSeconds();
        phaseStart = nowSeconds();
    }
#endif
    const int x = dim[0];
    const int y = dim[1];
    const int z = dim[2];
    const int size = x * y * z;

    if (doComplex == 1) {
        /* ---- complex-to-complex, in place on the device ---- */
        const int mem_size = sizeof(cufftComplex) * size;
        cufftComplex *h_in  = (cufftComplex *)malloc(mem_size);
        cufftComplex *h_out = (cufftComplex *)malloc(mem_size);
        logPhase(time, "mallocs", &phaseStart);

        for (int i = 0; i < size; i++) {      /* de-interleave into cufftComplex */
            h_in[i].x = input[2 * i];
            h_in[i].y = input[2 * i + 1];
        }
        logPhase(time, "Populating", &phaseStart);

        cufftComplex *d_in;
        cudaMalloc((void **)&d_in, mem_size);
        cudaMemcpy(d_in, h_in, mem_size, cudaMemcpyHostToDevice);
        logPhase(time, "cudaMalloc/cudaMemcpy", &phaseStart);

        cufftHandle planForward;
        cufftPlan3d(&planForward, x, y, z, CUFFT_C2C);
        cufftComplex *d_out = d_in;           /* transform in place */
        logPhase(time, "Planning", &phaseStart);

        cufftExecC2C(planForward, d_in, d_out,
                     (forward == 1) ? CUFFT_FORWARD : CUFFT_INVERSE);
        logPhase(time, "Execution", &phaseStart);

        cudaMemcpy(h_out, d_out, mem_size, cudaMemcpyDeviceToHost);
        logPhase(time, "cudaMemcpy", &phaseStart);

        /* cuFFT does not normalize; apply 1/size after an inverse transform. */
        const int normalizer = (forward == 0) ? size : 1;
        /* Bug fix: this loop was inside "#if unix" in the original, so on
         * other platforms the result was never copied back to the caller. */
        for (int i = 0; i < size; i++) {
            input[i * 2]       = h_out[i].x / normalizer;
            input[(i * 2) + 1] = h_out[i].y / normalizer;
        }
        logPhase(time, "Copying results", &phaseStart);

        cufftDestroy(planForward);
        cudaFree(d_in);
        free(h_in);
        free(h_out);
    } else if (forward == 1) {
        /* ---- real-to-complex (forward) ---- */
        cufftReal    *h_in  = (cufftReal *)malloc(sizeof(cufftReal) * size);
        cufftComplex *h_out = (cufftComplex *)malloc(sizeof(cufftComplex) * size);
        logPhase(time, "mallocs", &phaseStart);

        for (int i = 0; i < size; i++) {      /* keep only the real parts */
            h_in[i] = (cufftReal)input[2 * i];
        }
        logPhase(time, "Population", &phaseStart);

        /* Pad the device buffer to a power of two; it is sized 2*pad reals
         * so the complex output of the in-place transform fits. */
        const unsigned int pad_length = nextPow2((unsigned int)size);
        cufftReal *d_in;
        cudaMalloc((void **)&d_in, pad_length * sizeof(cufftReal) * 2);
        /* Bug fix: clear the whole padded allocation, not pad_length*2 bytes. */
        cudaMemset(d_in, 0, pad_length * sizeof(cufftReal) * 2);
        cudaMemcpy(d_in, h_in, sizeof(cufftReal) * size, cudaMemcpyHostToDevice);
        logPhase(time, "cudaMalloc/cudaMemset/cudaMemcpy", &phaseStart);

        cufftHandle plan;
        cufftPlan3d(&plan, x, y, z, CUFFT_R2C);
        cufftComplex *d_out = (cufftComplex *)d_in;   /* in place */
        logPhase(time, "Planning", &phaseStart);

        cufftExecR2C(plan, d_in, d_out);
        logPhase(time, "Execution", &phaseStart);

        cudaMemcpy(h_out, d_out, sizeof(cufftComplex) * size, cudaMemcpyDeviceToHost);
        logPhase(time, "cudaMemcpy", &phaseStart);

        for (int i = 0; i < size; i++) {      /* re-interleave the output */
            input[i * 2]       = h_out[i].x;
            input[(i * 2) + 1] = h_out[i].y;
        }
        logPhase(time, "Copying results", &phaseStart);

        cufftDestroy(plan);
        cudaFree(d_in);
        free(h_in);
        free(h_out);
    } else {
        /* ---- complex-to-real (inverse) ---- */
        cufftComplex *h_in  = (cufftComplex *)malloc(sizeof(cufftComplex) * size);
        cufftReal    *h_out = (cufftReal *)malloc(sizeof(cufftReal) * size);
        logPhase(time, "mallocs", &phaseStart);

        for (int i = 0; i < size; i++) {
            h_in[i].x = input[2 * i];
            h_in[i].y = input[(2 * i) + 1];
        }
        logPhase(time, "Population", &phaseStart);

        const unsigned int pad_length = nextPow2((unsigned int)size);
        cufftComplex *d_in;
        cudaMalloc((void **)&d_in, pad_length * sizeof(cufftComplex));
        /* Bug fix: clear the whole padded allocation, not pad_length*2 bytes. */
        cudaMemset(d_in, 0, pad_length * sizeof(cufftComplex));
        cudaMemcpy(d_in, h_in, sizeof(cufftComplex) * size, cudaMemcpyHostToDevice);
        logPhase(time, "cudaMalloc/cudaMemset/cudaMemcpy", &phaseStart);

        cufftHandle plan;
        cufftPlan3d(&plan, x, y, z, CUFFT_C2R);
        cufftReal *d_out = (cufftReal *)d_in;         /* in place */
        logPhase(time, "Planning", &phaseStart);

        cufftExecC2R(plan, d_in, d_out);
        logPhase(time, "Execution", &phaseStart);

        cudaMemcpy(h_out, d_out, sizeof(cufftReal) * size, cudaMemcpyDeviceToHost);
        logPhase(time, "cudaMemcpy", &phaseStart);

        /* RFFT normalization factor of 1/size = (size^(-1/2))^2, since the
         * device does not normalize on its own. */
        const int normalizer = size;
        for (int i = 0; i < size; i++) {
            input[2 * i] = (float)h_out[i] / normalizer;
        }
        logPhase(time, "Copying results", &phaseStart);

        cufftDestroy(plan);
        cudaFree(d_in);
        free(h_in);
        free(h_out);
    }
#if defined(__unix) || defined(__linux)
    if (time) {
        fprintf(stderr, "\n Full thing took %.6f seconds.\n", nowSeconds() - wholeStart);
    }
#endif
}
|
12,312 | #include <stdio.h>
#include <time.h>
#include <assert.h>
#define QTD_ELEMENTOS 1024
#define NUM_THREAD_BLOCK_Y 32
#define NUM_THREAD_BLOCK_X 32
// Print the sum of all elements of the n x n int matrix `data`.
// Fix: the original accumulated into a double but printed it with %d,
// which is undefined behavior; accumulate in a wide integer instead.
void imprimeSoma(int *data, unsigned n)
{
    long long soma = 0;
    for (unsigned i = 0; i < n; i++) {
        for (unsigned j = 0; j < n; j++) {
            soma += data[i * n + j];
        }
    }
    printf("A soma é %lld\n", soma);
}
// Fill the size x size matrix `data` with random 0/1 entries,
// seeding the PRNG from the current wall-clock time.
void inicializaMatriz(int *data, unsigned size)
{
    time_t seed;
    srand((unsigned int) time(&seed));
    for (unsigned row = 0; row < size; row++) {
        for (unsigned col = 0; col < size; col++) {
            data[row * size + col] = rand() % 2;
        }
    }
}
__global__
void warshallKernelShared_Principal(int *F, int k, unsigned n){
    // Warshall transitive-closure step on the diagonal tile (k, k).
    // Launched with a single block of NUM_THREAD_BLOCK_Y x NUM_THREAD_BLOCK_X
    // threads; each thread owns one element of the tile.
    //Shared variable - visible to all threads of the block
    __shared__ int ladrilhoPrincipal[NUM_THREAD_BLOCK_Y][NUM_THREAD_BLOCK_X];
    // Global coordinates of this thread's element inside diagonal tile k.
    int tidX = k * blockDim.x + threadIdx.x;
    int tidY = k * blockDim.y + threadIdx.y;
    // Stage the DIAGONAL-k tile in shared memory.
    ladrilhoPrincipal[threadIdx.y][threadIdx.x] = F[tidY * n + tidX];
    // Barrier: the tile must be fully loaded before processing.
    __syncthreads();
    // Warshall step restricted to the tile.
    for(int m=0; m < blockDim.x; m++) {
        if(ladrilhoPrincipal[m][threadIdx.x] == 1 && ladrilhoPrincipal[threadIdx.y][m] == 1){
            ladrilhoPrincipal[threadIdx.y][threadIdx.x] = 1;
        }
        // Barrier: each m-step depends on the previous one.
        __syncthreads();
    }
    // Bug fix: write the tile back to the location it was loaded from.
    // The original stored via blockIdx-derived (i, j) which, with the
    // single-block launch, wrote diagonal tile k into tile (0, 0) for k > 0.
    F[tidY * n + tidX] = ladrilhoPrincipal[threadIdx.y][threadIdx.x];
}
// Warshall closure step for the ROW-k and COLUMN-k tiles, using the already
// processed diagonal tile (k, k). Launched with grid (grid.x, 1): block
// blockIdx.x handles row tile (k, blockIdx.x) and column tile (blockIdx.x, k).
__global__
void warshallKernelShared_LinhaColuna(int *F, int k, unsigned n){
//Register variables - visible only to this thread
int tidX;
int tidY;
//Shared variables - visible only to the threads of this block
__shared__ int ladrilhoPrincipal[NUM_THREAD_BLOCK_Y][NUM_THREAD_BLOCK_X];
__shared__ int ladrilho[NUM_THREAD_BLOCK_Y][NUM_THREAD_BLOCK_X];
//Compute the thread indices
tidX = k * blockDim.x + threadIdx.x;
tidY = k * blockDim.y + threadIdx.y;
//Fetch the DIAGONAL tile from global memory into shared memory
ladrilhoPrincipal[threadIdx.y][threadIdx.x] = F[tidY * n + tidX];
//Compute the thread indices
tidX = blockIdx.x * blockDim.x + threadIdx.x;
tidY = k * blockDim.y + threadIdx.y;
//Fetch the ROW-k tile from global memory into shared memory
ladrilho[threadIdx.y][threadIdx.x] = F[tidY * n + tidX];
//Synchronization barrier - guarantees shared memory is filled before processing
__syncthreads();
//Process the ROW-k tile
for(int m=0; m < blockDim.x; m++) {
if(ladrilhoPrincipal[threadIdx.y][m] == 1 && ladrilho[m][threadIdx.x] == 1){
ladrilho[threadIdx.y][threadIdx.x] = 1;
}
//Synchronization barrier - guarantees the data dependencies are satisfied.
__syncthreads();
}
//Write the ROW-k tile values back to global memory
// (no extra barrier needed before the reload below: from here on each thread
// only reads/writes its own [threadIdx.y][threadIdx.x] slot of `ladrilho`)
F[tidY * n + tidX] = ladrilho[threadIdx.y][threadIdx.x];
//===========================================================
//Compute the thread indices
tidX = k * blockDim.x + threadIdx.x;
tidY = blockIdx.x * blockDim.y + threadIdx.y;
//Fetch the COLUMN-k tile from global memory into shared memory
ladrilho[threadIdx.y][threadIdx.x] = F[tidY * n + tidX];
//Synchronization barrier - guarantees shared memory is filled before processing
__syncthreads();
//Process the COLUMN-k tile
for(int m=0; m < blockDim.x; m++) {
if(ladrilhoPrincipal[m][threadIdx.x] == 1 && ladrilho[threadIdx.y][m] == 1){
ladrilho[threadIdx.y][threadIdx.x] = 1;
}
//Synchronization barrier - guarantees the data dependencies are satisfied.
__syncthreads();
}
//Write the COLUMN-k tile values back to global memory
F[tidY * n + tidX] = ladrilho[threadIdx.y][threadIdx.x];
}
__host__
void pre_processamento(int* gpuF, int k, int size, dim3 grid, dim3 bloco){
    // Phase order matters: first the diagonal tile k (one block), then its
    // dependent row/column tiles (one block per tile along the dimension).
    warshallKernelShared_Principal<<< 1, bloco >>>(gpuF, k, size);
    dim3 gradeLinhaColuna(grid.x, 1);
    warshallKernelShared_LinhaColuna<<< gradeLinhaColuna, bloco >>>(gpuF, k, size);
}
// Floyd-Warshall transitive closure, in place, on the n x n 0/1 matrix
// `fechoMatriz` (row-major): sets [i][j] = 1 whenever a path i -> k -> j
// exists for some intermediate k.
void warshallCPU(int* fechoMatriz, unsigned n)
{
    for (unsigned k = 0; k < n; k++)
        for (unsigned i = 0; i < n; i++)
            for (unsigned j = 0; j < n; j++)
                if (fechoMatriz[i * n + k] == 1 && fechoMatriz[k * n + j] == 1)
                    fechoMatriz[i * n + j] = 1;
}
// Runs the Warshall closure on a private copy of the n x n matrix A,
// prints the elapsed CPU time and the element sum as a checksum.
void processamentoCPU(int *A, unsigned n)
{
    int* F = (int*) malloc( sizeof(int) * n * n);
    // Fix: the original used memcpy without this file including <string.h>;
    // a plain copy loop removes the undeclared-function dependency.
    for (unsigned i = 0; i < n * n; i++) {
        F[i] = A[i];
    }
    clock_t start = clock();
    warshallCPU(F, n);
    clock_t stop = clock();
    // divide in double to avoid losing precision in the reported time
    double tempoGasto = (stop - start) / (double) CLOCKS_PER_SEC;
    printf("Tempo de execução da CPU: %f s\n", tempoGasto );
    imprimeSoma(F, n);
    free(F);
}
// Driver for the CPU-only Warshall benchmark: allocate the adjacency
// matrix, fill it with random 0/1 entries and run the timed closure.
void mainWarshall()
{
    const int byteCount = QTD_ELEMENTOS * QTD_ELEMENTOS * sizeof(int);
    int *matriz = (int*) malloc(byteCount);
    inicializaMatriz(matriz, QTD_ELEMENTOS);
    processamentoCPU(matriz, QTD_ELEMENTOS);
    free(matriz);
}
// Entry point: delegate to the Warshall benchmark driver.
int main(void)
{
    mainWarshall();
    return 0;
}
|
12,313 | #include "includes.h"
//#define __OUTPUT_PIX__
#define BLOCK_SIZE 32
__constant__ __device__ float lTable_const[1064];
__constant__ __device__ float mr_const[3];
__constant__ __device__ float mg_const[3];
__constant__ __device__ float mb_const[3];
// One separable triangle-filter pass over a 3-plane (ht x wd) image stored
// plane-major in dev_I: a vertical [1 p 1]*nrm smoothing written to the
// scratch rows T0..T2, then a horizontal [1 p 1] pass (with special-cased
// first/last columns) written to dev_O.
// NOTE(review): the horizontal step reads T*[x_pos-1]/T*[x_pos+1] after only
// a block-local __syncthreads(); neighbouring columns owned by a different
// block are not ordered, so a cross-block race on T0/T1/T2 looks possible
// unless the launch uses one block along x — confirm launch geometry.
// NOTE(review): both __syncthreads() sit inside the (x_pos < wd && y_pos < ht)
// guard; if a block straddles the image edge, some threads skip the barrier,
// which is undefined behavior — confirm grid dims divide the image exactly.
__global__ void trianguler_convolution_gpu_kernel(float *dev_I, float *dev_O, float *T0, float *T1, float *T2, int wd, int ht, float nrm, float p)
{
unsigned int x_pos = threadIdx.x + (blockDim.x * blockIdx.x);
unsigned int y_pos = threadIdx.y + (blockDim.y * blockIdx.y);
if ((x_pos < wd) && (y_pos < ht)) {
// Per-plane row pointers: It = row above, Im = current row, Ib = row below
// (clamped to the current row at the top/bottom borders), Ot = output row,
// T*0 = scratch row for the vertical result.
float *It0, *It1, *It2, *Im0, *Im1, *Im2, *Ib0, *Ib1, *Ib2;
float *Ot0, *Ot1, *Ot2;
float *T00, *T10, *T20;
It0 = Im0 = Ib0 = dev_I + (y_pos * wd) + (0 * ht * wd);
It1 = Im1 = Ib1 = dev_I + (y_pos * wd) + (1 * ht * wd);
It2 = Im2 = Ib2 = dev_I + (y_pos * wd) + (2 * ht * wd);
Ot0 = dev_O + (y_pos * wd) + (0 * ht * wd);
Ot1 = dev_O + (y_pos * wd) + (1 * ht * wd);
Ot2 = dev_O + (y_pos * wd) + (2 * ht * wd);
T00 = T0 + (y_pos * wd);
T10 = T1 + (y_pos * wd);
T20 = T2 + (y_pos * wd);
if(y_pos > 0) { /// not the first row, let It point to previous row
It0 -= wd;
It1 -= wd;
It2 -= wd;
}
if(y_pos < ht - 1) { /// not the last row, let Ib point to next row
Ib0 += wd;
Ib1 += wd;
Ib2 += wd;
}
// Vertical [1 p 1] filter scaled by nrm (border rows fall back to Im).
T00[x_pos] = nrm * (It0[x_pos] + (p * Im0[x_pos]) + Ib0[x_pos]);
T10[x_pos] = nrm * (It1[x_pos] + (p * Im1[x_pos]) + Ib1[x_pos]);
T20[x_pos] = nrm * (It2[x_pos] + (p * Im2[x_pos]) + Ib2[x_pos]);
__syncthreads();
// Horizontal pass: first and last columns fold the missing neighbour's
// weight into the centre tap (1 + p).
if (x_pos == 0) {
Ot0[x_pos] = ((1 + p) * T00[x_pos]) + T00[x_pos + 1];
Ot1[x_pos] = ((1 + p) * T10[x_pos]) + T10[x_pos + 1];
Ot2[x_pos] = ((1 + p) * T20[x_pos]) + T20[x_pos + 1];
} else if (x_pos == wd - 1) {
Ot0[x_pos] = T00[x_pos - 1] + ((1 + p) * T00[x_pos]);
Ot1[x_pos] = T10[x_pos - 1] + ((1 + p) * T10[x_pos]);
Ot2[x_pos] = T20[x_pos - 1] + ((1 + p) * T20[x_pos]);
} else {
Ot0[x_pos] = T00[x_pos - 1] + (p * T00[x_pos]) + T00[x_pos + 1];
Ot1[x_pos] = T10[x_pos - 1] + (p * T10[x_pos]) + T10[x_pos + 1];
Ot2[x_pos] = T20[x_pos - 1] + (p * T20[x_pos]) + T20[x_pos + 1];
}
__syncthreads();
}
}
// Warp-level tail of the sum reduction: folds buffer[i..i+63] into x[i] in
// six lockstep steps with no barrier (volatile forces real shared loads).
// NOTE(review): relies on implicit warp-synchronous execution (pre-Volta);
// on Volta+ independent thread scheduling this needs __syncwarp() — confirm
// target architectures.
__device__ void _sum_32_21_0(volatile float *x, int i) {
//for optimizing warps, volatile must be used as register optimization will lead to wrong answers
float ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai+xi;
ai=x[i]; xi=x[i+16]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi;
}
// Strided sum-reduction kernel: element x[i] contributes to output slot
// (i/sy)%ny. Block b gathers slot b's elements starting from the additive
// identity 0, tree-reduces the 64 partials in shared memory, and thread 0
// writes y[b]. Launched as <<<ny, 64>>> (one block per output element).
// (The "xi=xi" no-op is the generated slot for a unary pre-op; cf. the abs
// in the sumabs variant.)
__global__ void _sum_32_21(int nx, float *x, int sy, int ny, float *y) {
// x[i] goes into y[(i/sy)%ny]
__shared__ float buffer[64];
int t = threadIdx.x;
int b = blockIdx.x;
float ai, xi;
// sum the elements assigned to this thread
ai = 0;
if (sy == 1) {
int istep = 64*ny;
for (int i=b+t*ny; i<nx; i+=istep) {
xi=x[i]; xi=xi; ai=ai+xi;
}
} else {
int jstep = sy*ny;
for (int j=0; j<nx; j+=jstep) {
int i0 = j+b*sy;
int i1 = i0+sy;
for (int i=i0+t; i<i1; i+=64) {
xi=x[i]; xi=xi; ai=ai+xi;
}
}
}
buffer[t] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(t < stride) {
ai=buffer[t]; xi=buffer[stride+t]; buffer[t]=ai+xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(t<32) {
_sum_32_21_0(buffer,t); // This reuses warpSum from 20 scalar reduction.
}
__syncthreads();
if(t==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
// Host launcher: one 64-thread block per output element.
void sum_32_21(int nx, float *x, int sy, int ny, float *y) {
// x[i] goes into y[(i/sy)%ny]
// _sum_32_21<<<64,64>>>(nx,x,sy,ny,y);
_sum_32_21<<<ny,64>>>(nx,x,sy,ny,y);
}
#ifdef __cplusplus
}
#endif
// Double-precision twin of _sum_32_21_0: barrier-free warp-level tail of
// the sum reduction over buffer[i..i+63].
// NOTE(review): assumes implicit warp-synchronous execution (pre-Volta);
// on Volta+ this needs __syncwarp() — confirm target architectures.
__device__ void _sum_64_21_0(volatile double *x, int i) {
//for optimizing warps, volatile must be used as register optimization will lead to wrong answers
double ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai+xi;
ai=x[i]; xi=x[i+16]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi;
}
// Strided sum-reduction kernel (double): x[i] contributes to y[(i/sy)%ny];
// block b accumulates slot b from identity 0, tree-reduces 64 partials in
// shared memory, thread 0 writes y[b]. Launched as <<<ny, 64>>>.
__global__ void _sum_64_21(int nx, double *x, int sy, int ny, double *y) {
// x[i] goes into y[(i/sy)%ny]
__shared__ double buffer[64];
int t = threadIdx.x;
int b = blockIdx.x;
double ai, xi;
// sum the elements assigned to this thread
ai = 0;
if (sy == 1) {
int istep = 64*ny;
for (int i=b+t*ny; i<nx; i+=istep) {
xi=x[i]; xi=xi; ai=ai+xi;
}
} else {
int jstep = sy*ny;
for (int j=0; j<nx; j+=jstep) {
int i0 = j+b*sy;
int i1 = i0+sy;
for (int i=i0+t; i<i1; i+=64) {
xi=x[i]; xi=xi; ai=ai+xi;
}
}
}
buffer[t] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(t < stride) {
ai=buffer[t]; xi=buffer[stride+t]; buffer[t]=ai+xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(t<32) {
_sum_64_21_0(buffer,t); // This reuses warpSum from 20 scalar reduction.
}
__syncthreads();
if(t==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
// Host launcher: one 64-thread block per output element.
void sum_64_21(int nx, double *x, int sy, int ny, double *y) {
// x[i] goes into y[(i/sy)%ny]
// _sum_64_21<<<64,64>>>(nx,x,sy,ny,y);
_sum_64_21<<<ny,64>>>(nx,x,sy,ny,y);
}
#ifdef __cplusplus
}
#endif
// Warp-level tail of the product reduction: multiplies buffer[i..i+63] into
// x[i], barrier-free.
// NOTE(review): assumes implicit warp-synchronous execution (pre-Volta);
// on Volta+ this needs __syncwarp() — confirm target architectures.
__device__ void _prod_32_21_0(volatile float *x, int i) {
//for optimizing warps, volatile must be used as register optimization will lead to wrong answers
float ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai*xi;
ai=x[i]; xi=x[i+16]; x[i]=ai*xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai*xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai*xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai*xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai*xi;
}
// Strided product-reduction kernel: x[i] contributes to y[(i/sy)%ny]; block
// b accumulates slot b from the multiplicative identity 1, tree-reduces 64
// partials in shared memory, thread 0 writes y[b]. Launched as <<<ny, 64>>>.
__global__ void _prod_32_21(int nx, float *x, int sy, int ny, float *y) {
// x[i] goes into y[(i/sy)%ny]
__shared__ float buffer[64];
int t = threadIdx.x;
int b = blockIdx.x;
float ai, xi;
// multiply the elements assigned to this thread
ai = 1;
if (sy == 1) {
int istep = 64*ny;
for (int i=b+t*ny; i<nx; i+=istep) {
xi=x[i]; xi=xi; ai=ai*xi;
}
} else {
int jstep = sy*ny;
for (int j=0; j<nx; j+=jstep) {
int i0 = j+b*sy;
int i1 = i0+sy;
for (int i=i0+t; i<i1; i+=64) {
xi=x[i]; xi=xi; ai=ai*xi;
}
}
}
buffer[t] = ai;
__syncthreads();
// help reduce the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(t < stride) {
ai=buffer[t]; xi=buffer[stride+t]; buffer[t]=ai*xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(t<32) {
_prod_32_21_0(buffer,t); // This reuses warpSum from 20 scalar reduction.
}
__syncthreads();
if(t==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
// Host launcher: one 64-thread block per output element.
void prod_32_21(int nx, float *x, int sy, int ny, float *y) {
// x[i] goes into y[(i/sy)%ny]
// _prod_32_21<<<64,64>>>(nx,x,sy,ny,y);
_prod_32_21<<<ny,64>>>(nx,x,sy,ny,y);
}
#ifdef __cplusplus
}
#endif
// Double-precision twin of _prod_32_21_0: barrier-free warp-level tail of
// the product reduction over buffer[i..i+63].
// NOTE(review): assumes implicit warp-synchronous execution (pre-Volta);
// on Volta+ this needs __syncwarp() — confirm target architectures.
__device__ void _prod_64_21_0(volatile double *x, int i) {
//for optimizing warps, volatile must be used as register optimization will lead to wrong answers
double ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai*xi;
ai=x[i]; xi=x[i+16]; x[i]=ai*xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai*xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai*xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai*xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai*xi;
}
// Strided product-reduction kernel (double): x[i] contributes to
// y[(i/sy)%ny]; block b starts from identity 1 and thread 0 writes y[b].
// Launched as <<<ny, 64>>>.
__global__ void _prod_64_21(int nx, double *x, int sy, int ny, double *y) {
// x[i] goes into y[(i/sy)%ny]
__shared__ double buffer[64];
int t = threadIdx.x;
int b = blockIdx.x;
double ai, xi;
// multiply the elements assigned to this thread
ai = 1;
if (sy == 1) {
int istep = 64*ny;
for (int i=b+t*ny; i<nx; i+=istep) {
xi=x[i]; xi=xi; ai=ai*xi;
}
} else {
int jstep = sy*ny;
for (int j=0; j<nx; j+=jstep) {
int i0 = j+b*sy;
int i1 = i0+sy;
for (int i=i0+t; i<i1; i+=64) {
xi=x[i]; xi=xi; ai=ai*xi;
}
}
}
buffer[t] = ai;
__syncthreads();
// help reduce the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(t < stride) {
ai=buffer[t]; xi=buffer[stride+t]; buffer[t]=ai*xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(t<32) {
_prod_64_21_0(buffer,t); // This reuses warpSum from 20 scalar reduction.
}
__syncthreads();
if(t==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
// Host launcher: one 64-thread block per output element.
void prod_64_21(int nx, double *x, int sy, int ny, double *y) {
// x[i] goes into y[(i/sy)%ny]
// _prod_64_21<<<64,64>>>(nx,x,sy,ny,y);
_prod_64_21<<<ny,64>>>(nx,x,sy,ny,y);
}
#ifdef __cplusplus
}
#endif
// Warp-level tail of the max reduction: folds buffer[i..i+63] into x[i]
// with element-wise max, barrier-free.
// NOTE(review): assumes implicit warp-synchronous execution (pre-Volta);
// on Volta+ this needs __syncwarp() — confirm target architectures.
__device__ void _maximum_32_21_0(volatile float *x, int i) {
//for optimizing warps, volatile must be used as register optimization will lead to wrong answers
float ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+16]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+ 8]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+ 4]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+ 2]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+ 1]; x[i]=(ai>xi?ai:xi);
}
// Strided max-reduction kernel: x[i] contributes to y[(i/sy)%ny]; block b
// accumulates slot b starting from the identity -INFINITY, tree-reduces 64
// partials in shared memory, thread 0 writes y[b]. Launched as <<<ny, 64>>>.
__global__ void _maximum_32_21(int nx, float *x, int sy, int ny, float *y) {
// x[i] goes into y[(i/sy)%ny]
__shared__ float buffer[64];
int t = threadIdx.x;
int b = blockIdx.x;
float ai, xi;
// reduce the elements assigned to this thread
ai = (-INFINITY);
if (sy == 1) {
int istep = 64*ny;
for (int i=b+t*ny; i<nx; i+=istep) {
xi=x[i]; xi=xi; ai=(ai>xi?ai:xi);
}
} else {
int jstep = sy*ny;
for (int j=0; j<nx; j+=jstep) {
int i0 = j+b*sy;
int i1 = i0+sy;
for (int i=i0+t; i<i1; i+=64) {
xi=x[i]; xi=xi; ai=(ai>xi?ai:xi);
}
}
}
buffer[t] = ai;
__syncthreads();
// help reduce the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(t < stride) {
ai=buffer[t]; xi=buffer[stride+t]; buffer[t]=(ai>xi?ai:xi);
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(t<32) {
_maximum_32_21_0(buffer,t); // This reuses warpSum from 20 scalar reduction.
}
__syncthreads();
if(t==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
// Host launcher: one 64-thread block per output element.
void maximum_32_21(int nx, float *x, int sy, int ny, float *y) {
// x[i] goes into y[(i/sy)%ny]
// _maximum_32_21<<<64,64>>>(nx,x,sy,ny,y);
_maximum_32_21<<<ny,64>>>(nx,x,sy,ny,y);
}
#ifdef __cplusplus
}
#endif
// Double-precision twin of _maximum_32_21_0: barrier-free warp-level tail
// of the max reduction over buffer[i..i+63].
// NOTE(review): assumes implicit warp-synchronous execution (pre-Volta);
// on Volta+ this needs __syncwarp() — confirm target architectures.
__device__ void _maximum_64_21_0(volatile double *x, int i) {
//for optimizing warps, volatile must be used as register optimization will lead to wrong answers
double ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+16]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+ 8]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+ 4]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+ 2]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+ 1]; x[i]=(ai>xi?ai:xi);
}
// Strided max-reduction kernel (double): x[i] contributes to y[(i/sy)%ny];
// block b starts from -INFINITY and thread 0 writes y[b].
// Launched as <<<ny, 64>>>.
__global__ void _maximum_64_21(int nx, double *x, int sy, int ny, double *y) {
// x[i] goes into y[(i/sy)%ny]
__shared__ double buffer[64];
int t = threadIdx.x;
int b = blockIdx.x;
double ai, xi;
// reduce the elements assigned to this thread
ai = (-INFINITY);
if (sy == 1) {
int istep = 64*ny;
for (int i=b+t*ny; i<nx; i+=istep) {
xi=x[i]; xi=xi; ai=(ai>xi?ai:xi);
}
} else {
int jstep = sy*ny;
for (int j=0; j<nx; j+=jstep) {
int i0 = j+b*sy;
int i1 = i0+sy;
for (int i=i0+t; i<i1; i+=64) {
xi=x[i]; xi=xi; ai=(ai>xi?ai:xi);
}
}
}
buffer[t] = ai;
__syncthreads();
// help reduce the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(t < stride) {
ai=buffer[t]; xi=buffer[stride+t]; buffer[t]=(ai>xi?ai:xi);
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(t<32) {
_maximum_64_21_0(buffer,t); // This reuses warpSum from 20 scalar reduction.
}
__syncthreads();
if(t==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
// Host launcher: one 64-thread block per output element.
void maximum_64_21(int nx, double *x, int sy, int ny, double *y) {
// x[i] goes into y[(i/sy)%ny]
// _maximum_64_21<<<64,64>>>(nx,x,sy,ny,y);
_maximum_64_21<<<ny,64>>>(nx,x,sy,ny,y);
}
#ifdef __cplusplus
}
#endif
// Warp-level tail of the min reduction: folds buffer[i..i+63] into x[i]
// with element-wise min, barrier-free.
// NOTE(review): assumes implicit warp-synchronous execution (pre-Volta);
// on Volta+ this needs __syncwarp() — confirm target architectures.
__device__ void _minimum_32_21_0(volatile float *x, int i) {
//for optimizing warps, volatile must be used as register optimization will lead to wrong answers
float ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+16]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+ 8]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+ 4]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+ 2]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+ 1]; x[i]=(ai<xi?ai:xi);
}
// Strided min-reduction kernel: x[i] contributes to y[(i/sy)%ny]; block b
// accumulates slot b starting from the identity INFINITY, tree-reduces 64
// partials in shared memory, thread 0 writes y[b]. Launched as <<<ny, 64>>>.
__global__ void _minimum_32_21(int nx, float *x, int sy, int ny, float *y) {
// x[i] goes into y[(i/sy)%ny]
__shared__ float buffer[64];
int t = threadIdx.x;
int b = blockIdx.x;
float ai, xi;
// reduce the elements assigned to this thread
ai = INFINITY;
if (sy == 1) {
int istep = 64*ny;
for (int i=b+t*ny; i<nx; i+=istep) {
xi=x[i]; xi=xi; ai=(ai<xi?ai:xi);
}
} else {
int jstep = sy*ny;
for (int j=0; j<nx; j+=jstep) {
int i0 = j+b*sy;
int i1 = i0+sy;
for (int i=i0+t; i<i1; i+=64) {
xi=x[i]; xi=xi; ai=(ai<xi?ai:xi);
}
}
}
buffer[t] = ai;
__syncthreads();
// help reduce the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(t < stride) {
ai=buffer[t]; xi=buffer[stride+t]; buffer[t]=(ai<xi?ai:xi);
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(t<32) {
_minimum_32_21_0(buffer,t); // This reuses warpSum from 20 scalar reduction.
}
__syncthreads();
if(t==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
// Host launcher: one 64-thread block per output element.
void minimum_32_21(int nx, float *x, int sy, int ny, float *y) {
// x[i] goes into y[(i/sy)%ny]
// _minimum_32_21<<<64,64>>>(nx,x,sy,ny,y);
_minimum_32_21<<<ny,64>>>(nx,x,sy,ny,y);
}
#ifdef __cplusplus
}
#endif
// Double-precision twin of _minimum_32_21_0: barrier-free warp-level tail
// of the min reduction over buffer[i..i+63].
// NOTE(review): assumes implicit warp-synchronous execution (pre-Volta);
// on Volta+ this needs __syncwarp() — confirm target architectures.
__device__ void _minimum_64_21_0(volatile double *x, int i) {
//for optimizing warps, volatile must be used as register optimization will lead to wrong answers
double ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+16]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+ 8]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+ 4]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+ 2]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+ 1]; x[i]=(ai<xi?ai:xi);
}
// Strided min-reduction kernel (double): x[i] contributes to y[(i/sy)%ny];
// block b starts from INFINITY and thread 0 writes y[b].
// Launched as <<<ny, 64>>>.
__global__ void _minimum_64_21(int nx, double *x, int sy, int ny, double *y) {
// x[i] goes into y[(i/sy)%ny]
__shared__ double buffer[64];
int t = threadIdx.x;
int b = blockIdx.x;
double ai, xi;
// reduce the elements assigned to this thread
ai = INFINITY;
if (sy == 1) {
int istep = 64*ny;
for (int i=b+t*ny; i<nx; i+=istep) {
xi=x[i]; xi=xi; ai=(ai<xi?ai:xi);
}
} else {
int jstep = sy*ny;
for (int j=0; j<nx; j+=jstep) {
int i0 = j+b*sy;
int i1 = i0+sy;
for (int i=i0+t; i<i1; i+=64) {
xi=x[i]; xi=xi; ai=(ai<xi?ai:xi);
}
}
}
buffer[t] = ai;
__syncthreads();
// help reduce the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(t < stride) {
ai=buffer[t]; xi=buffer[stride+t]; buffer[t]=(ai<xi?ai:xi);
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(t<32) {
_minimum_64_21_0(buffer,t); // This reuses warpSum from 20 scalar reduction.
}
__syncthreads();
if(t==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
// Host launcher: one 64-thread block per output element.
void minimum_64_21(int nx, double *x, int sy, int ny, double *y) {
// x[i] goes into y[(i/sy)%ny]
// _minimum_64_21<<<64,64>>>(nx,x,sy,ny,y);
_minimum_64_21<<<ny,64>>>(nx,x,sy,ny,y);
}
#ifdef __cplusplus
}
#endif
// Warp-synchronous tail of the 64-thread abs-sum reduction: lanes i < 32 fold
// x[i+32], x[i+16], ... down into x[i] without block barriers.
__device__ void _sumabs_32_21_0(volatile float *x, int i) {
//for optimizing warps, volatile must be used as register optimization will lead to wrong answers
// NOTE(review): relies on implicit warp lockstep; on Volta+ a __syncwarp()
// between steps is required -- confirm the targeted compute capability.
float ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai+xi;
ai=x[i]; xi=x[i+16]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi;
}
// Column-wise sum of absolute values: block b accumulates the elements of x
// mapping to output slot b (x[i] contributes to y[(i/sy)%ny]); assumes a
// 64-thread block as launched by the host wrapper below.
__global__ void _sumabs_32_21(int nx, float *x, int sy, int ny, float *y) {
// x[i] goes into y[(i/sy)%ny]
__shared__ float buffer[64];
int t = threadIdx.x;
int b = blockIdx.x;
float ai, xi;
// sum the elements assigned to this thread
ai = 0; // identity element for addition
if (sy == 1) {
int istep = 64*ny;
for (int i=b+t*ny; i<nx; i+=istep) {
xi=x[i]; xi=(xi<0?-xi:xi); ai=ai+xi; // |x[i]| accumulated per thread
}
} else {
int jstep = sy*ny;
for (int j=0; j<nx; j+=jstep) {
int i0 = j+b*sy;
int i1 = i0+sy;
for (int i=i0+t; i<i1; i+=64) {
xi=x[i]; xi=(xi<0?-xi:xi); ai=ai+xi;
}
}
}
buffer[t] = ai;
__syncthreads();
// help sum the entries in the block
// NOTE(review): stride starts at 64/2 == 32, so `stride>32` is false at once;
// this loop is a no-op and the warp routine below does the whole reduction.
for(int stride=64/2; stride>32; stride>>=1) {
if(t < stride) {
ai=buffer[t]; xi=buffer[stride+t]; buffer[t]=ai+xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(t<32) {
_sumabs_32_21_0(buffer,t); // This reuses warpSum from 20 scalar reduction.
}
__syncthreads();
if(t==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
// Host entry point: one 64-thread block per output element.
#ifdef __cplusplus
extern "C" {
#endif
void sumabs_32_21(int nx, float *x, int sy, int ny, float *y) {
// x[i] goes into y[(i/sy)%ny]
// _sumabs_32_21<<<64,64>>>(nx,x,sy,ny,y);
_sumabs_32_21<<<ny,64>>>(nx,x,sy,ny,y);
}
#ifdef __cplusplus
}
#endif
// Warp-synchronous tail of the 64-thread abs-sum reduction (double variant).
__device__ void _sumabs_64_21_0(volatile double *x, int i) {
//for optimizing warps, volatile must be used as register optimization will lead to wrong answers
// NOTE(review): relies on implicit warp lockstep; on Volta+ a __syncwarp()
// between steps is required -- confirm the targeted compute capability.
double ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai+xi;
ai=x[i]; xi=x[i+16]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi;
}
// Column-wise sum of absolute values (double): block b accumulates the
// elements mapping to y[(i/sy)%ny]; assumes a 64-thread block.
__global__ void _sumabs_64_21(int nx, double *x, int sy, int ny, double *y) {
// x[i] goes into y[(i/sy)%ny]
__shared__ double buffer[64];
int t = threadIdx.x;
int b = blockIdx.x;
double ai, xi;
// sum the elements assigned to this thread
ai = 0; // identity element for addition
if (sy == 1) {
int istep = 64*ny;
for (int i=b+t*ny; i<nx; i+=istep) {
xi=x[i]; xi=(xi<0?-xi:xi); ai=ai+xi;
}
} else {
int jstep = sy*ny;
for (int j=0; j<nx; j+=jstep) {
int i0 = j+b*sy;
int i1 = i0+sy;
for (int i=i0+t; i<i1; i+=64) {
xi=x[i]; xi=(xi<0?-xi:xi); ai=ai+xi;
}
}
}
buffer[t] = ai;
__syncthreads();
// help sum the entries in the block
// NOTE(review): loop is a no-op (starts at stride 32 with condition >32);
// the warp routine below performs the whole tree reduction.
for(int stride=64/2; stride>32; stride>>=1) {
if(t < stride) {
ai=buffer[t]; xi=buffer[stride+t]; buffer[t]=ai+xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(t<32) {
_sumabs_64_21_0(buffer,t); // This reuses warpSum from 20 scalar reduction.
}
__syncthreads();
if(t==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
// Host entry point: one 64-thread block per output element.
#ifdef __cplusplus
extern "C" {
#endif
void sumabs_64_21(int nx, double *x, int sy, int ny, double *y) {
// x[i] goes into y[(i/sy)%ny]
// _sumabs_64_21<<<64,64>>>(nx,x,sy,ny,y);
_sumabs_64_21<<<ny,64>>>(nx,x,sy,ny,y);
}
#ifdef __cplusplus
}
#endif
// Warp-synchronous tail of the 64-thread sum-of-squares reduction (float).
__device__ void _sumabs2_32_21_0(volatile float *x, int i) {
//for optimizing warps, volatile must be used as register optimization will lead to wrong answers
// NOTE(review): relies on implicit warp lockstep; on Volta+ a __syncwarp()
// between steps is required -- confirm the targeted compute capability.
float ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai+xi;
ai=x[i]; xi=x[i+16]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi;
}
// Column-wise sum of squares: block b accumulates x[i]*x[i] for the elements
// mapping to y[(i/sy)%ny]; assumes a 64-thread block.
__global__ void _sumabs2_32_21(int nx, float *x, int sy, int ny, float *y) {
// x[i] goes into y[(i/sy)%ny]
__shared__ float buffer[64];
int t = threadIdx.x;
int b = blockIdx.x;
float ai, xi;
// sum the elements assigned to this thread
ai = 0; // identity element for addition
if (sy == 1) {
int istep = 64*ny;
for (int i=b+t*ny; i<nx; i+=istep) {
xi=x[i]; xi=(xi*xi); ai=ai+xi; // square then accumulate
}
} else {
int jstep = sy*ny;
for (int j=0; j<nx; j+=jstep) {
int i0 = j+b*sy;
int i1 = i0+sy;
for (int i=i0+t; i<i1; i+=64) {
xi=x[i]; xi=(xi*xi); ai=ai+xi;
}
}
}
buffer[t] = ai;
__syncthreads();
// help sum the entries in the block
// NOTE(review): loop is a no-op (starts at stride 32 with condition >32);
// the warp routine below performs the whole tree reduction.
for(int stride=64/2; stride>32; stride>>=1) {
if(t < stride) {
ai=buffer[t]; xi=buffer[stride+t]; buffer[t]=ai+xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(t<32) {
_sumabs2_32_21_0(buffer,t); // This reuses warpSum from 20 scalar reduction.
}
__syncthreads();
if(t==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
// Host entry point: one 64-thread block per output element.
#ifdef __cplusplus
extern "C" {
#endif
void sumabs2_32_21(int nx, float *x, int sy, int ny, float *y) {
// x[i] goes into y[(i/sy)%ny]
// _sumabs2_32_21<<<64,64>>>(nx,x,sy,ny,y);
_sumabs2_32_21<<<ny,64>>>(nx,x,sy,ny,y);
}
#ifdef __cplusplus
}
#endif
// Warp-synchronous tail of the 64-thread sum-of-squares reduction (double).
__device__ void _sumabs2_64_21_0(volatile double *x, int i) {
//for optimizing warps, volatile must be used as register optimization will lead to wrong answers
// NOTE(review): relies on implicit warp lockstep; on Volta+ a __syncwarp()
// between steps is required -- confirm the targeted compute capability.
double ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai+xi;
ai=x[i]; xi=x[i+16]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi;
}
// Column-wise sum of squares (double): block b accumulates x[i]*x[i] for the
// elements mapping to y[(i/sy)%ny]; assumes a 64-thread block.
__global__ void _sumabs2_64_21(int nx, double *x, int sy, int ny, double *y) {
// x[i] goes into y[(i/sy)%ny]
__shared__ double buffer[64];
int t = threadIdx.x;
int b = blockIdx.x;
double ai, xi;
// sum the elements assigned to this thread
ai = 0; // identity element for addition
if (sy == 1) {
int istep = 64*ny;
for (int i=b+t*ny; i<nx; i+=istep) {
xi=x[i]; xi=(xi*xi); ai=ai+xi;
}
} else {
int jstep = sy*ny;
for (int j=0; j<nx; j+=jstep) {
int i0 = j+b*sy;
int i1 = i0+sy;
for (int i=i0+t; i<i1; i+=64) {
xi=x[i]; xi=(xi*xi); ai=ai+xi;
}
}
}
buffer[t] = ai;
__syncthreads();
// help sum the entries in the block
// NOTE(review): loop is a no-op (starts at stride 32 with condition >32);
// the warp routine below performs the whole tree reduction.
for(int stride=64/2; stride>32; stride>>=1) {
if(t < stride) {
ai=buffer[t]; xi=buffer[stride+t]; buffer[t]=ai+xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(t<32) {
_sumabs2_64_21_0(buffer,t); // This reuses warpSum from 20 scalar reduction.
}
__syncthreads();
if(t==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
// Host entry point: one 64-thread block per output element.
#ifdef __cplusplus
extern "C" {
#endif
void sumabs2_64_21(int nx, double *x, int sy, int ny, double *y) {
// x[i] goes into y[(i/sy)%ny]
// _sumabs2_64_21<<<64,64>>>(nx,x,sy,ny,y);
_sumabs2_64_21<<<ny,64>>>(nx,x,sy,ny,y);
}
#ifdef __cplusplus
}
#endif
// Warp-synchronous tail of the 64-thread nonzero-count reduction (float).
__device__ void _countnz_32_21_0(volatile float *x, int i) {
//for optimizing warps, volatile must be used as register optimization will lead to wrong answers
// NOTE(review): relies on implicit warp lockstep; on Volta+ a __syncwarp()
// between steps is required -- confirm the targeted compute capability.
float ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai+xi;
ai=x[i]; xi=x[i+16]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi;
}
// Column-wise nonzero count: block b counts the nonzero elements mapping to
// y[(i/sy)%ny] (count is carried in float); assumes a 64-thread block.
__global__ void _countnz_32_21(int nx, float *x, int sy, int ny, float *y) {
// x[i] goes into y[(i/sy)%ny]
__shared__ float buffer[64];
int t = threadIdx.x;
int b = blockIdx.x;
float ai, xi;
// sum the elements assigned to this thread
ai = 0; // identity element for addition
if (sy == 1) {
int istep = 64*ny;
for (int i=b+t*ny; i<nx; i+=istep) {
xi=x[i]; xi=(xi!=0); ai=ai+xi; // indicator: 1 if nonzero, else 0
}
} else {
int jstep = sy*ny;
for (int j=0; j<nx; j+=jstep) {
int i0 = j+b*sy;
int i1 = i0+sy;
for (int i=i0+t; i<i1; i+=64) {
xi=x[i]; xi=(xi!=0); ai=ai+xi;
}
}
}
buffer[t] = ai;
__syncthreads();
// help sum the entries in the block
// NOTE(review): loop is a no-op (starts at stride 32 with condition >32);
// the warp routine below performs the whole tree reduction.
for(int stride=64/2; stride>32; stride>>=1) {
if(t < stride) {
ai=buffer[t]; xi=buffer[stride+t]; buffer[t]=ai+xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(t<32) {
_countnz_32_21_0(buffer,t); // This reuses warpSum from 20 scalar reduction.
}
__syncthreads();
if(t==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
// Host entry point: one 64-thread block per output element.
#ifdef __cplusplus
extern "C" {
#endif
void countnz_32_21(int nx, float *x, int sy, int ny, float *y) {
// x[i] goes into y[(i/sy)%ny]
// _countnz_32_21<<<64,64>>>(nx,x,sy,ny,y);
_countnz_32_21<<<ny,64>>>(nx,x,sy,ny,y);
}
#ifdef __cplusplus
}
#endif
// Warp-synchronous tail of the 64-thread nonzero-count reduction (double).
__device__ void _countnz_64_21_0(volatile double *x, int i) {
//for optimizing warps, volatile must be used as register optimization will lead to wrong answers
// NOTE(review): relies on implicit warp lockstep; on Volta+ a __syncwarp()
// between steps is required -- confirm the targeted compute capability.
double ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai+xi;
ai=x[i]; xi=x[i+16]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi;
}
// Column-wise nonzero count (double): block b counts the nonzero elements
// mapping to y[(i/sy)%ny]; assumes a 64-thread block.
__global__ void _countnz_64_21(int nx, double *x, int sy, int ny, double *y) {
// x[i] goes into y[(i/sy)%ny]
__shared__ double buffer[64];
int t = threadIdx.x;
int b = blockIdx.x;
double ai, xi;
// sum the elements assigned to this thread
ai = 0; // identity element for addition
if (sy == 1) {
int istep = 64*ny;
for (int i=b+t*ny; i<nx; i+=istep) {
xi=x[i]; xi=(xi!=0); ai=ai+xi; // indicator: 1 if nonzero, else 0
}
} else {
int jstep = sy*ny;
for (int j=0; j<nx; j+=jstep) {
int i0 = j+b*sy;
int i1 = i0+sy;
for (int i=i0+t; i<i1; i+=64) {
xi=x[i]; xi=(xi!=0); ai=ai+xi;
}
}
}
buffer[t] = ai;
__syncthreads();
// help sum the entries in the block
// NOTE(review): loop is a no-op (starts at stride 32 with condition >32);
// the warp routine below performs the whole tree reduction.
for(int stride=64/2; stride>32; stride>>=1) {
if(t < stride) {
ai=buffer[t]; xi=buffer[stride+t]; buffer[t]=ai+xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(t<32) {
_countnz_64_21_0(buffer,t); // This reuses warpSum from 20 scalar reduction.
}
__syncthreads();
if(t==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
// Host entry point: one 64-thread block per output element.
#ifdef __cplusplus
extern "C" {
#endif
void countnz_64_21(int nx, double *x, int sy, int ny, double *y) {
// x[i] goes into y[(i/sy)%ny]
// _countnz_64_21<<<64,64>>>(nx,x,sy,ny,y);
_countnz_64_21<<<ny,64>>>(nx,x,sy,ny,y);
}
#ifdef __cplusplus
}
#endif
|
12,315 | #include "includes.h"
// One thread per azimuthal sector j: copy the sound speed of the two
// innermost radial rings (rows 0 and 1) and the two outermost rings
// (rows nrad-2 and nrad-1) of the nrad-by-nsec SoundSpeed grid into the
// four 1-D output arrays of length nsec.
__global__ void ReduceCsKernel (double *SoundSpeed, double *cs0, double *cs1, double *csnrm1, double *csnrm2, int nsec, int nrad)
{
    int j = threadIdx.x + blockDim.x*blockIdx.x;
    if (j >= nsec) return;   // guard the grid tail
    cs0[j]    = SoundSpeed[j];                         // row 0
    cs1[j]    = SoundSpeed[nsec + j];                  // row 1
    csnrm2[j] = SoundSpeed[(nrad - 2) * nsec + j];     // row nrad-2
    csnrm1[j] = SoundSpeed[(nrad - 1) * nsec + j];     // row nrad-1
}
12,316 | #include "includes.h"
// Block-wise sum reduction: each thread sums `elementsPerThread` consecutive
// input elements, partial sums are staged in dynamic shared memory, and
// thread 0 of each block writes the block total to d_global[blockIdx.x].
//
// Fixes over the original:
//  - the guard used `>` so index == numberOfElements read out of bounds;
//  - out-of-range threads returned early, skipping both the shared-memory
//    write (thread 0 then summed uninitialized slots) and __syncthreads()
//    (a divergent barrier is undefined behavior);
//  - the inner loop could run past numberOfElements when the count is not a
//    multiple of elementsPerThread.
__global__ void parallelReduction(int *d_array , int numberOfElements, int elementsPerThread,int numberOfThreadsPerBlock,int numberOfBlocks,int *d_global)
{
    extern __shared__ int d_blockMemmory[];
    int index = (blockIdx.x * blockDim.x + threadIdx.x) * elementsPerThread;
    int sum = 0;
    if (index < numberOfElements)
    {
        // Clamp the per-thread range to the end of the array.
        int end = index + elementsPerThread;
        if (end > numberOfElements)
            end = numberOfElements;
        for (int i = index; i < end; i++)
            sum = sum + d_array[i];
    }
    // Every thread writes its slot (0 for out-of-range threads) and every
    // thread reaches the barrier.
    d_blockMemmory[threadIdx.x] = sum;
    __syncthreads();
    if (threadIdx.x == 0)
    {
        int blockSum = 0;
        for (int i = 0; i < numberOfThreadsPerBlock; i++)
            blockSum = blockSum + d_blockMemmory[i];
        d_global[blockIdx.x] = blockSum;
    }
}
12,317 | #include <stdio.h>
// Demo kernel: each thread prints its block and thread index.
__global__
void HelloThreadIdx() { printf("Hello World from GPU! %d %d\n", blockIdx.x, threadIdx.x); }
// Second identical demo kernel, kept so main launches two distinct kernels.
__global__
void HelloBlockThreadIdx() { printf("Hello World from GPU! %d %d\n", blockIdx.x, threadIdx.x); }
int main() {
  // 2 blocks x 4 threads -> 8 printed lines per kernel.
  HelloThreadIdx<<<2, 4>>>();
  HelloBlockThreadIdx<<<2, 4>>>();
  // Device printf output is only flushed at synchronization; also check the
  // result so launch/execution failures are not silently ignored.
  cudaError_t err = cudaDeviceSynchronize();
  if (err != cudaSuccess) {
    fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err));
    return 1;
  }
  return 0;
}
|
12,318 | /* c4.cu
* Jonathan Lehman
* April 18, 2012
*
*/
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <sstream>
#include <string>
#include <cuda.h>
#include <sys/time.h>
#include <math.h>
using namespace std;
//macros user can change
#define width 7 //board width (if use GPU don't make more than 8)
#define height 6 //board height
#define toWin 4 //number in a row needed to win
#define maxDepth 5 //max depth for cpu recursive alg to search (for CPU calc)
#define useGPU 1 //boolean to determine if run comp move search on GPU (default false)
//macros user shouldn't change
#define numB 3 //number moves generate per block id (if gpu used)
#define numTX 1 //number moves generate per thread id x (if gpu used)
#define numTY 1 //number moves generate per thread id y (if gpu used)
#define tx width //number of threads in x dir
#define ty width //number threads in y dir
#define numMoves 2 * numB + numTX + numTY //number of total moves generated per thread
#define maxMoves width * height //used for determining game end by draw
#define winScore 100000
#define invalidScore 100000000
//board object to hold all necessary information about
//current board config
typedef struct{
int moveScore[width];//array scores of moves
char square[width][height];//array of element at each board square
int lastRow, lastCol;//last row and col played (for purpose of undoing)
int totalPieces;
}Board;
//function prototypes
// NOTE: two prototypes below were garbled by an HTML-entity mangling
// ("char ¤tPlayer" was originally "char &currentPlayer"); restored here
// to match the definitions later in the file.
void init(Board &board);
void checkArgs(int argc, char *argv[], int numArgs);
void printBoard(Board &board);
char getSquare(Board &board, int col, int row);
void doTurn(Board &board, int playerType, char currentPlayer);
void humanTurn(Board &board, char currentPlayer);
void compTurn(Board &board, char currentPlayer);
char checkWin(Board &board);
void changePlayer(char &currentPlayer);
int canMove(Board &board, int col);
void doMove(Board &board, int col, char currentPlayer);
int isDraw(Board &board);
void undoMove(Board &board, int col, char currentPlayer);
int evaluate(Board &board);
void checkGPUCapabilities(int, int, int, int, int);
double getTime();
//computer logic
int determineMove(Board& board, char player);
int alphabeta(Board& board, char player, int alpha, int beta, int depth);
//cuda functions
__global__ void generateMove(Board *board, int *move, char currentPlayer, long *scoreArray, long *finalScores);
__device__ void transferBoard(Board *newBoard, Board *oldBoard);
__device__ void devDoMove(Board &board, int col, char currentPlayer);
__device__ int devCanMove(Board &board, int col);
__device__ void devChangePlayer(char &currentPlayer);
__device__ int devEvaluate(Board &board, char maxPlayer);
__device__ char devCheckWin(Board &board);
__device__ int devIsDraw(Board &board);
__device__ char devGetSquare(Board &board, int col, int row);
//Keep track of the gpu time.
cudaEvent_t start, stop;
float elapsedTime;
int humanPlayers;
// Keep track of the cpu time.
double startTime, stopTime;
// Game driver: validates arguments, sets up the board, then alternates
// turns between the two players until a win or a draw.
int main(int argc, char *argv[]){
    //check arguments (sets the global humanPlayers)
    checkArgs(argc, argv, 2);
    //create and initialize board
    Board board;
    init(board);
    printf("\nConnect 4 Game:\n\nInitial Board:\n\n");
    printBoard(board);
    //represents players, 0 for computer, 1 for human player
    int player1, player2;
    if(humanPlayers == 0){
        player1 = 0;
        player2 = 0;
    }
    else if(humanPlayers == 1){
        player1 = 1;//human goes first
        player2 = 0;
    }
    else{
        player1 = 1;
        player2 = 1;
    }
    // BUGFIX: initialize winner; the original left it uninitialized, so a
    // draw detected before any checkWin() assignment read garbage.
    char winner = ' ';
    char currentPlayer = 'X';//X goes first
    //loop while players move
    while(1){
        //player 1 turn
        doTurn(board, player1, currentPlayer);
        if(isDraw(board) || (winner = checkWin(board)) != ' '){//break on draw or win
            break;
        }
        changePlayer(currentPlayer);
        //player 2 turn
        doTurn(board, player2, currentPlayer);
        if(isDraw(board) || (winner = checkWin(board)) != ' '){//break on draw or win
            break;
        }
        changePlayer(currentPlayer);
    }
    //announce the result
    if(winner != ' '){
        printf("\nGame Over.\nPlayer %c wins!\n", winner);
    }
    else{
        printf("\nGame Over.\nIt's a draw.\n");
    }
    return 0;
}
//check arguments (should allow user to set the number of players)
void checkArgs(int argc, char *argv[], int numArgs){
//check number of arguments
if(argc != numArgs){
fprintf(stderr, "\nIncorrect number of arguments, %d\nCorrect usage: \"c4 [0-2]\",\nwhere the number specified between 0 and 2 is the number of human players\n", argc - 1);
exit(1);
}
//check first argument
char* invalChar;
long arg;
//convert first argument to int
arg = strtol(argv[1], &invalChar, 10);
//check that first argument is between 0 and 2 and an int value
if((arg < 0) || (arg > 2) || (*invalChar)){
fprintf(stderr, "\nInvalid argument for c4, '%s'.\nThe argument must be an integer between 0 and 2 inclusive.\n", argv[1]);
exit(1);
}
//set number of human players
humanPlayers = arg;
}
//initialize the game board
void init(Board &board){
//set last moves to 0
board.lastCol = board.lastRow = 0;
//set number of pieces on board to 0
board.totalPieces = 0;
//set each board square to blank
for(int col = 0; col < width; col++){
for(int row = 0; row < height; row++){
board.square[col][row] = ' ';
}
//set move score to 0
board.moveScore[col] = 0;
}
}
//print board to terminal
void printBoard(Board &board){
//print elements on board
for(int row = height - 1; row >= 0; row--){
printf("|");
for(int col = 0; col < width; col++){
printf("%c|", getSquare(board, col, row));
}
printf("\n");
}
for(int col = 0; col < width; col++){
printf("--");
}
//print bottom of board with column numbers
printf("-\n|");
for(int col = 0; col < width; col++){
printf("%d|", col);
}
printf("\n\n");
}
//gets character at specific square on board
char getSquare(Board &board, int col, int row){
return board.square[col][row];
}
//do turn based on player type (human or computer)
void doTurn(Board &board, int playerType, char currentPlayer){
//state who's move it is
if(currentPlayer == 'X'){
printf("Player 1's Turn (X):\n");
}
else{
printf("Player 2's Turn (O):\n");
}
//determine whether to prompt for move, or generate computer move
if(playerType){//human
humanTurn(board, currentPlayer);
}
else{//computer
compTurn(board, currentPlayer);
}
}
//do human turn
void humanTurn(Board &board, char currentPlayer){
int move;
//prompt user for move, accept only if valid
do{
string str;
printf("\nPlease enter a valid column (0-6) as your move:\n");
getline(cin, str);
stringstream(str) >> move;
}while(move < 0 || move >= width || !canMove(board, move));
//make move
doMove(board, move, currentPlayer);
printBoard(board);
}
//do computer turn
void compTurn(Board &board, char currentPlayer){
int move;
//get computer move
if(!useGPU){
/* Start the timer. */
startTime = getTime();
move = determineMove(board, currentPlayer);
/* Stop the timer and print the resulting time. */
stopTime = getTime();
double totalTime = stopTime - startTime;
printf("CPU Time: %f secs\n", totalTime);
}
else{
do{
//make cuda kernel call
Board *cudaBoard;
int *cudaMove;
long *scoreArray;
long *finalScores;
int threadX = tx;
int threadY = ty;
int gridSize = pow(width, numB);
//allocate memory on GPU device
cudaMalloc((void**)&cudaBoard, sizeof(Board));
cudaMalloc((void**)&cudaMove, sizeof(int));
cudaMalloc((void**)&scoreArray, sizeof(long) * gridSize * gridSize * width);
cudaMalloc((void**)&finalScores, sizeof(long) * width);
//copy board to device
cudaMemcpy(cudaBoard, &board, sizeof(Board), cudaMemcpyHostToDevice);
//check that GPU can handle arguments
//checkGPUCapabilities(gridSize, gridSize, threadX, threadY, gridSize * gridSize);
/* Start the timer. */
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
/* Execute the kernel. */
dim3 block(threadX, threadY); //threads w x h
dim3 grid(gridSize, gridSize); //blocks w x h
//passes current board config and current player, and empty shell to store best move
generateMove<<<grid, block>>>(cudaBoard, cudaMove, currentPlayer, scoreArray, finalScores);
/* Wait for the kernel to complete. Needed for timing. */
cudaThreadSynchronize();//or device sync? apparently thread sync is outdated
/* Stop the timer and print the resulting time. */
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
//retrieve the results
cudaMemcpy(&move, cudaMove, sizeof(int), cudaMemcpyDeviceToHost);
/*int scorePerMove[7];
cudaMemcpy(scorePerMove, finalScores, sizeof(long) * 7, cudaMemcpyDeviceToHost);
for(int i = 0; i < 7; i++){
printf("%d\n", scorePerMove[i]);
}*/
//print any cuda error messages
const char* errorString = cudaGetErrorString(cudaGetLastError());
printf("GPU Error: %s\n", errorString);
//print gpu time
printf("GPU Time: %f secs\n", (elapsedTime / 1000.0));
//printf("move = %d\n", move);
//destroy cuda event
cudaEventDestroy(start);
cudaEventDestroy(stop);
/* Free the allocated device memory. */
cudaFree(cudaMove);
cudaFree(cudaBoard);
cudaFree(scoreArray);
cudaFree(finalScores);
}while(move < 0 || move > width);//in case something weird happens on GPU
}
//do move and print results
doMove(board, move, currentPlayer);
printf("\nComputer put piece in column %d\n", move);
printBoard(board);
}
//check if most recent move has caused a win
//return winners character piece, or blank for no win
// Checks whether the most recent move (board.lastCol/lastRow) completed a
// run of `toWin` pieces in any of the four directions. Returns the winner's
// piece character, or ' ' when there is no win.
// Each direction scans outward in both senses; both scans overshoot by one
// cell, so a run of length n yields end1 - end2 == n + 1, and the test
// `> toWin` is equivalent to `run length >= toWin`.
char checkWin(Board &board){
//only check near most recently placed piece
//to see if it causes a win
int col1,row1,col2,row2;
char player = getSquare(board, board.lastCol, board.lastRow);
//check for horizontal win
col1 = col2 = board.lastCol;
//check right
while(col1 < width && getSquare(board, col1, board.lastRow) == player){
col1++;
}
//Go left
while(col2 >= 0 && getSquare(board, col2, board.lastRow) == player){
col2--;
}
//check 4 in a row
if(col1 - col2 > toWin){
return player;
}
//check for a vertical win
row1 = row2 = board.lastRow;
//check up
while(row1 < height && getSquare(board, board.lastCol, row1) == player){
row1++;
}
//check down
while(row2 >= 0 && getSquare(board, board.lastCol, row2) == player){
row2--;
}
//check 4 in a row
if(row1 - row2 > toWin){
return player;
}
//check southeast/northwest diagonal win
col1 = col2 = board.lastCol;
row1 = row2 = board.lastRow;
//check southeast
while(row1 >= 0 && col1 < width && getSquare(board, col1, row1) == player){
col1++;
row1--;
}
//check northwest
while(row2 < height && col2 >= 0 && getSquare(board, col2, row2) == player) {
col2--;
row2++;
}
//check 4 in a row
if(col1 - col2 > toWin){
return player;
}
//check for northeast/southwest win
col1 = col2 = board.lastCol;
row1 = row2 = board.lastRow;
//check southwest
while(row1 >= 0 && col1 >= 0 && getSquare(board, col1, row1) == player){
col1--;
row1--;
}
//check northeast
while(row2 < height && col2 < width && getSquare(board, col2, row2) == player){
col2++;
row2++;
}
//check 4 in a row
if(col2 - col1 > toWin){
return player;
}
//no winner, return blank
return ' ';
}
//change players
void changePlayer(char ¤tPlayer){
if(currentPlayer == 'X'){
currentPlayer = 'O';
}
else{
currentPlayer = 'X';
}
}
//check if a move can be made in colum col
int canMove(Board &board, int col){
return board.square[col][height - 1] == ' ';
}
//make move on board (at colum col with piece current player)
void doMove(Board &board, int col, char currentPlayer){
//iterate through row in column and place in first empty spot
for(int row = 0; row < height; row++){
if(getSquare(board, col, row) == ' '){
//set data
board.square[col][row] = currentPlayer;
board.lastCol = col;
board.lastRow = row;
board.totalPieces++;//increment number of pieces
return;
}
}
}
//checks if game is over, assuming win has not been made at this point
//so draw
// Game is a draw once the board holds the maximum number of pieces
// (a win is assumed to have been checked for already).
int isDraw(Board &board){
    return board.totalPieces >= maxMoves;
}
//undo last move
void undoMove(Board &board, int col, char currentPlayer){
//remove last piece placed in row
int row = height-1;
//iterate down row in column piece was placed in until find piece
while (row >= 0 && getSquare(board, col, row) == ' '){
row--;
}
if (getSquare(board, col, row) == currentPlayer){
board.square[col][row] = ' ';
}
//decrement total
board.totalPieces--;
}
//returns best move out of possible moves (uses alphabeta function as subroutine)
int determineMove(Board &board, char currentPlayer){
//player X turn, maximize
if(currentPlayer == 'X'){
//iterate through moves and get scores
int maxScore = -invalidScore;
int maxMove = 0;
for (int move = 0; move < width; move++)
if(canMove(board, move)){
doMove(board, move, 'X');
int score = alphabeta(board, 'O', -invalidScore, invalidScore, 0);
board.moveScore[move] = score;
if(score >= maxScore){
maxScore = score;
maxMove = move;
}
undoMove(board,move,'X');
}
else{
//set move score to invalid score if can't move there
board.moveScore[move] = invalidScore;
}
//return move with highest score
return maxMove;
}
//player O turn, minimize
else if(currentPlayer == 'O'){
//iterate through moves and get scores
int minScore = invalidScore;
int minMove = 0;
for(int move = 0; move < width; move++){
if(canMove(board, move)){
doMove(board, move, 'O');
int score = alphabeta(board, 'X', -invalidScore, invalidScore, 0);
board.moveScore[move] = score;
if(score < minScore){
minScore = score;
minMove = move;
}
undoMove(board,move,'O');
}
else{
//set move score to invalid score if can't move there
board.moveScore[move] = invalidScore;
}
}
//Return the move with the least score
return minMove;
}
else{
//never gets called
return 0;
}
}
//returns highest score based on possible moves
int alphabeta(Board& board, char player, int alpha, int beta, int depth){
//check if win
char winner = checkWin(board);
if(winner == 'X'){
return winScore;
}
else if(winner == 'O'){
return -winScore;
}
if(depth >= maxDepth || isDraw(board)){
//return score of winless board
return evaluate(board);
}
//player X turn, will maximize
if(player == 'X'){
//iterate through moves and get scores
//int maxScore = -invalidScore;
for(int move = 0; move < width; move++)
if(canMove(board, move))
{
doMove(board, move, 'X');
int score = alphabeta(board, 'O', alpha, beta, depth + 1);
undoMove(board, move, 'X');
if(score > alpha){
alpha = score;
}
if(alpha >= beta){
return alpha;
}
}
return alpha;
}
//player O turn, minimize
else if(player == 'O'){
//iterate through moves and get scores
//int minScore = invalidScore;
for(int move = 0; move < width; move++)
if(canMove(board, move)){
doMove(board, move, 'O');
int score = alphabeta(board, 'X', alpha, beta, depth + 1);
undoMove(board,move,'O');
if(score < beta){
beta = score;
}
if(alpha >= beta){
return beta;
}
}
return beta;
}
else{
//never gets called
return 0;
}
}
//evaluation function used to get a score when the game does not reach the end
int evaluate(Board &board){
int score = 0;
//Score for each position
//middle is favorable, because more 4 in a row possibilites
//and blocks more of opponents
//|1|2|3|4|3|2|1|
//|2|3|4|5|4|3|2|
//|3|4|5|6|5|4|3|
//|2|3|4|5|4|3|2|
//|1|2|3|4|3|2|1|
//|0|1|2|3|2|1|0|
//iterate through columns
for(int col = 0; col < width; col++){
int colScore = (width / 2) - col;
//make positive if negative
if (colScore < 0){
colScore = -colScore;
}
colScore = (width / 2) - colScore;
//Count the number of pieces in each column
//and score accordingly
for(int row = 0; row < height; row++){
int rowScore = (height / 2) - row;
//make positive if negative
if(rowScore < 0){
rowScore = -rowScore;
}
rowScore = (height / 2) - rowScore;
if(getSquare(board, col, row) == 'X'){
score += colScore + rowScore;
}
else if(getSquare(board, col, row) == 'O'){
score -= colScore + rowScore;
}
}
}
return score;
}
// Wall-clock time in seconds (microsecond resolution) via gettimeofday.
double getTime(){
    struct timeval tv;
    gettimeofday(&tv, 0);
    return tv.tv_sec + tv.tv_usec / 1000000.0;
}
// Queries the active device's properties and exits with a message if the
// requested grid/block dimensions or data size exceed its limits.
// NOTE(review): the error messages say "nqueens" -- apparently copied from
// another program -- and the memory message prints (size*sizeof(float))*3
// while the check uses size*sizeof(int); flagged here, strings left as-is.
void checkGPUCapabilities(int gridW, int gridH, int blockW, int blockH, int size){
//check what GPU is being used
int devId;
cudaGetDevice( &devId );
//get device properties for GPU being used
cudaDeviceProp gpuProp;
cudaGetDeviceProperties( &gpuProp, devId );
//check if GPU has enough memory
if(gpuProp.totalGlobalMem < (size * sizeof(int))){
fprintf(stderr, "\nnqueens: Insufficient GPU. GPU does not have enough memory to handle the data size: %ld. It can only handle data sizes up to %ld.\n", (size * sizeof(float)) * 3, gpuProp.totalGlobalMem);
exit(1);
}
//check if GPU can handle the number of threads per bloc
if(gpuProp.maxThreadsPerBlock < (blockW * blockH)){
fprintf(stderr, "\nnqueens: Insufficient GPU. GPU can only handle %d threads per block, not %d.\n", gpuProp.maxThreadsPerBlock, (blockW * blockH));
exit(1);
}
//check that GPU can handle the number of threads in the block width
if(gpuProp.maxThreadsDim[0] < blockW){
fprintf(stderr, "\nnqueens: Insufficient GPU. GPU can only handle %d threads as the block width of each block, not %d.\n", gpuProp.maxThreadsDim[0], blockW );
exit(1);
}
//check that GPU can handle the number of threads in the block height
if(gpuProp.maxThreadsDim[1] < blockH){
fprintf(stderr, "\nnqueens: Insufficient GPU. GPU can only handle %d threads as the block height of each block, not %d.\n", gpuProp.maxThreadsDim[1], blockH );
exit(1);
}
//check that GPU can handle the number of blocks in the grid width
if(gpuProp.maxGridSize[0] < gridW){
fprintf(stderr, "\nnqueens: Insufficient GPU. GPU can only handle %d blocks as the grid width of each grid, not %d.\n", gpuProp.maxGridSize[0], gridW );
exit(1);
}
//check that GPU can handle the number of blocks in the grid height
if(gpuProp.maxGridSize[1] < gridH){
fprintf(stderr, "\nnqueens: Insufficient GPU. GPU can only handle %d blocks as the grid height of each grid, not %d.\n", gpuProp.maxGridSize[1], gridH );
exit(1);
}
}
//cuda kernel to generate best move for computer
//cuda kernel to generate best move for computer
// Each (block, thread) coordinate decodes a fixed sequence of candidate
// moves from its block/thread indices, plays them on a private board copy,
// and scores the resulting position; per-first-move totals are then
// aggregated and block 0 writes the best column into *move.
// NOTE(review): scoreArray is accumulated with += by every block with no
// atomics, overlapping index ranges across blocks, no zero-initialization
// here, and no grid-wide barrier before block 0 reads it back -- the result
// depends on launch-order races. A correct version needs a host-side
// cudaMemset, atomicAdd, and a separate aggregation kernel (or cooperative
// launch with grid.sync()).
__global__
void generateMove(Board *board, int *move, char currentPlayer, long *scoreArray, long *finalScores){
//shared memory to reduce global mem access, speeds up algorithm
__shared__ long evalVals[tx][ty][numMoves];//array to store each threads evaluation (then used to store partial summations of block totals per move)
__shared__ char moves[tx][ty][numMoves];//stores moves generated by block and thread IDs
//stores board in local memory so each thread is not writing over each others board data
Board newBoard;
transferBoard(&newBoard, board);
char maxPlayer = currentPlayer;
// Decode this thread's move sequence: numB base-width digits from each of
// blockIdx.x and blockIdx.y, numTX from threadIdx.x, then threadIdx.y.
int mvCtr = 0;
int start = mvCtr;
int end = mvCtr + numB;
mvCtr = end;
int rem = blockIdx.x;
for(int i = start; i < end; i++){
moves[threadIdx.x][threadIdx.y][i] = rem % width;
rem = rem / width;
}
rem = blockIdx.y;
start = mvCtr;
end = mvCtr + numB;
mvCtr = end;
for(int i = start; i < end; i++){
moves[threadIdx.x][threadIdx.y][i] = rem % width;
rem = rem / width;
}
rem = threadIdx.x;
start = mvCtr;
end = mvCtr + numTX;
mvCtr = end;
for(int i = start; i < end; i++){
moves[threadIdx.x][threadIdx.y][i] = rem % width;
rem = rem / width;
}
moves[threadIdx.x][threadIdx.y][mvCtr] = threadIdx.y;
//reset any existing mem to 0
evalVals[threadIdx.x][threadIdx.y][0] = 0;
//iterate through generated moves, trying then doing if possible
for(int i = 0; i < numMoves; i++){
//see if move can be made
if(devCanMove(newBoard, moves[threadIdx.x][threadIdx.y][i])){
//make move
devDoMove(newBoard, moves[threadIdx.x][threadIdx.y][i], currentPlayer);
char winner;
//check win or evaluate, add to score
if((winner = devCheckWin(newBoard)) == ' '){//no winner
evalVals[threadIdx.x][threadIdx.y][0] += devEvaluate(newBoard, maxPlayer);
}
else{//winner
if(winner == maxPlayer){
evalVals[threadIdx.x][threadIdx.y][0] = winScore;//comp wins
if(i == 0){
// NOTE(review): unsynchronized write to *move, potentially racing
// with the final write at the bottom of the kernel.
*move = moves[threadIdx.x][threadIdx.y][0];
//return;
}
}
else{
evalVals[threadIdx.x][threadIdx.y][0] = -winScore;//opp wins
if(i == 1){
//only block move if playing in col doesnt actually help them
if(moves[threadIdx.x][threadIdx.y][0] != moves[threadIdx.x][threadIdx.y][1]){
*move = moves[threadIdx.x][threadIdx.y][1];
//return;
}
}
/*if(i == 3){
if(moves[threadIdx.x][threadIdx.y][0] != moves[threadIdx.x][threadIdx.y][3] && moves[threadIdx.x][threadIdx.y][2] != moves[threadIdx.x][threadIdx.y][3]){
*move = moves[threadIdx.x][threadIdx.y][3];
//return;
}
}*/
}
break;//don't do any more work than necessary, win
}
//change players
devChangePlayer(currentPlayer);
}
else{
if(i == 0){
evalVals[threadIdx.x][threadIdx.y][0] = -invalidScore;
}
break;//don't do any more work than necessary, invalid move or draw
}
}
//synchronize threads to do following calculations and comparisons etc
__syncthreads();
//compare thread scores, to get total score for each first move in each block block
if(threadIdx.x == 0 && threadIdx.y == 0){
//iterate through scores per thread, and add to get block total
// NOTE(review): += into scoreArray assumes it was zeroed, and the index
// (blockLinear + gridDim.x*firstMove) collides between different blocks.
for(int i = 0; i < blockDim.x; i++){
for(int j = 0; j < blockDim.y; j++){
scoreArray[(blockIdx.y * gridDim.x + blockIdx.x) + (gridDim.x * moves[i][j][0])] += evalVals[i][j][0];
}
}
}
//synchronize threads to do following calculations and comparisons etc
// NOTE(review): __syncthreads() is block-local; block 0 reading scoreArray
// below is NOT ordered after the other blocks' writes above.
__syncthreads();
//split work among all threads in block zero to find sum, then find sum of these
if((blockIdx.y * gridDim.x + blockIdx.x) == 0){
int work = (gridDim.x * gridDim.y) / (blockDim.x * blockDim.y);//work per thread
int start = work * (threadIdx.y * blockDim.x + threadIdx.x);
//spit work among threads and combine scores for starting moves
for(int i = start; i < start + work; i++){
for(int j = 0; j < width; j++){
evalVals[threadIdx.x][threadIdx.y][j] += scoreArray[i + (gridDim.x * j)];
}
}
//synchronize threads to do following calculations and comparisons etc
__syncthreads();
//compute in thread 0 of block 0
if(threadIdx.x == 0 && threadIdx.y == 0){
for(int i = 0; i < blockDim.x; i++){
for(int j = 0; j < blockDim.y; j++){
for(int k = 0; k < width; k++){
finalScores[k] += evalVals[i][j][k];
}
}
}
//synchronize threads to do following calculations and comparisons etc
// NOTE(review): this __syncthreads() sits inside a divergent branch (only
// thread (0,0) executes it) -- undefined behavior; it should be removed or
// hoisted outside the if.
__syncthreads();
//compare threads max
// NOTE(review): bestOverallMove stays uninitialized if no column passes the
// devCanMove test below.
char bestOverallMove;
int bestOverallScore = finalScores[0] - 1;
Board b2;
transferBoard(&b2, board);
for(int i = 0; i < width; i++){
int scr = finalScores[i];//reduce global mem accesses
if(scr > bestOverallScore && devCanMove(b2, i)){//also ensure that move is valid
bestOverallScore = scr;
bestOverallMove = i;
}
}
//set final move
(*move) = bestOverallMove;
}
}
//ALSO TODO: debug macro, for printing timing output etc (stuff user wouldnt want to see if playing game)
}
/*cuda device functions*/
//copy data from old board to new board
//copy data from old board to new board
__device__
void transferBoard(Board *newBoard, Board *oldBoard){
    // Duplicate the last-move bookkeeping first, then every column's
    // score and every square of the grid.
    newBoard->lastRow = oldBoard->lastRow;
    newBoard->lastCol = oldBoard->lastCol;
    newBoard->totalPieces = oldBoard->totalPieces;
    for(int c = 0; c < width; c++){
        newBoard->moveScore[c] = oldBoard->moveScore[c];
        for(int r = 0; r < height; r++){
            newBoard->square[c][r] = oldBoard->square[c][r];
        }
    }
}
//check if a move can be made in colum col
//check if a move can be made in colum col
__device__
int devCanMove(Board &board, int col){
    // A column can still accept a piece while its top (highest-index)
    // row is empty.
    return (board.square[col][height - 1] == ' ') ? 1 : 0;
}
//make move on board (at colum col with piece current player)
//make move on board (at colum col with piece current player)
__device__
void devDoMove(Board &board, int col, char currentPlayer){
    // Scan the column from the bottom row upward; drop the piece into the
    // first empty cell, record it as the most recent move, and stop.
    int row = 0;
    while(row < height){
        if(devGetSquare(board, col, row) != ' '){
            row++;
            continue;
        }
        board.square[col][row] = currentPlayer;
        board.lastCol = col;
        board.lastRow = row;
        board.totalPieces++;//one more piece on the board
        return;
    }
}
//change players
//change players
//Toggles the side to move between 'X' and 'O', in place via reference.
//BUG FIX: the parameter was corrupted to "char ¤tPlayer" by an
//HTML-entity mangling ("&curren" rendered as the currency sign), which
//does not compile; restored to "char &currentPlayer".
__device__
void devChangePlayer(char &currentPlayer){
    if(currentPlayer == 'X'){
        currentPlayer = 'O';
    }
    else{
        currentPlayer = 'X';
    }
}
//evaluation function used to get a score when the game does not reach the end
//evaluation function used to get a score when the game does not reach the end
//Positional heuristic: every cell has a weight that grows toward the
//centre of the board (centre cells participate in more potential lines).
//The weight is (width/2 - |col - width/2|) + (height/2 - |row - height/2|).
//Cells holding maxPlayer's piece add their weight; every other cell
//(opponent's pieces AND empty cells) subtracts it, exactly as the
//original two-branch version did.
__device__
int devEvaluate(Board &board, char maxPlayer){
    int score = 0;
    for(int col = 0; col < width; col++){
        //distance of this column from the centre column
        int dc = col - (width / 2);
        if(dc < 0){
            dc = -dc;
        }
        int colScore = (width / 2) - dc;
        for(int row = 0; row < height; row++){
            //distance of this row from the centre row
            int dr = row - (height / 2);
            if(dr < 0){
                dr = -dr;
            }
            int rowScore = (height / 2) - dr;
            int weight = colScore + rowScore;
            score += (devGetSquare(board, col, row) == maxPlayer) ? weight : -weight;
        }
    }
    return score;
}
//gets character at specific square on board
//gets character at specific square on board
__device__
char devGetSquare(Board &board, int col, int row){
    // Plain accessor: the piece character, or ' ' for an empty cell.
    char piece = board.square[col][row];
    return piece;
}
//check if most recent move has caused a win
//return winners character piece, or blank for no win
//check if most recent move has caused a win
//return winners character piece, or blank for no win
//
//Technique: for each of the four directions (horizontal, vertical, the two
//diagonals) scan outward from the last-placed piece in both senses until a
//non-matching cell or the board edge is reached. Both scans start on the
//placed piece itself, so after the loops the two cursors sit one step past
//each end of the run and (cursor1 - cursor2 - 1) is the run length. The
//test "cursor1 - cursor2 > toWin" therefore fires when the run length is
//at least toWin. (assumes toWin is the required run length, e.g. 4 —
//TODO confirm against toWin's definition elsewhere in the file)
__device__
char devCheckWin(Board &board){
    //only check near most recently placed piece
    //to see if it causes a win
    int col1,row1,col2,row2;
    //piece that was just played; all scans match against it
    char player = devGetSquare(board, board.lastCol, board.lastRow);
    //check for horizontal win
    col1 = col2 = board.lastCol;
    //check right (bounds test short-circuits before the board read)
    while(col1 < width && devGetSquare(board, col1, board.lastRow) == player){
        col1++;
    }
    //Go left
    while(col2 >= 0 && devGetSquare(board, col2, board.lastRow) == player){
        col2--;
    }
    //check 4 in a row
    if(col1 - col2 > toWin){
        return player;
    }
    //check for a vertical win
    row1 = row2 = board.lastRow;
    //check up
    while(row1 < height && devGetSquare(board, board.lastCol, row1) == player){
        row1++;
    }
    //check down
    while(row2 >= 0 && devGetSquare(board, board.lastCol, row2) == player){
        row2--;
    }
    //check 4 in a row
    if(row1 - row2 > toWin){
        return player;
    }
    //check southeast/northwest diagonal win
    col1 = col2 = board.lastCol;
    row1 = row2 = board.lastRow;
    //check southeast (col increases while row decreases)
    while(row1 >= 0 && col1 < width && devGetSquare(board, col1, row1) == player){
        col1++;
        row1--;
    }
    //check northwest (col decreases while row increases)
    while(row2 < height && col2 >= 0 && devGetSquare(board, col2, row2) == player) {
        col2--;
        row2++;
    }
    //check 4 in a row (column span measures the diagonal run length)
    if(col1 - col2 > toWin){
        return player;
    }
    //check for northeast/southwest win
    col1 = col2 = board.lastCol;
    row1 = row2 = board.lastRow;
    //check southwest (col and row both decrease)
    while(row1 >= 0 && col1 >= 0 && devGetSquare(board, col1, row1) == player){
        col1--;
        row1--;
    }
    //check northeast (col and row both increase)
    while(row2 < height && col2 < width && devGetSquare(board, col2, row2) == player){
        col2++;
        row2++;
    }
    //check 4 in a row (col2 ended right of col1 on this diagonal)
    if(col2 - col1 > toWin){
        return player;
    }
    //no winner, return blank
    return ' ';
}
//checks if game is over, assuming win has not been made at this point
//so draw
__device__
int devIsDraw(Board &board){
return board.totalPieces >= maxMoves;
} |
12,319 | /*利用cuda完成两个1024*1024矩阵的加法*/
#include<iostream>
#include<stdlib.h>
#include<sys/time.h>
#include<math.h>
#include"cuda_runtime.h"
#define cols 1024
#define rows 1024
using namespace std;
// Element-wise sum of two rows x cols matrices stored as arrays of device
// row pointers; one thread computes one (row, col) cell. Out-of-range
// threads (from the +1 grid padding) fall through the guard and do nothing.
__global__ void Add(float** Ad,float** Bd,float** Cd)
{
    const int col = blockDim.x * blockIdx.x + threadIdx.x;
    const int row = blockDim.y * blockIdx.y + threadIdx.y;
    if(row < rows && col < cols)
    {
        Cd[row][col] = Ad[row][col] + Bd[row][col];
    }
}
// Abort with a readable message when a CUDA runtime call fails.
// (The original checked no return codes at all, so any failure — bad
// device, OOM, launch error — silently produced garbage output.)
static void checkCuda(cudaError_t err, const char *what)
{
    if(err != cudaSuccess)
    {
        cerr << what << " failed: " << cudaGetErrorString(err) << endl;
        exit(1);
    }
}
// Add two 1024x1024 matrices on the GPU, then report the summed absolute
// error against the expected value (2 + 2 = 4) and the elapsed wall time.
int main()
{
    struct timeval start, end;
    float **A,**B,**C,**Ad,**Bd,**Cd;   // row-pointer tables (host-built / device)
    float *a,*b,*c,*ad,*bd,*cd;         // flat element buffers (host / device)
    int n=rows * cols;
    // NOTE: the pointer tables are allocated with `cols` entries but indexed
    // by row inside the kernel; this only works because rows == cols here.
    A=new float* [cols];
    B=new float* [cols];
    C=new float* [cols];
    a=new float [n];
    b=new float [n];
    c=new float [n];
    checkCuda(cudaMalloc((void**)&Ad,sizeof(float*)*cols), "cudaMalloc Ad");
    checkCuda(cudaMalloc((void**)&Bd,sizeof(float*)*cols), "cudaMalloc Bd");
    checkCuda(cudaMalloc((void**)&Cd,sizeof(float*)*cols), "cudaMalloc Cd");
    checkCuda(cudaMalloc((void**)&ad,sizeof(float)*n), "cudaMalloc ad");
    checkCuda(cudaMalloc((void**)&bd,sizeof(float)*n), "cudaMalloc bd");
    checkCuda(cudaMalloc((void**)&cd,sizeof(float)*n), "cudaMalloc cd");
    for(int i=0;i<n;i++)
    {
        a[i]=2.0f;
        b[i]=2.0f;
    }
    for(int i=0;i<cols;i++)
    {
        // ad/bd/cd are flat device buffers; to address them as 2-D matrices
        // on the GPU we build, on the host, tables of device row pointers
        // that are copied into Ad/Bd/Cd below.
        A[i]=ad+i*rows;
        B[i]=bd+i*rows;
        C[i]=cd+i*rows;
    }
    gettimeofday( &start, NULL);
    checkCuda(cudaMemcpy(Ad,A,cols*sizeof(float*),cudaMemcpyHostToDevice), "copy Ad");
    checkCuda(cudaMemcpy(Bd,B,cols*sizeof(float*),cudaMemcpyHostToDevice), "copy Bd");
    checkCuda(cudaMemcpy(Cd,C,cols*sizeof(float*),cudaMemcpyHostToDevice), "copy Cd");
    checkCuda(cudaMemcpy(ad,a,n*sizeof(float),cudaMemcpyHostToDevice), "copy ad");
    checkCuda(cudaMemcpy(bd,b,n*sizeof(float),cudaMemcpyHostToDevice), "copy bd");
    dim3 dimBlock(16,16);
    dim3 dimGrid(cols/16+1,rows/16+1);
    Add<<<dimGrid,dimBlock>>>(Ad,Bd,Cd);
    checkCuda(cudaGetLastError(), "kernel launch");
    // This blocking device-to-host copy also waits for the kernel to finish,
    // so the timing below covers the full transfer + compute round trip.
    checkCuda(cudaMemcpy(c,cd,n*sizeof(float),cudaMemcpyDeviceToHost), "copy c");
    gettimeofday( &end, NULL );
    float target=4.0f;
    float error=0.0f;
    for(int i=0;i<n;i++)
    {
        // fabsf: the original used abs(), which with only <math.h> included
        // may bind to the integer overload and truncate the difference.
        error+=fabsf(target-c[i]);
    }
    cout<<"error is "<<error<<endl;
    int timeuse = 1000000 * ( end.tv_sec - start.tv_sec ) + end.tv_usec - start.tv_usec;
    cout << "total time is " << timeuse/1000 << "ms" <<endl;
    delete [] a;
    delete [] b;
    delete [] c;
    delete [] A;
    delete [] B;
    delete [] C;
    cudaFree(Ad);
    cudaFree(Bd);
    cudaFree(Cd);
    cudaFree(ad);
    cudaFree(bd);
    cudaFree(cd);
    return 0;
}
|
12,320 | #include <curand_kernel.h>
// Draw one uniform float in (0, 1] from a per-thread curand state.
// NOTE(review): the incoming `idx` argument is dead — it is immediately
// overwritten with the calling thread's own global index, so every call
// made by the same thread advances the same RNG state regardless of the
// value passed in (InitializeTemperature passes its loop counter, which is
// ignored). Confirm whether per-idx streams were intended before relying
// on this.
__device__ float generate(
    curandState * globalState,
    int idx )
{
    idx = threadIdx.x + blockIdx.x * blockDim.x;
    // Work on a local copy of the state, then write it back so the random
    // sequence continues on the next call from this thread.
    curandState localState = globalState[ idx ];
    float RANDOM = curand_uniform( &localState );
    globalState[ idx ] = localState;
    return RANDOM;
}
// Initialise one curand state per thread from the shared seed, using the
// thread's global index as the subsequence (offset 0).
__global__ void setup_kernel(
    curandState * state,
    unsigned long seed )
{
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    curand_init( seed, gid, 0, &state[ gid ] );
}
// Fill ioArray[0..inArrSize) with uniform random values drawn via generate().
// NOTE(review): every launched thread runs this entire loop and writes the
// whole array, so launching more than one thread does redundant (and racy)
// work — confirm this kernel is meant to be launched with a single thread.
__global__ void InitializeTemperature(
    float * const ioArray,
    curandState * globalState,
    const int inArrSize )
{
    for ( int i = 0; i < inArrSize; i++ )
    {
        ioArray[ i ] = generate( globalState, i );
    }
}
/* ========================================================================== */
/* HeatConduction */
/* -------------------------------------------------------------------------- */
/*!
* @function HeatConduction2D
*
* @abstract function to calculate the 2D heat conduction in a body
*
* @ Initial conditions:
T(0,y,t) = T(Lx,y,t) = 0
T(x,0,t) = T(x,Ly,t) = 0
T(x,y,0) = initial temperature
0 <= x <= Lx
0 <= y <= Ly
0 <= t <= T
We are assuming a square body and we divide it to small squares each of
them having a temperature.
The temperature flows from warmer to colder square.
 Temperature can flow to the neighboring squares (left, right, top, bottom)
We are using the appropriate offset in order to move in the above places.
We are applying the appropriate boundary conditions when trying to move
to neighbor places.
* @param inWidth [ input ] The width of the body (cm)
*
 * @param inHeight [ input ] The height of the body (cm)
*
* @param inTemp [ input ] The initial temperature of body
*
* @param ouTemp [ output ] The temperature of body after solving the system
*/
/* ========================================================================== */
// One explicit update step of the 2-D heat equation on an inWidth x inHeight
// grid: each cell moves a quarter of the way toward the average of its four
// neighbours. Edge cells wrap their neighbour index to the opposite edge.
// Expects a 2-D launch with one thread per cell; excess threads return early.
__global__ void HeatConduction2D(
    const int inWidth,
    const int inHeight,
    const float * const inTemp,
    float * const ouTemp )
{
    int rowIdx = threadIdx.y + blockIdx.y * blockDim.y;
    // BUG FIX: the column index was computed as
    //   threadIdx.x + blockIdx.x * blockIdx.x
    // (block index squared) — the block index must be scaled by the block
    // width, blockDim.x, to produce a correct global column.
    int colIdx = threadIdx.x + blockIdx.x * blockDim.x;
    int offset = rowIdx * inWidth + colIdx;
    if ( rowIdx >= inHeight || colIdx >= inWidth ) return;
    // linear offsets of the four neighbours in the flat array
    int left = offset - 1;
    int right = offset + 1;
    int top = offset + inWidth;
    int bottom = offset - inWidth;
    // boundary handling: wrap each out-of-range neighbour to the opposite edge
    if ( 0 == colIdx ) left += inWidth;
    if ( inWidth - 1 == colIdx ) right -= inWidth;
    if ( 0 == rowIdx ) bottom += inWidth * inHeight;
    if ( inHeight - 1 == rowIdx ) top -= inWidth * inHeight;
    ouTemp[ offset ] = inTemp[ offset ] + (1.f/4.f) * ( inTemp[ left ] + inTemp[ right ] + inTemp[ top ] + inTemp[ bottom ] - 4 * inTemp[ offset ] );
}
|
12,321 | #include "includes.h"
// For each element x of mat, evaluate a num_terms-deep continued fraction
//   d_j = 2*(order + j)/x + 1/d_{j+1},  with d_{num_terms} = 2*(order + num_terms)/x,
// from the innermost term outward, and store 1/d_1 in target.
__global__ void kBesselRatioActivationContinuedFraction(float* mat, float* target, float order, int num_terms, unsigned int len) {
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;
    // grid-stride loop: each thread handles elements tid, tid+stride, ...
    for (unsigned int i = tid; i < len; i += stride) {
        const float x = mat[i];
        float denom = 2 * (order + num_terms) / x;
        for (int j = num_terms - 1; j >= 1; j--) {
            denom = 2 * (order + j) / x + 1 / denom;
        }
        target[i] = 1 / denom;
    }
}
// Sum over the w x h rectangle anchored at (x, y), computed from a
// summed-area table with the classic four-corner formula D + A - B - C.
// A corner that falls off the low edge (index would be negative after the
// -1 shift) contributes 0.
__device__
int rectanglesSum(int** integralImage, int x, int y, int w, int h)
{
    const bool lowX = x > 0;
    const bool lowY = y > 0;
    const bool highX = x + w > 0;
    const bool highY = y + h > 0;
    const int A = (lowX && lowY) ? integralImage[x - 1][y - 1] : 0;
    const int B = (highX && lowY) ? integralImage[x + w - 1][y - 1] : 0;
    const int C = (lowX && highY) ? integralImage[x - 1][y + h - 1] : 0;
    const int D = (highX && highY) ? integralImage[x + w - 1][y + h - 1] : 0;
    return A + D - B - C;
}
extern "C"
// One thread per rectangle: scale the rectangle by coeff, split it into an
// upper and a lower half, and emit (lower-half sum) - (upper-half sum) as
// the feature value. allRectangles packs 4 ints (x, y, w, h) per rectangle.
__global__ void haar_type_C(int** integralImage, int* allRectangles, int numRectangles, float coeff, int* haarFeatures)
{
    const unsigned int tidX = blockIdx.x * blockDim.x + threadIdx.x;
    if (tidX < numRectangles)
    {
        const int* rect = &allRectangles[tidX * 4];
        const int x = (int) (rect[0] * coeff);
        const int y = (int) (rect[1] * coeff);
        const int w = (int) (rect[2] * coeff);
        const int h = (int) (rect[3] * coeff);
        const int half = h / 2;
        const int upper = rectanglesSum(integralImage, x, y, w, half);
        const int lower = rectanglesSum(integralImage, x, y + half, w, half);
        haarFeatures[tidX] = lower - upper;
    }
    // Barrier kept from the original. No shared memory is used here, so it
    // only aligns thread completion within each block; it is reached by all
    // threads (the guarded block above contains no barrier).
    __syncthreads();
}
|
12,323 | /* This file is part of the Marching Cubes GPU based algorithm based on
* Paul Bourke's tabulation approach to marching cubes
* http://paulbourke.net/geometry/polygonise/
*
*
* We model cubes with 8 vertices labelled as below
*
*
* 4--------(4)---------5
* /| /|
* / | / |
* / | / |
* (7) | (5) |
* / | / |
* / (8) / (9)
* / | / |
* 7---------(6)--------6 |
| | | |
* | 0------(0)---|-------1
* | / | /
* (11) / (10) /
* | / | /
* | (3) | (1)
* | / | /
* | / | /
* |/ |/
* 3---------(2)--------2
*
* where X axis is horizontal, +ve to right
* Y axis is vertical, +ve upwards
* Z axis is into page, +ve towards back
*
* 0: ( x, y, z+1 ) 4: ( x, y+1, z+1 )
* 1: ( x+1, y, z+1 ) 5: ( x+1, y+1, z+1 )
* 2: ( x+1, y, z ) 6: ( x+1, y+1, z )
* 3: ( x, y, z ) 7: ( x, y+1, z )
*
 * There are 12 edges, 0 - 11 where each edge connects two vertices as follows:
*
* 0: 0, 1 1: 1, 2 2: 2, 3 3: 3, 0
* 4: 4, 5 5: 5, 6 6: 6, 7 7: 7, 4
* 8: 0, 4 9: 1, 5 10: 2, 6 11: 3, 7
*/
// NB Below, these are ordered from lower to higher value
typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
// For each of the 12 cube edges (indexed 0-11, matching the diagram in the
// file-header comment), the pair of cube-vertex indices it connects.
__constant__
uint16_t EDGE_VERTICES[12][2] = {
    { 0, 1 }, { 2, 1 }, { 3, 2 }, { 3, 0 },
    { 4, 5 }, { 6, 5 }, { 7, 6 }, { 7, 4 },
    { 0, 4 }, { 1, 5 }, { 2, 6 }, { 3, 7 }
};
/*
* This file describes the relationship between the vertices under the surface
* and the edges which are therefore impacted
* There are 256 distinct entries
*/
__constant__
uint16_t EDGE_TABLE[256]={
0x0 , 0x109, 0x203, 0x30a, 0x406, 0x50f, 0x605, 0x70c,
0x80c, 0x905, 0xa0f, 0xb06, 0xc0a, 0xd03, 0xe09, 0xf00,
0x190, 0x99 , 0x393, 0x29a, 0x596, 0x49f, 0x795, 0x69c,
0x99c, 0x895, 0xb9f, 0xa96, 0xd9a, 0xc93, 0xf99, 0xe90,
0x230, 0x339, 0x33 , 0x13a, 0x636, 0x73f, 0x435, 0x53c,
0xa3c, 0xb35, 0x83f, 0x936, 0xe3a, 0xf33, 0xc39, 0xd30,
0x3a0, 0x2a9, 0x1a3, 0xaa , 0x7a6, 0x6af, 0x5a5, 0x4ac,
0xbac, 0xaa5, 0x9af, 0x8a6, 0xfaa, 0xea3, 0xda9, 0xca0,
0x460, 0x569, 0x663, 0x76a, 0x66 , 0x16f, 0x265, 0x36c,
0xc6c, 0xd65, 0xe6f, 0xf66, 0x86a, 0x963, 0xa69, 0xb60,
0x5f0, 0x4f9, 0x7f3, 0x6fa, 0x1f6, 0xff , 0x3f5, 0x2fc,
0xdfc, 0xcf5, 0xfff, 0xef6, 0x9fa, 0x8f3, 0xbf9, 0xaf0,
0x650, 0x759, 0x453, 0x55a, 0x256, 0x35f, 0x55 , 0x15c,
0xe5c, 0xf55, 0xc5f, 0xd56, 0xa5a, 0xb53, 0x859, 0x950,
0x7c0, 0x6c9, 0x5c3, 0x4ca, 0x3c6, 0x2cf, 0x1c5, 0xcc ,
0xfcc, 0xec5, 0xdcf, 0xcc6, 0xbca, 0xac3, 0x9c9, 0x8c0,
0x8c0, 0x9c9, 0xac3, 0xbca, 0xcc6, 0xdcf, 0xec5, 0xfcc,
0xcc , 0x1c5, 0x2cf, 0x3c6, 0x4ca, 0x5c3, 0x6c9, 0x7c0,
0x950, 0x859, 0xb53, 0xa5a, 0xd56, 0xc5f, 0xf55, 0xe5c,
0x15c, 0x55 , 0x35f, 0x256, 0x55a, 0x453, 0x759, 0x650,
0xaf0, 0xbf9, 0x8f3, 0x9fa, 0xef6, 0xfff, 0xcf5, 0xdfc,
0x2fc, 0x3f5, 0xff , 0x1f6, 0x6fa, 0x7f3, 0x4f9, 0x5f0,
0xb60, 0xa69, 0x963, 0x86a, 0xf66, 0xe6f, 0xd65, 0xc6c,
0x36c, 0x265, 0x16f, 0x66 , 0x76a, 0x663, 0x569, 0x460,
0xca0, 0xda9, 0xea3, 0xfaa, 0x8a6, 0x9af, 0xaa5, 0xbac,
0x4ac, 0x5a5, 0x6af, 0x7a6, 0xaa , 0x1a3, 0x2a9, 0x3a0,
0xd30, 0xc39, 0xf33, 0xe3a, 0x936, 0x83f, 0xb35, 0xa3c,
0x53c, 0x435, 0x73f, 0x636, 0x13a, 0x33 , 0x339, 0x230,
0xe90, 0xf99, 0xc93, 0xd9a, 0xa96, 0xb9f, 0x895, 0x99c,
0x69c, 0x795, 0x49f, 0x596, 0x29a, 0x393, 0x99 , 0x190,
0xf00, 0xe09, 0xd03, 0xc0a, 0xb06, 0xa0f, 0x905, 0x80c,
0x70c, 0x605, 0x50f, 0x406, 0x30a, 0x203, 0x109, 0x0
};
__constant__
uint8_t VERTICES_FOR_CUBE_TYPE[256] = {
0, 3, 3, 6, 3, 6, 6, 9,
3, 6, 6, 9, 6, 9, 9, 6,
3, 6, 6, 9, 6, 9, 9, 12,
6, 9, 9, 12, 9, 12, 12, 9,
3, 6, 6, 9, 6, 9, 9, 12,
6, 9, 9, 12, 9, 12, 12, 9,
6, 9, 9, 6, 9, 12, 12, 9,
9, 12, 12, 9, 12, 15, 15, 6,
3, 6, 6, 9, 6, 9, 9, 12,
6, 9, 9, 12, 9, 12, 12, 9,
6, 9, 9, 12, 9, 12, 12, 15,
9, 12, 12, 15, 12, 15, 15, 12,
6, 9, 9, 12, 9, 12, 6, 9,
9, 12, 12, 15, 12, 15, 9, 6,
9, 12, 12, 9, 12, 15, 9, 6,
12, 15, 15, 12, 15, 6, 12, 3,
3, 6, 6, 9, 6, 9, 9, 12,
6, 9, 9, 12, 9, 12, 12, 9,
6, 9, 9, 12, 9, 12, 12, 15,
9, 6, 12, 9, 12, 9, 15, 6,
6, 9, 9, 12, 9, 12, 12, 15,
9, 12, 12, 15, 12, 15, 15, 12,
9, 12, 12, 9, 12, 15, 15, 12,
12, 9, 15, 6, 15, 12, 6, 3,
6, 9, 9, 12, 9, 12, 12, 15,
9, 12, 12, 15, 6, 9, 9, 6,
9, 12, 12, 15, 12, 15, 15, 6,
12, 9, 15, 12, 9, 6, 12, 3,
9, 12, 12, 15, 12, 15, 9, 12,
12, 15, 15, 6, 9, 12, 6, 3,
6, 9, 9, 6, 9, 12, 6, 3,
9, 6, 12, 3, 6, 3, 3, 0
};
/**
* The Triangle Table specifies, for each type of cube, which edges are intersceted by the vertices of a sequence
* of triangles.
* e.g. for cube type 1, the vertices of the (only) triangle lie on edges 4, 3, 0
*/
__constant__
int TRIANGLE_TABLE[256][16] = {
{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 8, 3, 9, 8, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 2, 10, 0, 2, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 8, 3, 2, 10, 8, 10, 9, 8, -1, -1, -1, -1, -1, -1, -1},
{3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 11, 2, 8, 11, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 9, 0, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 11, 2, 1, 9, 11, 9, 8, 11, -1, -1, -1, -1, -1, -1, -1},
{3, 10, 1, 11, 10, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 10, 1, 0, 8, 10, 8, 11, 10, -1, -1, -1, -1, -1, -1, -1},
{3, 9, 0, 3, 11, 9, 11, 10, 9, -1, -1, -1, -1, -1, -1, -1},
{9, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 3, 0, 7, 3, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 9, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 1, 9, 4, 7, 1, 7, 3, 1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 4, 7, 3, 0, 4, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1},
{9, 2, 10, 9, 0, 2, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1},
{2, 10, 9, 2, 9, 7, 2, 7, 3, 7, 9, 4, -1, -1, -1, -1},
{8, 4, 7, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{11, 4, 7, 11, 2, 4, 2, 0, 4, -1, -1, -1, -1, -1, -1, -1},
{9, 0, 1, 8, 4, 7, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1},
{4, 7, 11, 9, 4, 11, 9, 11, 2, 9, 2, 1, -1, -1, -1, -1},
{3, 10, 1, 3, 11, 10, 7, 8, 4, -1, -1, -1, -1, -1, -1, -1},
{1, 11, 10, 1, 4, 11, 1, 0, 4, 7, 11, 4, -1, -1, -1, -1},
{4, 7, 8, 9, 0, 11, 9, 11, 10, 11, 0, 3, -1, -1, -1, -1},
{4, 7, 11, 4, 11, 9, 9, 11, 10, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 4, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 5, 4, 1, 5, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{8, 5, 4, 8, 3, 5, 3, 1, 5, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 8, 1, 2, 10, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1},
{5, 2, 10, 5, 4, 2, 4, 0, 2, -1, -1, -1, -1, -1, -1, -1},
{2, 10, 5, 3, 2, 5, 3, 5, 4, 3, 4, 8, -1, -1, -1, -1},
{9, 5, 4, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 11, 2, 0, 8, 11, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1},
{0, 5, 4, 0, 1, 5, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1},
{2, 1, 5, 2, 5, 8, 2, 8, 11, 4, 8, 5, -1, -1, -1, -1},
{10, 3, 11, 10, 1, 3, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1},
{4, 9, 5, 0, 8, 1, 8, 10, 1, 8, 11, 10, -1, -1, -1, -1},
{5, 4, 0, 5, 0, 11, 5, 11, 10, 11, 0, 3, -1, -1, -1, -1},
{5, 4, 8, 5, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1},
{9, 7, 8, 5, 7, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 3, 0, 9, 5, 3, 5, 7, 3, -1, -1, -1, -1, -1, -1, -1},
{0, 7, 8, 0, 1, 7, 1, 5, 7, -1, -1, -1, -1, -1, -1, -1},
{1, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 7, 8, 9, 5, 7, 10, 1, 2, -1, -1, -1, -1, -1, -1, -1},
{10, 1, 2, 9, 5, 0, 5, 3, 0, 5, 7, 3, -1, -1, -1, -1},
{8, 0, 2, 8, 2, 5, 8, 5, 7, 10, 5, 2, -1, -1, -1, -1},
{2, 10, 5, 2, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1},
{7, 9, 5, 7, 8, 9, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 7, 9, 7, 2, 9, 2, 0, 2, 7, 11, -1, -1, -1, -1},
{2, 3, 11, 0, 1, 8, 1, 7, 8, 1, 5, 7, -1, -1, -1, -1},
{11, 2, 1, 11, 1, 7, 7, 1, 5, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 8, 8, 5, 7, 10, 1, 3, 10, 3, 11, -1, -1, -1, -1},
{5, 7, 0, 5, 0, 9, 7, 11, 0, 1, 0, 10, 11, 10, 0, -1},
{11, 10, 0, 11, 0, 3, 10, 5, 0, 8, 0, 7, 5, 7, 0, -1},
{11, 10, 5, 7, 11, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 0, 1, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 8, 3, 1, 9, 8, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1},
{1, 6, 5, 2, 6, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 6, 5, 1, 2, 6, 3, 0, 8, -1, -1, -1, -1, -1, -1, -1},
{9, 6, 5, 9, 0, 6, 0, 2, 6, -1, -1, -1, -1, -1, -1, -1},
{5, 9, 8, 5, 8, 2, 5, 2, 6, 3, 2, 8, -1, -1, -1, -1},
{2, 3, 11, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{11, 0, 8, 11, 2, 0, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 9, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1},
{5, 10, 6, 1, 9, 2, 9, 11, 2, 9, 8, 11, -1, -1, -1, -1},
{6, 3, 11, 6, 5, 3, 5, 1, 3, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 11, 0, 11, 5, 0, 5, 1, 5, 11, 6, -1, -1, -1, -1},
{3, 11, 6, 0, 3, 6, 0, 6, 5, 0, 5, 9, -1, -1, -1, -1},
{6, 5, 9, 6, 9, 11, 11, 9, 8, -1, -1, -1, -1, -1, -1, -1},
{5, 10, 6, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 3, 0, 4, 7, 3, 6, 5, 10, -1, -1, -1, -1, -1, -1, -1},
{1, 9, 0, 5, 10, 6, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1},
{10, 6, 5, 1, 9, 7, 1, 7, 3, 7, 9, 4, -1, -1, -1, -1},
{6, 1, 2, 6, 5, 1, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 5, 5, 2, 6, 3, 0, 4, 3, 4, 7, -1, -1, -1, -1},
{8, 4, 7, 9, 0, 5, 0, 6, 5, 0, 2, 6, -1, -1, -1, -1},
{7, 3, 9, 7, 9, 4, 3, 2, 9, 5, 9, 6, 2, 6, 9, -1},
{3, 11, 2, 7, 8, 4, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1},
{5, 10, 6, 4, 7, 2, 4, 2, 0, 2, 7, 11, -1, -1, -1, -1},
{0, 1, 9, 4, 7, 8, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1},
{9, 2, 1, 9, 11, 2, 9, 4, 11, 7, 11, 4, 5, 10, 6, -1},
{8, 4, 7, 3, 11, 5, 3, 5, 1, 5, 11, 6, -1, -1, -1, -1},
{5, 1, 11, 5, 11, 6, 1, 0, 11, 7, 11, 4, 0, 4, 11, -1},
{0, 5, 9, 0, 6, 5, 0, 3, 6, 11, 6, 3, 8, 4, 7, -1},
{6, 5, 9, 6, 9, 11, 4, 7, 9, 7, 11, 9, -1, -1, -1, -1},
{10, 4, 9, 6, 4, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 10, 6, 4, 9, 10, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1},
{10, 0, 1, 10, 6, 0, 6, 4, 0, -1, -1, -1, -1, -1, -1, -1},
{8, 3, 1, 8, 1, 6, 8, 6, 4, 6, 1, 10, -1, -1, -1, -1},
{1, 4, 9, 1, 2, 4, 2, 6, 4, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 8, 1, 2, 9, 2, 4, 9, 2, 6, 4, -1, -1, -1, -1},
{0, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{8, 3, 2, 8, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1},
{10, 4, 9, 10, 6, 4, 11, 2, 3, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 2, 2, 8, 11, 4, 9, 10, 4, 10, 6, -1, -1, -1, -1},
{3, 11, 2, 0, 1, 6, 0, 6, 4, 6, 1, 10, -1, -1, -1, -1},
{6, 4, 1, 6, 1, 10, 4, 8, 1, 2, 1, 11, 8, 11, 1, -1},
{9, 6, 4, 9, 3, 6, 9, 1, 3, 11, 6, 3, -1, -1, -1, -1},
{8, 11, 1, 8, 1, 0, 11, 6, 1, 9, 1, 4, 6, 4, 1, -1},
{3, 11, 6, 3, 6, 0, 0, 6, 4, -1, -1, -1, -1, -1, -1, -1},
{6, 4, 8, 11, 6, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{7, 10, 6, 7, 8, 10, 8, 9, 10, -1, -1, -1, -1, -1, -1, -1},
{0, 7, 3, 0, 10, 7, 0, 9, 10, 6, 7, 10, -1, -1, -1, -1},
{10, 6, 7, 1, 10, 7, 1, 7, 8, 1, 8, 0, -1, -1, -1, -1},
{10, 6, 7, 10, 7, 1, 1, 7, 3, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 6, 1, 6, 8, 1, 8, 9, 8, 6, 7, -1, -1, -1, -1},
{2, 6, 9, 2, 9, 1, 6, 7, 9, 0, 9, 3, 7, 3, 9, -1},
{7, 8, 0, 7, 0, 6, 6, 0, 2, -1, -1, -1, -1, -1, -1, -1},
{7, 3, 2, 6, 7, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 3, 11, 10, 6, 8, 10, 8, 9, 8, 6, 7, -1, -1, -1, -1},
{2, 0, 7, 2, 7, 11, 0, 9, 7, 6, 7, 10, 9, 10, 7, -1},
{1, 8, 0, 1, 7, 8, 1, 10, 7, 6, 7, 10, 2, 3, 11, -1},
{11, 2, 1, 11, 1, 7, 10, 6, 1, 6, 7, 1, -1, -1, -1, -1},
{8, 9, 6, 8, 6, 7, 9, 1, 6, 11, 6, 3, 1, 3, 6, -1},
{0, 9, 1, 11, 6, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{7, 8, 0, 7, 0, 6, 3, 11, 0, 11, 6, 0, -1, -1, -1, -1},
{7, 11, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 8, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 9, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{8, 1, 9, 8, 3, 1, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1},
{10, 1, 2, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 3, 0, 8, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1},
{2, 9, 0, 2, 10, 9, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1},
{6, 11, 7, 2, 10, 3, 10, 8, 3, 10, 9, 8, -1, -1, -1, -1},
{7, 2, 3, 6, 2, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{7, 0, 8, 7, 6, 0, 6, 2, 0, -1, -1, -1, -1, -1, -1, -1},
{2, 7, 6, 2, 3, 7, 0, 1, 9, -1, -1, -1, -1, -1, -1, -1},
{1, 6, 2, 1, 8, 6, 1, 9, 8, 8, 7, 6, -1, -1, -1, -1},
{10, 7, 6, 10, 1, 7, 1, 3, 7, -1, -1, -1, -1, -1, -1, -1},
{10, 7, 6, 1, 7, 10, 1, 8, 7, 1, 0, 8, -1, -1, -1, -1},
{0, 3, 7, 0, 7, 10, 0, 10, 9, 6, 10, 7, -1, -1, -1, -1},
{7, 6, 10, 7, 10, 8, 8, 10, 9, -1, -1, -1, -1, -1, -1, -1},
{6, 8, 4, 11, 8, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 6, 11, 3, 0, 6, 0, 4, 6, -1, -1, -1, -1, -1, -1, -1},
{8, 6, 11, 8, 4, 6, 9, 0, 1, -1, -1, -1, -1, -1, -1, -1},
{9, 4, 6, 9, 6, 3, 9, 3, 1, 11, 3, 6, -1, -1, -1, -1},
{6, 8, 4, 6, 11, 8, 2, 10, 1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 3, 0, 11, 0, 6, 11, 0, 4, 6, -1, -1, -1, -1},
{4, 11, 8, 4, 6, 11, 0, 2, 9, 2, 10, 9, -1, -1, -1, -1},
{10, 9, 3, 10, 3, 2, 9, 4, 3, 11, 3, 6, 4, 6, 3, -1},
{8, 2, 3, 8, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1},
{0, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 9, 0, 2, 3, 4, 2, 4, 6, 4, 3, 8, -1, -1, -1, -1},
{1, 9, 4, 1, 4, 2, 2, 4, 6, -1, -1, -1, -1, -1, -1, -1},
{8, 1, 3, 8, 6, 1, 8, 4, 6, 6, 10, 1, -1, -1, -1, -1},
{10, 1, 0, 10, 0, 6, 6, 0, 4, -1, -1, -1, -1, -1, -1, -1},
{4, 6, 3, 4, 3, 8, 6, 10, 3, 0, 3, 9, 10, 9, 3, -1},
{10, 9, 4, 6, 10, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 9, 5, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 4, 9, 5, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1},
{5, 0, 1, 5, 4, 0, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1},
{11, 7, 6, 8, 3, 4, 3, 5, 4, 3, 1, 5, -1, -1, -1, -1},
{9, 5, 4, 10, 1, 2, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1},
{6, 11, 7, 1, 2, 10, 0, 8, 3, 4, 9, 5, -1, -1, -1, -1},
{7, 6, 11, 5, 4, 10, 4, 2, 10, 4, 0, 2, -1, -1, -1, -1},
{3, 4, 8, 3, 5, 4, 3, 2, 5, 10, 5, 2, 11, 7, 6, -1},
{7, 2, 3, 7, 6, 2, 5, 4, 9, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 4, 0, 8, 6, 0, 6, 2, 6, 8, 7, -1, -1, -1, -1},
{3, 6, 2, 3, 7, 6, 1, 5, 0, 5, 4, 0, -1, -1, -1, -1},
{6, 2, 8, 6, 8, 7, 2, 1, 8, 4, 8, 5, 1, 5, 8, -1},
{9, 5, 4, 10, 1, 6, 1, 7, 6, 1, 3, 7, -1, -1, -1, -1},
{1, 6, 10, 1, 7, 6, 1, 0, 7, 8, 7, 0, 9, 5, 4, -1},
{4, 0, 10, 4, 10, 5, 0, 3, 10, 6, 10, 7, 3, 7, 10, -1},
{7, 6, 10, 7, 10, 8, 5, 4, 10, 4, 8, 10, -1, -1, -1, -1},
{6, 9, 5, 6, 11, 9, 11, 8, 9, -1, -1, -1, -1, -1, -1, -1},
{3, 6, 11, 0, 6, 3, 0, 5, 6, 0, 9, 5, -1, -1, -1, -1},
{0, 11, 8, 0, 5, 11, 0, 1, 5, 5, 6, 11, -1, -1, -1, -1},
{6, 11, 3, 6, 3, 5, 5, 3, 1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 9, 5, 11, 9, 11, 8, 11, 5, 6, -1, -1, -1, -1},
{0, 11, 3, 0, 6, 11, 0, 9, 6, 5, 6, 9, 1, 2, 10, -1},
{11, 8, 5, 11, 5, 6, 8, 0, 5, 10, 5, 2, 0, 2, 5, -1},
{6, 11, 3, 6, 3, 5, 2, 10, 3, 10, 5, 3, -1, -1, -1, -1},
{5, 8, 9, 5, 2, 8, 5, 6, 2, 3, 8, 2, -1, -1, -1, -1},
{9, 5, 6, 9, 6, 0, 0, 6, 2, -1, -1, -1, -1, -1, -1, -1},
{1, 5, 8, 1, 8, 0, 5, 6, 8, 3, 8, 2, 6, 2, 8, -1},
{1, 5, 6, 2, 1, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 3, 6, 1, 6, 10, 3, 8, 6, 5, 6, 9, 8, 9, 6, -1},
{10, 1, 0, 10, 0, 6, 9, 5, 0, 5, 6, 0, -1, -1, -1, -1},
{0, 3, 8, 5, 6, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{10, 5, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{11, 5, 10, 7, 5, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{11, 5, 10, 11, 7, 5, 8, 3, 0, -1, -1, -1, -1, -1, -1, -1},
{5, 11, 7, 5, 10, 11, 1, 9, 0, -1, -1, -1, -1, -1, -1, -1},
{10, 7, 5, 10, 11, 7, 9, 8, 1, 8, 3, 1, -1, -1, -1, -1},
{11, 1, 2, 11, 7, 1, 7, 5, 1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 1, 2, 7, 1, 7, 5, 7, 2, 11, -1, -1, -1, -1},
{9, 7, 5, 9, 2, 7, 9, 0, 2, 2, 11, 7, -1, -1, -1, -1},
{7, 5, 2, 7, 2, 11, 5, 9, 2, 3, 2, 8, 9, 8, 2, -1},
{2, 5, 10, 2, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1},
{8, 2, 0, 8, 5, 2, 8, 7, 5, 10, 2, 5, -1, -1, -1, -1},
{9, 0, 1, 5, 10, 3, 5, 3, 7, 3, 10, 2, -1, -1, -1, -1},
{9, 8, 2, 9, 2, 1, 8, 7, 2, 10, 2, 5, 7, 5, 2, -1},
{1, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 7, 0, 7, 1, 1, 7, 5, -1, -1, -1, -1, -1, -1, -1},
{9, 0, 3, 9, 3, 5, 5, 3, 7, -1, -1, -1, -1, -1, -1, -1},
{9, 8, 7, 5, 9, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{5, 8, 4, 5, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1},
{5, 0, 4, 5, 11, 0, 5, 10, 11, 11, 3, 0, -1, -1, -1, -1},
{0, 1, 9, 8, 4, 10, 8, 10, 11, 10, 4, 5, -1, -1, -1, -1},
{10, 11, 4, 10, 4, 5, 11, 3, 4, 9, 4, 1, 3, 1, 4, -1},
{2, 5, 1, 2, 8, 5, 2, 11, 8, 4, 5, 8, -1, -1, -1, -1},
{0, 4, 11, 0, 11, 3, 4, 5, 11, 2, 11, 1, 5, 1, 11, -1},
{0, 2, 5, 0, 5, 9, 2, 11, 5, 4, 5, 8, 11, 8, 5, -1},
{9, 4, 5, 2, 11, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 5, 10, 3, 5, 2, 3, 4, 5, 3, 8, 4, -1, -1, -1, -1},
{5, 10, 2, 5, 2, 4, 4, 2, 0, -1, -1, -1, -1, -1, -1, -1},
{3, 10, 2, 3, 5, 10, 3, 8, 5, 4, 5, 8, 0, 1, 9, -1},
{5, 10, 2, 5, 2, 4, 1, 9, 2, 9, 4, 2, -1, -1, -1, -1},
{8, 4, 5, 8, 5, 3, 3, 5, 1, -1, -1, -1, -1, -1, -1, -1},
{0, 4, 5, 1, 0, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{8, 4, 5, 8, 5, 3, 9, 0, 5, 0, 3, 5, -1, -1, -1, -1},
{9, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 11, 7, 4, 9, 11, 9, 10, 11, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 4, 9, 7, 9, 11, 7, 9, 10, 11, -1, -1, -1, -1},
{1, 10, 11, 1, 11, 4, 1, 4, 0, 7, 4, 11, -1, -1, -1, -1},
{3, 1, 4, 3, 4, 8, 1, 10, 4, 7, 4, 11, 10, 11, 4, -1},
{4, 11, 7, 9, 11, 4, 9, 2, 11, 9, 1, 2, -1, -1, -1, -1},
{9, 7, 4, 9, 11, 7, 9, 1, 11, 2, 11, 1, 0, 8, 3, -1},
{11, 7, 4, 11, 4, 2, 2, 4, 0, -1, -1, -1, -1, -1, -1, -1},
{11, 7, 4, 11, 4, 2, 8, 3, 4, 3, 2, 4, -1, -1, -1, -1},
{2, 9, 10, 2, 7, 9, 2, 3, 7, 7, 4, 9, -1, -1, -1, -1},
{9, 10, 7, 9, 7, 4, 10, 2, 7, 8, 7, 0, 2, 0, 7, -1},
{3, 7, 10, 3, 10, 2, 7, 4, 10, 1, 10, 0, 4, 0, 10, -1},
{1, 10, 2, 8, 7, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 9, 1, 4, 1, 7, 7, 1, 3, -1, -1, -1, -1, -1, -1, -1},
{4, 9, 1, 4, 1, 7, 0, 8, 1, 8, 7, 1, -1, -1, -1, -1},
{4, 0, 3, 7, 4, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 8, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 9, 3, 9, 11, 11, 9, 10, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 10, 0, 10, 8, 8, 10, 11, -1, -1, -1, -1, -1, -1, -1},
{3, 1, 10, 11, 3, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 11, 1, 11, 9, 9, 11, 8, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 9, 3, 9, 11, 1, 2, 9, 2, 11, 9, -1, -1, -1, -1},
{0, 2, 11, 8, 0, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 2, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 3, 8, 2, 8, 10, 10, 8, 9, -1, -1, -1, -1, -1, -1, -1},
{9, 10, 2, 0, 9, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 3, 8, 2, 8, 10, 0, 1, 8, 1, 10, 8, -1, -1, -1, -1},
{1, 10, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 3, 8, 9, 1, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 9, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 3, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}
};
|
12,324 |
__device__ __constant__ float
gc_fAtoms[200][4];
// CUDA values. This is constant for now as all known CUDA implementations
// have a warp size of 32 and it makes all the calculations constant.
#define WARP_SIZE (32) //(warpSize)
#define HALF_WARP (WARP_SIZE / 2)
#define CEILDIV(m,n) \
(((m) + (n) - 1) / (n))
#define THREAD_CALCS (6)
// Upload atom records (x, y, z, charge per row) into the constant-memory
// table gc_fAtoms consumed by DoAtomsKernel.  atomsCount must not exceed
// the 200-entry capacity of gc_fAtoms.
extern "C"
void CopyAtoms(float *atoms, int atomsCount)
{
    const size_t bytes = (size_t)atomsCount * 4 * sizeof(float);
    cudaMemcpyToSymbol(gc_fAtoms, atoms, bytes);
}
// Compute one slice (z fixed at 1.0f) of the Coulomb potential grid from the
// atoms stored in constant memory.  Each thread evaluates THREAD_CALCS (6)
// grid columns spaced HALF_WARP apart so that every store is half-warp
// aligned.  Expected launch: blockDim.x == HALF_WARP,
// gridDim.x == ceil(width / (HALF_WARP * THREAD_CALCS)).
__global__ void
DoAtomsKernel(
float * p_pfGrid,
int p_iHeight,
int p_iWidth,
int p_iPitch,
int p_iAtomCount)
{
// This version only calculates one slice, not a full grid. blockDim.x is
// HALF_WARP.
const int
iy = (blockIdx.y * blockDim.y) + threadIdx.y;
if (iy >= p_iHeight)
{
return;
}
const int
bx = blockIdx.x * HALF_WARP * THREAD_CALCS,
ix = bx + threadIdx.x;
// The original version of this code assumed the data was an exact multiple
// of THREAD_CALCS * HALF_WARP (96) wide - this may not always be the case.
const float
x = ix,
y = iy;
float * const
outp = &p_pfGrid[(iy * p_iPitch) + ix];
float
ev0 = 0,
ev1 = 0,
ev2 = 0,
ev3 = 0,
ev4 = 0,
ev5 = 0;
for (int atom = 0; atom != p_iAtomCount; ++atom)
{
// All the points being done are in a line so y and z are constant.
const float
fy = y - gc_fAtoms[atom][1],
fz = 1.0f - gc_fAtoms[atom][2],
offset = (fy * fy) + (fz * fz),
charge = gc_fAtoms[atom][3];
// Do the same code THREAD_CALCS times.
const float
x0 = x - gc_fAtoms[atom][0],
x1 = x0 + HALF_WARP * 1,
x2 = x0 + HALF_WARP * 2,
x3 = x0 + HALF_WARP * 3,
x4 = x0 + HALF_WARP * 4,
x5 = x0 + HALF_WARP * 5;
// Add the effect of this charge to the running total of all current
// points being compared.  rsqrtf keeps the whole computation in single
// precision; the previous rsqrt() call promoted the float operand to
// double, which is much slower on consumer GPUs.
ev0 += charge * rsqrtf((x0 * x0) + offset);
ev1 += charge * rsqrtf((x1 * x1) + offset);
ev2 += charge * rsqrtf((x2 * x2) + offset);
ev3 += charge * rsqrtf((x3 * x3) + offset);
ev4 += charge * rsqrtf((x4 * x4) + offset);
ev5 += charge * rsqrtf((x5 * x5) + offset);
}
// We don't need any complex code to half-warp align the writes as
// we can just launch the threads cleverly to do it all for us.
if ((p_iWidth - bx) < (HALF_WARP * THREAD_CALCS))
{
// If we are in here then the number of grid points is not an exact
// multiple of HALF_WARP * THREAD_CALCS, so some results will be
// discarded.  However, calculating and discarding results is probably
// the best method for speed sake in the large majority of cases.
if (ix + HALF_WARP * 0 < p_iWidth)
{
outp[HALF_WARP * 0] = ev0;
// The hierarchical structure should mean that warps are better
// processed.
if (ix + HALF_WARP * 1 < p_iWidth)
{
outp[HALF_WARP * 1] = ev1;
if (ix + HALF_WARP * 2 < p_iWidth)
{
outp[HALF_WARP * 2] = ev2;
if (ix + HALF_WARP * 3 < p_iWidth)
{
outp[HALF_WARP * 3] = ev3;
if (ix + HALF_WARP * 4 < p_iWidth)
{
outp[HALF_WARP * 4] = ev4;
if (ix + HALF_WARP * 5 < p_iWidth)
{
outp[HALF_WARP * 5] = ev5;
}
}
}
}
}
}
}
else
{
// Fast code for the common case - based on code from "Programming
// Massively Parallel Processors".
outp[HALF_WARP * 0] = ev0;
outp[HALF_WARP * 1] = ev1;
outp[HALF_WARP * 2] = ev2;
outp[HALF_WARP * 3] = ev3;
outp[HALF_WARP * 4] = ev4;
outp[HALF_WARP * 5] = ev5;
}
}
// Host-side launcher for DoAtomsKernel.
// Each thread computes THREAD_CALCS (6) grid columns spaced HALF_WARP (16)
// apart, so gridDim.x covers the width in HALF_WARP * THREAD_CALCS chunks.
// NOTE(review): assumes p_iThreads is a positive multiple of HALF_WARP;
// otherwise p_iThreads / HALF_WARP truncates (and 0 would be an invalid
// block dimension) -- confirm with callers.
// NOTE(review): no cudaGetLastError() after the launch, so a bad launch
// configuration surfaces only at a later synchronizing call.
extern "C"
void
DoAtoms(
float * p_pfGrid,
int p_iHeight,
int p_iWidth,
int p_iThreads,
int p_iPitch,
int p_iAtoms)
{
dim3
// Number of blocks to execute in.
dimBlocks(CEILDIV(p_iWidth, HALF_WARP * THREAD_CALCS), CEILDIV(p_iHeight, p_iThreads / HALF_WARP)),
// Number of threads per block.
dimThreads(HALF_WARP, p_iThreads / HALF_WARP);
DoAtomsKernel<<<dimBlocks, dimThreads>>>(p_pfGrid, p_iHeight, p_iWidth, p_iPitch, p_iAtoms);
}
|
12,325 | #include <stdio.h>
#include <stdlib.h>
#define LEN_F 3073
#define TILE_WIDTH 32
// 3073/32 = 97.
// Multi-class (10-way) linear SVM trained with hinge-loss SGD.
// One thread per training example; x is row-major (total_examples x LEN_F),
// weights is row-major (LEN_F x 10), y holds integer class labels, and
// loss[i] receives example i's final hinge loss + L2 penalty.
// NOTE(review): every in-range thread updates the shared `weights` array
// with no atomics or synchronization across blocks -- this is lock-free
// ("Hogwild"-style) updating; races are presumably tolerated. Confirm.
// NOTE(review): the __syncthreads() calls below sit inside
// `if (tid_x < total_examples)`, so blocks that straddle the end of the
// data have threads that skip the barrier -- undefined behavior. Confirm
// total_examples is a multiple of blockDim.x, or hoist the barriers.
__global__ void sgd(float *x, float* y, float* weights,
float *loss,
float reg_strength,
float learning_rate,
int total_examples,
int max_epoch)
{
/* blockDim.x = 200 */
int tx = threadIdx.x; //200
int tid_x = blockIdx.x * blockDim.x + tx; // col
int yi;                  // integer class label of this example
int idx, idx_w;
// __shared__ float dw[LEN_F][10];
// float reg_strength = 5e4;
// float learning_rate = 1e-7;
float ds[10];            // per-class gradient indicator / multiplier
float distance[10];      // hinge margins: score[d] - score[yi] + 1
float dot_XW[10]; // stored in private mem of GPU
float loss_i[10];        // per-class hinge-loss contributions
float val, dot_tmp;
float tmp_dw;
float tmpW;
float sum_ds;
float sum_value;
float W_square;          // sum of squared weights (for the L2 penalty)
float sum_loss;
//__shared__ float mega_dw;
// printf("before for loop start ");
// each tid takes one data point
if (tid_x < total_examples) {
/* calculate dot(W, x[data_point]) */
yi = (int) y[tid_x]; //6
for(int epoch=0 ; epoch <max_epoch ; epoch++) {
// compute dot(x, W)
W_square = 0;
sum_loss = 0;
for (int w_col=0 ; w_col < 10 ; w_col++) {
sum_value = 0;
for(int k=0; k < LEN_F; k++) {
idx = tid_x * LEN_F + k;
idx_w = k * 10 + w_col;
dot_tmp = x[idx] * weights[idx_w];
sum_value += dot_tmp;
// if (w_col == 0 && tid_x == 10 && k == 0){
// printf("idx=%d ", idx); printf("idx_w=%d ", idx_w); printf("x[idx]=%f ", x[idx]);
// printf("weights[idx_w]=%f ", weights[idx_w]);printf("dot_tmp=%f ", dot_tmp);
// }
//
}//end of for-loop of features
dot_XW[w_col] = sum_value;
// if (tid_x == 10 && w_col == 0) {
// printf("A: epoch = %d, sum_value = %f; ", epoch, w_col, sum_value);
// printf("A: weights[4096] = %f", weights[4096]);
// }
//
} // end-for-w_col, dot-product finished.
// __syncthreads();
// dot_XW should finish updating by all threads now.
// if (tid_x == 10)
// printf("dot_XW[0] = %f ", dot_XW[0]);
//
// if (tid_x == 10){
// printf("after syncthreads, weights[tid] = %f, weights[%d]= %f ;", weights[tid_x], tid_x, weights[10]);
// }
//
// Hinge margins relative to the true class yi (margin of 1).
for(int d =0 ; d< 10 ; d++) {
distance[d] = dot_XW[d] - dot_XW[yi] + 1;
}
// calculate ds
// step 1: if corresponding distance > 0, set as 1.
// if corresponding distance <= 0, set as 0.
for(int d =0; d< 10 ; d++) {
if (distance[d] > 0)
ds[d] = 1.0f;
else
ds[d] = 0.0f;
}
// only calculate loss at the max_epoch
if (epoch == max_epoch - 1) {
for(int d =0; d< 10 ; d++) {
if (distance[d] > 0)
loss_i[d] = distance[d];
else
loss_i[d] = 0;
}
loss_i[yi] = 0;
for (int d=0; d< 10; d++)
sum_loss += loss_i[d];
}
// If yi = 5, set ds[5] = 0;
ds[yi] = 0;
sum_ds = 0;
// sum up 10 ds to sum_ds;
for (int d=0; d< 10; d++) {
sum_ds+= ds[d];
}
// __syncthreads();
// set ds[5] as -sum_ds
ds[yi] = -1 * sum_ds;
// if (tid_x == 10){
// printf("In middle, weights[0] = %f ;", weights[0]);
// }
//
// Gradient step on every weight touched by this example.
// NOTE(review): the L2 term 2*reg_strength*tmpW is NOT scaled by
// learning_rate here -- confirm this matches the intended update rule.
for(int f=0 ; f < LEN_F ; f++) {
for (int c=0 ; c < 10; c++) {
idx = f * 10 + c;
tmp_dw = x[tid_x * LEN_F + f] * ds[c];
tmpW = weights[idx];
// if (tid_x == 10 && f==0 && c == 0) {
// printf("x[%d]= %f, ds[%d]= %f ;", tid_x * LEN_F + f, x[tid_x * LEN_F + f], c, ds[c]);
// printf("weights[%d] = %f ;", idx, weights[idx]);
// }
//
if (epoch == max_epoch - 1)
W_square += tmpW * tmpW;
if (tmp_dw != 0) {
val = learning_rate * tmp_dw + 2 * reg_strength * tmpW;
weights[idx] = tmpW - val;
}
}
} // end-of-for-features
__syncthreads();
// if (tid_x == 10) {
// printf("epoch-%d, weights[0] = %f; ", epoch, weights[0]);
// }
//
}//End--of--epoch
// Add the L2 regularization penalty accumulated in the final epoch.
sum_loss += reg_strength * W_square;
// if (tid_x == 10) {
// printf("W_square[%d] = %f; ", tid_x, W_square);
//// printf("loss[%d] = %f; ", tid_x, sum_loss);
// }
//
loss[tid_x] = sum_loss;
__syncthreads();
}// end of if
}//End--of--global
|
// Element-wise vector addition: c[i] = a[i] + b[i] for i in [0, n).
// One thread per element; out-of-range threads do nothing.
__global__ void vector_add(float *c, const float *a, const float *b, const int n) {
    // Fix: use the global thread index.  The original used threadIdx.x alone,
    // which repeats in every block, so with a multi-block launch only the
    // first blockDim.x elements were ever computed (and computed repeatedly).
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        c[i] = a[i] + b[i];
    }
}
|
12,327 | #include <iostream>
#include <stdio.h>
#include <math.h>
using namespace std;
#define TILE_WIDTH 2
// Naive GEMM: c (m x k) = a (m x n) * b (n x k), row-major, one output
// element per thread.  Threads outside the output bounds exit early.
__global__ void MatrixMult(int m, int n, int k, float *a, float *b, float *c)
{
    const int row = threadIdx.y + blockIdx.y * blockDim.y;
    const int col = threadIdx.x + blockIdx.x * blockDim.x;
    if (row >= m || col >= k)
        return;
    // Inner product of row `row` of a with column `col` of b.
    float acc = 0.0;
    for (int e = 0; e < n; ++e)
        acc += a[row * n + e] * b[e * k + col];
    c[row * k + col] = acc;
}
// main fn
// Driver: builds small test matrices (a = sin(i), b = cos(i)), multiplies
// them on the GPU with MatrixMult, and prints a, b and the product c.
int main(void)
{
    int m = 4;
    int n = 6;
    int k = 7;
    float* a = new float[m*n];
    float* b = new float[n*k];
    float* c = new float[m*k];
    float *dev_a, *dev_b, *dev_c;
    // One thread per output element, rounded up to whole tiles.
    dim3 dimGrid((k-1)/TILE_WIDTH+1,(m-1)/TILE_WIDTH+1,1);
    dim3 dimBlock(TILE_WIDTH,TILE_WIDTH,1);
    cudaMalloc((void**)&dev_a, m*n*sizeof(float));
    cudaMalloc((void**)&dev_b, n*k*sizeof(float));
    cudaMalloc((void**)&dev_c, m*k*sizeof(float));
    for (int i=0; i<m*n; i++)
    {
        a[i] = sin((float) i);
    }
    for (int i=0; i<n*k; i++)
    {
        b[i] = cos((float) i);
    }
    cudaMemcpy(dev_a, a, m*n*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, n*k*sizeof(float), cudaMemcpyHostToDevice);
    MatrixMult<<<dimGrid,dimBlock>>>(m,n,k,dev_a,dev_b,dev_c);
    // The launch itself returns no status: check for configuration errors
    // here; execution errors surface at the blocking cudaMemcpy below.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        cout<<"Kernel launch failed: "<<cudaGetErrorString(err)<<"\n";
    }
    // Blocking copy; also synchronizes with the kernel.
    err = cudaMemcpy(c, dev_c, m*k*sizeof(float), cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
    {
        cout<<"Device-to-host copy failed: "<<cudaGetErrorString(err)<<"\n";
    }
    cout<<"a matrix: \n";
    for (int i=0; i<m; i++)
    {
        for (int j=0; j<n; j++)
        {
            cout<<a[n*i+j]<<" ";
        }
        cout<<"\n";
    }
    cout<<"b matrix: \n";
    for (int i=0; i<n; i++)
    {
        for (int j=0; j<k; j++)
        {
            cout<<b[k*i+j]<<" ";
        }
        cout<<"\n";
    }
    cout<<"c matrix: \n";
    for (int i=0; i<m; i++)
    {
        for (int j=0; j<k; j++)
        {
            cout<<c[k*i+j]<<" ";
        }
        cout<<"\n";
    }
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    delete [] a;
    delete [] b;
    delete [] c;
}
|
12,328 | #include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <vector>
#include <map>
#include <iterator>
#include <algorithm>
#include <assert.h>
#include <cstdlib>
//#include <time.h>
using namespace std;
int vertex_num;
int edge_num;
string fn;
float const PAGERANK_COEFFICIENT = 0.85f;
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) {
if (code != cudaSuccess) {
fprintf(stderr, "Error: %s\nFile %s, line %d\n", cudaGetErrorString(code), file, line);
exit(code);
}
}
// Parse an edge-list file ("src dst" per line) into CSR form: fills the
// destination array `edge` (pre-sized to edge_num) and builds the row-offset
// vector `vertex_begin` (length vertex_num + 1).  Lines that are blank or do
// not start with a digit are skipped.  Requires source vertices to appear in
// non-decreasing order (globals vertex_num / edge_num must already be set).
__host__
void Graphpreproc(
string const filename,
vector<int> &vertex_begin,
int* edge)
{
ifstream in_f;
vector<int> t;                 // source vertex of every edge, in file order
vector<int>::iterator itr;
int count = 0;
int e = 0;
in_f.open(filename.c_str(), ios::in);
string line;
char delim[3] = " \t"; //In most benchmarks, the delimiter is usually the space character or the tab character.
char* pch;
// Read the input graph line-by-line.  Using getline() as the loop condition
// avoids the classic `!eof()` bug where the final failed read reprocesses a
// stale buffer.
while (getline(in_f, line)) {
// Skip blank lines and any line starting with a non-digit (comments/headers).
if (line.empty() || line[0] < '0' || line[0] > '9')
continue;
// Bounded copy: the original strcpy() into a fixed 256-byte buffer
// overflowed on long lines.
char cstrLine[256];
strncpy(cstrLine, line.c_str(), sizeof(cstrLine) - 1);
cstrLine[sizeof(cstrLine) - 1] = '\0';
pch = strtok(cstrLine, delim);
if( pch != NULL )
t.push_back(atoi(pch));
else
continue;
pch = strtok( NULL, delim );
if( pch != NULL )
edge[e++] = atoi(pch);
else
continue;
}
// Convert the sorted source list into CSR row offsets.
itr = t.begin();
vertex_begin.push_back(0);
for (int i = 0; i < vertex_num - 1; i++){
while ((itr != t.end()) && (*itr == i)){
count++;
itr++;
}
count += vertex_begin.at(i);
vertex_begin.push_back(count);
count = 0;
}
vertex_begin.push_back(edge_num);
}
// Relabel edge destinations in first-seen order: the first time a vertex id
// appears it is assigned the next free id, later occurrences reuse it.
// trans[old_id] records the new id (valid only for vertices that occur).
// Uses globals vertex_num / edge_num.
__host__
void greedy_ord(
int* edge,
int* trans)
{
bool* vis = new bool[vertex_num];
memset(vis, false, sizeof(bool)*vertex_num);
int curr_pos = 0;
for(int e = 0; e < edge_num; e++){
if(!vis[edge[e]]){
vis[edge[e]] = true;
trans[edge[e]] = curr_pos;
edge[e] = curr_pos++;
}
else
edge[e] = trans[edge[e]];
}
// Fix: the original leaked this array on every call.
delete[] vis;
}
/*__global__ void kernel_vertex(
int const vertex_num,
const int* const vertex_begin,
float* const values,
float* const tmp)
{
int n = blockDim.x * gridDim.x/32; //total warp number
int tid = threadIdx.x % 32;
int wid = blockIdx.x * blockDim.x/32 + threadIdx.x/32;
for(int i = wid; i < vertex_num; i += n){
int degree = vertex_begin[i + 1] - vertex_begin[i];
if(degree > 0){
int loop_num = degree / 32;
if(tid < degree){
for(int j = 0; j <= loop_num; j++)
atomicAdd(&tmp[i], values[vertex_begin[i] + 32*j + tid%degree]);
}
if(tid == 0){
tmp[i] = PAGERANK_COEFFICIENT * tmp[i] + 1.0f - PAGERANK_COEFFICIENT;
values[i] = tmp[i] / degree;
}
}
}
}*/
// One PageRank-style update, one warp per vertex.  Warp `wid` processes
// vertices wid, wid+n, wid+2n, ... where n is the total warp count.
// Lanes accumulate neighbour values into tmp[i] via atomicAdd; lane 0 then
// applies damping and writes the out-degree-normalised value.
__global__ void kernel_vertex(
int const vertex_num,
const int* const vertex_begin,
const int* const edge,
float* const values,
float* const tmp)
{
int n = blockDim.x * gridDim.x/32; //total warp number
int tid = threadIdx.x % 32;        // lane index within the warp
int wid = blockIdx.x * blockDim.x/32 + threadIdx.x/32; // global warp id
for(int i = wid; i < vertex_num; i += n){
int degree = vertex_begin[i + 1] - vertex_begin[i];
if(degree > 0){
int loop_num = degree / 32;
if(tid < degree){
// NOTE(review): since tid < degree, tid%degree == tid, and for
// j == loop_num the offset 32*j + tid can exceed degree-1 when
// degree is not a multiple of 32 -- that reads edges belonging to
// the NEXT vertex.  Verify the intended bound (e.g. guard with
// 32*j + tid < degree).
for(int j = 0; j <= loop_num; j++)
atomicAdd(&tmp[i], values[edge[vertex_begin[i] + 32*j + tid%degree]]);
}
if(tid == 0){
// NOTE(review): no __syncwarp()/fence between the lanes' atomicAdds
// and this read of tmp[i] -- relies on implicit warp synchrony,
// which is not guaranteed on Volta+.  Confirm target architecture.
// Damping: tmp = 0.85*sum + 0.15, then normalise by out-degree.
tmp[i] = PAGERANK_COEFFICIENT * tmp[i] + 1.0f - PAGERANK_COEFFICIENT;
values[i] = tmp[i] / degree;
}
}
}
}
// Usage: prog <graph-file> <vertex_num> <edge_num>
// Loads the graph into CSR form, relabels destinations, runs one timed
// launch of kernel_vertex, and prints the elapsed time.
int main(int argc, const char * argv[])
{
if(argc < 4){
cout << "parameter should be three!";
return 0;
}
fn = argv[1];
vertex_num = atoi(argv[2]);
edge_num = atoi(argv[3]);
vector<int> vertex_begin;
vertex_begin.reserve(vertex_num + 1);
int* edge = new int[edge_num];
Graphpreproc(fn, vertex_begin, edge);
int* trans = new int[vertex_num];
greedy_ord(edge, trans);
int * dev_vertex_begin;
int * dev_edge;
float * dev_values;
float * dev_tmp;
size_t memSize_R = (vertex_num + 1) * sizeof(int);
size_t memSize_C = edge_num * sizeof(int);
gpuErrchk(cudaMalloc(&dev_vertex_begin, memSize_R));
gpuErrchk(cudaMemcpy(dev_vertex_begin, vertex_begin.data(), memSize_R, cudaMemcpyHostToDevice));
gpuErrchk(cudaMalloc(&dev_edge, memSize_C));
gpuErrchk(cudaMemcpy(dev_edge, edge, memSize_C, cudaMemcpyHostToDevice));
// NOTE(review): dev_values is sized by edge_num but the kernel writes
// values[i] for i < vertex_num and reads values[edge[...]] -- this is only
// safe if edge_num >= vertex_num.  Confirm, or size it by vertex_num.
gpuErrchk(cudaMalloc(&dev_values, memSize_C));
// cudaMemset writes bytes; the 0.0 argument is just int 0, which does
// produce float zeros here.
gpuErrchk(cudaMemset(dev_values, 0.0, memSize_C));
gpuErrchk(cudaMalloc(&dev_tmp, memSize_R));
gpuErrchk(cudaMemset(dev_tmp, 0.0, memSize_R));
// NOTE(review): naming is swapped at the launch below -- tn (128) is used
// as the grid size and bn (256) as the block size.  The kernel's stride
// loop keeps this correct, but the names mislead.
int bn = 256;
int tn = 128;
cudaEvent_t start;
cudaEvent_t stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
kernel_vertex<<<tn,bn>>>(
vertex_num,
dev_vertex_begin,
dev_edge,
dev_values,
dev_tmp);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
float time;
cudaEventElapsedTime(&time,start,stop);
printf("time is %f\n",time);
// NOTE(review): the two cudaEvent_t objects are never destroyed
// (cudaEventDestroy) -- minor resource leak.
gpuErrchk(cudaFree(dev_values));
gpuErrchk(cudaFree(dev_edge));
gpuErrchk(cudaFree(dev_vertex_begin));
gpuErrchk(cudaFree(dev_tmp));
delete []edge;
delete []trans;
return 0;
}
|
12,329 | /* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: XIAO Tong (email: xiaotong@mail.neu.edu.cn) 2018-04-25
*/
#include "Sigmoid.h"
#include "Sigmoid.cuh"
#include "Loss.cuh"
#include "../loss/CrossEntropy.cuh"
#include "../XDevice.h"
#ifdef USE_CUDA
// the CUDA stuff
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cuda.h>
#endif
namespace nts{ // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/*
sigmoid function y = 1/(1+exp(-x)) (Cuda kernel)
>> x - input data pointer
>> y - output data pointer
>> size - size of input/output
*/
__global__
void KernelSigmoidCompute(DTYPE * x, DTYPE * y, int size)
{
/* one thread per element; out-of-range threads do nothing */
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size){
/* NOTE(review): exp() here is the double-precision overload; if DTYPE is
   float each operand is promoted to double and converted back -- confirm
   whether expf() / a DTYPE-matched call is intended for speed. */
y[i] = 1/(1+exp(-x[i]));
}
}
/*
sigmoid function y = 1/(1+exp(-x)) (Cuda version)
>> x - input vector
>> y - result
*/
/*
sigmoid function y = 1/(1+exp(-x)) (Cuda version)
Validates inputs, picks a 1D launch configuration covering all elements,
and runs KernelSigmoidCompute on x's device.
>> x - input vector
>> y - result
*/
void _CudaSigmoid(const XTensor * x, XTensor * y)
{
/* fixed message: this is the sigmoid activation, not rectify */
CheckNTErrors(!x->isSparse && !y->isSparse, "the activation function (sigmoid) does not support sparse matrices.");
CheckNTErrors(x->unitNum && y->unitNum, "we require two vectors with the same length.");
int gridSize[3], blockSize[3];
/* 1D configuration sized to x->unitNum elements */
GDevs.GetCudaThread(x->devID, x->unitNum, gridSize, blockSize);
int devIDBackup;
ProtectCudaDev(x->devID, devIDBackup);
KernelSigmoidCompute<<<dim3(gridSize[0]), dim3(blockSize[0])>>>((DTYPE*)x->data, (DTYPE*)y->data, x->unitNum);
BacktoCudaDev(x->devID, devIDBackup);
}
/*
sigmoid backward computation of dE/dx (Cuda kernel)
dE/ds = dE/dy * dy/dx
sigmoid: y = 1/(1+exp(-x))
and dy/ds = y * (1 -y)
>> dedy - dE/dy
>> dedx - dE/ds
>> y - output of the function
>> x - input of the function
>> size - size of output/input
*/
__global__
void KernelSigmoidBackward(DTYPE * dedy, DTYPE * dedx, DTYPE * y, int size)
{
/* element index; threads past the end exit early */
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= size)
return;
/* chain rule through the sigmoid: dE/dx = dE/dy * y * (1 - y) */
dedx[idx] = dedy[idx] * y[idx] * ((DTYPE)1.0 - y[idx]);
}
/*
backward computation (Cuda version)
dE/ds = dE/dy * dy/dx
sigmoid: y = 1/(1+exp(-x))
and dy/dx = y * (1 -y)
>> y - output of the function
>> x - input of the function
>> dedy - dE/dy
>> dedx - dE/dx
*/
/*
backward computation (Cuda version)
dE/dx = dE/dy * dy/dx with dy/dx = y * (1 - y) for the sigmoid.
>> y - output of the function
>> x - input of the function
>> dedy - dE/dy
>> dedx - dE/dx
*/
void _CudaSigmoidBackward(XTensor * y, XTensor * x,
XTensor * dedy, XTensor * dedx)
{
int gridSize[3], blockSize[3];
GDevs.GetCudaThread(y->devID, y->unitNum, gridSize, blockSize);
int devIDBackup;
ProtectCudaDev(y->devID, devIDBackup);
/* dE/dx = dE/dy * dy/dx */
KernelSigmoidBackward<<<dim3(gridSize[0]),dim3(blockSize[0])>>>
((DTYPE*)dedy->data,
(DTYPE*)dedx->data,
(DTYPE*)y->data,
y->unitNum);
/* fix: restore the device we protected above.  The original protected
   y->devID but restored via x->devID, desynchronizing the save/restore
   pair whenever x and y live on different devices. */
BacktoCudaDev(y->devID, devIDBackup);
}
#endif // USE_CUDA
} // namespace nts(NiuTrans.Tensor) |
12,330 | #include "includes.h"
// Signum: +1 for positive x, -1 for negative, 0 for exactly zero.
__device__ int sign(DECNUM x)
{
	if (x > 0.0f)
		return 1;
	if (x < 0.0f)
		return -1;
	return 0;
}
// Index two cells back, clamped to 0 at the lower boundary (nx unused,
// kept for signature symmetry with pplus/mminus).
__device__ int mminus2(int ix, int nx)
{
	return (ix <= 1) ? 0 : (ix - 2);
}
// Index one cell forward, clamped to nx-1 at the upper boundary.
__device__ int pplus(int ix, int nx)
{
	return (ix == nx - 1) ? (nx - 1) : (ix + 1);
}
// Index one cell back, clamped to 0 at the lower boundary (nx unused).
__device__ int mminus(int ix, int nx)
{
	return (ix == 0) ? 0 : (ix - 1);
}
// Neumann lateral boundary for the v-velocity grid: copies the adjacent
// interior row of uu onto the first row (iy == 0) and the second-to-last
// row (iy == ny - 2).
// NOTE(review): the 16x16 shared tiles assume blockDim.x <= 16 and
// blockDim.y <= 16 -- confirm the launch configuration.
__global__ void vvlatbnd(int nx, int ny, DECNUM * uu)
{
	unsigned int ix = blockIdx.x*blockDim.x + threadIdx.x;
	unsigned int iy = blockIdx.y*blockDim.y + threadIdx.y;
	unsigned int i = ix + iy*nx;
	int tx = threadIdx.x;
	int ty = threadIdx.y;
	__shared__ DECNUM uut[16][16];
	__shared__ DECNUM uub[16][16];
	if (ix < nx && iy < ny)
	{
		unsigned int yminus = mminus(iy, ny);
		unsigned int yplus = pplus(iy, ny);
		// (removed the unused yminus2 = mminus2(...) computation)
		// Each thread reads back only its own slot, so no __syncthreads()
		// is needed; the shared staging preserves the original pattern.
		uut[tx][ty] = uu[ix + yplus*nx];
		uub[tx][ty] = uu[ix + yminus*nx];
		if (iy == 0)
		{
			uu[i] = uut[tx][ty];
		}
		if (iy == ny - 2)
		{
			uu[i] = uub[tx][ty];
		}
	}
}
12,331 | #include <cuda.h>
#include <cstdio>
#include <ctime>
#include <iostream>
#define TILE_WIDTH 32
#define H 100000
#define W 100000
using namespace std;
// Fill all H*W entries of v with the constant 2.
// size_t indexing: with H = W = 100000, the original `i * W + j` int
// expression overflows 32-bit int once i >= 21475.
void foo(float* v) {
  for (int i = 0; i < H; i++) {
    for (int j = 0; j < W; j++) {
      v[(size_t)i * W + j] = 2;
    }
  }
}
// CPU reference multiply: C = A * B for H x W row-major matrices.
// Fixes vs. original: the accumulator was declared `int`, truncating every
// fractional product; and `i * W + j` overflowed 32-bit int for large H/W.
void mult(float* A, float* B, float* C) {
  for (int i = 0; i < H; i++) {
    for (int j = 0; j < W; j++) {
      float acc = 0;
      for (int k = 0; k < W; k++)
        acc += A[(size_t)i * W + k] * B[(size_t)k * W + j];
      C[(size_t)i * W + j] = acc;
    }
  }
}
// Print the H x W matrix v, one row per line.
// size_t indexing avoids the 32-bit overflow of `i * W + j` (see foo/mult).
void mostrar(float* v) {
  for (int i = 0; i < H; i++) {
    for (int j = 0; j < W; j++) {
      cout << v[(size_t)i * W + j] << " ";
    }
    cout << endl;
  }
}
// Tiled matrix multiply: d_C = d_A * d_B for square W x W row-major matrices.
// Requires blockDim == (TILE_WIDTH, TILE_WIDTH) and W divisible by TILE_WIDTH.
__global__ void multMat(float* d_A, float* d_B, float* d_C) {
  __shared__ float ds_A[TILE_WIDTH][TILE_WIDTH];
  __shared__ float ds_B[TILE_WIDTH][TILE_WIDTH];
  int bx = blockIdx.x;
  int by = blockIdx.y;
  int tx = threadIdx.x;
  int ty = threadIdx.y;
  int row = by * blockDim.y + ty;
  int col = bx * blockDim.x + tx;
  // Fix: accumulate in float; the original `int Pvalue` truncated products.
  float Pvalue = 0.0f;
  for (size_t phase = 0; phase < W / TILE_WIDTH; phase++) {
    ds_A[ty][tx] = d_A[(size_t)row * W + phase * TILE_WIDTH + tx];
    ds_B[ty][tx] = d_B[(phase * TILE_WIDTH + ty) * (size_t)W + col];
    __syncthreads();
    // Fix: only TILE_WIDTH products per phase.  The original looped
    // `k < W`, reading far beyond the TILE_WIDTH-wide shared tiles
    // (out-of-bounds shared memory) and multiply-counting phases.
    for (int k = 0; k < TILE_WIDTH; k++) {
      Pvalue += ds_A[ty][k] * ds_B[k][tx];
    }
    __syncthreads();
  }
  // Fix: row-major store.  The original wrote d_C[col * W + row], producing
  // the transposed result.
  d_C[(size_t)row * W + col] = Pvalue;
}
// Driver: fills A and B with 2s, multiplies them on the GPU, and reports the
// wall-clock time of copy + kernel + copy-back.
// NOTE(review): H = W = 100000 means each matrix is ~40 GB -- these
// allocations will fail on any realistic machine; confirm intended sizes.
int main() {
  float* A = new float[(size_t)H * W];
  float* B = new float[(size_t)H * W];
  float* C = new float[(size_t)H * W];
  float* D = new float[(size_t)H * W];
  foo(A);
  foo(B);
  // {
  //   clock_t start = clock();
  //
  //   mult(A, B, C);
  //
  //   clock_t end = clock();
  //   double cpu_time_used = ((double)(end - start)) / CLOCKS_PER_SEC;
  //   printf("Tiempo invertido CPU = %lf s\n", cpu_time_used);
  // }
  float *d_A, *d_B, *d_D;
  float blockSize = TILE_WIDTH;
  dim3 dimBlock(blockSize, blockSize);
  dim3 dimGrid(ceil(W / float(blockSize)), ceil(H / float(blockSize)), 1);
  cudaMalloc((void**)&d_A, sizeof(float) * (size_t)H * W);
  cudaMalloc((void**)&d_B, sizeof(float) * (size_t)H * W);
  cudaMalloc((void**)&d_D, sizeof(float) * (size_t)H * W);
  {
    clock_t start = clock();
    cudaMemcpy(d_A, A, sizeof(float) * (size_t)H * W, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, sizeof(float) * (size_t)H * W, cudaMemcpyHostToDevice);
    multMat<<<dimGrid, dimBlock>>>(d_A, d_B, d_D);
    cudaMemcpy(D, d_D, sizeof(float) * (size_t)H * W, cudaMemcpyDeviceToHost);
    clock_t end = clock();
    double cpu_time_used = ((double)(end - start)) / CLOCKS_PER_SEC;
    std::cout << "Tiempo invertido GPU = " << cpu_time_used << "s\n";
  }
  // Fix: arrays allocated with new[] must be released with delete[];
  // plain `delete` on them is undefined behavior.
  delete[] A;
  delete[] B;
  delete[] C;
  delete[] D;
  cudaFree(d_A);
  cudaFree(d_B);
  cudaFree(d_D);
}
|
12,332 | #include "device_error.cuh"
// Exception hierarchy for device-side failures; each type just stores a
// human-readable message (declared in device_error.cuh).
// Base error carrying an explanatory message.
DeviceError::DeviceError(std::string message ) : _message(message) {}
// Default-constructed base error with an empty message.
DeviceError::DeviceError() : _message("") {}
// Error type for failed device allocations.
MallocError::MallocError(std::string message) : DeviceError(message) {}
MallocError::MallocError() : DeviceError("") {}
// Error type for failed host<->device copies.
CopyError::CopyError(std::string message) : DeviceError(message) {}
CopyError::CopyError() : DeviceError("") {}
|
12,333 | #include <stdio.h>
#include <cuda_runtime.h>
#define MIN(a, b) (a > b? b: a)
const int N = 32 * 1024;
const int threadsPerBlock = 256; // 每个block下的线程数
const int blocksPerGrid = MIN(32, (N + threadsPerBlock -1)/threadsPerBlock); // 分配的block数
// 点乘
// Dot product kernel: each thread accumulates a grid-stride partial sum of
// a[i]*b[i]; the block then tree-reduces its partials in shared memory and
// thread 0 writes the block's result to c[blockIdx.x].
// Requires threadsPerBlock to be a power of two.
__global__ void dot(float *a, float *b, float *c){
    __shared__ float cache[threadsPerBlock]; // per-block partial sums
    int cacheIndex = threadIdx.x;

    // Grid-stride accumulation so any grid size covers all N elements.
    float temp = 0;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N;
         i += gridDim.x * blockDim.x)
        temp += a[i] * b[i];
    cache[cacheIndex] = temp;
    __syncthreads();

    // Pairwise tree reduction over the block's shared partials.
    for (int stride = blockDim.x / 2; stride != 0; stride /= 2) {
        if (cacheIndex < stride)
            cache[cacheIndex] += cache[cacheIndex + stride];
        __syncthreads();
    }

    // One result per block; the host sums the block results.
    if (cacheIndex == 0)
        c[blockIdx.x] = cache[0];
}
// Host driver: builds a = i, b = 2i, computes the per-block partial dot
// products on the GPU, reduces them on the host, and prints the result.
int main(void){
float *a, *b, *partial_c;
float *dev_a, *dev_b, *dev_partial_c;
a = (float *)malloc(N * sizeof(float));
b = (float *)malloc(N * sizeof(float));
partial_c = (float *)malloc(blocksPerGrid*sizeof(float));
// Initialize the input vectors.
for(int i=0;i< N; ++i){
a[i] = (float)i;
b[i] = i * 2.0;
}
// Allocate device buffers.
cudaMalloc((void **)&dev_a, N * sizeof(float));
cudaMalloc((void **)&dev_b, N * sizeof(float));
cudaMalloc((void **)&dev_partial_c, blocksPerGrid * sizeof(float));
// Copy the inputs host -> device.
cudaMemcpy(dev_a, a, N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, N * sizeof(float), cudaMemcpyHostToDevice);
// Launch: one partial sum per block.
dot<<<blocksPerGrid, threadsPerBlock>>>(dev_a, dev_b, dev_partial_c);
// Copy partial sums device -> host (this blocking memcpy also synchronizes
// with the kernel).
cudaMemcpy(partial_c, dev_partial_c, blocksPerGrid * sizeof(float), cudaMemcpyDeviceToHost);
// Final reduction of the per-block partials on the host.
float sum=0.0;
for(int i =0; i<blocksPerGrid; ++i)
{
sum += partial_c[i];
}
printf("a dot b is %.2g \n", sum);
// Release device and host memory.
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_partial_c);
free(a);
free(b);
free(partial_c);
return 0;
}
|
12,334 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#define E 2.71828182845904523536
// Euler-method step: array[i] = y0 + dt * suma[i], with y0 = -1 and suma[i]
// the precomputed prefix sum of exp terms.  One thread per sample.
__global__ void euler_gpu_2(float * array, float * suma, float dt, int n){
    const int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i >= n)
        return;
    array[i] = -1 + dt * suma[i];
}
// For each step size dt: build the exp prefix sum on the CPU, run the Euler
// step on the GPU, and report mean squared error plus CPU/GPU timings.
int main(int argc, char const *argv[]){
cudaEvent_t start, stop;
clock_t t1, t2;
float dts[6] = {0.1, 0.01, 0.001, 0.0001, 0.00001, 0.000001};
int block_size = 256;
for(size_t i = 0; i < 6; i++){
int n = (int)(10/dts[i]);
int grid_size = (int) ceil((float)n / block_size);
float elapsed=0;
double cpu_time = 0;
double error = 0;
float * resultados = (float *) malloc(n * sizeof(float));
float * sumatoria = (float *) malloc(n * sizeof(float));
float * d_r;
float * d_s;
// CPU part: prefix sum of exp(-dt*j) terms.
sumatoria[0] = 1;
t1 = clock();
for(int j =1; j < n; j++){
sumatoria[j] = powf(E, -dts[i]*j) + sumatoria[j-1];
}
t2 = clock();
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
cudaMalloc(&d_r, n * sizeof(float));
cudaMalloc(&d_s, n * sizeof(float));
// NOTE(review): resultados is uploaded uninitialized; the kernel fully
// overwrites d_r, so this copy only contributes to the timing.
cudaMemcpy(d_r, resultados, n * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_s, sumatoria, n * sizeof(float), cudaMemcpyHostToDevice);
euler_gpu_2<<<grid_size, block_size>>>(d_r,d_s, dts[i], n);
cudaMemcpy(resultados, d_r, n * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(sumatoria, d_s, n * sizeof(float), cudaMemcpyDeviceToHost);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
// Mean squared error against the analytic solution -exp(-dt*g).
for(int g = 0; g < n; g++){
float real = -powf(E, -dts[i]*g);
error = error + powf((resultados[g]-real),2);
}
cpu_time = 1000.0 * (double)(t2 - t1) / CLOCKS_PER_SEC;
printf("Executed with %f dt\n", dts[i]);
printf("Mean squared error: %.16f \n", error/n);
printf("The elapsed time in gpu was %.2f ms \n", elapsed);
printf("The elapsed time in cpu was %.2f ms \n", cpu_time);
printf("The total time was %.2f ms \n", elapsed + cpu_time);
// Fix: the original leaked both host buffers and both device buffers on
// every iteration of this loop.
cudaFree(d_r);
cudaFree(d_s);
free(resultados);
free(sumatoria);
}
return 0;
}
|
12,335 | #include "includes.h"
namespace ann {
// CUDA2
}
// Output-layer gradient for a sigmoid network layer:
// g = sigma'(z) * (a - t), with sigma'(z) = exp(-z) / (1 + exp(-z))^2.
// l[layer] holds the layer's neuron count; s_ext[layer] is the layer's
// offset into the flattened z/a/g arrays; t_arr holds the targets.
// NOTE(review): the guard excludes index neuron_count-1 -- presumably the
// last entry is a bias unit with no gradient; confirm against the layer
// layout used elsewhere.
__global__ void kernel_calc_gL_2( int layer_id, int *l, int *s_ext, float *z_ext_arr, float *a_ext_arr, float *t_arr, float *gjl_ext ){
// NOTE(review): `volatile` on a plain local index has no evident purpose
// here -- likely a leftover; it only inhibits optimization.
volatile int idx = threadIdx.x + blockDim.x*blockIdx.x;
int neuron_count = l[layer_id];
if(idx >= neuron_count-1) return;
float z = z_ext_arr[s_ext[layer_id] + idx];
float tmp = 1 + expf(-z);
float f_deriv=expf(-z) / (tmp*tmp);
gjl_ext[s_ext[layer_id] + idx] = f_deriv*(a_ext_arr[s_ext[layer_id] + idx] - t_arr[idx]);
}
12,336 | #include <stdio.h>
#include <stdlib.h>
#define INF 2147483647
extern "C" {
// Fill block blockIdx.x's `len`-long segment of tab with INF sentinels.
// Launched with 1024 threads per block; each thread strides by 1024.
__global__ void init(int * tab, int len) {
    const int base = len * blockIdx.x;
    for (int i = base + threadIdx.x; i < base + len; i += 1024) {
        tab[i] = INF;
    }
}
// One bitonic merge pass over each block's `len`-element segment of tab.
// Blocks whose (blockIdx.x % mod) lies in the first half of a `mod`-sized
// group merge ascending; the others merge descending.  Requires 1024
// threads per block and `len` a power of two (>= 2048).
// The swap idiom `a ^= b; b ^= a; a ^= b` is an in-place XOR swap.
// __syncthreads() inside the loops is safe: all loop bounds (begin/end,
// len, k) are uniform across the block.
__global__ void oneReduction(int * tab, int len, int mod) {
__shared__ int begin, end;
__shared__ int tmp_T[1024];
if(threadIdx.x == 0) {
begin = blockIdx.x*len;
end = blockIdx.x*len + len;
}
__syncthreads();
if(blockIdx.x % mod < mod/2) {
// Ascending merge: stage 1024 elements (512 low + 512 high halves of the
// current distance k) into shared memory and compare-exchange them.
for(int k = len/2; k >= 1024; k /= 2) {
for(int g = begin; g < end; g += 2*k) {
for(int j = g; j < g + k; j += 512) {
__syncthreads();
if(threadIdx.x < 512)
tmp_T[threadIdx.x] = tab[j + threadIdx.x];
else
tmp_T[threadIdx.x] = tab[j + threadIdx.x - 512 + k];
__syncthreads();
if(threadIdx.x < 512 && tmp_T[threadIdx.x] > tmp_T[threadIdx.x + 512]) {
tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + 512];
tmp_T[threadIdx.x + 512] ^= tmp_T[threadIdx.x];
tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + 512];
}
__syncthreads();
if(threadIdx.x < 512)
tab[j + threadIdx.x] = tmp_T[threadIdx.x];
else
tab[j + threadIdx.x - 512 + k] = tmp_T[threadIdx.x];
}
}
}
// Finish distances < 1024 entirely in shared memory, 1024 at a time.
for(int i = begin; i < begin+len; i += 1024) {
__syncthreads();
tmp_T[threadIdx.x] = tab[i + threadIdx.x];
__syncthreads();
for(int jump = 512; jump >= 1; jump /= 2) {
if(threadIdx.x % (jump*2) < jump && threadIdx.x + jump < 1024 && tmp_T[threadIdx.x] > tmp_T[threadIdx.x + jump]) {
tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + jump];
tmp_T[threadIdx.x + jump] ^= tmp_T[threadIdx.x];
tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + jump];
}
__syncthreads();
}
tab[i + threadIdx.x] = tmp_T[threadIdx.x];
}
} else {
// Descending merge: identical structure with the comparison reversed.
for(int k = len/2; k >= 1024; k /= 2) {
for(int g = begin; g < end; g += 2*k) {
for(int j = g; j < g + k; j += 512) {
__syncthreads();
if(threadIdx.x < 512)
tmp_T[threadIdx.x] = tab[j + threadIdx.x];
else
tmp_T[threadIdx.x] = tab[j + threadIdx.x - 512 + k];
__syncthreads();
if(threadIdx.x < 512 && tmp_T[threadIdx.x] < tmp_T[threadIdx.x + 512]) {
tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + 512];
tmp_T[threadIdx.x + 512] ^= tmp_T[threadIdx.x];
tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + 512];
}
__syncthreads();
if(threadIdx.x < 512)
tab[j + threadIdx.x] = tmp_T[threadIdx.x];
else
tab[j + threadIdx.x - 512 + k] = tmp_T[threadIdx.x];
}
}
}
for(int i = begin; i < begin + len; i += 1024) {
__syncthreads();
tmp_T[threadIdx.x] = tab[i + threadIdx.x];
__syncthreads();
for(int jump = 512; jump >= 1; jump /= 2) {
if(threadIdx.x % (jump*2) < jump && threadIdx.x + jump < 1024 && tmp_T[threadIdx.x] < tmp_T[threadIdx.x + jump]) {
tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + jump];
tmp_T[threadIdx.x + jump] ^= tmp_T[threadIdx.x];
tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + jump];
}
__syncthreads();
}
tab[i + threadIdx.x] = tmp_T[threadIdx.x];
}
}
}
// Bitonic sort of each block's `len`-element segment of tab, entirely within
// one block (1024 threads; `len` a power of two >= 2048).
// Phase 1 sorts consecutive 1024-element chunks alternately ascending and
// descending (forming bitonic pairs); phase 2 repeatedly merges task_size
// runs, ascending for even-numbered runs and descending for odd ones, so
// the next doubling again sees bitonic input.
// The `^=` triples are in-place XOR swaps; all __syncthreads() sit on
// block-uniform control flow (shared begin/end and uniform loop bounds).
__global__ void oneBlock(int * tab, int len) {
__shared__ int begin, end;
__shared__ int tmp_T[1024];
if(threadIdx.x == 0) {
begin = blockIdx.x*len;
end = blockIdx.x*len + len;
}
__syncthreads();
//first phase
for(int i = begin; i < end; i += 2048) {
// Sort the first 1024-chunk ascending ...
tmp_T[threadIdx.x] = tab[i + threadIdx.x];
__syncthreads();
for(int bSize = 2; bSize <= 1024; bSize *= 2) {
for(int jump = bSize/2; jump >= 1; jump /= 2) {
if(threadIdx.x % (jump*2) < jump && threadIdx.x + jump < 1024 && (
( tmp_T[threadIdx.x] > tmp_T[threadIdx.x + jump] && threadIdx.x % (bSize*2) < bSize ) ||
( tmp_T[threadIdx.x] < tmp_T[threadIdx.x + jump] && threadIdx.x % (bSize*2) >= bSize ))) {
tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + jump];
tmp_T[threadIdx.x + jump] ^= tmp_T[threadIdx.x];
tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + jump];
}
__syncthreads();
}
}
tab[i + threadIdx.x] = tmp_T[threadIdx.x];
__syncthreads();
// ... and the second 1024-chunk descending (comparisons inverted).
tmp_T[threadIdx.x] = tab[i + 1024 + threadIdx.x];
__syncthreads();
for(int bSize = 2; bSize <= 1024; bSize *= 2) {
for(int jump = bSize/2; jump >= 1; jump /= 2) {
if(threadIdx.x % (jump*2) < jump && threadIdx.x + jump < 1024 && (
( tmp_T[threadIdx.x] < tmp_T[threadIdx.x + jump] && threadIdx.x % (bSize*2) < bSize ) ||
( tmp_T[threadIdx.x] > tmp_T[threadIdx.x + jump] && threadIdx.x % (bSize*2) >= bSize ))) {
tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + jump];
tmp_T[threadIdx.x + jump] ^= tmp_T[threadIdx.x];
tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + jump];
}
__syncthreads();
}
}
tab[i + 1024 + threadIdx.x] = tmp_T[threadIdx.x];
__syncthreads();
}
// second phase
for(int task_size = 2048; task_size < len; task_size *= 2) {
// Ascending merge of runs starting at even multiples of task_size.
for(int pos = begin; pos < end; pos += 2*task_size) {
for(int k = task_size/2; k >= 1024; k /= 2) {
for(int lilPos = pos; lilPos < pos + task_size; lilPos += 2*k) {
for(int i = lilPos; i < lilPos + k; i += 512) {
__syncthreads();
if(threadIdx.x < 512)
tmp_T[threadIdx.x] = tab[i + threadIdx.x];
else
tmp_T[threadIdx.x] = tab[i + threadIdx.x - 512 + k];
__syncthreads();
if(threadIdx.x < 512 && tmp_T[threadIdx.x] > tmp_T[threadIdx.x + 512]) {
tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + 512];
tmp_T[threadIdx.x + 512] ^= tmp_T[threadIdx.x];
tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + 512];
}
__syncthreads();
if(threadIdx.x < 512)
tab[i + threadIdx.x] = tmp_T[threadIdx.x];
else
tab[i + threadIdx.x - 512 + k] = tmp_T[threadIdx.x];
}
}
}
for(int i = pos; i < pos + task_size; i += 1024) {
tmp_T[threadIdx.x] = tab[i + threadIdx.x];
for(int jump = 512; jump >= 1; jump /= 2) {
__syncthreads();
if(threadIdx.x % (jump*2) < jump && threadIdx.x + jump < 1024 && tmp_T[threadIdx.x] > tmp_T[threadIdx.x + jump]) {
tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + jump];
tmp_T[threadIdx.x + jump] ^= tmp_T[threadIdx.x];
tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + jump];
}
__syncthreads();
}
tab[i + threadIdx.x] = tmp_T[threadIdx.x];
__syncthreads();
}
}
__syncthreads();
// Descending merge of runs starting at odd multiples of task_size.
for(int pos = begin + task_size; pos < end; pos += 2*task_size) {
for(int k = task_size/2; k >= 1024; k /= 2) {
for(int lilPos = pos; lilPos < pos + task_size; lilPos += 2*k) {
for(int i = lilPos; i < lilPos + k; i += 512) {
__syncthreads();
if(threadIdx.x < 512)
tmp_T[threadIdx.x] = tab[i + threadIdx.x];
else
tmp_T[threadIdx.x] = tab[i + threadIdx.x - 512 + k];
__syncthreads();
if(threadIdx.x < 512 && tmp_T[threadIdx.x] < tmp_T[threadIdx.x + 512]) {
tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + 512];
tmp_T[threadIdx.x + 512] ^= tmp_T[threadIdx.x];
tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + 512];
}
__syncthreads();
if(threadIdx.x < 512)
tab[i + threadIdx.x] = tmp_T[threadIdx.x];
else
tab[i + threadIdx.x - 512 + k] = tmp_T[threadIdx.x];
}
}
}
for(int i = pos; i < pos + task_size; i += 1024) {
tmp_T[threadIdx.x] = tab[i + threadIdx.x];
__syncthreads();
for(int jump = 512; jump >= 1; jump /= 2) {
__syncthreads();
if(threadIdx.x % (jump*2) < jump && threadIdx.x + jump < 1024 && tmp_T[threadIdx.x] < tmp_T[threadIdx.x + jump]) {
tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + jump];
tmp_T[threadIdx.x + jump] ^= tmp_T[threadIdx.x];
tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + jump];
}
__syncthreads();
}
__syncthreads();
tab[i + threadIdx.x] = tmp_T[threadIdx.x];
__syncthreads();
}
}
}
}
// One global compare-exchange step of a bitonic merge at distance `dist`,
// spread across multiple blocks.  blocksPerTask blocks cooperate on each
// task; each block handles pow*512 element pairs (element i vs i+dist).
// Blocks in even `period` groups order ascending, odd groups descending.
// The `^=` triples are in-place XOR swaps; 1024 threads per block stage
// 512 "low" and 512 "high" elements into shared memory per iteration.
__global__ void oneMove(int * tab, int dist, int pow, int blocksPerTask, int period) {
__shared__ int tmp_T[1024];
__shared__ int begin;
if(threadIdx.x == 0)
begin = (blockIdx.x/blocksPerTask)*dist*2 + (blockIdx.x%blocksPerTask)*512*pow;
__syncthreads();
if((blockIdx.x / period) % 2 == 0) {
// Ascending: keep the smaller element at the lower index.
for(int i = begin; i < begin + pow*512; i += 512) {
if(threadIdx.x < 512) tmp_T[threadIdx.x] = tab[i + threadIdx.x];
else tmp_T[threadIdx.x] = tab[i + threadIdx.x - 512 + dist];
__syncthreads();
if(threadIdx.x < 512 && tmp_T[threadIdx.x] > tmp_T[threadIdx.x + 512]) {
tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + 512];
tmp_T[threadIdx.x + 512] ^= tmp_T[threadIdx.x];
tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + 512];
}
__syncthreads();
if(threadIdx.x < 512) tab[i + threadIdx.x] = tmp_T[threadIdx.x];
else tab[i + threadIdx.x - 512 + dist] = tmp_T[threadIdx.x];
__syncthreads();
}
} else {
// Descending: identical structure with the comparison reversed.
for(int i = begin; i < begin + pow*512; i += 512) {
if(threadIdx.x < 512) tmp_T[threadIdx.x] = tab[i + threadIdx.x];
else tmp_T[threadIdx.x] = tab[i + threadIdx.x - 512 + dist];
__syncthreads();
if(threadIdx.x < 512 && tmp_T[threadIdx.x] < tmp_T[threadIdx.x + 512]) {
tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + 512];
tmp_T[threadIdx.x + 512] ^= tmp_T[threadIdx.x];
tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + 512];
}
__syncthreads();
if(threadIdx.x < 512) tab[i + threadIdx.x] = tmp_T[threadIdx.x];
else tab[i + threadIdx.x - 512 + dist] = tmp_T[threadIdx.x];
__syncthreads();
}
}
}
}
|
12,337 | /**
* @file pctdemo_processMandelbrotElement.cu
*
* CUDA code to calculate the Mandelbrot Set on a GPU.
*
* Copyright 2011 The MathWorks, Inc.
*/
/** Work out which piece of the global array this thread should operate on */
/** Flat global thread index for a 2D grid of 2D blocks (row-major in both
 *  the grid and the block). */
__device__ size_t calculateGlobalIndex() {
    // Linearize the block coordinate, then the thread coordinate within it.
    size_t const blockId = blockIdx.y * gridDim.x + blockIdx.x;
    size_t const threadInBlock = threadIdx.y * blockDim.x + threadIdx.x;
    size_t const blockSize = blockDim.x * blockDim.y;
    return blockId * blockSize + threadInBlock;
}
/** The mandelbrot or julia algorithm for a single location */
/** Escape-time iteration for one point: z <- z*z + (a,b), starting at
 *  z = (x0, y0), until |z|^2 > 4 or the budget (maxIters, inclusive) is
 *  spent.  Returns the iteration count.  `k` is accepted for interface
 *  compatibility but unused. */
__device__ unsigned int doIterations( double const x0,
                                      double const y0,
                                      double const a,
                                      double const b,
                                      unsigned int const k,
                                      unsigned int const maxIters ) {
    double zr = x0;        // real part of z
    double zi = y0;        // imaginary part of z
    unsigned int n = 0;
    while ( ( n <= maxIters ) && ((zr*zr + zi*zi) <= 4.0) ) {
        ++n;
        double const prevZr = zr;
        zr = zr*zr - zi*zi + a;     // real part of z*z + c
        zi = 2.0*prevZr*zi + b;     // imaginary part of z*z + c
    }
    return n;
}
/** Main entry point.
* Works out where the current thread should read/write to global memory
* and calls doIterations to do the actual work.
*/
/** One thread per element: read the element's (x, y) coordinate, run the
 *  escape-time iteration, and store log(count + 1).  When mandelbrot == 1
 *  the iteration constant is scaled by the point itself; otherwise (a, b)
 *  is used unchanged (Julia mode). */
__global__ void processMandelbrotElementTest(
                      double * out,
                      const double * x,
                      const double * y,
                      const double a,
                      const double b,
                      const unsigned int k,
                      const unsigned int maxIters,
                      const unsigned int mandelbrot,
                      const unsigned int numel ) {
    size_t const idx = calculateGlobalIndex();
    // Threads past the end of the array do nothing.
    if (idx >= numel) {
        return;
    }
    double const re = x[idx];
    double const im = y[idx];
    double const cr = (mandelbrot == 1) ? a*re : a;
    double const ci = (mandelbrot == 1) ? b*im : b;
    unsigned int const count = doIterations( re, im, cr, ci, k, maxIters );
    out[idx] = log( double( count + 1 ) );
}
|
12,338 | #include "mat-sum.hh"
#include <cassert>
#include <stdexcept>
#include "graph.hh"
#include "ops-builder.hh"
#include "sigmoid-grad.hh"
#include "../runtime/node.hh"
#include "../memory/alloc.hh"
namespace ops
{
// Reduce a 2-D operand along `axis`: the output is 1-D, keeping the OTHER
// dimension (shape_get()[!axis]).
// NOTE(review): `!axis` only makes sense for axis in {0, 1} -- confirm callers.
MatSum::MatSum(Op* arg, std::size_t axis)
: Op("mat_sum", Shape({arg->shape_get()[!axis]}), {arg})
, axis_(axis)
{}
// Lower this op to a runtime node: allocate the 1-D output tensor and emit
// either the column-sum (axis_ == 0) or row-sum kernel node, then register
// the compiled form with the graph.
void MatSum::compile()
{
auto& g = Graph::instance();
// Compiled predecessor supplies the input buffer, shape, and dependency node.
auto& carg = g.compiled(preds()[0]);
std::size_t rows = carg.out_shape[0];
std::size_t cols = carg.out_shape[1];
// Output keeps the non-reduced dimension.
Shape out_shape = Shape({carg.out_shape[!axis_]});
dbl_t* out_data = tensor_alloc(out_shape.total());
auto out_node = axis_ == 0 ?
rt::Node::op_mat_sum_cols(carg.out_data, out_data,
rows, cols,
{carg.out_node})
: rt::Node::op_mat_sum_rows(carg.out_data, out_data,
rows, cols,
{carg.out_node});
g.add_compiled(this, {out_node}, {out_data}, out_node, out_shape, out_data);
}
}
|
12,339 | #include <cuda.h>
#include <iostream>
using namespace std;
#define TILE_WIDTH 16
/**
* C = A * B
*/
/**
 * Naive dense square matrix multiply C = A * B (all dim x dim, row-major).
 * One thread computes one output element.
 */
__global__
void matMul(float* C, const float* A, const float* B, int dim)
{
    int c = blockIdx.x * blockDim.x + threadIdx.x;   // output column
    int r = blockIdx.y * blockDim.y + threadIdx.y;   // output row
    if (r >= dim || c >= dim)
        return;
    float acc = 0.0f;
    for (int k = 0; k < dim; k++)
        acc += A[r*dim + k] * B[k*dim + c];
    C[r*dim + c] = acc;
}
/**
* C = A * B (tiled)
*/
/**
 * Tiled dense square matrix multiply C = A * B (dim x dim, row-major).
 * Requires blockDim == (TILE_WIDTH, TILE_WIDTH).
 * Fix: out-of-range tile entries are now zero-filled.  Previously they were
 * left uninitialized in shared memory, so the dot product accumulated
 * garbage whenever dim was not a multiple of TILE_WIDTH.
 */
__global__
void matMulTiled(float* C, const float* A, const float* B, int dim)
{
  __shared__ float As[TILE_WIDTH][TILE_WIDTH];
  __shared__ float Bs[TILE_WIDTH][TILE_WIDTH];
  int tx = threadIdx.x, ty = threadIdx.y;
  int row = blockIdx.y*TILE_WIDTH + ty;
  int col = blockIdx.x*TILE_WIDTH + tx;
  float prod = 0.0f;
  // Integer ceiling division: number of tiles along one dimension.
  for(int ph = 0; ph < (dim + TILE_WIDTH - 1) / TILE_WIDTH; ++ph) {
    // 1. Load one tile of A and B, padding with zeros outside the matrices
    // so the dot product below is safe on partial (edge) tiles.
    As[ty][tx] = ((row < dim) && (ph*TILE_WIDTH + tx < dim))
                   ? A[row*dim + ph*TILE_WIDTH + tx] : 0.0f;
    Bs[ty][tx] = ((ph*TILE_WIDTH + ty < dim) && (col < dim))
                   ? B[(ph*TILE_WIDTH + ty)*dim + col] : 0.0f;
    __syncthreads();
    // 2. Partial dot product over this tile.
    for(int i = 0; i < TILE_WIDTH; ++i)
      prod += As[ty][i]*Bs[i][tx];
    __syncthreads();
  }
  // 3. Write the result (guarded for threads past the matrix edge).
  if((row < dim) && (col < dim)) C[row*dim+col] = prod;
}
/**
 * Driver: A = all 3s, B = identity; computes C = A*B on the GPU with the
 * naive (type 1) or tiled (type 2) kernel and verifies every element is 3.
 * Usage: prog <dim> <type>.
 * Fixes: argc is validated before use, and the grid now uses integer
 * ceiling division in BOTH dimensions -- the original tiled launch computed
 * ceil(dim/dimBlock.x) on an integer quotient (and had no ceil on y at
 * all), silently dropping partial tiles when dim % TILE_WIDTH != 0.
 */
int main(int argc, char* argv[])
{
    if (argc < 3) {
        cout << "usage: " << argv[0] << " <dim> <type>" << endl;
        return 1;
    }
    // Query GPU properties
    cudaDeviceProp dev_prop;
    cudaGetDeviceProperties(&dev_prop, 0);
    cout << "---------------------------------------------" << endl;
    cout << " GPU PROPERTIES " << endl;
    cout << "---------------------------------------------" << endl;
    cout << "Device Name: " << dev_prop.name << endl;
    cout << "Memory Clock Rate: " << dev_prop.memoryClockRate/1.0e6 << " GHz" << endl;
    cout << "Memory Bandwidth: " << 2.0*dev_prop.memoryClockRate*(dev_prop.memoryBusWidth/8)/1.0e6 << " GB/s" << endl;
    cout << "Number of SM: " << dev_prop.multiProcessorCount << endl;
    cout << "Max Threads per SM: " << dev_prop.maxThreadsPerMultiProcessor << endl;
    cout << "Registers per Block: " << dev_prop.regsPerBlock << endl;
    cout << "Shared Memory per Block: " << dev_prop.sharedMemPerBlock << " B" << endl;
    cout << "Total Global Memory per Block: " << dev_prop.totalGlobalMem/1.0e9 << " GB" << endl;
    cout << endl;
    int dim = atoi(argv[1]);
    int size = dim*dim;
    // Two tiles (A and B) of TILE_WIDTH^2 floats each.
    float sharedMemPerBlock = 2*TILE_WIDTH*TILE_WIDTH*4;
    cout << "shared memory per block: " << sharedMemPerBlock << " B" << endl;
    cout << "can run at most " << int(dev_prop.sharedMemPerBlock/sharedMemPerBlock) << " blocks" << endl;
    // Host matrices: A all 3s, B identity.
    float* h_A = new float[size];
    float* h_B = new float[size];
    for (int i = 0; i < size; ++i) {
        h_A[i] = 3.0f;
        h_B[i] = 0.0f;
    }
    for (int i = 0; i < size; i+=dim+1)
        h_B[i] = 1.0f;
    // Copy matrices to the device
    float* d_A;
    cudaMalloc((void**)&d_A, size*sizeof(float));
    cudaMemcpy((void*)d_A, (void*)h_A, size*sizeof(float), cudaMemcpyHostToDevice);
    float* d_B;
    cudaMalloc((void**)&d_B, size*sizeof(float));
    cudaMemcpy((void*)d_B, (void*)h_B, size*sizeof(float), cudaMemcpyHostToDevice);
    // Allocate C matrix on device
    float* d_C;
    cudaMalloc((void**)&d_C, size*sizeof(float));
    // Call the selected kernel.  Ceiling division covers the partial tile.
    int type = atoi(argv[2]);
    int tiles = (dim + TILE_WIDTH - 1) / TILE_WIDTH;
    if (type == 1) { // "regular" matrix multiplication
        dim3 dimGrid(tiles, tiles, 1);
        dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
        matMul<<<dimGrid, dimBlock>>> (d_C, d_A, d_B, dim);
    }
    else if (type == 2) { // "tiled" matrix multiplication
        dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
        dim3 dimGrid(tiles, tiles, 1);
        matMulTiled<<<dimGrid, dimBlock>>> (d_C, d_A, d_B, dim);
    }
    else
        cout << "invalid argument!" << endl;
    // Recover C matrix from device to host (cudaMemcpy synchronizes).
    float* h_C = new float[size];
    cudaMemcpy((void*)h_C, (void*)d_C, size*sizeof(float), cudaMemcpyDeviceToHost);
    // Check results: C = A * I should equal A (all 3s).
    for (int i = 0; i < size; ++i) {
        if (fabs(h_C[i] - 3.0f) > 0.0001f) {
            cout << "ERROR: something is not right." << endl;
            break;
        }
    }
    // Finalize storage
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    delete [] h_A;
    delete [] h_B;
    delete [] h_C;
    cout << "Closing..." << endl;
    return 0;
}
|
12,340 | #include "includes.h"
// Element-wise copy of d_oldArray into d_newArray (one thread per element).
__global__ void swapVals_kernel(unsigned int * d_newArray, unsigned int * d_oldArray, unsigned int numElems)
{
    unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= numElems)
        return;
    d_newArray[idx] = d_oldArray[idx];
}
12,341 | #include <stdio.h>
#include <iostream>
#include <fstream>
#include <string>
#include <sstream>
#include <vector>
#include <chrono>
using namespace std;
#define kernel_width 7
#define threadsPerBlock 512
#define NUM_ITER_DFS 3
#define DFS_BLOCK_SIZE 8
// Minimal string-to-float parser: optional leading '-', decimal digits, and
// a '.' after which digits count as fractional; any other character is
// skipped.  No exponent support.
float stoff(const char* s){
    float sign = 1.0f;
    if (*s == '-') {
        sign = -1.0f;
        ++s;
    }
    float value = 0.0f;
    float scale = 1.0f;
    bool fractional = false;
    for (; *s; ++s) {
        if (*s == '.') {
            fractional = true;
            continue;
        }
        int digit = *s - '0';
        if (digit < 0 || digit > 9)
            continue;                 // ignore non-digit characters
        if (fractional)
            scale /= 10.0f;           // each digit past the point shrinks the scale
        value = value * 10.0f + (float)digit;
    }
    return value * scale * sign;
}
// 7x7 Gaussian blur over a row-major width x height image (N = width*height,
// one thread per pixel).  Near the borders the window is clipped and the
// result is renormalized by the sum of the in-bounds weights (`denom`), so
// edge pixels keep full brightness.
__global__ void kernel_blur(float* pixels, float* output, int width, int height, int N) {
// Hard-coded 7x7 Gaussian weights (sum approximately 1 over the full window).
const float kernel[kernel_width][kernel_width] = {
{0.00000067, 0.00002292, 0.00019117, 0.00038771, 0.00019117, 0.00002292, 0.00000067},
{0.00002292, 0.00078633, 0.00655965, 0.01330373, 0.00655965, 0.00078633, 0.00002292},
{0.00019117, 0.00655965, 0.05472157, 0.11098164, 0.05472157, 0.00655965, 0.00019117},
{0.00038771, 0.01330373, 0.11098164, 0.22508352, 0.11098164, 0.01330373, 0.00038771},
{0.00019117, 0.00655965, 0.05472157, 0.11098164, 0.05472157, 0.00655965, 0.00019117},
{0.00002292, 0.00078633, 0.00655965, 0.01330373, 0.00655965, 0.00078633, 0.00002292},
{0.00000067, 0.00002292, 0.00019117, 0.00038771, 0.00019117, 0.00002292, 0.00000067}
};
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < N) {
int row = index / width;
int col = index % width;
float sum = 0;
float denom = 0;
// Window bounds centered on (row, col); may extend past the image edges
// and is clipped by the bounds test inside the loops.
int rowStart = row - kernel_width/2;
int rowEnd = row + kernel_width/2 + 1;
int colStart = col - kernel_width/2;
int colEnd = col + kernel_width/2 + 1;
for (int smallRow = rowStart; smallRow < rowEnd; smallRow ++) {
for (int smallCol = colStart; smallCol < colEnd; smallCol ++) {
if (smallRow >= 0 && smallRow < height && smallCol >= 0 && smallCol < width) {
sum += kernel[smallRow - rowStart][smallCol - colStart] * pixels[smallRow * width + smallCol];
denom += kernel[smallRow - rowStart][smallCol - colStart];
}
}
}
output[index] = sum/denom;
}
}
// Forward-difference gradient per pixel: magnitude into gradientMag, and
// direction quantized to multiples of 45 degrees ({0,45,90,135,180}) into
// gradientAng.  One thread per pixel.
__global__ void kernel_calculateGradient(float* pixelsAfterBlur, float* gradientMag, int* gradientAng, int width, int height, int N) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < N) {
int row = index / width;
int col = index % width;
// NOTE(review): `(float) row == height - 1? 0 : ...` parses as
// ((float)row == height-1) ? 0 : ... because == binds tighter than ?:,
// i.e. the forward difference is zeroed on the last row/column.  That
// matches the intent here, but the expression deserves parentheses.
float gy = (float) row == height - 1? 0 : pixelsAfterBlur[index + width] - pixelsAfterBlur[index];
float gx = (float) col == width - 1? 0 : pixelsAfterBlur[index + 1] - pixelsAfterBlur[index];
gradientMag[index] = sqrt(gx * gx + gy * gy);
float ang;
// Near-vertical gradient (gx ~ 0): force 90 degrees to avoid dividing by 0.
if (gx < 0.000001 && gx > -0.000001) ang = 90;
else ang = atan(gy / gx) / 3.1415926 * 180.0;
if (ang < 0)
ang += 180;
// Round to the nearest multiple of 45 degrees.
gradientAng[index] = ((int) (ang + 22.5) / 45) * 45;
}
}
/**
 * Classify each pixel: strong edge (>= high_threshold), weak edge
 * (in [low_threshold, high_threshold)), or neither.  One thread per pixel.
 * Fix: both flags are now written unconditionally (0 or 1).  The original
 * only ever wrote 1s, so the outputs depended on the previous contents of
 * the device buffers, which the host wrapper allocates without zeroing.
 */
__global__ void kernel_doubleThreshold(float* pixelsAfterThin, int* pixelsStrongEdges, int* pixelsWeakEdges, int width, int height, float low_threshold, float high_threshold, int N) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < N) {
        float val = pixelsAfterThin[index];
        pixelsStrongEdges[index] = (val >= high_threshold) ? 1 : 0;
        pixelsWeakEdges[index] = (val < high_threshold && val >= low_threshold) ? 1 : 0;
    }
}
/**
 * Non-maximum suppression: keep a pixel's gradient magnitude only if it is
 * strictly greater than both neighbours along its quantized gradient
 * direction; otherwise write 0.  One thread per pixel.
 * Fix: several boundary guards tested the wrong coordinate (row where the
 * access offset was on col, and vice versa), allowing out-of-bounds reads
 * at the image edges.  Each guard now matches the offsets actually used.
 */
__global__ void kernel_thin(float* pixelsAfterThin, int* gradientAng, float* gradientMag, int width, int height, int N) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < N) {
        int row = index / width;
        int col = index % width;
        float mag = gradientMag[row * width + col];
        float magL = 0;
        float magR = 0;
        int ang = gradientAng[row * width + col];
        if (ang == 0 || ang == 180) {
            // Horizontal neighbours: (row, col-1) and (row, col+1).
            if (col > 0) magL = gradientMag[row * width + col - 1];
            if (col < width - 1) magR = gradientMag[row * width + col + 1];
        }
        else if (ang == 45 || ang == 225) {
            // Diagonal: (row+1, col+1) and (row-1, col-1).
            if (row < height - 1 && col < width - 1) magL = gradientMag[(row + 1) * width + col + 1];
            if (row > 0 && col > 0) magR = gradientMag[(row - 1) * width + col - 1];
        }
        else if (ang == 90 || ang == 270) {
            // Vertical neighbours: (row-1, col) and (row+1, col).
            if (row > 0) magL = gradientMag[(row - 1) * width + col];
            if (row < height - 1) magR = gradientMag[(row + 1) * width + col];
        }
        else if (ang == 135 || ang == 315) {
            // Diagonal: (row+1, col-1) and (row-1, col+1).
            if (row < height - 1 && col > 0) magL = gradientMag[(row + 1) * width + col - 1];
            if (row > 0 && col < width - 1) magR = gradientMag[(row - 1) * width + col + 1];
        }
        if (mag > magL && mag > magR) {
            pixelsAfterThin[row * width + col] = mag;
        }
        else {
            pixelsAfterThin[row * width + col] = 0;
        }
    }
}
// Host wrapper for kernel_blur: stage `pixels` on the device, run the blur,
// and copy the result back into `output`.
void blur(float* pixels, float* output, int width, int height, int N, int blocks) {
    float* d_in;
    float* d_out;
    size_t bytes = N * sizeof(float);
    cudaMalloc(&d_in, bytes);
    cudaMalloc(&d_out, bytes);
    cudaMemcpy(d_in, pixels, bytes, cudaMemcpyHostToDevice);
    kernel_blur<<<blocks, threadsPerBlock>>>(d_in, d_out, width, height, N);
    cudaDeviceSynchronize();
    cudaMemcpy(output, d_out, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_in);
    cudaFree(d_out);
}
// Host wrapper for kernel_calculateGradient: stage the blurred image on the
// device, compute per-pixel magnitude and quantized angle, copy both back,
// and scan the magnitudes on the host for the maximum (returned through
// *maxMag; later used to scale the hysteresis thresholds).
void calculateGradient(float* pixelsAfterBlur, float* gradientMag, int* gradientAng, int width, int height, float* maxMag, int N, int blocks) {
float* cudaPixels;
float* cudaGradientMag;
int* cudaGradientAng;
cudaMalloc(&cudaPixels, N * sizeof(float));
cudaMalloc(&cudaGradientAng, N * sizeof(int));
cudaMalloc(&cudaGradientMag, N * sizeof(float));
cudaMemcpy(cudaPixels, pixelsAfterBlur, N * sizeof(float), cudaMemcpyHostToDevice);
kernel_calculateGradient<<<blocks, threadsPerBlock>>>(cudaPixels, cudaGradientMag, cudaGradientAng, width, height, N);
cudaDeviceSynchronize();
cudaMemcpy(gradientMag, cudaGradientMag, N * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(gradientAng, cudaGradientAng, N * sizeof(int), cudaMemcpyDeviceToHost);
// Host-side max reduction over the returned magnitudes.
float max = 0;
for (int i = 0; i < N; i++) {
if (gradientMag[i] > max) {
max = gradientMag[i];
}
}
*maxMag = max;
cudaFree(cudaPixels);
cudaFree(cudaGradientAng);
cudaFree(cudaGradientMag);
}
// Host wrapper for kernel_thin (non-maximum suppression): stage magnitude
// and angle maps on the device, run the kernel, and copy the suppressed
// image back into pixelsAfterThin.  Note: pixelsAfterThin is also copied
// H2D first, though the kernel overwrites every pixel it owns.
void thin(float* gradientMag, int* gradientAng, float* pixelsAfterThin, int width, int height, int N, int blocks) {
float* cudaPixels;
float* cudaGradientMag;
int* cudaGradientAng;
cudaMalloc(&cudaPixels, N * sizeof(float));
cudaMalloc(&cudaGradientAng, N * sizeof(int));
cudaMalloc(&cudaGradientMag, N * sizeof(float));
cudaMemcpy(cudaPixels, pixelsAfterThin, N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(cudaGradientAng, gradientAng, N * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(cudaGradientMag, gradientMag, N * sizeof(float), cudaMemcpyHostToDevice);
kernel_thin<<<blocks, threadsPerBlock>>>(cudaPixels, cudaGradientAng, cudaGradientMag, width, height, N);
cudaDeviceSynchronize();
cudaMemcpy(pixelsAfterThin, cudaPixels, N * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(cudaPixels);
cudaFree(cudaGradientMag);
cudaFree(cudaGradientAng);
}
/**
 * Host wrapper for kernel_doubleThreshold.
 * Fix: the strong/weak device buffers are now zeroed after allocation.  The
 * kernel as originally written only set 1s, so with plain cudaMalloc the
 * untouched entries held garbage that was then copied back over the host's
 * calloc'd arrays.
 */
void doubleThreshold(float* pixelsAfterThin, int* pixelsStrongEdges, int* pixelsWeakEdges, int width, int height, float low_threshold, float high_threshold, int N, int blocks) {
    float* cudaPixels;
    int* cudaStrongEdges;
    int* cudaWeakEdges;
    cudaMalloc(&cudaPixels, N * sizeof(float));
    cudaMalloc(&cudaStrongEdges, N * sizeof(int));
    cudaMalloc(&cudaWeakEdges, N * sizeof(int));
    cudaMemset(cudaStrongEdges, 0, N * sizeof(int));
    cudaMemset(cudaWeakEdges, 0, N * sizeof(int));
    cudaMemcpy(cudaPixels, pixelsAfterThin, N * sizeof(float), cudaMemcpyHostToDevice);
    kernel_doubleThreshold<<<blocks, threadsPerBlock>>>(cudaPixels, cudaStrongEdges, cudaWeakEdges, width, height, low_threshold, high_threshold, N);
    cudaDeviceSynchronize();
    cudaMemcpy(pixelsStrongEdges, cudaStrongEdges, N * sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(pixelsWeakEdges, cudaWeakEdges, N * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(cudaPixels);
    cudaFree(cudaStrongEdges);
    cudaFree(cudaWeakEdges);
}
// Push `val` onto the explicit DFS stack (*stack_pt is the element count).
// Fix: the original wrote `*stack_pt ++`, which parses as *(stack_pt++) and
// advances the POINTER, leaving the count unchanged -- every push landed in
// slot 0.  (*stack_pt)++ increments the count itself.
__device__ __inline__ void push_back(int* stack, int* stack_pt, int val) {
    stack[*stack_pt] = val;
    (*stack_pt)++;
}
// Returns 1 when the explicit DFS stack holds no elements, else 0.
// (`stack` is unused; kept for interface symmetry with push/pop.)
__device__ __inline__ int empty(int* stack, int* stack_pt) {
    return (*stack_pt == 0) ? 1 : 0;
}
// Pop and return the top element of the explicit DFS stack.
// Fix: the original wrote `*stack_pt --`, which parses as *(stack_pt--) and
// moves the POINTER instead of decrementing the count, so the stack never
// shrank.  (*stack_pt)-- decrements the count itself.
__device__ __inline__ int pop_back(int* stack, int* stack_pt) {
    int val = stack[*stack_pt - 1];
    (*stack_pt)--;
    return val;
}
// Iterative (explicit-stack) flood fill used for hysteresis edge tracking:
// starting from the strong edge at (row, col), promote 8-connected weak
// edges to strong within the window [lorow, hirow) x [locol, hicol).
// NOTE(review): the local stack holds DFS_BLOCK_SIZE^2 = 64 entries, but
// kernel_dfs passes the FULL image as the window, so a large connected
// component could overflow it -- verify capacity against expected inputs.
// NOTE(review): neighbour indices are derived from the ORIGINAL (row, col),
// not the popped idx, so the walk only ever inspects the seed's immediate
// neighbourhood -- confirm this is intended.
__device__ __inline__ void dfsRange(int row, int col, int lorow, int hirow, int locol, int hicol, int* pixelsStrongEdges, int* pixelsWeakEdges, int* visited, int width, int height) {
int stack[DFS_BLOCK_SIZE * DFS_BLOCK_SIZE];
int stack_pt = 0;
int idx = row * width + col;
push_back(stack, &stack_pt, idx);
while (!empty(stack, &stack_pt)) {
idx = pop_back(stack, &stack_pt);
// A weak edge reached from a strong one becomes strong.
if (pixelsWeakEdges[idx]) {
pixelsStrongEdges[idx] = 1;
}
int id;
// Only continue expanding from pixels that are (now) strong.
if (pixelsStrongEdges[idx]) {
// Row above: centre, left and right diagonals.
if (row > lorow) {
id = (row - 1) * width + col;
if (!visited[id]) {
push_back(stack, &stack_pt, id);
visited[id] = 1;
}
if (col > locol) {
id = (row - 1) * width + col - 1;
if (!visited[id]){
push_back(stack, &stack_pt, id);
visited[id] = 1;
}
}
if (col < hicol - 1) {
id = (row - 1) * width + col + 1;
if (!visited[id]) {
push_back(stack, &stack_pt, id);
visited[id] = 1;
}
}
}
// Row below: centre, left and right diagonals.
if (row < hirow - 1) {
id = (row + 1) * width + col;
if (!visited[id]) {
push_back(stack, &stack_pt, id);
visited[id] = 1;
}
if (col > locol) {
id = (row + 1) * width + col - 1;
if (!visited[id]){
push_back(stack, &stack_pt, id);
visited[id] = 1;
}
}
if (col < hicol - 1) {
id = (row + 1) * width + col + 1;
if (!visited[id]) {
push_back(stack, &stack_pt, id);
visited[id] = 1;
}
}
}
// Same row: left and right neighbours.
if (col > locol) {
id = row * width + col - 1;
if (!visited[id]) {
push_back(stack, &stack_pt, id);
visited[id] = 1;
}
}
if (col < hicol - 1) {
id = row * width + col + 1;
if (!visited[id]) {
push_back(stack, &stack_pt, id);
visited[id] = 1;
}
}
}
}
}
/**
 * Run the flood-fill promotion over each image tile (numDiv x numDiv tiles,
 * one thread per tile): every strong pixel in the tile seeds a dfsRange over
 * the whole image.
 * Fix: the row range was computed from `width`; it now uses `height`, so
 * non-square images are tiled correctly (rows past `height` were read, or
 * bottom rows skipped, depending on the aspect ratio).
 */
__global__ void kernel_dfs(int numDiv, int* pixelsStrongEdges, int* pixelsWeakEdges, int* visited, int width, int height) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= numDiv * numDiv) return;
    int colIndex = index % numDiv;
    int rowIndex = index / numDiv;
    int colStart = colIndex * width / numDiv;
    int colEnd = (colIndex + 1) * width / numDiv;
    int rowStart = rowIndex * height / numDiv;
    int rowEnd = (rowIndex + 1) * height / numDiv;
    for (int row = rowStart; row < rowEnd; row ++) {
        for (int col = colStart; col < colEnd; col ++) {
            if (pixelsStrongEdges[row * width + col] == 1)
                dfsRange(row, col, 0, height, 0, width, pixelsStrongEdges, pixelsWeakEdges, visited, width, height);
        }
    }
}
/**
 * Propagate strong-edge flags one pixel across tile borders (left, right,
 * top, bottom) so neighbouring tiles' DFS passes can pick them up.  One
 * thread per tile.
 * Fix: the row range was computed from `width`; it now uses `height`, so
 * non-square images are tiled correctly.
 */
__global__ void kernel_exchange(int numDiv, int* pixelsStrongEdges, int* pixelsWeakEdges, int* visited, int width, int height) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= numDiv * numDiv) return;
    int colIndex = index % numDiv;
    int rowIndex = index / numDiv;
    int colStart = colIndex * width / numDiv;
    int colEnd = (colIndex + 1) * width / numDiv;
    int rowStart = rowIndex * height / numDiv;
    int rowEnd = (rowIndex + 1) * height / numDiv;
    // Left
    if (colStart > 0) {
        for (int row = rowStart; row < rowEnd; row ++) {
            if (pixelsStrongEdges[row * width + colStart] == 1) {
                pixelsStrongEdges[row * width + colStart - 1] = 1;
            }
        }
    }
    // Right
    if (colEnd < width) {
        for (int row = rowStart; row < rowEnd; row ++) {
            if (pixelsStrongEdges[row * width + colEnd - 1] == 1) {
                pixelsStrongEdges[row * width + colEnd] = 1;
            }
        }
    }
    // Top
    if (rowStart > 0) {
        for (int col = colStart; col < colEnd; col ++) {
            if (pixelsStrongEdges[rowStart * width + col] == 1) {
                pixelsStrongEdges[(rowStart - 1) * width + col] = 1;
            }
        }
    }
    // Bottom
    if (rowEnd < height) {
        for (int col = colStart; col < colEnd; col ++) {
            if (pixelsStrongEdges[(rowEnd - 1) * width + col] == 1) {
                pixelsStrongEdges[rowEnd * width + col] = 1;
            }
        }
    }
}
/**
 * Hysteresis edge tracking: iteratively promote weak edges connected to
 * strong ones (NUM_ITER_DFS rounds of exchange + per-tile DFS).
 * Fixes: the strong/weak/visited arrays are now staged in DEVICE memory --
 * the original passed host pointers (including a host calloc'd `visited`)
 * straight into the kernels, which is invalid -- and all allocations are
 * released.
 */
void edgeTrack(int* pixelsStrongEdges, int* pixelsWeakEdges, int width, int height) {
    int N = width * height;
    int* d_strong = NULL;
    int* d_weak = NULL;
    int* d_visited = NULL;
    cudaMalloc(&d_strong, N * sizeof(int));
    cudaMalloc(&d_weak, N * sizeof(int));
    cudaMalloc(&d_visited, N * sizeof(int));
    cudaMemcpy(d_strong, pixelsStrongEdges, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_weak, pixelsWeakEdges, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemset(d_visited, 0, N * sizeof(int));
    int numDiv = min((height + DFS_BLOCK_SIZE - 1)/DFS_BLOCK_SIZE
                     , (width + DFS_BLOCK_SIZE - 1)/DFS_BLOCK_SIZE);
    int blocks = (numDiv * numDiv + threadsPerBlock - 1) / threadsPerBlock;
    for (int i = 0; i < NUM_ITER_DFS; i ++) {
        kernel_exchange<<<blocks, threadsPerBlock>>>(numDiv, d_strong, d_weak, d_visited, width, height);
        cudaDeviceSynchronize();
        kernel_dfs<<<blocks, threadsPerBlock>>>(numDiv, d_strong, d_weak, d_visited, width, height);
        cudaDeviceSynchronize();
    }
    // Only the strong-edge map is modified by the kernels.
    cudaMemcpy(pixelsStrongEdges, d_strong, N * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_strong);
    cudaFree(d_weak);
    cudaFree(d_visited);
}
// Parse up to numElts delimiter-separated floats from `str` into a
// malloc'd array (caller frees).  Tokens are converted with stoff.
float* split(string str, char delimiter, int numElts) {
    float* values = (float*) malloc(sizeof(float) * numElts);
    stringstream stream(str);
    string token;
    int count = 0;
    while (getline(stream, token, delimiter)) {
        values[count] = stoff(token.c_str());
        count++;
    }
    return values;
}
/**
 * Canny edge detection driver.
 * Input text format: line 1 = height, line 2 = width, then `height` lines of
 * space-separated pixel values.  Pipeline: blur -> gradient -> non-maximum
 * suppression -> double thresholding -> edge tracking; per-stage wall-clock
 * timings are printed and the 0/255 strong-edge map is written to result.txt.
 */
int main(int argc, char** argv) {
if (argc != 2) {
printf("usage: DisplayImage.out <Image_Path>\n");
return -1;
}
// Hysteresis thresholds, as fractions of the maximum gradient magnitude.
float low_threshold = 0.1;
float high_threshold = 0.15;
float* pixels;
int height;
int width;
string line;
ifstream myfile (argv[1]);
if (myfile.is_open()) {
getline(myfile, line);
height = stoff(line.c_str());
getline(myfile, line);
width = stoff(line.c_str());
pixels = (float*) malloc(sizeof(float) * height * width);
int idx = 0;
// One image row per input line.
while (getline(myfile, line)) {
float* content = split(line, ' ', width);
memcpy(pixels+idx, content, sizeof(float) * width);
idx += width;
free(content);
}
myfile.close();
}
else {
printf("Unable to open file");
return -1;
}
int N = height * width;
int blocks = (N + threadsPerBlock - 1) / threadsPerBlock;
auto start = std::chrono::high_resolution_clock::now();
// /* 1. blur */
float* pixelsAfterBlur = (float*) malloc(sizeof(float)*height*width);
blur(pixels, pixelsAfterBlur, width, height, N, blocks);
auto ck2 = std::chrono::high_resolution_clock::now();
/* 2. gradient */
float* gradientMag = (float*) malloc(sizeof(float)*height*width);
int* gradientAng = (int*) malloc(sizeof(int)*height*width);
float maxMag = -1;
calculateGradient(pixelsAfterBlur, gradientMag, gradientAng, width, height, &maxMag, N, blocks);
auto ck3 = std::chrono::high_resolution_clock::now();
/* 3. non-maximum suppresion */
float* pixelsAfterThin = (float*) malloc(sizeof(float)*height*width);
thin(gradientMag, gradientAng, pixelsAfterThin, width, height, N, blocks);
auto ck4 = std::chrono::high_resolution_clock::now();
/* 4. double thresholding */
int* pixelsStrongEdges = (int*) calloc(sizeof(int), height*width);
int* pixelsWeakEdges = (int*) calloc(sizeof(int), height*width);
doubleThreshold(pixelsAfterThin, pixelsStrongEdges, pixelsWeakEdges, width, height, low_threshold * maxMag, high_threshold * maxMag, N, blocks);
auto ck5 = std::chrono::high_resolution_clock::now();
/* 5. edge tracking */
edgeTrack(pixelsStrongEdges, pixelsWeakEdges, width, height);
auto finish = std::chrono::high_resolution_clock::now();
// Per-stage wall-clock timings.
std::chrono::duration<double> elapsed = std::chrono::duration_cast<std::chrono::duration<double>>(finish - start);
std::chrono::duration<double> blur_time = std::chrono::duration_cast<std::chrono::duration<double>>(ck2 - start);
std::chrono::duration<double> grad_time = std::chrono::duration_cast<std::chrono::duration<double>>(ck3 - ck2);
std::chrono::duration<double> sup_time = std::chrono::duration_cast<std::chrono::duration<double>>(ck4 - ck3);
std::chrono::duration<double> db_ts = std::chrono::duration_cast<std::chrono::duration<double>>(ck5 - ck4);
std::chrono::duration<double> ed_tk = std::chrono::duration_cast<std::chrono::duration<double>>(finish - ck5);
std::cout << "Total: " << elapsed.count() << " seconds.\n";
std::cout << "Blur: " << blur_time.count() << " seconds.\n";
std::cout << "Gradient: " << grad_time.count() << " seconds.\n";
std::cout << "Non-max sup: " << sup_time.count() << " seconds.\n";
std::cout << "Double thresholding: " << db_ts.count() << " seconds.\n";
std::cout << "Edge tracking: " << ed_tk.count() << " seconds.\n";
/* 6. display */
// Write the strong-edge image (0/255) back out in the same text format.
ofstream outfile ("result.txt");
if (outfile.is_open()) {
outfile << height << "\n";
outfile << width << "\n";
int idx = 0;
for (int i = 0; i < height; i++) {
for (int j = 0; j < width; j++) {
outfile << pixelsStrongEdges[idx++] * 255 << " ";
}
outfile << "\n";
}
outfile.close();
}
return 0;
}
12,342 | #include "includes.h"
// Copy each port's accumulated input into its value variable, then clear
// the input accumulator.  One thread per (node, port) pair; the variable
// block of node i starts at i*n_var, with n_port_var entries per port.
__global__ void UpdateExtNeuron(float *port_input_pt, float *port_value_pt, int n_node, int n_var, int n_port_var, int n_port)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid >= n_node*n_port) return;
    int port = tid % n_port;
    int node = tid / n_port;
    int offset = node*n_var + n_port_var*port;
    //printf("port %d node %d pip %f\n", port, node, port_input_pt[offset]);
    port_value_pt[offset] = port_input_pt[offset];
    port_input_pt[offset] = 0.0;
}
12,343 | #include <cuda_runtime.h>
/*
status:☹️
sequence:NCHW
*/
// Convolution parameters passed (by device pointer) to conv_2d.
struct arg
{
int image_height;   // input rows
int image_width;    // input cols
int channel;        // NOTE(review): not read by conv_2d as written
int stride;         // NOTE(review): not read by conv_2d as written
int filter_height;  // filter rows
int filter_width;   // filter cols
};
/**
 * Naive single-channel 2-D convolution: thread (x, y) computes the dot
 * product of the filter with the image patch whose top-left corner is
 * (x, y).
 * Fixes vs. the original: the inner loop incremented `i` instead of `j`
 * (an infinite loop), the accumulated value was never stored anywhere, and
 * there was no bounds guard on the thread coordinates.
 * NOTE(review): the output is assumed dense row-major with valid corner
 * positions 0..out_height / 0..out_width INCLUSIVE (so the row stride is
 * out_width + 1) -- confirm against the host-side allocation.
 */
__global__ void conv_2d(float *image,float *filter,float *out,arg *args)
{
    int x = blockIdx.x*blockDim.x + threadIdx.x;   // patch row
    int y = blockIdx.y*blockDim.y + threadIdx.y;   // patch col
    int out_height = args->image_height - args->filter_height;
    int out_width = args->image_width - args->filter_width;
    if (x > out_height || y > out_width)
        return;
    float local = 0.0f;
    for (int i = 0; i < args->filter_height; i++)
    {
        for (int j = 0; j < args->filter_width; j++)
            local += filter[i*args->filter_width+j]*image[(i+x)*args->image_width+(j+y)];
    }
    out[x*(out_width + 1) + y] = local;
}
12,344 | #include <stdio.h>
#include <sys/time.h>
#define CHECK_CUDA(func) { \
const cudaError_t err = (func); \
if (err != cudaSuccess) { \
fprintf(stderr, "CUDA Error @ %s:%d - %s\n", __FILE__, __LINE__, \
cudaGetErrorString(err)); \
exit(1); \
} \
}
static double seconds() {
struct timeval tp;
struct timezone tzp;
int i = gettimeofday(&tp, &tzp);
return ((double)tp.tv_sec + (double)tp.tv_usec * 1.e-6);
}
// One Jacobi relaxation sweep over the N interior points of curr (arrays
// are N+2 long; cells 0 and N+1 are fixed boundaries): each interior cell
// becomes the average of its two neighbours.
__global__ void kernel(float *nxt, float *curr, const int N) {
    unsigned gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= N) return;
    unsigned i = gid + 1;   // shift past the left boundary cell
    nxt[i] = (curr[i - 1] + curr[i + 1]) / 2.0;
}
/**
 * 1-D Jacobi relaxation driver: seed[0..N+1] starts all zero except the
 * right boundary (1.0); `niters` sweeps run on the GPU, ping-ponging two
 * device buffers.  Prints the elapsed time and iteration rate.
 * Fixes: launch errors are now surfaced after the loop, and all host and
 * device allocations are released (the original leaked seed, d_curr and
 * d_nxt).  The memset call is replaced by an explicit loop because
 * <string.h> is not included by this file.
 * NOTE(review): atoi/malloc also rely on an implicit <stdlib.h> here --
 * confirm the build pulls it in.
 */
int main(int argc, char **argv) {
    if (argc != 3) {
        fprintf(stderr, "usage: %s <N> <niters>\n", argv[0]);
        return 1;
    }
    const int N = atoi(argv[1]);
    const int niters = atoi(argv[2]);
    float *seed = (float *)malloc((N + 2) * sizeof(*seed));
    for (int i = 0; i < N + 2; i++) seed[i] = 0.0f;
    seed[N + 1] = 1.0;   // fixed right-boundary condition
    float *d_curr, *d_nxt;
    CHECK_CUDA(cudaMalloc((void **)&d_curr, (N + 2) * sizeof(*d_curr)));
    CHECK_CUDA(cudaMalloc((void **)&d_nxt, (N + 2) * sizeof(*d_nxt)));
    CHECK_CUDA(cudaMemcpy(d_curr, seed, (N + 2) * sizeof(*d_curr),
                          cudaMemcpyHostToDevice));
    double start_time = seconds();
    for (int iter = 0; iter < niters; iter++) {
        const int threads_per_block = 256;
        const int blocks_per_grid = (N + threads_per_block - 1) /
                                    threads_per_block;
        kernel<<<blocks_per_grid, threads_per_block>>>(d_nxt, d_curr, N);
        // Swap buffer roles for the next sweep.
        float *tmp = d_nxt;
        d_nxt = d_curr;
        d_curr = tmp;
    }
    CHECK_CUDA(cudaGetLastError());   // surface any launch failure
    CHECK_CUDA(cudaMemcpy(seed, d_curr, (N + 2) * sizeof(*seed),
                          cudaMemcpyDeviceToHost));
    double elapsed_time = seconds() - start_time;
    printf("Elapsed time for N=%d, # iters=%d is %f s\n", N, niters, elapsed_time);
    printf("%f iters / s\n", (float)niters / elapsed_time);
    CHECK_CUDA(cudaFree(d_curr));
    CHECK_CUDA(cudaFree(d_nxt));
    free(seed);
    return 0;
}
|
12,345 | #include <stdio.h>
//This function will return the threadID from 0..31
//This ID is unique within the warp.
//Different from CUDA's ID which is unique within the block
// Lane index (0..31) of the calling thread within its warp -- unique per
// warp, unlike CUDA's threadIdx.x which is unique per block.
__device__ int gemtcThreadID()
{
  return threadIdx.x & 31;   // same as % 32 for the unsigned threadIdx.x
}
//This function will return the warpID of the calling thread
//This number will be unique within the block
// Warp index of the calling thread, unique within its block.
__device__ int gemtcWarpID()
{
  return threadIdx.x >> 5;   // same as / 32 for the unsigned threadIdx.x
}
//SHARED MEMORY MANAGEMENT
__device__ void *SHARED_MEMORY;
__device__ int SHARED_SIZE;
__device__ int WARPS_PER_SM;
// Record the memory pool handed to us and pre-compute the per-warp slice
// size (SHARED_SIZE = mem_size / warps).  Must run before gemtcSharedMemory.
// NOTE(review): these are __device__ globals, so the partitioning is shared
// module-wide, not per-block -- confirm the launch configuration matches.
__device__ void gemtcInitSharedMemory(void *shared_mem, int mem_size, int warps)
{
//This needs to initialize the three variables above
//We need to statically have a large block on Shared Memory
// allocated when the program launches to divide
SHARED_MEMORY = shared_mem;
SHARED_SIZE = mem_size/warps;
WARPS_PER_SM = warps;
}
// Return the base address of the calling warp's private slice of the pool.
// Fix: SHARED_SIZE is already the per-warp slice size (mem_size/warps, set
// in gemtcInitSharedMemory), so the offset is SHARED_SIZE * warpID; the
// original's extra division by WARPS_PER_SM collapsed all warps' slices
// into overlapping regions near the start of the pool.
__device__ void *gemtcSharedMemory()
{
  return ((char *)SHARED_MEMORY) + SHARED_SIZE*gemtcWarpID();
}
|
12,346 | #include <queue>;
#include <unordered_map>
#include <iostream>;
#include <string>;
#include <limits>;
#include <cstdio>
#include <ctime>
#include <vector>;
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
// Dummy kernel
// Element-wise sum c = a + b; one thread per element, single-block launch.
__global__ void addKernel(int *c, const int *a, const int *b)
{
    int idx = threadIdx.x;
    c[idx] = a[idx] + b[idx];
}
// Minimal 2-D occupancy grid over a borrowed int** (not owned; the caller
// allocates and frees the rows).  Cells: 0 = impassable, nonzero = passable.
struct SimpleGrid {
int ** grid;          // grid[row][col]
int width, height;
SimpleGrid(int ** grid, int width, int height) {
this->grid = grid;
this->width = width;
this->height = height;
}
// Uniform step cost between adjacent cells (arguments are ignored).
int GetCost(std::pair<int, int> from, std::pair<int, int> to) {
return 1;
}
// Row access: g[i] yields row i's int array.
int * operator [](int i) const { return this->grid[i]; }
};
// Hash functor for std::pair<int,int> keys in unordered_map.
// Fix: the original computed p.first * (std::numeric_limits<int>::max() + 1),
// and INT_MAX + 1 overflows a signed int -- undefined behavior.  Pack both
// ints losslessly into one 64-bit value and hash that instead.
struct pair_hash {
    std::size_t operator () (const std::pair<int, int> &p) const {
        long long packed = ((long long)p.first << 32) ^ (unsigned int)p.second;
        return std::hash<long long>()(packed);
    }
};
/**
 * A* search on `grid` from `start` to `goal` (8-connected, unit step cost).
 * Returns the came-from map; follow it back from `goal` to recover a path.
 * Fix: the frontier now uses std::greater so the LOWEST f-score is expanded
 * first.  With std::less the priority_queue is a max-heap, so the search
 * expanded the WORST candidate first -- the opposite of best-first order.
 * NOTE(review): the priority term measures distance from the current cell to
 * its neighbour (always 1 or 2), not to the goal, so this behaves close to
 * Dijkstra; replacing it with a goal-distance heuristic would need an
 * admissibility check for diagonal moves.
 */
std::unordered_map<std::pair<int, int>, std::pair<int, int>, pair_hash>
astar(SimpleGrid grid, std::pair<int, int> start, std::pair<int, int> goal) {
    // (priority, (x, y)) -- smallest priority on top with std::greater.
    typedef std::pair<int, std::pair<int, int>> PQElement;
    std::priority_queue<PQElement, std::vector<PQElement>, std::greater<PQElement>> frontier;
    frontier.emplace(0, start);
    // Best known cost to each cell, and the predecessor that achieved it.
    std::unordered_map<std::pair<int, int>, int, pair_hash> costSoFar;
    std::unordered_map<std::pair<int, int>, std::pair<int, int>, pair_hash> cameFrom;
    cameFrom[start] = start;
    costSoFar[start] = 0;
    while (!frontier.empty()) {
        // Expand the most promising node.
        int x, y;
        std::tie(x, y) = frontier.top().second;
        frontier.pop();
        if (x == goal.first && y == goal.second) break;
#ifdef _DEBUG
        std::cout << "Visiting " << x << " " << y << std::endl;
#endif // DEBUG
        // Relax every in-bounds, passable 8-neighbour.
        for (int i = -1; i < 2; i++) {
            for (int j = -1; j < 2; j++) {
                if ((i == 0) && (j == 0)) { continue; }
                if (((y + i >= 0) && (y + i < grid.height))
                    && ((x + j) >= 0) && (x + j < grid.width)
                    && grid[y + i][x + j] != 0) {
                    std::pair<int, int> next = std::make_pair(x + j, y + i);
                    int newCost = costSoFar[std::make_pair(x, y)] + 1;
                    // Unvisited, or found a cheaper route: record and enqueue.
                    if ((costSoFar.find(next) == costSoFar.end()) || (newCost < costSoFar[next])) {
                        int priority = newCost + (std::abs(x - next.first) + std::abs(y - next.second));
                        frontier.emplace(priority, next);
                        cameFrom[next] = std::make_pair(x, y);
                        costSoFar[next] = newCost;
                    }
                }
            }
        }
    }
    // Release working memory before returning.
    std::priority_queue<PQElement, std::vector<PQElement>, std::greater<PQElement>>().swap(frontier);
    std::unordered_map<std::pair<int, int>, int, pair_hash>().swap(costSoFar);
    return cameFrom;
}
/**
 * Walk the parent chain from `goal` back to the start, marking each visited
 * cell (6 on passable cells, 99 on blocked ones), then print the grid.
 * Fix: the original loop condition re-tested parents.find(goal) on every
 * iteration -- invariant, hence always true once goal has a parent -- so
 * termination relied solely on the inner break, and parents[current] via
 * operator[] could insert missing keys.  The walk now stops when `current`
 * has no recorded parent or reaches its own fixed point (the start).
 */
void drawGrid(SimpleGrid grid, std::pair<int, int> goal,
              std::unordered_map<std::pair<int, int>, std::pair<int, int>, pair_hash> parents) {
    std::pair<int, int> current = goal;
    while (true) {
        if (grid[current.second][current.first] != 0) {
            grid[current.second][current.first] = 6;
        }
        else {
            grid[current.second][current.first] = 99;
        }
        auto it = parents.find(current);
        if (it == parents.end() || it->second == current) break;
        current = it->second;
    }
    for (int i = 0; i < grid.height; i++) {
        for (int j = 0; j < grid.width; j++) {
            std::cout << grid[i][j] << " ";
        }
        std::cout << std::endl;
    }
}
// Demo driver: build a 10x10 grid with walls (0 cells), run A* from (0,0)
// to (9,9), report the elapsed CPU time, and print the grid with the found
// path marked.
int main() {
int constantGrid[][10] = {
{ 1, 0, 1, 1, 1, 1, 1, 1, 1, 1 },
{ 1, 0, 1, 1, 1, 1, 1, 1, 1, 1 },
{ 1, 0, 1, 1, 1, 1, 1, 1, 1, 1 },
{ 1, 0, 1, 0, 1, 0, 0, 1, 1, 1 },
{ 1, 0, 1, 0, 1, 0, 0, 1, 1, 1 },
{ 1, 1, 1, 0, 1, 0, 0, 1, 1, 1 },
{ 1, 1, 1, 0, 1, 1, 1, 1, 1, 1 },
{ 1, 1, 1, 0, 1, 1, 0, 0, 1, 1 },
{ 1, 1, 1, 0, 1, 1, 0, 0, 1, 1 },
{ 1, 1, 1, 0, 1, 1, 1, 1, 1, 1 }
};
// Heap copy of the grid, since SimpleGrid wraps an int** (rows leak at
// exit; acceptable for this demo).
int ** grid = new int*[10];
for (int i = 0; i < 10; i++) {
grid[i] = new int[10];
for (int j = 0; j < 10; j++) {
grid[i][j] = constantGrid[i][j];
}
}
SimpleGrid simpleGrid = SimpleGrid(grid, 10, 10);
// Time the search with the process clock.
std::clock_t start;
double duration;
start = std::clock();
std::unordered_map<std::pair<int, int>, std::pair<int, int>, pair_hash> parents;
parents = astar(simpleGrid, std::make_pair(0, 0), std::make_pair(9, 9));
duration = (std::clock() - start) / (double)CLOCKS_PER_SEC;
std::cout << "Duration: " << duration << " seconds\n";
drawGrid(simpleGrid, std::make_pair(9, 9), parents);
std::unordered_map<std::pair<int, int>, std::pair<int, int>, pair_hash>().swap(parents);
return 0;
}
12,347 | #include "includes.h"
// Fill data[0..size) with value, value+1, value+2, ... (one thread per
// element).
__global__ void iota(int const size, int *data, int const value)
{
    int gid = threadIdx.x + blockIdx.x * blockDim.x;
    if (gid >= size)
        return;
    data[gid] = gid + value;
}
12,348 | /*
* std_page.cu
*
* examine paging with kernel launch
*/
#include <stdio.h>
#include <math.h>
#include <cuda.h>
#define NL printf("\n")
// Standard Page based on a memory allocation of approx 1.6GiB
#define StdPageX 1024
#define StdPageY 200000
#define Elements StdPageX*StdPageY
//----------------------------------------------------------------------
// d_squares[i] = (i+1)^2 for i in [0, n_squares): 1, 4, 9, ...
__global__ void set_squares(long *d_squares, long n_squares) {
  long idx = threadIdx.x + (blockIdx.x * blockDim.x);
  if(idx >= n_squares) return;
  d_squares[idx] = (idx+1)*(idx+1);
}
//----------------------------------------------------------------------
// For each index i in (0, N], store in managed_sums[i] the largest perfect
// square from d_squares that divides i (d_squares[0] == 1, so the search
// always terminates with at least 1). Indices 1..3 are short-circuited to 1.
// NOTE(review): `pageidx` is accepted but never used — every page launch
// recomputes the same index range; confirm whether an offset of
// pageidx * Elements was intended.
// NOTE(review): loop counter `d` is int while nSquares is long — fine for
// the current MaxN, but a silent truncation hazard.
__global__ void func_g(long *managed_sums, long N, long* d_squares, long nSquares, int pageidx) {
long i = threadIdx.x + (blockDim.x * blockIdx.x);
if((i == 0)||(i > N)) {
return;
}else if(i < 4) {
managed_sums[i] = 1;
return;
} else {
// search for largest square which divides i
for(int d = nSquares-1; d >= 0; --d) {
if((i % d_squares[d]) == 0) {
managed_sums[i] = d_squares[d];
return;
} // if...
} //for d...
} // else...
}
//----------------------------------------------------------------------
// Driver: parses target N, generates the device table of squares, then runs
// nPages launches of func_g over a managed "standard page" buffer.
int main(int argc, char **argv) {
const long MaxN = 1e14 + 1;
cudaError_t error_id;
long *d_squares = NULL;
// extract target N
long x = 0;
if(argc == 2) {
x = atol(argv[1]);
} else {
printf("usage: stdp N\n");
exit(1);
}
const long N = x;
if(N > MaxN) {
printf("target: %ld exceeds program limitations %ld\n", N, MaxN);
exit(2);
}
if(N < 1) {
// atol() yields 0 for non-numeric input; a non-positive target is meaningless
printf("target: %ld must be at least 1\n", N);
exit(2);
}
// determine the standard page count nPages for N
const int nPages = (N / (StdPageX * StdPageY)) + 1;
// determine array dimensions for squares
const long nSquares = (long)(sqrt(N+1)); // defines size of array
printf("target: %ld nSquares: %ld\n", N, nSquares);
printf("nPages: %d\n", nPages);
// Allocate space on device for squares array
error_id = cudaMalloc(&d_squares, sizeof(long)*nSquares);
if(error_id != cudaSuccess) {
printf("cudaMalloc squares failed with %d\n", error_id);
exit(1);
}
// launch the generator kernel
printf("Generating squares array...");
cudaGetLastError(); // clear any sticky error before the launch
set_squares<<< ((nSquares/1024)+1), 1024 >>>(d_squares, nSquares);
error_id = cudaPeekAtLastError();
if(error_id != cudaSuccess) {
printf("set_squares failed with %s\n", cudaGetErrorString(error_id));
exit(1);
}
printf("done.\n");
// Allocate managed memory for standard page
cudaDeviceSynchronize();
long* managed_sums = NULL;
error_id = cudaMallocManaged(&managed_sums, sizeof(long)*Elements);
if(error_id != cudaSuccess) {
printf("cudaMallocManaged sums failed with %d\n", error_id);
exit(1);
}
printf("Allocated Managed memory: %d blocks of %d threads.\n", StdPageY, StdPageX);
// Now do nPages of kernel launches
for(int pageidx = 0; pageidx < nPages; ++pageidx) {
printf("page:%d\n", pageidx);
// launch a kernel using calculated configuration
func_g<<<StdPageY, StdPageX>>>(managed_sums, N, d_squares, nSquares, pageidx);
// surface both launch-configuration and asynchronous execution errors
// (the original synchronized but never inspected the result)
error_id = cudaGetLastError();
if(error_id == cudaSuccess) error_id = cudaDeviceSynchronize();
if(error_id != cudaSuccess) {
printf("func_g failed with %s\n", cudaGetErrorString(error_id));
exit(1);
}
}
// Clean up code
NL;
cudaFree(managed_sums);
cudaFree(d_squares);
return 0;
}
|
12,349 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
using namespace std;
//Kernel: C = A * B for square n x n row-major int matrices.
//Expects a 2-D launch; each thread computes one output element.
__global__ void multKernel(int *a, int *b, int *c, int n)
{
	//BUGFIX: row must come from the y-dimension (blockIdx.y/threadIdx.y).
	//The original derived both coordinates from the x-dimension, so
	//row == col for every thread and only diagonal elements were computed.
	int row = (blockIdx.y * blockDim.y) + threadIdx.y; //Calculating row
	int col = (blockIdx.x * blockDim.x) + threadIdx.x; //Calculating column
	int sum = 0;
	//Checking boundary condition (grid may overhang the matrix)
	if ((row < n) && (col < n)) {
		//Dot product of row `row` of A with column `col` of B
		for (int k = 0; k < n; k++) {
			sum += a[row * n + k] * b[k * n + col];
		}
		c[row * n + col] = sum;
	}
}
//Host driver: multiplies two random 1024 x 1024 matrices on the GPU using
//unified (managed) memory with explicit prefetching around the launch.
int main()
{
	//Matrix of size 1024 x 1024
	constexpr int n = 1 << 10;
	//Size (in bytes) of matrix
	constexpr size_t bytes = n * n * sizeof(int);
	//Unified-memory pointers, accessible from both host and device
	int* a, * b, * c;
	cudaMallocManaged(&a, bytes);
	cudaMallocManaged(&b, bytes);
	cudaMallocManaged(&c, bytes);
	// device ID (of GPU) for prefetching.
	// BUGFIX: the original wrote `int id = cudaGetDevice(&id);`, which
	// overwrote the device ordinal with the cudaError_t return value.
	int id = 0;
	cudaGetDevice(&id);
	//Generating random matrices
	for(int i =0; i < n; i++)
		for (int j = 0; j < n; j++) {
			a[i * n + j] = rand() % 100;
			b[i * n + j] = rand() % 100;
		}
	//Threads per block: 2-D tile, 16 * 16 = 256 threads per block
	int block_size = 16;
	//Blocks in each dimension.
	//BUGFIX: ceil(n / block_size) truncated in integer math *before* ceil()
	//could round; use integer ceil-division so a partial tile is not dropped.
	int grid_size = (n + block_size - 1) / block_size;
	dim3 grid(grid_size, grid_size); //Dimension of grid
	dim3 threads(block_size, block_size); //Dimension of block
	// prefetching 'a', 'b' and 'c' to the GPU before the kernel runs
	cudaMemPrefetchAsync(a, bytes, id);
	cudaMemPrefetchAsync(b, bytes, id);
	cudaMemPrefetchAsync(c, bytes, id);
	//Launching kernel
	multKernel <<< grid, threads >>> (a, b, c, n);
	//Synchronization needed to make sure the kernel is done
	cudaDeviceSynchronize();
	//Prefetching back to CPU because we know the kernel is completed;
	//cudaCpuDeviceId is the built-in ordinal for host memory.
	cudaMemPrefetchAsync(c, bytes, cudaCpuDeviceId);
	cout << "Done successfully" << endl;
	//Free up the memory
	cudaFree(a);
	cudaFree(b);
	cudaFree(c);
	return 0;
}
|
12,350 | //--blockDim=8 --gridDim=1
// GPUVerify test case (see the --blockDim/--gridDim directive above): every
// thread executes a __syncthreads(), but from two different program points
// depending on thread-id parity. This is deliberately divergent barrier
// usage for the verifier to analyze — do not "fix" the divergence.
__global__ void a() {
unsigned tid = threadIdx.x;
if ((tid % 2) == 0)
__syncthreads();
else
__syncthreads();
}
|
12,351 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_profiler_api.h>
// Kernel: writes each element's own global index into device[i].
// NOTE: there is no bounds guard, so the launch configuration must cover
// exactly the allocated element count (the caller uses <<<10,10>>> for a
// 100-element buffer).
__global__
void vecAddKernel(int * device)
{
// Flat 1-D global thread index.
int i = threadIdx.x + (blockDim.x * blockIdx.x);
device[i] = i;
}
// Host driver: zero-fills a 100-element array, prints it, has the GPU write
// device[i] = i, copies the result back, and prints it again.
void vecAdd()
{
const int count = 100;               // element count shared by all sizes below
int inhost[count];
int j;
int * device;
for( j = 0; j < count; j++)
inhost[j] = 0;
for( j = 0; j < count; j++)
printf("inhost[%d] = %d\n", j, inhost[j]);
printf("---------\n");
cudaMalloc(&device, count*sizeof(int));
// kernel invocation: 10 blocks x 10 threads == count elements exactly
// (vecAddKernel has no bounds guard, so this must match the allocation)
vecAddKernel<<<10,10>>>(device);
// transfer result from device to host.
// BUGFIX: was a magic byte count of 400; spell it out so it tracks `count`
// and the element type.
cudaMemcpy(inhost, device, count*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(device);
for( j = 0; j < count; j++)
printf("inhost[%d] = %d\n", j, inhost[j]);
}
// Entry point: runs the host-side vector demo once.
int main()
{
vecAdd();
return 0;
}
|
12,352 | //pass
//--gridDim=128 --blockDim=256
// Element-wise modulate: d_A[i] = d_A[i] * d_B[i] / N.
// Grid-stride traversal, so any launch configuration covers all N elements.
__global__ void modulateKernel(float *d_A, float *d_B, int N)
{
    const float invN = 1.0f / (float)N;
    const int stride = blockDim.x * gridDim.x;
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    while (idx < N)
    {
        d_A[idx] *= d_B[idx] * invN;
        idx += stride;
    }
}
|
12,353 | #include<stdio.h>
// Kernel: each launched thread prints a greeting tagged with its block and
// thread index (device-side printf; output ordering is unspecified).
__global__ void func(void){
printf("Hello world del bloque %d del thread %d!\n", blockIdx.x, threadIdx.x);
}
int main(void){
func<<<3,3>>>(); //3 blocks of 3 threads each
// Device printf output is flushed when the host synchronizes.
cudaDeviceSynchronize();
printf("Hola del cpu thread\n");
return 0;
}
|
12,354 | #include <stdio.h>
#include <iostream>
// Enumerate all CUDA devices and report their general, memory, and
// multiprocessor properties, mirrored to stdout and an append-mode log file.
int main( void ) {
cudaDeviceProp prop;
FILE *deviceInfoOutput;
deviceInfoOutput = fopen("deviceInfoOutput.txt", "a");
// BUGFIX: fopen can fail (permissions, read-only fs); every fprintf below
// on a NULL stream would be undefined behavior.
if (deviceInfoOutput == NULL) {
fprintf(stderr, "Failed to open deviceInfoOutput.txt for appending\n");
return 1;
}
int count;
cudaGetDeviceCount( &count );
printf("Devices information\n");
fprintf(deviceInfoOutput, "Devices information\n");
printf("Total Cuda devices: %d\n", count);
fprintf(deviceInfoOutput, "Total Cuda devices: %d\n", count);
for (int i=0; i< count; i++) {
cudaGetDeviceProperties( &prop, i );
// Mirror each property to both the console and the log file.
printf("----General information for device %d----\n",i);
fprintf(deviceInfoOutput, "----General information for device %d----\n", i);
printf("Name: %s\n", prop.name);
fprintf(deviceInfoOutput, "Name: %s\n", prop.name);
printf("Compute Capability: %d.%d\n", prop.major, prop.minor);
fprintf(deviceInfoOutput, "Compute Capability: %d.%d\n", prop.major, prop.minor);
printf("Clock Rate: %d\n", prop.clockRate);
fprintf(deviceInfoOutput, "Clock Rate: %d\n", prop.clockRate);
printf("Device Copy Overlap: ");
fprintf(deviceInfoOutput, "Device Copy Overlap: ");
if(prop.deviceOverlap){
printf("Enabled\n");
fprintf(deviceInfoOutput, "Enabled\n");
}
else{
printf("Disabled\n");
fprintf(deviceInfoOutput, "Disabled\n");
}
printf("Kernel execution timeout: ");
fprintf(deviceInfoOutput, "Kernel execution timeout: ");
if (prop.kernelExecTimeoutEnabled){
printf( "Enabled\n" );
fprintf(deviceInfoOutput, "Enabled\n");
}
else{
printf( "Disabled\n" );
fprintf(deviceInfoOutput, "Disabled\n");
}
printf( "--- Memory Information for device %d ---\n", i );
fprintf(deviceInfoOutput, "--- Memory Information for device %d ---\n", i );
printf(" Total global mem: %ld\n", prop.totalGlobalMem);
fprintf(deviceInfoOutput, " Total global mem: %ld\n", prop.totalGlobalMem);
printf( "Total constant Mem: %ld\n", prop.totalConstMem );
fprintf(deviceInfoOutput, "Total constant Mem: %ld\n", prop.totalConstMem );
printf( "Max mem pitch: %ld\n", prop.memPitch );
fprintf(deviceInfoOutput, "Max mem pitch: %ld\n", prop.memPitch );
printf( "Texture Alignment: %ld\n", prop.textureAlignment );
fprintf(deviceInfoOutput, "Texture Alignment: %ld\n", prop.textureAlignment );
printf( "--- MP Information for device %d ---\n", i );
fprintf(deviceInfoOutput, "--- MP Information for device %d ---\n", i );
printf( "Multiprocessor count: %d\n", prop.multiProcessorCount );
fprintf(deviceInfoOutput, "Multiprocessor count: %d\n", prop.multiProcessorCount );
printf( "Shared mem per mp: %ld\n", prop.sharedMemPerBlock );
fprintf(deviceInfoOutput, "Shared mem per mp: %ld\n", prop.sharedMemPerBlock );
printf( "Registers per mp: %d\n", prop.regsPerBlock );
fprintf(deviceInfoOutput, "Registers per mp: %d\n", prop.regsPerBlock );
printf( "Threads in warp: %d\n", prop.warpSize );
fprintf(deviceInfoOutput, "Threads in warp: %d\n", prop.warpSize );
printf( "Max threads per block: %d\n", prop.maxThreadsPerBlock );
fprintf(deviceInfoOutput, "Max threads per block: %d\n", prop.maxThreadsPerBlock );
printf( "Max thread dimensions: (%d, %d, %d)\n",prop.maxThreadsDim[0], prop.maxThreadsDim[1],prop.maxThreadsDim[2] );
fprintf(deviceInfoOutput, "Max thread dimensions: (%d, %d, %d)\n",prop.maxThreadsDim[0], prop.maxThreadsDim[1],prop.maxThreadsDim[2] );
printf( "Max grid dimensions: (%d, %d, %d)\n", prop.maxGridSize[0], prop.maxGridSize[1],prop.maxGridSize[2] );
fprintf(deviceInfoOutput, "Max grid dimensions: (%d, %d, %d)\n", prop.maxGridSize[0], prop.maxGridSize[1],prop.maxGridSize[2]);
printf( "\n" );
fprintf(deviceInfoOutput, "\n" );
}
fclose(deviceInfoOutput);
return 0;
}
|
12,355 | #include "includes.h"
//#define DEPTH 2
// dp - cost aggregation array
// cost_image - m x n x D array
// d - use every d channels of input to conserve register memory
// m - image rows
// n - image columns
// D - depth
// depth_stride - pitch along depth dimension
// row_stride - pitch along row dimension
// Per-disparity SGM-style aggregation term for one pixel/disparity `d`.
// Loads the previous row's cost at the same disparity (*d_zero) and at the
// neighbouring disparities +/-1 (*d_one/*d_two, penalized by P_one; a large
// sentinel at the depth boundaries), and combines them with *d_three, which
// the caller precomputes as (previous minimum + P_two) and which is only
// read here. `D` is an external compile-time macro (from includes.h —
// TODO confirm), and `depth_dim_size` is the pitch between disparity slices.
__device__ float dp_criteria(float *dp, int ind, int depth_dim_size, int d, float P_one, float P_two, float * d_zero, float * d_one, float * d_two, float * d_three){
*d_zero = dp[ind];
if (d > 0)
*d_one = dp[ind - depth_dim_size] + P_one;
else
*d_one = 10000000;
if (d < D-1)
*d_two = dp[ind + depth_dim_size] + P_one;
else
*d_two = 10000000;
// min over the four candidates, normalized by subtracting *d_three and
// re-adding the P_two penalty.
return fminf(fminf(*d_zero, *d_one), fminf(*d_two, *d_three)) - *d_three + P_two;
}
// Top-to-bottom cost aggregation: for every column, sweep rows downward,
// accumulating into dp using dp_criteria against the previous row.
// D, D_STEP, P1, P2 are external compile-time macros (presumably from
// includes.h — TODO confirm their values).
// NOTE(review): the column stride at the bottom is `blockDim.x`, not
// blockDim.x * gridDim.x, so with gridDim.x > 1 different blocks revisit
// the same columns (redundant work / write races). Confirm this kernel is
// only ever launched with a single block in x.
__global__ void __vertical_aggregate_down(float *dp, float *cost_image, int m, int n)
{
// which column of array to work on
int col = blockDim.x * blockIdx.x + threadIdx.x;
int depth_dim_size = m*n;
// todo: maybe it will work better to take running average of every d
// slices
while(col < n)
{
for (int row = 1; row < m; row++)
{
//int arr_ind = 0;
float prev_min = 100000000.0;
int ind = (row - 1) * n + col;
// calculate min cost disparity for this column from row-1
//#pragma unroll
for (int depth = 0; depth < D; depth+=D_STEP){
prev_min = fminf(dp[ind], prev_min);
ind += (depth_dim_size * D_STEP);
//arr[arr_ind] = cost_image[depth * m * n + (row - 1) * n + col];
//arr_ind++;
}
// float prev_min = arr_min(arr, D_SIZE);
float d0 = 0;
float d1 = 0;
float d2 = 0;
// d3 carries (previous minimum + P2); dp_criteria only reads it.
float d3 = prev_min + (float) P2;
ind = (row - 1) * n + col;
int current_ind = row * n + col;
// todo: try having this loop go from 1 to d-1 and removing the if else
for (int d = 0; d < D; d+=D_STEP){
// for each d I need dp[{d-1, d, d+1}, row-1, col],
dp[current_ind] += cost_image[current_ind] + dp_criteria(dp, ind, depth_dim_size, d, (float) P1, (float) P2, &d0, &d1, &d2, &d3);
ind += (depth_dim_size * D_STEP);
current_ind += (depth_dim_size * D_STEP);
}
}
col += blockDim.x;
}
} |
12,356 | #include <cstdlib>
#include <iostream>
#include <cuda.h>
#include <stdio.h>
// Fill the N x N matrix with its own linear index: storage_d[k] = (float)k.
// Assumes a 2-D launch where blockDim.x * gridDim.x equals the matrix width.
__global__ void gInitializeStorage(float* storage_d){
    const int col = threadIdx.x + blockIdx.x * blockDim.x;
    const int row = threadIdx.y + blockIdx.y * blockDim.y;
    const int width = blockDim.x * gridDim.x;
    const int linear = col + row * width;
    storage_d[linear] = (float)linear;
}
// Naive transpose straight through global memory:
// storage_d_t[row][col] = storage_d[col][row]. Reads are contiguous in
// threadIdx.x; writes stride by the matrix width.
__global__ void gTranspose0(float* storage_d, float* storage_d_t){
    const int col = threadIdx.x + blockIdx.x * blockDim.x;
    const int row = threadIdx.y + blockIdx.y * blockDim.y;
    const int width = blockDim.x * gridDim.x;
    storage_d_t[row + col * width] = storage_d[col + row * width];
}
// Tiled transpose using dynamically sized shared memory (the launch must
// pass blockDim.x * blockDim.y * sizeof(float) as the third config arg).
// Each block stages its tile transposed into `buffer`, then writes it to
// the mirrored block position (blockIdx.x/y swapped) so global writes stay
// contiguous. Assumes a square launch (blockDim.x == blockDim.y,
// gridDim.x == gridDim.y) — the index remapping relies on it.
__global__ void gTranspose11(float* storage_d,float* storage_d_t){
extern __shared__ float buffer[];
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int N = blockDim.x * gridDim.x;
// store transposed within the tile
buffer[threadIdx.y + threadIdx.x * blockDim.y] = storage_d[i + j * N];
// all threads must finish writing the tile before anyone reads it
__syncthreads();
// swap block coordinates: this block's tile lands at the mirrored position
i = threadIdx.x + blockIdx.y * blockDim.x;
j = threadIdx.y + blockIdx.x * blockDim.y;
storage_d_t[i + j * N] = buffer[threadIdx.x + threadIdx.y * blockDim.x];
}
#define SH_DIM 32
// Tiled transpose with a statically sized SH_DIM x SH_DIM shared tile.
// NOTE(review): requires blockDim.x, blockDim.y <= SH_DIM (32); main()
// accepts larger thread dimensions, which would index past the tile.
// The unpadded tile means the transposed read below hits one shared-memory
// column — compare gTranspose2, which pads the row to avoid that.
__global__ void gTranspose12(float* storage_d,float* storage_d_t){
__shared__ float buffer_s[SH_DIM][SH_DIM];
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int N = blockDim.x * gridDim.x;
// stage the tile transposed
buffer_s[threadIdx.y][threadIdx.x] = storage_d[i + j * N];
// barrier: tile must be fully written before the transposed read
__syncthreads();
// write the tile to the mirrored block position
i = threadIdx.x + blockIdx.y * blockDim.x;
j = threadIdx.y + blockIdx.x * blockDim.y;
storage_d_t[i + j * N] = buffer_s[threadIdx.x][threadIdx.y];
}
// Tiled transpose like gTranspose12, but the shared tile row is padded by
// one element (SH_DIM + 1) so column-wise reads of the tile do not all map
// to the same shared-memory bank. Same blockDim <= SH_DIM requirement.
__global__ void gTranspose2(float* storage_d,float* storage_d_t){
__shared__ float buffer[SH_DIM][SH_DIM + 1];
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int N = blockDim.x * gridDim.x;
buffer[threadIdx.y][threadIdx.x] = storage_d[i + j * N];
// barrier: tile must be fully written before the transposed read
__syncthreads();
// write the tile to the mirrored block position
i = threadIdx.x + blockIdx.y * blockDim.x;
j = threadIdx.y + blockIdx.x * blockDim.y;
storage_d_t[i + j * N] = buffer[threadIdx.x][threadIdx.y];
}
// Print an N x N row-major matrix to stdout, tab-separated, one row per
// line, followed by three blank lines as a record separator.
void Output(float* a, int N){
    for(int row = 0; row < N; ++row){
        for(int col = 0; col < N; ++col){
            fprintf(stdout, "%g\t", a[col + row * N]);
        }
        fprintf(stdout, "\n");
    }
    fprintf(stdout,"\n\n\n");
}
// Parse <N> <threads-per-dim>, initialize an N x N matrix on the device,
// then run and print each of the four transpose variants.
int main(int argc, char* argv[]){
if(argc<3){
fprintf(stderr, "USAGE: matrix <dimension of matrix> <dimension_of_threads>\n");
return -1;
}
int N = atoi(argv[1]);
int dim_of_threads = atoi(argv[2]);
// Reject non-positive / non-numeric input as well as non-divisible sizes
// (atoi returns 0 for garbage, which previously passed the modulo test).
if(N <= 0 || dim_of_threads <= 0 || N % dim_of_threads){
fprintf(stderr, "change dimensions\n");
return -1;
}
int dim_of_blocks = N / dim_of_threads;
const int max_size = 1 << 8;
if(dim_of_blocks > max_size){
fprintf(stderr, "too many blocks\n");
return -1;
}
float *storage_d, *storage_d_t, *storage_h;
cudaMalloc((void**)&storage_d, N * N * sizeof(float));
cudaMalloc((void**)&storage_d_t, N * N * sizeof(float));
storage_h = (float*)calloc(N * N, sizeof(float));
if(storage_h == NULL){
// calloc failure would make every memcpy/Output below undefined behavior
fprintf(stderr, "host allocation failed\n");
return -1;
}
gInitializeStorage <<< dim3(dim_of_blocks, dim_of_blocks), dim3(dim_of_threads, dim_of_threads) >>> (storage_d);
cudaDeviceSynchronize();
// memset takes an int fill byte; the original passed the double literal 0.0
memset(storage_h, 0, N * N * sizeof(float));
cudaMemcpy(storage_h, storage_d, N * N * sizeof(float), cudaMemcpyDeviceToHost);
Output(storage_h, N);
gTranspose0 <<< dim3(dim_of_blocks, dim_of_blocks), dim3(dim_of_threads, dim_of_threads) >>> (storage_d, storage_d_t);
cudaDeviceSynchronize();
memset(storage_h, 0, N * N * sizeof(float));
cudaMemcpy(storage_h, storage_d_t, N * N * sizeof(float), cudaMemcpyDeviceToHost);
Output(storage_h, N);
// gTranspose11 uses dynamic shared memory: one float per thread in the block
gTranspose11 <<< dim3(dim_of_blocks, dim_of_blocks), dim3(dim_of_threads, dim_of_threads), dim_of_threads * dim_of_threads * sizeof(float) >>> (storage_d, storage_d_t);
cudaDeviceSynchronize();
memset(storage_h, 0, N * N * sizeof(float));
cudaMemcpy(storage_h, storage_d_t, N * N * sizeof(float), cudaMemcpyDeviceToHost);
Output(storage_h, N);
gTranspose12 <<< dim3(dim_of_blocks, dim_of_blocks), dim3(dim_of_threads, dim_of_threads) >>> (storage_d, storage_d_t);
cudaDeviceSynchronize();
memset(storage_h, 0, N * N * sizeof(float));
cudaMemcpy(storage_h, storage_d_t, N * N * sizeof(float), cudaMemcpyDeviceToHost);
Output(storage_h, N);
gTranspose2 <<< dim3(dim_of_blocks, dim_of_blocks), dim3(dim_of_threads, dim_of_threads) >>> (storage_d, storage_d_t);
cudaDeviceSynchronize();
memset(storage_h, 0, N * N * sizeof(float));
cudaMemcpy(storage_h, storage_d_t, N * N * sizeof(float), cudaMemcpyDeviceToHost);
Output(storage_h, N);
cudaFree(storage_d);
cudaFree(storage_d_t);
free(storage_h);
return 0;
}
|
12,357 |
extern "C"
//must be same as threads!!!
//Block_Size = blockDim.x
#define Block_Size 64
#define m 0.001/2000
#define c 10
#define rho0 1
#define p0 1
#define gamma 7
#define PI 3.14159265359f
//__device__ float calc_p(float rho){
// return c*c*rho0*(powf(rho/rho0,gamma) -1)/gamma+p0;
//}
// SPH acceleration kernel: one 3-D grid cell per block. `ind` holds
// [start,end) particle ranges per cell (2 ints per cell, laid out over the
// 3-D grid). Each thread accumulates the pressure/viscosity contribution
// dV[3] for one particle `id` by tiling neighbour-cell particles through
// shared memory, then writes the acceleration plus wall/gravity terms.
// Requires blockDim.x == Block_Size (the shared tiles are sized by it).
__global__ void ker_dv(float *out, const float *x, const float *v, const float *rho, const int *ind, const float h)
{
//int IND = gridDim.z * gridDim.y * blockIdx.x + gridDim.z * blockIdx.y + blockIdx.z
int istart = ind[2 * gridDim.z * gridDim.y * blockIdx.x + 2 * gridDim.z * blockIdx.y + 2 * blockIdx.z + 0];
int iend = ind[2 * gridDim.z * gridDim.y * blockIdx.x + 2 * gridDim.z * blockIdx.y + 2 * blockIdx.z + 1];
for (int i = istart; i < iend; i += Block_Size)
{
int id = i + threadIdx.x;
float xi[3];
float vi[3];
float rhoi;
if (id < iend)
{
xi[0] = x[3 * id + 0];
xi[1] = x[3 * id + 1];
xi[2] = x[3 * id + 2];
vi[0] = v[3 * id + 0];
vi[1] = v[3 * id + 1];
vi[2] = v[3 * id + 2];
rhoi = rho[id];
}
float dx[3];
float dv[3];
float r2;
float r;
float dW;
//float pi = calc_p(rhoi);
// Equation-of-state pressure for particle i.
// NOTE(review): threads with id >= iend never load rhoi, so this powf
// runs on an uninitialized value; the result is discarded (the final
// store is guarded), but confirm it cannot produce a trap/NaN hazard.
float pi = c*c*rho0*(powf(rhoi/rho0,gamma) -1)/gamma+p0;
float pj;
float dV[3] = {0,0,0};
// Shared-memory tiles of neighbour particles, refilled per j-chunk.
__shared__ float xj[Block_Size * 3];
__shared__ float vj[Block_Size * 3];
__shared__ float rhoj[Block_Size];
// Visit the 3x3 column of neighbouring cells in x/y; z is clamped below.
for (int a = -1; a < 2; a++)
{
for (int b = -1; b < 2; b++)
{
if ((int)blockIdx.x + a < 0 || (int)blockIdx.x + a >= (int)gridDim.x || (int)blockIdx.y + b < 0 || (int)blockIdx.y + b >= (int)gridDim.y)
{
continue;
}
int Zstart = max((int)blockIdx.z - 1, 0);
int Zend = min((int)blockIdx.z + 1, (int)gridDim.z - 1);
int jstart = ind[2 * gridDim.z * gridDim.y * (blockIdx.x+a) + 2 * gridDim.z * (blockIdx.y+b) + 2 * Zstart + 0];
int jend = ind[2 * gridDim.z * gridDim.y * (blockIdx.x+a) + 2 * gridDim.z * (blockIdx.y+b) + 2 * Zend + 1];
for (int j = jstart; j < jend; j += Block_Size)
{
int jd = j + threadIdx.x;
if (jd < jend)
{
xj[3 * threadIdx.x + 0] = x[3 * jd + 0];
xj[3 * threadIdx.x + 1] = x[3 * jd + 1];
xj[3 * threadIdx.x + 2] = x[3 * jd + 2];
vj[3 * threadIdx.x + 0] = v[3 * jd + 0];
vj[3 * threadIdx.x + 1] = v[3 * jd + 1];
vj[3 * threadIdx.x + 2] = v[3 * jd + 2];
rhoj[threadIdx.x] = rho[jd];
}
// Barrier reached by ALL threads (the guards above/below only wrap
// the shared-memory accesses, not the barrier itself).
__syncthreads();
if (id < iend)
{
for (int k = 0; k < Block_Size; k++)
{
if (j + k < jend)
{
dx[0] = xj[3 * k + 0] - xi[0];
dx[1] = xj[3 * k + 1] - xi[1];
dx[2] = xj[3 * k + 2] - xi[2];
dv[0] = vj[3 * k + 0] - vi[0];
dv[1] = vj[3 * k + 1] - vi[1];
dv[2] = vj[3 * k + 2] - vi[2];
// squared distance in units of the smoothing length h
r2 = (dx[0] * dx[0] + dx[1] * dx[1] + dx[2] * dx[2]) / (h * h);
if (r2 < 1.0)
{
// softened distance to avoid division blow-up at r -> 0
r = sqrtf(r2+0.001*h*h);
dW = (1.0 - r);
dW *= dW*dW; //(1-r)^3
dW = -dW*r*20;
dW *= 21.0 / (2.0 * PI * h * h * h * h);
//pj = calc_p(rhoj[k]);
pj = c*c*rho0*(powf(rhoj[k]/rho0,gamma) -1)/gamma+p0;
// symmetric pressure term plus a viscosity-like velocity term
float d = ( pi/(rhoi*rhoi) + pj/(rhoj[k]*rhoj[k]) );
d -= 2*(dv[0]*dx[0]+dv[1]*dx[1]+dv[2]*dx[2])/((r2+0.001*h*h)*rhoj[k]*rhoi);
d *= m*dW/r;
dV[0] += d*dx[0];
dV[1] += d*dx[1];
dV[2] += d*dx[2];
}
}
}
}
// Barrier before the next chunk overwrites the shared tiles.
__syncthreads();
}
//ivol = 2 * gridDim.z * gridDim.y * blockIdx.x + 2 * gridDim.z * blockIdx.y + 2 * Zend;
}
}
if (id < iend)
{
// Add penalty-force walls at the domain boundaries and gravity in z.
out[3*id+0] = dV[0] + 10*c*c*fmaxf(0.0,-xi[0])/h - 10*c*c*fmaxf(0.0,xi[0]-2)/h;
out[3*id+1] = dV[1] + 10*c*c*fmaxf(0.0,-xi[1])/h - 10*c*c*fmaxf(0.0,xi[1]-1)/h;
out[3*id+2] = dV[2] + 10*c*c*fmaxf(0.0,-xi[2])/h - 1;
}
}
}
|
12,358 | /**********************************************************************
* DESCRIPTION:
* Serial Concurrent Wave Equation - C Version
* This program implements the concurrent wave equation
*********************************************************************/
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <ctime>
#include <iostream>
#include <iomanip>
#define MAXPOINTS 1000000
#define MAXSTEPS 1000000
#define MINPOINTS 20
#define PI 3.14159265
#define THREADPERWARP 32
#define SMNUM 80
void check_param(void);
void init_line(void);
void update (void);
void printfinal (void);
int nsteps, /* number of time steps */
tpoints, /* total points along string */
rcode; /* generic return code */
float values[MAXPOINTS]; /* values at time t */
float *cudaValues;
int cudaArraySize;
/**********************************************************************
* Checks input values from parameters
*********************************************************************/
void check_param(void)
{
char tchar[20];
/* check number of points, number of iterations; keep prompting until the
   globals tpoints/nsteps are within range */
while ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)) {
printf("Enter number of points along vibrating string [%d-%d]: "
,MINPOINTS, MAXPOINTS);
/* BUGFIX: "%19s" bounds the read to the 20-byte buffer (19 chars + NUL);
   the unbounded "%s" could overflow tchar. Bail out on EOF/read failure
   instead of spinning forever on stale input. */
if (scanf("%19s", tchar) != 1)
exit(1);
tpoints = atoi(tchar);
if ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS))
printf("Invalid. Please enter value between %d and %d\n",
MINPOINTS, MAXPOINTS);
}
while ((nsteps < 1) || (nsteps > MAXSTEPS)) {
printf("Enter number of time steps [1-%d]: ", MAXSTEPS);
if (scanf("%19s", tchar) != 1)
exit(1);
nsteps = atoi(tchar);
if ((nsteps < 1) || (nsteps > MAXSTEPS))
printf("Invalid. Please enter value between 1 and %d\n", MAXSTEPS);
}
printf("Using points = %d, steps = %d\n", tpoints, nsteps);
}
/**********************************************************************
* Initialize points on line
*********************************************************************/
void init_line(void)
{
int j;
float x, tmp;
/* Calculate initial values based on one full sine period over the string
   (fac is 2*PI written out; `i` was declared but never used and is gone) */
//float fac = 2.0 * PI;
float fac = 6.2831853;
tmp = tpoints - 1;
for (j = 0; j < tpoints; ++j) {
x = static_cast<float>(j)/tmp;
values[j] = sin (fac * x);
}
}
/**********************************************************************
* Print final results
*********************************************************************/
void printfinal()
{
    /* The left endpoint is pinned, so print a literal zero for it, then the
       remaining string values, ten per output line. */
    printf("0.0000 ");
    for (int idx = 1; idx < tpoints; ++idx) {
        printf("%6.4f ", values[idx]);
        if (idx % 10 == 9)
            printf("\n");
    }
}
/* GPU wave update: each thread owns one string point and advances it
   tIteration time steps in registers. `howMany` strided passes of
   SMNUM*THREADPERWARP threads cover all `tpoints` points. */
__global__ void cudaExecute(float* cudaAns,int howMany,int tpoints,int tIteration)
{
float cudaValues,cudaOld,cudaNew;
double cudaTwiceValue;
// 2 - 2*0.09, hoisted out of both loops (0.09 is presumably tau^2 from the
// original finite-difference scheme — confirm against the serial version)
double valuePar = 2.0 - static_cast<float>(0.09) * 2.0;
for(int block=0;block<howMany;++block)
{
int ansIndex=block*(SMNUM*THREADPERWARP) + blockIdx.x*blockDim.x + threadIdx.x;
// BUGFIX: the caller rounds howMany up, so the final pass can index past
// the tpoints-element allocation. Guard with the previously unused
// `tpoints` parameter to prevent out-of-bounds reads/writes.
if (ansIndex >= tpoints)
continue;
cudaValues = cudaAns[ansIndex];
cudaOld = cudaValues;
for(int iter=0;iter<tIteration;++iter)
{
// new = valuePar*current - old  (leapfrog update)
cudaTwiceValue = valuePar * cudaValues;
cudaNew = (cudaTwiceValue) - cudaOld;
cudaOld=cudaValues;
cudaValues=cudaNew;
}
cudaAns[ansIndex]=cudaValues;
}
}
/**********************************************************************
* Main program
*********************************************************************/
int main(int argc, char *argv[])
{
// Guard the two required arguments; the original dereferenced argv[1]
// and argv[2] unconditionally.
if (argc < 3) {
fprintf(stderr, "usage: %s <points> <steps>\n", argv[0]);
return 1;
}
sscanf(argv[1],"%d",&tpoints);
sscanf(argv[2],"%d",&nsteps);
check_param();
// round the pass count up so a partial final pass still covers the tail
int howManyBlock=tpoints/(SMNUM*THREADPERWARP);
if(tpoints%(SMNUM*THREADPERWARP)!=0)
++howManyBlock;
cudaArraySize = tpoints*sizeof(float);
printf("Initializing points on the line...\n");
cudaMalloc((void**)&cudaValues,cudaArraySize);
init_line();
cudaMemcpy(cudaValues,values,cudaArraySize,cudaMemcpyHostToDevice);
cudaExecute<<<SMNUM,THREADPERWARP>>>(cudaValues,howManyBlock,tpoints,nsteps);
// NOTE(review): copies cudaArraySize-4 bytes, i.e. everything except the
// last element — presumably to keep the right endpoint at its initial
// value. Preserved as-is; confirm against the boundary conditions.
cudaMemcpy(values,cudaValues,cudaArraySize-4,cudaMemcpyDeviceToHost);
cudaFree(cudaValues); // release the device buffer (previously leaked)
printf("Updating all points for all time steps...\n");
printf("Printing final results...\n");
printfinal();
printf("\nDone.\n\n");
return 0;
}
|
12,359 | // For the CUDA runtime routines (prefixed with "cuda_")
// #include <cuda.h>
#include <cuda_runtime.h>
namespace
{
// Element-wise vector add: C[i] = A[i] + B[i] for i in [0, n).
__global__ void _vadd(const float *A, const float *B, float *C, int n)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n)
        return;  // grid may be rounded up past n
    C[idx] = A[idx] + B[idx];
}
}
// C-linkage host wrapper: launches _vadd over n elements using `threads`
// threads per block (grid size rounded up to cover n).
// BUGFIX: n == 0 (or a bad threads value) previously produced a zero-block
// launch, which is an invalid-configuration CUDA error; degenerate inputs
// are now a no-op instead of poisoning the error state.
extern "C" void vadd(const float *A, const float *B, float *C, int n, int threads)
{
    if (n <= 0 || threads <= 0)
        return;
    const int blocks = (n + threads - 1) / threads;
    _vadd<<<blocks, threads>>>(A, B, C, n);
}
|
12,360 | // includes
#include <stdio.h>
#include <stdlib.h>
//matriz de vecinos
/* Release a neighbour array previously allocated with malloc().
   free(NULL) is a no-op, so a NULL argument is safe. */
void liberaVecinos(int *vec) {
    free(vec);
}
|
12,361 | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#define BLOCK_DIM 128
const int size_x = 128;
const int size_y = 1;
// Demo kernel: prints its block/thread coordinates, then records each
// thread's (x, y) index into `ret`, laid out as two size_x*size_y planes
// (x-indices first, then y-indices). Device printf is for demonstration
// only; output ordering is unspecified.
__global__ static void threadDemo(unsigned int * ret)
{
unsigned int xIndex = blockDim.x *blockIdx.x + threadIdx.x;
unsigned int yIndex = blockDim.y * blockIdx.y + threadIdx.y;
printf("block Dim (%d, %d) \n", blockDim.x, blockDim.y);
printf("block id (%d, %d)\n", blockIdx.x, blockIdx.y);
printf("thread id (%d, %d) \n", threadIdx.x, threadIdx.y);
// bounds guard against launches larger than the size_x x size_y domain
if (xIndex < size_x && yIndex < size_y) {
unsigned int index = xIndex + size_x * yIndex;
ret[index] = xIndex;
ret[index + size_x * size_y] = yIndex;
}
}
// Host driver: runs threadDemo over a size_x x size_y domain and prints the
// recovered (x, y) pairs. The blocking cudaMemcpy doubles as the
// synchronization point for the kernel.
// NOTE(review): no CUDA calls are error-checked; failures would surface as
// all-zero output.
void ThreadDemo(void)
{
unsigned int * ret = 0;
// host copy: two planes (x-indices, then y-indices)
unsigned int host_ret[size_x*size_y*2] = {0};
int i = 0;
cudaMalloc((void**) &ret, sizeof(unsigned int)*(size_x*size_y*2));
dim3 grid(size_x / BLOCK_DIM, 1);
dim3 block(BLOCK_DIM, 1, 1);
threadDemo<<<grid,block>>>(ret);
cudaMemcpy(&host_ret, ret, sizeof(unsigned int)*(size_x*size_y*2), cudaMemcpyDeviceToHost);
for (i = 0; i < size_x*size_y; i++) {
printf("(%u,%u)", host_ret[i], host_ret[size_x*size_y+i]);
}
cudaFree(ret);
} |
12,362 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated compiler-fuzzing kernel (see file header: "Do not modify").
// The arbitrary float-expression structure is intentional; only comments
// have been added here. Prints the accumulated value from the device.
__global__
void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float* var_10,float var_11,float var_12,float* var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28,float var_29) {
if (comp == -1.8217E-35f - +1.8380E35f) {
float tmp_1 = (-1.3938E34f * (+1.3584E36f + (var_3 - var_4)));
float tmp_2 = (var_5 - var_6 + var_7 - var_8 - var_9 + +1.7052E-43f);
float tmp_3 = -1.4184E-37f;
comp += tmp_3 + tmp_2 - tmp_1 - -1.3002E35f + (-1.0984E-35f * -0.0f / -1.0064E-42f + -0.0f);
for (int i=0; i < var_1; ++i) {
var_10[i] = +1.6111E-43f;
comp += var_10[i] / (-1.2321E4f / var_11);
comp += (+1.1547E-42f - var_12 + logf(-1.3089E-35f + +1.2171E-37f));
}
for (int i=0; i < var_2; ++i) {
comp += (-1.6667E-3f - atan2f((-1.9244E3f + (+1.1787E-36f + var_14)), -0.0f));
var_13[i] = -1.0145E-37f;
comp += var_13[i] / var_15 + var_16 - +1.7152E-36f / coshf(expf(-0.0f / +1.8396E-36f * fabsf((var_17 / atan2f(-1.1384E-35f * (var_18 - (var_19 * -1.5425E34f)), +1.6398E-42f)))));
}
if (comp <= (+0.0f / (var_20 + (+1.5149E-37f - (var_21 - var_22))))) {
comp += (var_23 + var_24 + +1.9793E-29f - -1.6197E-36f + -1.5127E35f + var_25);
comp = var_26 / -1.2263E4f;
comp += (var_27 + (var_28 + var_29));
}
}
printf("%.17g\n", comp);
}
/* Allocate a float array (10 elements by default; `count` is overridable
   and defaults preserve the original call sites) with every slot set to v.
   Returns NULL if the allocation fails — the original dereferenced the
   unchecked malloc() result. Caller owns the buffer and must free() it. */
float* initPointer(float v, int count = 10) {
float *ret = (float*) malloc(sizeof(float) * count);
if (ret == NULL)
return NULL;
for(int i = 0; i < count; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
// The kernel consumes exactly 30 command-line values; reading argv[1..30]
// without this guard is undefined behavior when fewer are supplied.
if (argc < 31) {
fprintf(stderr, "usage: %s <30 numeric arguments>\n", argv[0]);
return 1;
}
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float* tmp_11 = initPointer( atof(argv[11]) );
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float* tmp_14 = initPointer( atof(argv[14]) );
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
float tmp_24 = atof(argv[24]);
float tmp_25 = atof(argv[25]);
float tmp_26 = atof(argv[26]);
float tmp_27 = atof(argv[27]);
float tmp_28 = atof(argv[28]);
float tmp_29 = atof(argv[29]);
float tmp_30 = atof(argv[30]);
// NOTE(review): tmp_11 and tmp_14 are host malloc() buffers handed to a
// __global__ kernel; dereferencing them on the device is invalid unless
// the platform maps pageable host memory. Flagged rather than changed,
// since this harness is auto-generated.
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29,tmp_30);
cudaDeviceSynchronize();
// release the host buffers (previously leaked)
free(tmp_11);
free(tmp_14);
return 0;
}
}
|
12,363 | #include "includes.h"
/*
Ball Query with BatchIdx
Written by Li Jiang
All Rights Reserved 2020.
*/
/* ================================== ballquery_batch_p ================================== */
// Ball query over batched point clouds: one thread per query point pt_idx.
// Scans the points of pt_idx's own batch (via batch_offsets) and collects
// up to 1000 neighbour indices within `radius`, then appends them to the
// shared output `idx` at an offset reserved atomically through `cumsum`.
// start_len[2*pt_idx] = output offset, start_len[2*pt_idx+1] = count.
// Total output is capped at n * meanActive entries.
// NOTE(review): idx_temp[1000] is a large per-thread local array, which
// will live in local (spilled) memory — expected for this pattern, but a
// throughput cost to be aware of.
__global__ void ballquery_batch_p_cuda_(int n, int meanActive, float radius, const float *xyz, const int *batch_idxs, const int *batch_offsets, int *idx, int *start_len, int *cumsum) {
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (pt_idx >= n) return;
// advance to this point's (offset, count) pair
start_len += (pt_idx * 2);
int idx_temp[1000];
float radius2 = radius * radius;
float o_x = xyz[pt_idx * 3 + 0];
float o_y = xyz[pt_idx * 3 + 1];
float o_z = xyz[pt_idx * 3 + 2];
// restrict the scan to the query point's own batch
int batch_idx = batch_idxs[pt_idx];
int start = batch_offsets[batch_idx];
int end = batch_offsets[batch_idx + 1];
int cnt = 0;
for(int k = start; k < end; k++){
float x = xyz[k * 3 + 0];
float y = xyz[k * 3 + 1];
float z = xyz[k * 3 + 2];
float d2 = (o_x - x) * (o_x - x) + (o_y - y) * (o_y - y) + (o_z - z) * (o_z - z);
if(d2 < radius2){
if(cnt < 1000){
idx_temp[cnt] = k;
}
else{
// local buffer full: stop scanning (cnt stays at 1000)
break;
}
++cnt;
}
}
// reserve cnt slots in the global output and record this point's range
start_len[0] = atomicAdd(cumsum, cnt);
start_len[1] = cnt;
int thre = n * meanActive;
if(start_len[0] >= thre) return;
idx += start_len[0];
// clip the copy so the global output never exceeds the cap
if(start_len[0] + cnt >= thre) cnt = thre - start_len[0];
for(int k = 0; k < cnt; k++){
idx[k] = idx_temp[k];
}
} |
12,364 | #include "includes.h"
// Serial vector add executed on the device: C[i] = A[i] + B[i].
// NOTE(review): the loop ignores thread/block indices, so EVERY launched
// thread runs the full loop and writes the whole of C (the writes are
// identical, so results are consistent, but all parallelism is wasted).
// Presumably intended for a <<<1,1>>> launch — confirm at the call site.
__global__ void VecAdd(float* A, float* B, float* C, int N)
{
for (int i=0; i<N; i++) {
C[i] = A[i] + B[i];
}
} |
12,365 | //#define CATCH_CONFIG_MAIN // This tells Catch to provide a main() - only do this in one cpp file
#define CATCH_CONFIG_RUNNER
#include "catch.hpp"
#include <stdio.h>
#include <stdlib.h>
#include "tools/macros.cuh"
/* struct OtherOpt { */
/* OtherOpt() : deviceNumber(0), showHelp(false) {} */
/* std::string processName; */
/* int deviceNumber; */
/* bool showHelp; */
/* void setValidDeviceNumber( int i ) { */
/* int deviceCount = 0; */
/* cudaGetDeviceCount(&deviceCount); */
/* if( i < 0 || i > deviceCount ) { */
/* Catch::cout()<<"The device number is incorrect, please set valid cuda device number\n"; */
/* exit(0); */
/* } */
/* deviceNumber = i; */
/* cudaSetDevice(deviceNumber); */
/* cudaDeviceProp deviceProp; */
/* cudaGetDeviceProperties(&deviceProp, deviceNumber); */
/* Catch::cout() <<"Device "<< deviceNumber <<": "<<deviceProp.name<<"\n"; */
/* } */
/* }; */
// Validate the requested CUDA device ordinal, select it, and (in debug
// mode) report its name. Exits the process on an invalid ordinal.
void setValidDeviceNumber( int i ) {
int deviceCount = 0;
cudaGetDeviceCount(&deviceCount);
// BUGFIX: ordinals are zero-based, so i == deviceCount is also out of
// range; the original `i > deviceCount` accepted it and the subsequent
// cudaSetDevice would fail.
if( i < 0 || i >= deviceCount ) {
Catch::cout()<<"The device number is incorrect, please set valid cuda device number\n";
exit(0);
}
int deviceNumber = i;
cudaSetDevice(deviceNumber);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, deviceNumber);
if (if_debug()) {
Catch::cout() <<"Device "<< deviceNumber <<": "<<deviceProp.name<<"\n";
}
}
int main( int argc, char* const argv[] )
{
    // Exactly one Catch session per process; defaults could be set via
    // session.configData() before parsing the command line.
    Catch::Session session;

    // Let Catch parse its own arguments first; bail out on parse errors.
    const int cliResult = session.applyCommandLine( argc, argv );
    if( cliResult != 0 )
        return cliResult;

    // Optional GPU_DEVICE environment variable selects the CUDA device
    // (defaults to device 0 when unset); negative values skip selection.
    int deviceId = 0;
    char* envDevice = getenv( "GPU_DEVICE" );
    if( envDevice != NULL )
        deviceId = atoi( envDevice );
    if( deviceId >= 0 )
        setValidDeviceNumber( deviceId );

    return session.run();
}
|
12,366 | #include "includes.h"
#define BLOCK_SIZE 100
#define GRID_SIZE 100
#define N GRID_SIZE * BLOCK_SIZE
// Element-wise vector sum: C[i] = A[i] + B[i], one thread per element.
// No bounds check - the launch configuration must cover exactly N elements.
__global__ void VectorAdd (int *A, int *B, int *C) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    C[idx] = A[idx] + B[idx];
}
12,367 | /*******************************************************************************
This program uses streams and events to asynchronously perform arithmetic
functions on the GPU. Comparison of timing is shown as output.
Author: Said Darham
*******************************************************************************/
#include <iostream>
#include <stdlib.h> //srand and rand
#include <math.h>
// Test types
#define THREAD 1
#define STREAM 2
//Timer struct declaration. Using CUDA EVENTS
typedef struct timer{
cudaEvent_t startEvent;
cudaEvent_t stopEvent;
float time_ms;
} timerEvent;
/*******************************************************************************
PROFILER FUNCTIONS USING EVENTS
*******************************************************************************/
/* startEventTimer()
 * Creates the start/stop CUDA events of the given timer and records the
 * start event on the default stream.
 */
void startEventTimer(timerEvent *t){
    cudaEventCreate(&t->startEvent);
    cudaEventCreate(&t->stopEvent);
    cudaEventRecord(t->startEvent);
}
/* stopEventTimer()
 * Records the stop event, waits for it to complete, and stores the elapsed
 * milliseconds between start and stop in timer->time_ms.
 */
void stopEventTimer(timerEvent *t){
    cudaEventRecord(t->stopEvent);
    cudaEventSynchronize(t->stopEvent);
    cudaEventElapsedTime(&t->time_ms, t->startEvent, t->stopEvent);
}
/* freeEventTimer()
 * Destroys both CUDA events owned by the timer.
 */
void freeEventTimer(timerEvent *t){
    cudaEventDestroy(t->startEvent);
    cudaEventDestroy(t->stopEvent);
}
// Enumerates the CUDA devices: exits when none is found, selects device 0
// when exactly one is present, and prints each device's name and compute
// capability.
void checkDevices(void){
    cudaDeviceProp props;
    int count = 0;          // number of devices found
    const int defaultId = 0; // default device id
    cudaGetDeviceCount(&count);
    if (count == 0) {
        std::cout << "No GPU Device Found\n";
        exit(0);
    }
    if (count == 1)
        cudaSetDevice(defaultId); // single GPU: make it current
    std::cout << "Number Of Devices Found: " << count << std::endl;
    // Report the name and compute capability of every device.
    for (int dev = 0; dev < count; ++dev) {
        cudaGetDeviceProperties(&props, dev);
        std::cout << "Device " << dev << " Name: " << props.name << std::endl;
        std::cout << "Compute Capability: " << props.major << "." << props.minor << std::endl;
    }
}
/*******************************************************************************
ARITHMETIC KERNEL FUNCTIONS
*******************************************************************************/
// Add Function
// Element-wise addition: c[i] = a[i] + b[i] for i in [0, n).
__global__ void add(int *a, int *b, int *c, int n){
    // Flat global thread index
    int gid = blockIdx.x*blockDim.x + threadIdx.x;
    // Threads past the end of the vectors do nothing
    if (gid >= n)
        return;
    c[gid] = a[gid] + b[gid];
}
// subtract function
// Element-wise subtraction: c[i] = a[i] - b[i] for i in [0, n).
__global__ void subtract(int *a, int *b, int *c, int n){
    // Flat global thread index
    int gid = blockIdx.x*blockDim.x + threadIdx.x;
    // Threads past the end of the vectors do nothing
    if (gid >= n)
        return;
    c[gid] = a[gid] - b[gid];
}
// multiply function
// Element-wise multiplication: c[i] = a[i] * b[i] for i in [0, n).
__global__ void mult(int *a, int *b, int *c, int n){
    // Flat global thread index
    int gid = blockIdx.x*blockDim.x + threadIdx.x;
    // Threads past the end of the vectors do nothing
    if (gid >= n)
        return;
    c[gid] = a[gid] * b[gid];
}
// Moudulu function
// Element-wise modulo: c[i] = a[i] % b[i] for i in [0, n).
// Fix: guard against a zero divisor - the host fills b with rand() % 4, so
// zeros do occur, and integer remainder by zero is undefined behaviour on
// the device.  A zero divisor now yields 0.
__global__ void mod(int *a, int *b, int *c, int n){
    // Flat global thread index
    int id = blockIdx.x*blockDim.x + threadIdx.x;
    // Make sure we do not go out of bounds
    if (id < n)
        c[id] = (b[id] != 0) ? (a[id] % b[id]) : 0;
}
/*******************************************************************************
GPU Test
( Sequential Threads & STREAMS)
*******************************************************************************/
/* gpuTest()
 * Runs the four arithmetic kernels (add/subtract/mult/mod) over totalThreads
 * integer elements and prints the elapsed time measured with CUDA events.
 *   numBlocks    - number of blocks in the launch grid
 *   totalThreads - number of vector elements (total threads across the grid)
 *   testType     - THREAD (sequential copy+execute) or STREAM (4 streams)
 * Fixes vs. the original:
 *   - kernels were launched with totalThreads as the *block* size, which
 *     exceeds the 1024 threads-per-block hardware limit for any realistic
 *     input; the per-block thread count is now derived from numBlocks;
 *   - the mod result was copied back on stream[2] instead of stream[3];
 *   - d_c_add was freed twice while d_c_mod leaked;
 *   - the four streams were never destroyed.
 */
void gpuTest(int numBlocks, int totalThreads, const int testType){
    // Host input/output vectors (pinned, so async copies can truly overlap)
    int *h_a, *h_b, *h_c_add, *h_c_sub, *h_c_mult, *h_c_mod;
    // Device input/output vectors
    int *d_a, *d_b, *d_c_add, *d_c_sub, *d_c_mult, *d_c_mod;
    // Size, in bytes, of each vector
    const unsigned int bytes = totalThreads*sizeof(int);
    // Threads per block, derived by ceil-division so the whole vector is
    // covered; the kernels bounds-check against totalThreads.
    const int threadsPerBlock =
        (numBlocks > 0) ? (totalThreads + numBlocks - 1) / numBlocks : totalThreads;
    // Allocate pinned host memory for each vector
    cudaMallocHost((void**)&h_a, bytes);
    cudaMallocHost((void**)&h_b, bytes);
    cudaMallocHost((void**)&h_c_add, bytes);
    cudaMallocHost((void**)&h_c_sub, bytes);
    cudaMallocHost((void**)&h_c_mult, bytes);
    cudaMallocHost((void**)&h_c_mod, bytes);
    // Allocate device memory for each vector
    cudaMalloc(&d_a, bytes);
    cudaMalloc(&d_b, bytes);
    cudaMalloc(&d_c_add, bytes);
    cudaMalloc(&d_c_sub, bytes);
    cudaMalloc(&d_c_mult, bytes);
    cudaMalloc(&d_c_mod, bytes);
    // Initialize the input vectors
    for (int i = 0; i < totalThreads; i++) {
        h_a[i] = i;           // 0 .. totalThreads-1
        h_b[i] = rand() % 4;  // random value in [0,3]
    }
    // Event-based timing info
    timerEvent timer;
    switch (testType) {
    case THREAD:
        std::cout << "\n\t\t*****Executing Arithmetic Functions Using Sequential*****" << std::endl;
        // Transfer, execute, and transfer back - all timed with events
        startEventTimer(&timer);
        cudaMemcpy( d_a, h_a, bytes, cudaMemcpyHostToDevice);
        cudaMemcpy( d_b, h_b, bytes, cudaMemcpyHostToDevice);
        add<<<numBlocks, threadsPerBlock>>>(d_a, d_b, d_c_add, totalThreads);
        subtract<<<numBlocks, threadsPerBlock>>>(d_a, d_b, d_c_sub, totalThreads);
        mult<<<numBlocks, threadsPerBlock>>>(d_a, d_b, d_c_mult, totalThreads);
        mod<<<numBlocks, threadsPerBlock>>>(d_a, d_b, d_c_mod, totalThreads);
        cudaMemcpy(h_c_add, d_c_add, bytes, cudaMemcpyDeviceToHost);
        cudaMemcpy(h_c_sub, d_c_sub, bytes, cudaMemcpyDeviceToHost);
        cudaMemcpy(h_c_mult, d_c_mult, bytes, cudaMemcpyDeviceToHost);
        cudaMemcpy(h_c_mod, d_c_mod, bytes, cudaMemcpyDeviceToHost);
        stopEventTimer(&timer);
        std::cout << "Time Elaplsed For Sequential Transfer and Execute: " << timer.time_ms << " ms" << std::endl;
        break;
    case STREAM:
    {
        std::cout << "\n\t\t*****Executing Arithmetic Functions Using Streams*****\n";
        // One stream per kernel
        const int nStreams = 4;
        cudaStream_t stream[nStreams];
        for (int s = 0; s < nStreams; s++)
            cudaStreamCreate(&stream[s]);
        startEventTimer(&timer);
        // NOTE(review): every stream uploads the full a/b buffers, so the
        // inputs are copied four times; kept to preserve original behavior.
        for (int s = 0; s < nStreams; s++) {
            cudaMemcpyAsync(d_a, h_a, bytes, cudaMemcpyHostToDevice, stream[s]);
            cudaMemcpyAsync(d_b, h_b, bytes, cudaMemcpyHostToDevice, stream[s]);
        }
        add<<<numBlocks, threadsPerBlock, 0, stream[0]>>>(d_a, d_b, d_c_add, totalThreads);
        cudaMemcpyAsync(h_c_add, d_c_add, bytes, cudaMemcpyDeviceToHost, stream[0]);
        subtract<<<numBlocks, threadsPerBlock, 0, stream[1]>>>(d_a, d_b, d_c_sub, totalThreads);
        cudaMemcpyAsync(h_c_sub, d_c_sub, bytes, cudaMemcpyDeviceToHost, stream[1]);
        mult<<<numBlocks, threadsPerBlock, 0, stream[2]>>>(d_a, d_b, d_c_mult, totalThreads);
        cudaMemcpyAsync(h_c_mult, d_c_mult, bytes, cudaMemcpyDeviceToHost, stream[2]);
        mod<<<numBlocks, threadsPerBlock, 0, stream[3]>>>(d_a, d_b, d_c_mod, totalThreads);
        // Fix: the mod result belongs to stream[3] (the original used stream[2])
        cudaMemcpyAsync(h_c_mod, d_c_mod, bytes, cudaMemcpyDeviceToHost, stream[3]);
        // Wait until all work queued on the streams has finished
        for (int s = 0; s < nStreams; s++)
            cudaStreamSynchronize(stream[s]);
        stopEventTimer(&timer);
        std::cout << "TIME ELAPSED For Asynchronous Transfer and Execute: " << timer.time_ms << " ms" << std::endl;
        // Fix: release the streams (the original leaked them)
        for (int s = 0; s < nStreams; s++)
            cudaStreamDestroy(stream[s]);
        break;
    }
    default:
        std::cout << "Unknown Test Type!\n";
        break;
    }
    // Destroy the event timer
    freeEventTimer(&timer);
    // Free device memory (fix: free d_c_mod; the original freed d_c_add twice)
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c_add);
    cudaFree(d_c_sub);
    cudaFree(d_c_mult);
    cudaFree(d_c_mod);
    // Free pinned host memory with cudaFreeHost
    cudaFreeHost(h_a);
    cudaFreeHost(h_b);
    cudaFreeHost(h_c_add);
    cudaFreeHost(h_c_sub);
    cudaFreeHost(h_c_mult);
    cudaFreeHost(h_c_mod);
}
/*******************************************************************************
MAIN
*******************************************************************************/
// Entry point: parses optional <totalThreads> <blockSize> arguments, rounds
// the thread count up to a whole number of blocks, then runs the sequential
// and the stream-based GPU tests.
int main(int argc, char** argv)
{
    int totalThreads = (1 << 10);
    int blockSize = 256;
    // Both thread count and block size must be supplied together;
    // otherwise the defaults above are used.
    if( argc > 2 && argc < 4){
        totalThreads = atoi(argv[1]);
        blockSize = atoi(argv[2]);
    }
    int numBlocks = totalThreads/blockSize;
    std::cout << "\nUsing " << totalThreads << " Threads and " << blockSize << " BlockSize\n" ;
    // Round the thread count up to a whole number of blocks.
    if (totalThreads % blockSize != 0) {
        ++numBlocks;
        totalThreads = numBlocks*blockSize;
        std::cout << "Warning: Total thread count is not evenly divisible by the block size\n";
        // Fix: the original streamed the literal "%d" (a printf-style format
        // specifier) to cout instead of the rounded value.
        std::cout << "The total number of threads will be rounded up to " << totalThreads << "\n";
    }
    // Report device count and basic properties
    checkDevices();
    // Fix: gpuTest's signature is (numBlocks, totalThreads, testType); the
    // original passed the first two arguments swapped.
    gpuTest( numBlocks, totalThreads, THREAD);
    gpuTest( numBlocks, totalThreads, STREAM);
    return 0;
}
|
12,368 | #include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <ctime>
#include <cstdint>
#include <thrust/reduce.h>
#include <cuda.h>
using namespace std;
// Iterative binary search over the sorted range arr[l..r] (inclusive).
// Returns the index of x, or -1 when x is not present.
__device__ int binarySearch(int* arr, int l, int r, int x)
{
    int lo = l;
    int hi = r;
    while (lo <= hi)
    {
        int mid = (lo + hi) / 2;
        if (arr[mid] == x)
            return mid;      // found
        if (arr[mid] < x)
            lo = mid + 1;    // search the upper half
        else
            hi = mid - 1;    // search the lower half
    }
    return -1;               // not present
}
/*__device__ int index;
__global__ void arrfind(int* adjlist, int start , int end,int entries,int find)
{
int threadID = blockIdx.x * blockDim.x + threadIdx.x;
if(threadID < entries)
{
if( adjlist[threadID] == find )
{
index = threadID;
}
}
}*/
// One thread per edge: for the edge (vertex1 -> vertex2) handled by this
// thread, count how many of vertex1's adjacency entries also appear in
// vertex2's adjacency list (each common neighbour closes a triangle).
// Per-thread counts accumulate in d_counts; the host reduces them and
// divides by 6 (each triangle is seen once per directed edge ordering).
//   beginposition   - CSR row offsets into the full adjacency list
//   graphpartition  - adjacency entries belonging to this partition
//   adjver          - source vertex of each adjacency entry in the partition
//   partitionvertex - last vertex index of the first partition
//   partitionedge   - number of edges in this partition
//   adjlist         - full adjacency list (binary-searched for neighbours)
//   part            - 1 = first partition, 2 = second partition
// NOTE(review): d_counts is incremented but never zeroed by this kernel;
// the caller must initialise it - confirm against the host code.
// NOTE(review): posofelement is an offset into the *full* adjlist but is
// used to index the partition-local graphpartition array - verify this is
// correct for the second partition.
__global__ void Tricount(int* beginposition , int* graphpartition , int* d_counts , int* adjver , int vertices , int entries,int partitionvertex,int partitionedge,int* adjlist,int part)
{
int thread = blockIdx.x * blockDim.x + threadIdx.x;
if(thread < partitionedge ) // limit threads to the number of edges
{
if( part ==2 ) // SECOND PARTITION
{
if(graphpartition[thread] > partitionvertex ) // if the first vertex is within the partition (author's note: possible error)
{
int vertex1 = graphpartition[thread];
int sizeofarray1 = beginposition[ vertex1+1 ]- beginposition[ vertex1 ];
if( graphpartition[thread]+1 == vertices) // last vertex: its row runs to the end of the adjacency list
{
sizeofarray1 = entries-beginposition[vertex1];
}
int vertex2 = adjver[thread];
int sizeofarray2 = beginposition[ vertex2+1 ]-beginposition[ vertex2 ];
if( vertex2+1 == vertices)
{
sizeofarray2 = entries-beginposition[vertex2];
}
int posofelement = beginposition[vertex1];
// For every neighbour of vertex1, binary-search vertex2's list for it.
for(int i = 0 ; i < sizeofarray1 ; i++)
{
int find = graphpartition[ posofelement + i ];
int result = binarySearch (adjlist ,beginposition[vertex2] , beginposition[vertex2] + sizeofarray2 - 1 ,find);
if(result != -1)
{
//printf("found an triangle with vertex %d and vertex %d with vertex %d \n",adjlist[adjindex],vertex2,find);
d_counts[thread] = d_counts[thread] + 1; // common neighbour => triangle
//printf("I found a triangle");
}
}
}
}
else // FIRST PARTITION
{
if(graphpartition[thread] <= partitionvertex ) // if the first vertex is within the partition
{
int vertex1 = graphpartition[thread];
int sizeofarray1 = beginposition[ vertex1+1 ]- beginposition[ vertex1 ];
if( graphpartition[thread]+1 == partitionvertex +1) // last vertex of the partition: row runs to the end
{
sizeofarray1 = entries-beginposition[vertex1];
}
int vertex2 = adjver[thread];
int sizeofarray2 = beginposition[vertex2+1]-beginposition[vertex2];
if( vertex2+1 == partitionvertex +1)
{
sizeofarray2 = entries-beginposition[vertex2];
}
int posofelement = beginposition[vertex1];
// For every neighbour of vertex1, binary-search vertex2's list for it.
for(int i = 0 ; i < sizeofarray1 ; i++)
{
int find = graphpartition[ posofelement + i ];
int result = binarySearch (adjlist ,beginposition[vertex2] , beginposition[vertex2] + sizeofarray2 - 1 ,find);//adjust (Find Intersection)
if(result != -1)
{
//printf("found an triangle with vertex %d and vertex %d with vertex %d \n",adjlist[adjindex],vertex2,find);
d_counts[thread] = d_counts[thread] + 1; // common neighbour => triangle
//printf("I found a triangle");
}
}
}
}
}
}
// Reads the TSV adjacency file "amazon0312_adj.tsv" and fills:
//   adjlist[]       - destination vertex of every edge, in file order
//   beginposition[] - offset into adjlist where each source vertex's row begins
// Each line is parsed as: <destination> <source> <ignored>.  Gaps in the
// source-vertex sequence (vertices with no out-edges) are patched so every
// vertex gets an offset entry.  Returns 1 unconditionally.
// NOTE(review): assumes the file is sorted by source vertex and that the
// caller sized both arrays large enough - confirm against main().
int mmioread(int* adjlist , int* beginposition) {
string line;
string file1 = "amazon0312_adj.tsv";
ifstream myfile (file1);
cout << endl;
cout << " reading " << file1 << " ... " <<endl;
cout <<endl;
long linecount =0;
// Column meaning per line: 0 - adjlist entry, 1 - source vertex, 2 - N/A
beginposition[0] = 0;
long adjlistpos = 0;   // next free slot in adjlist
long beginlistpos = 1; // next free slot in beginposition
long prevnum = 0;      // last source vertex seen
if (myfile.is_open())
{
while ( getline (myfile,line) )
{
istringstream buf(line);
long type =0;
for(string word; buf >> word; )
{
if( type == 0 ) // column 0: append destination vertex to adjlist
{
adjlist[adjlistpos] = stoi(word);
adjlistpos++;
type++;
}
else if( type == 1 ) // column 1: record row start when the source changes
{
if(prevnum != stoi(word) )
{
if (prevnum+1 != stoi(word) )
{
// The source jumped by more than one: emit offsets for the
// skipped (edgeless) vertices so the CSR index stays dense.
//printf("now is %d but before was %d\n",stoi(word),prevnum );
for(int a = 0 ; a <stoi(word)-prevnum-1 ; a++) //Parsing Error Fix
{
beginposition[beginlistpos] = adjlistpos-1;
beginlistpos++;
}
}
beginposition[beginlistpos] = adjlistpos-1;
beginlistpos++;
prevnum = stoi(word);
}
type++;
}
else if (type == 2) // column 2: ignored
type++;
//forcount++;
}
linecount++;
}
myfile.close();
}
else cout << "Unable to open file";
return 1;
};
// Triangle counting on the amazon0312 graph.  Loads the CSR adjacency data,
// expands a per-edge source-vertex array, splits the edge list into two
// partitions at the midpoint vertex, counts triangles per edge on the GPU,
// and reduces the per-edge counts on the host (each triangle is counted 6
// times, hence the division by 6 at the end).
int main(){
int vertices = 400728;
int entries = 4699738;
// NOTE(review): h_beginposition holds 'vertices' ints but beginposition[x+1]
// is read below for x up to vertices-1 - confirm the intended size.
int* h_beginposition= new int[vertices];
int* h_adjlist= new int[entries];
int* h_adjvertex= new int[entries];
// NOTE(review): h_count/h_count2 are never zeroed before being copied to the
// device, and the kernel only increments d_counts - confirm initialisation.
int* h_count = new int [entries];
int* h_count2 = new int [entries];
int* d_begin;
int* d_adj;
int* d_counts;
int* d_counts2;
int* d_adjvertex;
cout <<"Converting MMIO to array form..." <<endl;
clock_t startTime = clock();
mmioread(h_adjlist,h_beginposition);
// Expand the CSR offsets into a per-edge source-vertex array.
int pos =0;
for(int x = 1 ; x < vertices ; x++)
{
int size = h_beginposition[x+1] - h_beginposition[x];
//printf("%d \n ",size);
if( x+1 == vertices )
size = entries-h_beginposition[x];
for(int y = 0 ; y < size ; y++)
{
h_adjvertex[pos] = x;
pos++;
}
}
//*****************************************************************************************************
// Split the edge list in two at the midpoint vertex.
int partition = vertices/2;
cout << "partition vertex is : " << partition << endl;
int sizeofpart1 = h_beginposition[partition+1];
cout << "sizeof partion is : " << sizeofpart1 << endl;
int* h_graphpartition1 = new int[ sizeofpart1 ];
int* h_graphpartition2 = new int[ entries - sizeofpart1 ];
int* h_adjver1 = new int[h_beginposition[partition+1]];
int* h_adjver2 = new int[entries - sizeofpart1];
int* d_graphpartition1;
int* d_graphpartition2;
int* d_adjver1;
int* d_adjver2;
//*****************************************************************************************************
//PARTITION DATASETS
//**************************************************************************************************
// Copy the adjacency entries and their source vertices for each partition.
for(int i = 0 ; i < h_beginposition[partition+1] ; i++)
{
h_graphpartition1[i] = h_adjlist[i];
h_adjver1[i] = h_adjvertex[i];
}
for(int i = 0 ; i < entries - (h_beginposition[partition+1]) ; i++)
{
h_graphpartition2[i] = h_adjlist[ i + h_beginposition[partition+1] ];
h_adjver2[i] = h_adjvertex[ i + h_beginposition[partition+1] ];
}
cout <<"last is : " << h_graphpartition2[entries - (h_beginposition[partition+1])-1] <<endl;
int checkvertex = h_adjvertex[ h_beginposition[partition+1] -1 ]; // last vertex that appears in partition 1 (up to where to copy BP)
int* h_BP1 = new int[checkvertex+1];
int* h_BP2 = new int[ vertices ];
for(int i = 0 ; i < (checkvertex+1) ; i++)
{
h_BP1[i] = h_beginposition[i];
}
for(int i =0 ; i < vertices-1 ; i++)
{
if(i>checkvertex)
h_BP2[i] = h_beginposition[i]-h_beginposition[checkvertex+1]; // rebase offsets to the second partition
}
// NOTE(review): this hard-coded write looks like leftover debugging - verify.
h_BP2[3] =0;
//********************************************************************************************************
//DEBUG SESSION
//printf("pos is %d is %d \n",h_adjlist[718264] ,h_adjvertex[718264]);
//printf("last is %d \n", h_beginposition[4]);
/*
printf("adjlist consist of");
for(int a = 0 ; a < entries ; a++)
printf(" %d ", h_adjlist[a]);
printf("\n");
printf("bp consist of");
for(int a = 0 ; a < vertices ; a++)
printf(" %d ", h_beginposition[a]);
printf("\n");*/
//********************************************************************************************************
//MEMORY ALLOCATION ON DEVICE & MEMORY TRANSFER TO DEVICE
double secondsPassed = (clock() - startTime) / CLOCKS_PER_SEC;
cout <<"Transform complete : "<< secondsPassed << " seconds have passed" << endl;
cout <<"Allocating space on GPU and transfer data..."<< endl;
cout <<"index 2 value is " << h_graphpartition1[3]<<endl;
cudaMalloc(&d_begin, vertices*sizeof(int));
cudaMalloc(&d_adj, entries*sizeof(int));
//cudaMalloc(&d_adjvertex, entries*sizeof(int));
cudaMalloc((void**)&d_counts, entries*sizeof(int));
cudaMalloc((void**)&d_counts2, entries*sizeof(int));
cudaMalloc(&d_graphpartition1,sizeofpart1*sizeof(int));
cudaMalloc(&d_graphpartition2,(entries-sizeofpart1)*sizeof(int));
cudaMalloc(&d_adjver1,sizeofpart1*sizeof(int));
cudaMalloc(&d_adjver2,(entries-sizeofpart1)*sizeof(int));
//cudaMemset((void*)d_counts,0,10*sizeof(int));
//**********************************************************************************************************************
cudaMemcpy(d_begin, h_beginposition, vertices*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_adj, h_adjlist, entries*sizeof(int), cudaMemcpyHostToDevice);
//cudaMemcpy(d_adjvertex, h_adjvertex, entries*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_graphpartition1,h_graphpartition1,sizeofpart1*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(d_graphpartition2,h_graphpartition2,(entries-sizeofpart1)*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(d_adjver1,h_adjver1,sizeofpart1*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(d_adjver2,h_adjver2,(entries-sizeofpart1)*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(d_counts2,h_count2,(entries-sizeofpart1)*sizeof(int),cudaMemcpyHostToDevice);
// One thread per edge, 1024 threads per block.
int blocks = (entries/1024)+1;
cout << "Now counting Triangles" <<endl;
Tricount<<<blocks, 1024>>>(d_begin ,d_graphpartition1 ,d_counts ,d_adjver1 ,vertices , entries,partition,sizeofpart1,d_adj,1);
Tricount<<<blocks, 1024>>>(d_begin ,d_graphpartition2 ,d_counts2 ,d_adjver2 ,vertices , entries,partition,sizeofpart1,d_adj,2);
cudaMemcpy(h_count,d_counts,entries*sizeof(int),cudaMemcpyDeviceToHost);
cudaMemcpy(h_count2,d_counts2,entries*sizeof(int),cudaMemcpyDeviceToHost);
cout << "Done..." <<endl;
cout << "Done with MEMCOPY...Now counting" <<endl;
// Host-side reduction of the per-edge triangle counts.
int result = thrust::reduce(h_count, h_count+ entries);
int result2 = thrust::reduce(h_count2, h_count2+ entries);
printf("First Partition Triangles >>>>> %d \n",result/6);
printf("Second Partition Triangles >>>>> %d \n",result2/6);
printf("Total number is %d\n",(result2+result)/6 );
// NOTE(review): d_counts2 is never freed, and the host arrays allocated with
// new[] are not deleted - confirm whether cleanup matters here.
cudaFree(d_begin);
cudaFree(d_adj);
cudaFree(d_counts);
cudaFree(d_graphpartition1);
cudaFree(d_graphpartition2);
cudaFree(d_adjver1);
cudaFree(d_adjver2);
//cudaDeviceReset();
//3686467
}
|
12,369 |
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <stdlib.h>
#include <math.h>
#include <curand.h>
#include <time.h>
#include <vector>
using namespace std;
#define CUDA_CALL(x) do { if((x)!=cudaSuccess) { \
printf("Error at %s:%d : err => %s\n",__FILE__,__LINE__,cudaGetErrorString(x));\
return EXIT_FAILURE;}} while(0)
#define CURAND_CALL(x) do { if((x)!=CURAND_STATUS_SUCCESS) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
/**
1- Gerar uma matriz aleatoria
2- Aplicar um blur ou filtro (gerar uma nova matriz de saida, com a media aritimetica da vizinhanca aplicada a cada elemento da matriz)
3- Testar e mandar resultados de tempo para os segintes casos:
- memoria unificada
- copia manual de memoria
- usando stream para copia CPU->GPU
- usando streams para os dois sentidos de copias (ida e volta)
4- Testar para matrizes de 100x100 , 1000x1000, 10000x10000
**/
// 3x3 box blur of one pixel of an L x L image stored row-major.
// (ox, oy) offset the thread grid into the image so one launch can process a
// horizontal stripe.  Interior pixels get the 9-point average; border pixels
// are copied through unchanged.
// Fix: threads that fall outside the L x L image now return without writing.
// The original else-branch wrote result[] unconditionally, going out of
// bounds whenever the grid overshot the image (L not a multiple of the block
// size).
__global__ void blur(unsigned int origData[],unsigned result[],int L,int ox,int oy) {
    int x = ox + (threadIdx.x + blockIdx.x * blockDim.x);
    int y = oy + (threadIdx.y + blockIdx.y * blockDim.y);
    if (x < 0 || x >= L || y < 0 || y >= L)
        return; // outside the image: nothing to write
    if (x-1 >= 0 && x+1 < L && y-1 >= 0 && y+1 < L)
    {
        // Sum the 3x3 neighbourhood (including the centre) and average.
        int temp = origData[(x) + (y)*L];
        temp += origData[(x-1) + (y-1)*L];
        temp += origData[(x) + (y-1)*L];
        temp += origData[(x+1) + (y-1)*L];
        temp += origData[(x-1) + (y)*L];
        temp += origData[(x+1) + (y)*L];
        temp += origData[(x-1) + (y+1)*L];
        temp += origData[(x) + (y+1)*L];
        temp += origData[(x+1) + (y+1)*L];
        result[(x) + (y)*L] = temp/9;
    }
    else
    {
        // Border pixel: pass through unchanged.
        result[(x) + (y)*L] = origData[(x) + (y)*L];
    }
}
/*
*argumentos
*1 - n_elementos
*2 - threads por bloco
*/
/*
 * Arguments:
 *  1 - side length L of the L x L matrix (default 40)
 * Splits the image into rStreams horizontal stripes; each stripe gets its own
 * stream for async H2D copy, blur kernel, and async D2H copy.
 * Fixes vs. the original:
 *  - ceil(L/32) performed integer division *before* ceil, under-sizing the
 *    grid whenever L was not a multiple of 32; replaced with integer
 *    ceil-division (L+31)/32;
 *  - host buffer is now pinned (cudaMemcpyAsync silently degrades to a
 *    synchronous copy from pageable malloc'd memory);
 *  - the per-stripe streams are now destroyed;
 *  - the launch-failure message named the wrong kernel.
 */
int main(int argc, char* argv[]) {
    unsigned int L, tam, *h_data, *d_data, *d_res;
    size_t size;
    cudaError_t err = cudaSuccess;
    L = 40;
    if (argc > 1)
        L = atoi(argv[1]);
    tam = L*L;
    size = tam*sizeof(unsigned int);
    dim3 block_dim(L,L,1);
    dim3 grid_dim(1,1,1);
    if (L > 32)
    {
        block_dim = dim3(32,32,1);
        // Integer ceil-division so the grid covers the whole image.
        unsigned int g = (L + 31) / 32;
        grid_dim = dim3(g, g, 1);
    }
    // Pinned host memory so the async copies below can actually overlap.
    CUDA_CALL(cudaMallocHost(&h_data, size));
    /* Allocate vectors in device memory */
    CUDA_CALL(cudaMalloc(&d_data, size));
    CUDA_CALL(cudaMalloc(&d_res, size));
    cudaEvent_t start, stop;
    CUDA_CALL(cudaEventCreate (&start));
    CUDA_CALL(cudaEventCreate (&stop));
    CUDA_CALL(cudaEventRecord (start, 0)); // 0 is the stream number
    // Fill the input with random values
    srand(time(NULL));
    for (int i = 0; i < tam; i++)
        h_data[i] = rand();
    int rStreams = 4;
    dim3 block_dim_temp = dim3(32,32,1);
    // Per-stripe grid, again with integer ceil-division.
    dim3 grid_dim_temp = dim3((L + 31) / 32, ((L / rStreams) + 31) / 32, 1);
    if (grid_dim_temp.y < 1)
        grid_dim_temp = dim3(grid_dim_temp.x, 1, 1);
    vector<cudaStream_t> streams;
    /* One stream per stripe: async upload of that stripe's rows */
    for (int oy = 0; oy < L; oy += L/rStreams)
    {
        streams.push_back(cudaStream_t());
        cudaStreamCreate(&streams.back());
        cudaMemcpyAsync (d_data+(oy*L), h_data+(oy*L),(L/rStreams*L)*sizeof(unsigned int), cudaMemcpyHostToDevice,streams.back());
    }
    for (int oy = 0, i = 0; oy < L; oy += L/rStreams, i++)
        if (i == 0)
            blur<<<grid_dim_temp,block_dim_temp,0,streams[i]>>>(d_data, d_res, L, 0, oy);
        else
            // NOTE(review): passing L+1 as the image side for later stripes
            // changes the row stride inside the kernel - confirm intent.
            blur<<<grid_dim_temp,block_dim_temp,0,streams[i]>>>(d_data, d_res, L+1, 0, oy-1);
    for (int oy = 0, i = 0; oy < L; oy += L/rStreams, i++)
    {
        cudaMemcpyAsync (h_data+(oy*L),d_data+(oy*L),(L/rStreams*L)*sizeof(unsigned int), cudaMemcpyDeviceToHost,streams[i]);
    }
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to launch blur kernel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    CUDA_CALL(cudaDeviceSynchronize());
    for (int i = 0; i < tam; i++)
        h_data[i] = 0;
    CUDA_CALL(cudaEventRecord (stop, 0));
    CUDA_CALL(cudaEventSynchronize (stop));
    float elapsedTime;
    CUDA_CALL(cudaEventElapsedTime (&elapsedTime, start, stop));
    printf ("[%d,%.5f],\n", tam,elapsedTime);
    CUDA_CALL(cudaEventDestroy(start));
    CUDA_CALL(cudaEventDestroy(stop));
    // Release the per-stripe streams (the original leaked them).
    for (size_t i = 0; i < streams.size(); i++)
        cudaStreamDestroy(streams[i]);
    /* Free device memory */
    CUDA_CALL( cudaFree(d_data));
    CUDA_CALL( cudaFree(d_res));
    /* Free pinned host memory */
    CUDA_CALL( cudaFreeHost(h_data));
    return 0;
} /* main */
|
12,370 | #include <stdio.h>
// Replaces each element of A with its sine, one thread per element.
// No bounds check: the launch configuration must cover exactly the length
// of A.
__global__ void SineKernel(float *A)
{
    // Global index of this thread: block offset plus thread offset.
    const uint gid = blockDim.x * blockIdx.x + threadIdx.x;
    A[gid] = sin(A[gid]);
}
12,371 | /*
============================================================================
Filename : algorithm.c
Author : Vincent Rinaldi
SCIPER : 239759
============================================================================
*/
#include <iostream>
#include <iomanip>
#include <sys/time.h>
#include <cuda_runtime.h>
using namespace std;
// CPU Baseline
// CPU reference implementation: applies 'iterations' rounds of a 3x3 box
// blur over the interior of a length x length grid, re-injecting the four
// 1000-valued "heat" cells in the middle after every round.  The input and
// output pointers are swapped locally after each round, so for even
// iteration counts the final values end up in the caller's 'input' buffer.
void array_process(double *input, double *output, int length, int iterations)
{
    for (int step = 0; step < iterations; ++step)
    {
        // Blur every interior cell; borders are never written.
        for (int row = 1; row < length - 1; ++row)
        {
            for (int col = 1; col < length - 1; ++col)
            {
                double acc = 0.0;
                // Accumulate the 3x3 neighbourhood in row-major order
                // (same summation order as a flat expression).
                for (int dr = -1; dr <= 1; ++dr)
                    for (int dc = -1; dc <= 1; ++dc)
                        acc += input[(row + dr) * length + (col + dc)];
                output[row * length + col] = acc / 9;
            }
        }
        // Re-assert the four constant heat cells at the grid centre.
        int m = length / 2;
        output[(m - 1) * length + (m - 1)] = 1000;
        output[m * length + (m - 1)] = 1000;
        output[(m - 1) * length + m] = 1000;
        output[m * length + m] = 1000;
        // Ping-pong the buffers for the next round.
        double *swap = input;
        input = output;
        output = swap;
    }
}
// CUDA Kernel function
// One 3x3 box-blur step for a single (i, j) cell of the length x length
// grid.  Border cells and the four central "heat source" cells are left
// untouched.
__global__ void kernel(double* input, double* output, int length) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int j = threadIdx.y + blockIdx.y * blockDim.y;
    int h = length / 2;
    // The four constant heat cells form a 2x2 square at the grid centre.
    bool isHeat = (i == h - 1 || i == h) && (j == h - 1 || j == h);
    bool interior = (i > 0) && (i < length - 1) && (j > 0) && (j < length - 1);
    if (!interior || isHeat)
        return;
    // Accumulate the 3x3 neighbourhood in row-major order.
    double acc = 0.0;
    for (int di = -1; di <= 1; ++di)
        for (int dj = -1; dj <= 1; ++dj)
            acc += input[(i + di) * length + (j + dj)];
    output[i * length + j] = acc / 9;
}
// GPU Optimized function
// GPU implementation of array_process: iteratively applies the 3x3
// averaging kernel, ping-ponging input_d/output_d between iterations (the
// swap is skipped on the last round so the result is always in output_d),
// and prints the H2D copy, compute, and D2H copy times.
// Fixes vs. the original: cudaThreadSynchronize() (deprecated) replaced by
// cudaDeviceSynchronize(), and all six timing events are now destroyed.
void GPU_array_process(double *input, double *output, int length, int iterations)
{
    // Cuda events for calculating elapsed time
    cudaEvent_t cpy_H2D_start, cpy_H2D_end, comp_start, comp_end, cpy_D2H_start, cpy_D2H_end;
    cudaEventCreate(&cpy_H2D_start);
    cudaEventCreate(&cpy_H2D_end);
    cudaEventCreate(&cpy_D2H_start);
    cudaEventCreate(&cpy_D2H_end);
    cudaEventCreate(&comp_start);
    cudaEventCreate(&comp_end);
    /* Preprocessing */
    // Device arrays
    double *input_d;
    double *output_d;
    // Set device to be used for GPU executions
    cudaSetDevice(0);
    // 8x8 thread blocks; round the grid up so the whole image is covered.
    int numThreadsBlock = 8;
    int numBlocksGrid = (length % numThreadsBlock != 0) ? (length / numThreadsBlock + 1) : (length / numThreadsBlock);
    dim3 numThreadsPerBlock(numThreadsBlock, numThreadsBlock);
    dim3 numBlocksInGrid(numBlocksGrid, numBlocksGrid);
    // Allocate arrays on device
    if (cudaMalloc((void **) &input_d, length*length*sizeof(double)) != cudaSuccess)
        cout << "error in cudaMalloc" << endl;
    if (cudaMalloc((void **) &output_d, length*length*sizeof(double)) != cudaSuccess)
        cout << "error in cudaMalloc" << endl;
    // Host-to-device copy step
    cudaEventRecord(cpy_H2D_start);
    if (cudaMemcpy(input_d, input, length*length*sizeof(double), cudaMemcpyHostToDevice) != cudaSuccess)
        cout << "error in cudaMemcpy" << endl;
    if (cudaMemcpy(output_d, output, length*length*sizeof(double), cudaMemcpyHostToDevice) != cudaSuccess)
        cout << "error in cudaMemcpy" << endl;
    cudaEventRecord(cpy_H2D_end);
    cudaEventSynchronize(cpy_H2D_end);
    // GPU calculation step
    cudaEventRecord(comp_start);
    double *temp_d;
    for (int i = 0; i < iterations; i++) {
        kernel<<<numBlocksInGrid, numThreadsPerBlock>>>(input_d, output_d, length);
        // Swap buffers between rounds, but not after the last one, so the
        // final result is always in output_d.
        if (i != iterations-1) {
            temp_d = input_d;
            input_d = output_d;
            output_d = temp_d;
        }
    }
    // Fix: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
    // is the supported equivalent.
    cudaDeviceSynchronize();
    cudaEventRecord(comp_end);
    cudaEventSynchronize(comp_end);
    // Device-to-host copy step
    cudaEventRecord(cpy_D2H_start);
    if (cudaMemcpy(output, output_d, length*length*sizeof(double), cudaMemcpyDeviceToHost) != cudaSuccess)
        cout << "error in cudaMemcpy" << endl;
    cudaEventRecord(cpy_D2H_end);
    cudaEventSynchronize(cpy_D2H_end);
    /* Postprocessing */
    cudaFree(input_d);
    cudaFree(output_d);
    float time;
    cudaEventElapsedTime(&time, cpy_H2D_start, cpy_H2D_end);
    cout<<"Host to Device MemCpy takes "<<setprecision(4)<<time/1000<<"s"<<endl;
    cudaEventElapsedTime(&time, comp_start, comp_end);
    cout<<"Computation takes "<<setprecision(4)<<time/1000<<"s"<<endl;
    cudaEventElapsedTime(&time, cpy_D2H_start, cpy_D2H_end);
    cout<<"Device to Host MemCpy takes "<<setprecision(4)<<time/1000<<"s"<<endl;
    // Fix: destroy the timing events (the original leaked all six).
    cudaEventDestroy(cpy_H2D_start);
    cudaEventDestroy(cpy_H2D_end);
    cudaEventDestroy(comp_start);
    cudaEventDestroy(comp_end);
    cudaEventDestroy(cpy_D2H_start);
    cudaEventDestroy(cpy_D2H_end);
}
|
12,372 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// Squares the pointed-to value using the IEEE-accurate powf.
__global__ void standard(float *ptr)
{
    float v = *ptr;
    *ptr = powf(v, 2.0f);
}
// Squares the pointed-to value using the fast (lower-precision) __powf
// intrinsic, for timing comparison against the standard version.
__global__ void intrinsic(float *ptr)
{
    float v = *ptr;
    *ptr = __powf(v, 2.0f);
}
//int main()
//{
// float value = 23;
// int SIZE = sizeof(float);
//
// float *d_val;
// cudaMalloc((void**)&d_val, SIZE);
// cudaMemcpy(d_val, &value, SIZE, cudaMemcpyHostToDevice);
// standard << <1, 1 >> > (d_val);
// intrinsic << <1, 1 >> > (d_val);
// cudaDeviceSynchronize();
//
// cudaDeviceReset();
// return 0;
//} |
12,373 | /**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <math.h>
#include <time.h>
#include <fstream>
#include <vector>
#include <fstream>
#include <curand_kernel.h>
#include <cufft.h>
float L,LL; int N, C,itera;
using namespace std;
// función Maxwelliana de la distribución de las partículas.
// Maxwellian velocity sampler: draws a velocity from the two-peaked
// distribution f(v) = 0.5*(exp(-(v-vb)^2/2) + exp(-(v+vb)^2/2)) on
// [-5*vb, 5*vb] by rejection sampling, starting from the uniform random
// number 'aleatorio' and drawing fresh uniforms from this thread's curand
// state until a candidate is accepted.
__device__ float distribution (float vb, float aleatorio, curandState *states) // Maxwellian velocity-distribution generator
{
// Envelope bound of f(v) and the sampling interval limits.
float fmax = 0.5 * (1.0 + exp (-2.0 * vb * vb));
float vmin = - 5.0 * vb;
float vmax = + 5.0 * vb;
float v;
float f;
float x;
int Idx = blockIdx.x*blockDim.x + threadIdx.x;
// NOTE(review): the same 'aleatorio' drives both the candidate v and the
// acceptance test x on each pass - confirm this is the intended scheme.
while(true){
v = vmin + ((vmax - vmin) * aleatorio);
f = 0.5 * (exp (-(v - vb) * (v - vb) / 2.0) +
exp (-(v + vb) * (v + vb) / 2.0));
x = fmax * aleatorio;
if(x > f) aleatorio = curand_uniform(states + Idx); // rejected: redraw
else return v; // accepted
}
}
//Distribución aleatoria de las partículas.
// Initializes the N particles: positions uniform in [0, L) x [0, L) and
// velocities drawn from the Maxwellian sampler.  One thread per particle;
// each thread seeds and initializes its own curand state.
__global__ void distribucionParticulas(float *rx,float *ry,float *vx,float *vy,int N,curandState *states,float vb,float L){
int Idx = blockIdx.x*blockDim.x + threadIdx.x;
// NOTE(review): clock()*Idx as a seed is weak and non-reproducible;
// consider a fixed seed with Idx as the sequence number.
unsigned int seed = (unsigned int) (clock() * Idx);
curand_init(seed, 0, 0, states + Idx);
if(Idx < N){
rx[Idx] = L*curand_uniform(states + Idx); // random x position in [0, L)
ry[Idx] = L*curand_uniform(states + Idx); // random y position in [0, L)
vx[Idx] = distribution(vb,curand_uniform(states + Idx),states); // Maxwellian x velocity
vy[Idx] = distribution(vb,curand_uniform(states + Idx),states); // Maxwellian y velocity
}
}
// inicialización de la densidad.
// Zeroes the C x C density grid, one thread per cell.
__global__ void inicializacionDensidad(float *ne,int C){
    int cell = blockIdx.x*blockDim.x + threadIdx.x;
    if (cell < C*C)
        ne[cell] = 0.0;
}
//Calculo de la densidad en cada celda.
// Deposits each particle's contribution onto the C x C density grid ne,
// with linear weighting along x (the fraction yx goes to the next column,
// wrapping at the right edge).  One thread per particle.
// Fix: many particles routinely land in the same cell, so the scatter
// updates must be atomic - the original's plain "+=" was a data race that
// silently lost contributions.
__global__ void calculoDensidad(float *rx, float *ry, float *ne, int N, int C,float L){
    int Id = blockIdx.x*blockDim.x + threadIdx.x;
    float dx = L / float (C);      // cell width
    float dxx = L / float (C*C);   // weight normalisation
    if (Id < N) {
        int jx = int(rx[Id]/dx);                 // particle's cell in x
        int jy = int(ry[Id]/dx);                 // particle's cell in y
        float yx = (rx[Id]/dx) - (float)jx;      // fractional offset inside the cell
        atomicAdd(&ne[(jy*C)+jx], (1.f - yx)/dxx);
        if (jx+1 == C)
            atomicAdd(&ne[(jy*C)], yx/dxx);      // wrap to column 0 at the right edge
        else
            atomicAdd(&ne[(jy*C)+jx+1], yx/dxx);
    }
}
//pasar de reales a complejos.
// Packs the real-valued C x C density grid into cufftComplex values with a
// zero imaginary part, ready for the forward FFT.
__global__ void real2complex (float *ne, cufftComplex *u, int C)
{
    int col = blockIdx.x*blockDim.x+threadIdx.x;
    int row = blockIdx.y*blockDim.y+threadIdx.y;
    if (col >= C || row >= C)
        return;
    int cell = row*C + col;
    u[cell].x = ne[cell];
    u[cell].y = 0.0f;
}
//__global__ void prueba (cufftComplex *vf, float *vr, int C){
// int idx = blockIdx.x*blockDim.x+threadIdx.x;
// int idy = blockIdx.y*blockDim.y+threadIdx.y;
// int index =idy*C+idx;
//
// if(idx<C && idy<C){
//
// vr[index]= (vf[index].x)/((float)C*(float)C*(float)C*(float)C);
// vr[index]= (vf[index].y)/((float)C*(float)C*(float)C*(float)C);
//
// }
//}
// Spectral Poisson step: scales each Fourier coefficient of vf by
// dx*dx/denom and copies the (transposed) result into v.
// NOTE(review): several lines look defective and should be verified against
// the original algorithm before relying on this kernel:
//  - "i = (0.0,L);" uses the C++ comma operator, so i == L; it is NOT the
//    imaginary unit a spectral solver would need.
//  - W, Wm, Wn are plain floats, so W = exp(...) is a real exponential, not
//    the complex twiddle factor exp(2*pi*I*k/C).
//  - Each thread executes once, so the trailing "Wn *= W;" / "Wm *= W;"
//    updates have no cross-iteration effect; denom is identical for every
//    mode instead of depending on (idx, idy).
__global__ void solve_Poisson(cufftComplex *vf, cufftComplex *v, int C,float L){
int idx = blockIdx.x*blockDim.x+threadIdx.x;
int idy = blockIdx.y*blockDim.y+threadIdx.y;
// Grid spacing.
float dx = L / float (C);
float i,W,Wm,Wn;
// Comma operator: evaluates 0.0, discards it, assigns L (see NOTE above).
i = (0.0,L);
W = exp(2.0 * M_PI * i / float(C));
Wm = L;
Wn = L;
if(idx<C && idy<C){
int index = idy*C+idx;
float denom;
denom = 4.0;
denom -= (Wm + (L / Wm) + Wn +( L / Wn));
// Guard against division by zero for the (would-be) zero mode.
if (denom != 0.0){
vf[index].x *= dx*dx/denom;
vf[index].y *= dx*dx/denom;
}
Wn *= W;// multiply by the constant W
}
Wm *= W;
// Copy the scaled coefficients into v with transposed indexing
// (index = idx*C+idy, i.e. column-major relative to the read above).
if(idx<C && idy<C){
int index = idx*C+idy;
v[index].x=vf[index].x;
v[index].y=vf[index].y;
}
}
// Extract the real part of a complex C x C field into vr, dividing by C*C
// to undo cuFFT's unnormalised forward+inverse transform pair.
__global__ void complex2real(cufftComplex *v, float *vr, int C){
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= C || row >= C)
        return;
    int flat = row * C + col;
    float norm = (float)C * (float)C;
    vr[flat] = v[flat].x / norm;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// Driver: initialise particles, deposit charge density on a C x C grid,
// solve Poisson's equation via cuFFT, and dump all results to text files.
// L, N, C are file-scope globals declared earlier in this file.
int main(){
    // Simulation parameters.
    L = 64.0;        // domain size: 0 <= x <= L (in Debye lengths)
    N = 10000;       // number of particles
    C = 64;          // number of grid cells per dimension
    float vb = 3.0;  // mean electron beam velocity
    float salida = 0.;  // accumulator reporting the total deposited density
    // Particle positions/velocities, host and device copies.
    float *rx_h,*ry_h,*vx_h,*vy_h;
    float *rx_d,*ry_d, *vx_d,*vy_d;
    // Density (ne) and solved potential (vr), host and device copies.
    float *ne_h;
    float *ne_d;
    float *vr_h;
    float *vr_d;
    // Complex work buffers for the FFT-based Poisson solve.
    cufftComplex *u_complex_d,*vf_complex_d,*v_complex_d ;
    cudaMalloc((void**)&u_complex_d,sizeof(cufftComplex)*C*C);
    cudaMalloc((void**)&vf_complex_d,sizeof(cufftComplex)*C*C);
    cudaMalloc((void**)&v_complex_d,sizeof(cufftComplex)*C*C);
    int size = N*sizeof(float);
    int size_ne = C*C*sizeof(float);
    // Host allocations.
    rx_h = (float *)malloc(size);
    ry_h = (float *)malloc(size);
    vx_h = (float *)malloc(size);
    vy_h = (float *)malloc(size);
    ne_h = (float *)malloc(size_ne);
    vr_h = (float *)malloc(size_ne);
    // Device allocations.
    cudaMalloc((void **)&rx_d,size);
    cudaMalloc((void **)&ry_d,size);
    cudaMalloc((void **)&vx_d,size);
    cudaMalloc((void **)&vy_d,size);
    cudaMalloc((void **)&ne_d,size_ne);
    cudaMalloc((void **)&vr_d,size_ne);
    // Per-particle RNG states and launch geometry.
    curandState *devStates;
    cudaMalloc((void **) &devStates, N * sizeof(curandState));
    float blockSize = 1024;
    dim3 dimBlock (ceil(N/blockSize), 1, 1);
    dim3 dimBlock2 (ceil((C*C)/blockSize), 1, 1);
    dim3 dimGrid (blockSize, 1, 1);
    // NOTE(review): these launches pass blockSize (1024) as the GRID dimension
    // and the small dim3s as the BLOCK dimension, and the 2-D kernels
    // (real2complex/solve_Poisson/complex2real) are launched with 1-D blocks,
    // so idy is always 0 and only one row is processed. Preserved as-is;
    // verify against the intended configuration.
    distribucionParticulas<<<blockSize,dimBlock>>>(rx_d,ry_d,vx_d,vy_d,N,devStates,vb,L);
    cudaDeviceSynchronize();
    inicializacionDensidad<<<blockSize,dimBlock2>>>(ne_d,C);
    cudaDeviceSynchronize();
    calculoDensidad<<<blockSize,dimBlock>>>(rx_d,ry_d,ne_d,N,C,L);
    cudaDeviceSynchronize();
    cufftHandle plan;
    cufftPlan2d(&plan, C, C, CUFFT_C2C);
    real2complex<<<blockSize,dimBlock2>>>(ne_d,u_complex_d,C);
    cudaDeviceSynchronize();
    cufftExecC2C (plan, u_complex_d, vf_complex_d, CUFFT_FORWARD);
    // Zero the first element of v ON THE DEVICE. The original code assigned
    // v_complex_d[0].x/.y from the host, which dereferences a device pointer
    // on the CPU — undefined behaviour (typically a segfault).
    cudaMemset(v_complex_d, 0, sizeof(cufftComplex));
    solve_Poisson<<<dimGrid, dimBlock2>>> (vf_complex_d,v_complex_d,C,L);
    cudaDeviceSynchronize();
    cufftExecC2C (plan, v_complex_d, v_complex_d, CUFFT_INVERSE);
    complex2real<<<dimGrid, dimBlock2>>> (v_complex_d,vr_d,C);
    // Copy all results back to the host (cudaMemcpy blocks, so the kernels
    // above have finished before the host reads the buffers).
    cudaMemcpy(rx_h, rx_d, size, cudaMemcpyDeviceToHost);
    cudaMemcpy(ry_h, ry_d, size, cudaMemcpyDeviceToHost);
    cudaMemcpy(vx_h, vx_d, size, cudaMemcpyDeviceToHost);
    cudaMemcpy(vy_h, vy_d, size, cudaMemcpyDeviceToHost);
    cudaMemcpy(ne_h, ne_d, size_ne, cudaMemcpyDeviceToHost);
    cudaMemcpy (vr_h , vr_d, size_ne, cudaMemcpyDeviceToHost);
    // Dump results to text files.
    ofstream init;
    init.open("distribucionInicial.txt");
    for (int i = 0; i < N; i++){
        init<<rx_h[i]<<" "<<ry_h[i]<<" "<<vx_h[i]<<" "<<vy_h[i]<<endl;
    }
    init.close();
    init.open("salida_densidad3.txt");
    for (int i = 0; i < C*C; i++){
        init<<ne_h[i]<<endl;
        salida+=ne_h[i];
    }
    init.close();
    cout<<salida<<" "<<endl;
    init.open("entrada_poisson");
    for (int i = 0; i < C; i++){
        for (int j = 0; j < C; j++){
            init<<ne_h[(C*i)+j]<<" ";
        }
        init<<endl;
    }
    init.close();
    init.open("poisson");
    for (int i = 0; i < C; i++){
        for (int j = 0; j < C; j++){
            init<< vr_h[(C*j)+i]<<" ";
        }
        init<<endl;
    }
    init.close();
    // Release all resources.
    free(rx_h);
    free(ry_h);
    free(vx_h);
    free(vy_h);
    free(ne_h);
    free(vr_h);
    cufftDestroy(plan);
    cudaFree(rx_d);
    cudaFree(ry_d);
    cudaFree(vx_d);
    cudaFree(vy_d);
    cudaFree(ne_d);
    cudaFree(vr_d);
    cudaFree(u_complex_d);
    cudaFree(vf_complex_d);
    cudaFree(v_complex_d);
    cudaFree(devStates);  // fix: the original never freed the RNG states
    return (0);
}
|
// Natural log clamped below: inputs smaller than 4.6e-5 map to -10.0f, so
// callers never see -inf/NaN from the log of a tiny or zero value.
__device__ float safelog(float x) {
    return (x < 4.6e-5f) ? -10.0f : logf(x);
}
// Per-pixel luminance: weighted sum of interleaved RGB (weights 0.2125,
// 0.7154, 0.0721) written to lum, with its clamped log written to logged.
// 2-D launch; one thread per pixel; ptr holds width*height*3 floats.
__global__ void lumKernel(float *ptr, float *lum, int width, int height, float *logged)
{
    int col = threadIdx.x + blockIdx.x * blockDim.x;
    int row = threadIdx.y + blockIdx.y * blockDim.y;
    if (col >= width || row >= height)
        return;
    int pix = width * row + col;
    const float *rgb = ptr + 3 * pix;
    float y = 0.2125f * rgb[0] + 0.7154f * rgb[1] + 0.0721f * rgb[2];
    lum[pix] = y;
    logged[pix] = safelog(y);
}
// Host wrapper: copy an interleaved RGB float image to the GPU, compute
// per-pixel luminance (and its safe log) with lumKernel, and copy back.
// ptr    : host input, width*height*3 floats (RGB interleaved)
// lum    : host output, width*height floats
// logged : optional host output (may be NULL), receives safelog(luminance)
extern "C"
void makeLum(float *ptr, float *lum, int width, int height, float *logged)
{
    // size_t byte counts: the original used int, which overflows for images
    // larger than ~2^29 pixels (int holds ~2^31 bytes).
    size_t lum_memory = (size_t)width * height * sizeof(*ptr);
    size_t image_memory = lum_memory * 3;
    float *gpuPtr = NULL;
    float *lumPtr = NULL;
    float *logLumPtr = NULL;
    cudaMalloc((void**) &gpuPtr, image_memory);
    cudaMalloc((void**) &lumPtr, lum_memory);
    cudaMalloc((void**) &logLumPtr, lum_memory);
    cudaMemcpy(gpuPtr, ptr, image_memory, cudaMemcpyHostToDevice);
    // One thread per pixel; ceil-divide so partial tiles are covered
    // (lumKernel bounds-checks the tail).
    dim3 threads(16, 16);
    dim3 blocks((width + threads.x - 1) / threads.x, (height + threads.y - 1) / threads.y);
    lumKernel<<<blocks, threads>>>(gpuPtr, lumPtr, width, height, logLumPtr);
    // Blocking memcpy also synchronizes with the kernel above.
    cudaMemcpy(lum, lumPtr, lum_memory, cudaMemcpyDeviceToHost);
    if(logged)
        cudaMemcpy(logged, logLumPtr, lum_memory, cudaMemcpyDeviceToHost);
    cudaFree(gpuPtr);
    cudaFree(lumPtr);
    cudaFree(logLumPtr);
}
|
12,375 | #include <cuda.h>
#include <cuda_runtime_api.h>
// Intersect four result bitmaps in place: ar0[i] becomes 1 iff all four
// arrays are non-zero at index i, else 0. One thread per element.
__global__
void brute_merge_results(int* ar0, int* ar1, int* ar2, int* ar3, int array_len) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= array_len)
        return;
    bool all_present = ar0[i] != 0 && ar1[i] != 0 && ar2[i] != 0 && ar3[i] != 0;
    ar0[i] = all_present ? 1 : 0;
}
12,376 | // Compile this code with command: nvcc query.cu -o query
#include <stdio.h>
/* Print info about connected Nvidia GPUs */
/* Print info about connected Nvidia GPUs.
   Iterates over every device reported by the CUDA runtime and prints the
   main cudaDeviceProp fields plus a derived peak-bandwidth estimate. */
void query_GPUs() {
    int nDevices;
    cudaGetDeviceCount(&nDevices);
    for (int i = 0; i < nDevices; i++) {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, i);
        printf("Device Number: %d\n", i);
        printf(" Device name: %s\n", prop.name);
        printf(" Major revision number: %d\n", prop.major);
        printf(" Minor revision number: %d\n", prop.minor);
        /* sharedMemPerBlock, totalGlobalMem and totalConstMem are size_t;
           the original printed them with %u, which truncates on 64-bit
           hosts (wrong values for GPUs with >4 GB of memory). */
        printf(" Total shared memory per block (Bytes): %zu\n", prop.sharedMemPerBlock);
        printf(" Total registers per block: %d\n", prop.regsPerBlock);
        printf(" Warp size: %d\n", prop.warpSize);
        printf(" Maximum threads per block: %d\n", prop.maxThreadsPerBlock);
        printf(" Clock rate (KHz): %d\n", prop.clockRate);
        printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
        printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
        printf(" Total VRAM (Bytes): %zu\n", prop.totalGlobalMem);
        printf(" Total constant memory (Bytes): %zu\n", prop.totalConstMem);
        printf(" Number of SMs: %d\n", prop.multiProcessorCount);
        /* DDR transfers twice per clock: 2 * clock(kHz) * bus-width(bytes) / 1e6 -> GB/s */
        printf(" Peak Memory Bandwidth (GB/s): %f\n",
               2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
        printf(" Concurrent copy and execution: %s\n", (prop.deviceOverlap ? "Yes" : "No"));
        printf(" Concurrent kernels: %s\n", (prop.concurrentKernels ? "Yes" : "No"));
        printf(" Kernel execution timeout: %s\n", (prop.kernelExecTimeoutEnabled ? "Yes" : "No"));
    }
}
// Entry point: dump properties of every attached GPU.
int main(int argc, char ** argv) {
    (void)argc;  // unused
    (void)argv;  // unused
    query_GPUs();
    return 0;
}
|
12,377 | #include <stdio.h>
/* Host-side greeting; runs entirely on the CPU. */
void helloCPU()
{
    printf("Hello from the CPU.\n");
}
/* CUDA function : Device Code - __global__ keyword */
__global__ void helloGPU()
{
printf("Hello also from the GPU.\n");
}
int main()
{
helloCPU();
/* Launch the kernel asynchronously: 1 block of 1 thread. The next host
   statement runs without waiting for the GPU. */
helloGPU<<<1, 1>>>();
helloCPU();
/* Block until the GPU finishes so its printf output is flushed before
   the program exits. */
cudaDeviceSynchronize();
}
/* nvcc -arch=sm_70 -o hello-gpu 01-hello/01-hello-gpu.cu -run */
|
12,378 | /*
Transformer function helper function.
Written by tomztyang,
2021/08/23
*/
#include <math.h>
#include <stdio.h>
#define THREADS_PER_BLOCK 256
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
// #define DEBUG
// Forward attention-weight kernel: for each (query, head, local key) triple,
// computes the dot product between the query's feature vector and the
// indexed key's feature vector over hdim channels.
// Template parameter d sizes the shared-memory buffer at compile time and
// must satisfy d >= hdim (enforced by the launcher's dispatch).
template <unsigned int d>
__global__ void attention_weight_computation_forward_v2(
int b, int total_query_num, int local_size,
int total_key_num, int nhead, int hdim,
const int *query_batch_cnt, const int *key_batch_cnt, const int* index_pair_batch,
const int *index_pair,
const float *query_features, const float* key_features,
float *output) {
// dim3 blocks(total_query_num, nhead); dim3 threads(local_size);
// params query_batch_cnt: [b]
// params key_batch_cnt: [b]
// params index_pair_batch: [total_query_num]
// params index_pair: [total_query_num, local_size]
// params query_features: [total_query_num, nhead, hdim]
// params key_features: [total_key_num, nhead, hdim]
// params output: [total_query_num, local_size, nhead]
int query_idx = blockIdx.x;
int head_idx = blockIdx.y;
int local_key_idx = threadIdx.x;
int index = query_idx * local_size + local_key_idx;
// NOTE(review): this early return is uniform per block for the blockIdx
// conditions, and local_key_idx >= local_size never triggers when the
// launcher uses threads(local_size); if launched with a larger block the
// __syncthreads() below would be reached by only part of the block (UB).
if (query_idx >= total_query_num ||
head_idx >= nhead ||
local_key_idx >= local_size) return;
// build shared query features: threads cooperatively stride over hdim
// so the query vector is loaded once per block.
__shared__ float shared_query_features[d];
for (int i = local_key_idx; i < hdim; i += blockDim.x){
shared_query_features[i] = query_features[
query_idx * nhead * hdim + head_idx * hdim + i];
}
__syncthreads();
// Per-thread return AFTER the barrier, so the shared load above is safe.
if (index_pair[index] == -1){
// Ignore index.
return;
}
// get real key_idx: offset of this query's batch within the flattened
// key array, plus the local index.
int batch_idx = index_pair_batch[query_idx];
int key_start_idx = 0;
for (int i = 0; i < batch_idx; i++){
key_start_idx += key_batch_cnt[i];
}
key_start_idx += index_pair[index];
// get key features.
key_features += key_start_idx * nhead * hdim + head_idx * hdim;
output += index * nhead + head_idx;
float attn_weight = 0;
for (int i = 0; i < hdim; i++){
attn_weight += key_features[i] * shared_query_features[i];
}
output[0] = attn_weight;
}
// Host-side dispatcher for the forward attention-weight kernel.
// Shapes: query_batch_cnt/key_batch_cnt [b]; index_pair_batch
// [total_query_num]; index_pair [total_query_num, local_size];
// query_features [total_query_num, nhead, hdim]; key_features
// [total_key_num, nhead, hdim]; output [total_query_num, local_size, nhead].
// The template argument fixes the kernel's shared-memory buffer size at
// compile time; hdim values without a dedicated case use the 100-float
// fallback, and hdim > 100 is rejected.
void attention_weight_computation_launcher_v2(
    int b, int total_query_num, int local_size,
    int total_key_num, int nhead, int hdim,
    const int *query_batch_cnt, const int *key_batch_cnt, const int* index_pair_batch,
    const int *index_pair,
    const float *query_features, const float* key_features,
    float *output){
    if (hdim > 100){
        throw "hdim should be <= 100.";
    }
    dim3 grid(total_query_num, nhead);
    dim3 block(local_size);
    if (hdim == 16) {
        attention_weight_computation_forward_v2<16><<<grid, block>>>(
            b, total_query_num, local_size, total_key_num, nhead, hdim,
            query_batch_cnt, key_batch_cnt, index_pair_batch,
            index_pair, query_features, key_features,
            output);
    } else if (hdim == 24) {
        attention_weight_computation_forward_v2<24><<<grid, block>>>(
            b, total_query_num, local_size, total_key_num, nhead, hdim,
            query_batch_cnt, key_batch_cnt, index_pair_batch,
            index_pair, query_features, key_features,
            output);
    } else if (hdim == 32) {
        attention_weight_computation_forward_v2<32><<<grid, block>>>(
            b, total_query_num, local_size, total_key_num, nhead, hdim,
            query_batch_cnt, key_batch_cnt, index_pair_batch,
            index_pair, query_features, key_features,
            output);
    } else if (hdim == 48) {
        attention_weight_computation_forward_v2<48><<<grid, block>>>(
            b, total_query_num, local_size, total_key_num, nhead, hdim,
            query_batch_cnt, key_batch_cnt, index_pair_batch,
            index_pair, query_features, key_features,
            output);
    } else {
        attention_weight_computation_forward_v2<100><<<grid, block>>>(
            b, total_query_num, local_size, total_key_num, nhead, hdim,
            query_batch_cnt, key_batch_cnt, index_pair_batch,
            index_pair, query_features, key_features,
            output);
    }
}
// Backward pass of the attention-weight computation: accumulates gradients
// w.r.t. the query features (in shared memory, then written out once per
// block) and w.r.t. the key features (via global atomics, since several
// queries may index the same key). Template parameter d sizes the shared
// buffers and must satisfy d >= hdim.
template <unsigned int d>
__global__ void attention_weight_computation_backward_v2(
int b, int total_query_num, int local_size,
int total_key_num, int nhead, int hdim,
const int *query_batch_cnt, const int *key_batch_cnt, const int* index_pair_batch,
const int *index_pair,
const float *query_features, const float* key_features,
float *grad_out, float * grad_query_features, float * grad_key_features) {
// dim3 blocks(total_query_num, nhead); dim3 threads(local_size);
// params query_batch_cnt: [b]
// params key_batch_cnt: [b]
// params index_pair_batch: [total_query_num]
// params index_pair: [total_query_num, local_size]
// params query_features: [total_query_num, nhead, hdim]
// params key_features: [total_key_num, nhead, hdim]
// params grad_out: [total_query_num, local_size, nhead]
// params grad_query_features: [total_query_num, nhead, hdim]
// params grad_key_features: [total_key_num, nhead, hdim]
int query_idx = blockIdx.x;
int head_idx = blockIdx.y;
int local_key_idx = threadIdx.x;
int index = query_idx * local_size + local_key_idx;
if (query_idx >= total_query_num ||
head_idx >= nhead ||
local_key_idx >= local_size) return;
// build shared query features and zero the shared gradient accumulator;
// threads cooperatively stride over hdim.
__shared__ float shared_query_features[d];
__shared__ float shared_grad_query_features[d];
for (int i = local_key_idx; i < hdim; i += blockDim.x){
shared_query_features[i] = query_features[
query_idx * nhead * hdim + head_idx * hdim + i];
shared_grad_query_features[i] = 0;
}
__syncthreads();
// Guard with if (not early return) so every thread still reaches the
// second __syncthreads() below.
if (index_pair[index] != -1){
int batch_idx = index_pair_batch[query_idx];
int key_start_idx = 0;
for (int i = 0; i < batch_idx; i++){
key_start_idx += key_batch_cnt[i];
}
key_start_idx += index_pair[index];
key_features += key_start_idx * nhead * hdim + head_idx * hdim;
grad_key_features += key_start_idx * nhead * hdim + head_idx * hdim;
float gradient = grad_out[index * nhead + head_idx];
for (int i = 0; i < hdim; i++){
// Shared atomic: all local keys of this block accumulate into the
// same query-gradient vector.
atomicAdd(
shared_grad_query_features + i,
gradient * key_features[i]);
// Global atomic: the same key can be referenced by other blocks.
atomicAdd(
grad_key_features + i,
gradient * shared_query_features[i]);
}
}
__syncthreads();
// Publish the block-accumulated query gradient to global memory.
grad_query_features += query_idx * nhead * hdim + head_idx * hdim;
for (int i = local_key_idx; i < hdim; i += blockDim.x){
grad_query_features[i] = shared_grad_query_features[i];
}
}
// Host-side dispatcher for the backward attention-weight kernel.
// Shapes: query_batch_cnt/key_batch_cnt [b]; index_pair_batch
// [total_query_num]; index_pair [total_query_num, local_size];
// query_features [total_query_num, nhead, hdim]; key_features
// [total_key_num, nhead, hdim]; grad_out [total_query_num, local_size,
// nhead]; grad_query_features [total_query_num, nhead, hdim];
// grad_key_features [total_key_num, nhead, hdim].
// Dispatches on hdim to pick the compile-time shared-buffer size; values
// without a dedicated case fall back to 100 floats; hdim > 100 is rejected.
void attention_weight_computation_grad_launcher_v2(
    int b, int total_query_num, int local_size,
    int total_key_num, int nhead, int hdim,
    const int *query_batch_cnt, const int *key_batch_cnt, const int* index_pair_batch,
    const int *index_pair,
    const float *query_features, const float* key_features,
    float *grad_out, float* grad_query_features, float* grad_key_features){
    if (hdim > 100){
        throw "hdim should be <= 100.";
    }
    dim3 grid(total_query_num, nhead);
    dim3 block(local_size);
    if (hdim == 16) {
        attention_weight_computation_backward_v2<16><<<grid, block>>>(
            b, total_query_num, local_size, total_key_num, nhead, hdim,
            query_batch_cnt, key_batch_cnt, index_pair_batch,
            index_pair, query_features, key_features,
            grad_out, grad_query_features, grad_key_features);
    } else if (hdim == 24) {
        attention_weight_computation_backward_v2<24><<<grid, block>>>(
            b, total_query_num, local_size, total_key_num, nhead, hdim,
            query_batch_cnt, key_batch_cnt, index_pair_batch,
            index_pair, query_features, key_features,
            grad_out, grad_query_features, grad_key_features);
    } else if (hdim == 32) {
        attention_weight_computation_backward_v2<32><<<grid, block>>>(
            b, total_query_num, local_size, total_key_num, nhead, hdim,
            query_batch_cnt, key_batch_cnt, index_pair_batch,
            index_pair, query_features, key_features,
            grad_out, grad_query_features, grad_key_features);
    } else if (hdim == 48) {
        attention_weight_computation_backward_v2<48><<<grid, block>>>(
            b, total_query_num, local_size, total_key_num, nhead, hdim,
            query_batch_cnt, key_batch_cnt, index_pair_batch,
            index_pair, query_features, key_features,
            grad_out, grad_query_features, grad_key_features);
    } else {
        attention_weight_computation_backward_v2<100><<<grid, block>>>(
            b, total_query_num, local_size, total_key_num, nhead, hdim,
            query_batch_cnt, key_batch_cnt, index_pair_batch,
            index_pair, query_features, key_features,
            grad_out, grad_query_features, grad_key_features);
    }
}
|
12,379 | #include "includes.h"
__constant__ float *c_Kernel;
// Row convolution with 2x upsampling in x: each input pixel produces two
// output pixels (even taps -> sum_1, odd taps -> sum_2) using the filter in
// c_Kernel. Tiles of the source row are staged in dynamic shared memory with
// halo regions on both sides.
// NOTE(review): relies on externally defined macros ROWS_RESULT_STEPS,
// ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y and on c_Kernel being a __constant__
// POINTER to filter coefficients (the pointed-to data lives in ordinary
// device memory) — confirm both against the surrounding project.
__global__ void convolutionRowsKernel_up_smp( float *d_Dst, float *d_Src, int imageW, int n_imageW, int imageH, int filter_Rad, int Halo_steps )
{
extern __shared__ float s_Data[];
//Offset to the left halo edge
const int baseX = (blockIdx.x * ROWS_RESULT_STEPS - Halo_steps) * ROWS_BLOCKDIM_X + threadIdx.x;
const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y;
// Output base: 2x the input x coordinate (upsampled row).
const int baseX1 = blockIdx.x * ROWS_RESULT_STEPS * 2 * ROWS_BLOCKDIM_X + 2 * threadIdx.x;
if (baseY < imageH)
{
d_Src += baseY * imageW + baseX;
d_Dst += baseY * n_imageW + baseX1;
//Load left halo (zero-padded beyond the left image border)
//#pragma unroll
for (int i = 0; i < Halo_steps; ++i)
{
s_Data[(threadIdx.y*(ROWS_RESULT_STEPS + 2 * Halo_steps)*ROWS_BLOCKDIM_X) + threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX + i * ROWS_BLOCKDIM_X >= 0) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
}
//Load right halo and main data (zero-padded beyond the right border)
//#pragma unroll
for (int i = Halo_steps; i < Halo_steps + ROWS_RESULT_STEPS + Halo_steps; ++i)
{
s_Data[(threadIdx.y*(ROWS_RESULT_STEPS + 2 * Halo_steps)*ROWS_BLOCKDIM_X) + threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX + i * ROWS_BLOCKDIM_X < imageW) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
}
//Compute and store results
// NOTE(review): __syncthreads() here is only reached by rows with
// baseY < imageH; safe only when imageH is a multiple of
// ROWS_BLOCKDIM_Y or the grid never over-covers rows — verify.
__syncthreads();
//#pragma unroll
for (int i = Halo_steps; i < Halo_steps + ROWS_RESULT_STEPS; ++i)
{
int pos_x = (baseX1 + 2 * (i - Halo_steps) * ROWS_BLOCKDIM_X);
if (pos_x < n_imageW)
{
float sum_1 = 0.0f, sum_2 = 0.0f;
// Polyphase accumulation: even filter taps feed the even output
// pixel (sum_1), odd taps feed the odd output pixel (sum_2).
//#pragma unroll
for (int l = -(filter_Rad / 2); l <= filter_Rad / 2; ++l)
{
int t = 2 * l;
float temp = s_Data[(threadIdx.y*(ROWS_RESULT_STEPS + 2 * Halo_steps)*ROWS_BLOCKDIM_X) + threadIdx.x + i * ROWS_BLOCKDIM_X + l];
sum_1 += c_Kernel[filter_Rad + t] * temp *2.0f;
sum_2 += c_Kernel[filter_Rad + t - 1] * temp *2.0f;
}
// Last odd tap pairs with the sample one position past the loop range.
sum_2 += c_Kernel[2 * filter_Rad] * 2.0f * s_Data[(threadIdx.y*(ROWS_RESULT_STEPS + 2 * Halo_steps)*ROWS_BLOCKDIM_X) + threadIdx.x + i * ROWS_BLOCKDIM_X + filter_Rad / 2 + 1];
d_Dst[2 * (i - Halo_steps)* ROWS_BLOCKDIM_X] = sum_1;
if (pos_x + 1 < n_imageW) d_Dst[2 * (i - Halo_steps) * ROWS_BLOCKDIM_X + 1] = sum_2;
}
}
}
}
12,380 | #include "includes.h"
// Dense matrix multiply: d_c (ROWS x COL_B) = d_a (ROWS x COL_A) *
// d_b (COL_A x COL_B), all row-major. One thread computes one output element.
__global__ void kernelMultMat(double *d_a, double *d_b, double *d_c, int ROWS, int COL_A, int COL_B) {
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    int r = blockIdx.y * blockDim.y + threadIdx.y;
    if (r >= ROWS || c >= COL_B)
        return;
    double acc = 0;
    for (int k = 0; k < COL_A; ++k)
        acc += d_a[r * COL_A + k] * d_b[k * COL_B + c];
    d_c[r * COL_B + c] = acc;
}
// For each hidden feature, compute the weighted sum over all visible
// (movie, rating) units: sum_{m,r} weights[m][r][h] * movie_rating_probs[m][r],
// storing the result in final_hidden_feature_probs[h].
// Grid-stride loop over hidden features, so any launch size covers
// num_hidden_features.
__global__ void
createFinalHiddenFeaturesKernel(const float *weights,
    const float *movie_rating_probs, float* final_hidden_feature_probs,
    int num_movies, int num_hidden_features) {
    // weights[NUM_MOVIES][5][NUM_FEATURES]
    // movie_rating_probs[NUM_MOVIES][5]
    // final_hidden_feature_probs[NUM_FEATURES]
    unsigned int hidden_id = blockIdx.x * blockDim.x + threadIdx.x;
    while (hidden_id < num_hidden_features) {
        float dot_prod = 0.00;  // accumulate this feature's activation
        for (unsigned int movie_id = 0; movie_id < num_movies; movie_id++) {
            for (unsigned int rating = 0; rating < 5; rating++) {
                // BUG FIX: the original multiplied each weight by
                // final_hidden_feature_probs[hidden_id] — the very output
                // being computed — and never used movie_rating_probs at all.
                // The dot product is over the visible movie/rating
                // probabilities. (Hedge: confirm against the caller that
                // movie_rating_probs is laid out [movie][rating] row-major.)
                dot_prod += weights[movie_id*5*num_hidden_features
                                    + rating*num_hidden_features
                                    + hidden_id]
                            * movie_rating_probs[movie_id*5 + rating];
            }
        }
        // Store the dot-product result for this hidden feature.
        final_hidden_feature_probs[hidden_id] = dot_prod;
        // Re-use this thread on the next stripe of hidden features.
        hidden_id += blockDim.x * gridDim.x;
    }
}
|
12,382 | #include <iostream>
#include <stdio.h>
#include <stdlib.h>
using namespace std;
#define DEBUG_LEVEL 0
#define ELEMENTS_PER_BUY_OPTION 2
#define STORE_ID_OFFSET 0
#define PRICE_OFFSET 1
#define NUM_PRODUCTS 30000
#define NUM_BUY_OPTIONS 1024 // Debe ser igual al número de threads.
#define NUM_THREADS 1024 // El número mínimo de threads es 32 (por el tamaño de warp) y el maximo 1024.
void initAllProductsBuyOptions(unsigned int *all_products_buy_options);
void printAllProductsAllBuyOptions(unsigned int *all_products_buy_options);
void getBestBuyOptions(unsigned int *all_products_buy_options, unsigned int *best_buy_options);
void printBestBuyOptions(unsigned int *best_buy_options);
bool areResultsValid(unsigned int *all_products_buy_options, unsigned int *best_buy_options);
// Per-block minimum-price reduction: each block handles one product's
// NUM_BUY_OPTIONS (store_id, price) pairs, staged in shared memory, and
// writes the cheapest pair to best_buy_options[blockIdx.x].
// Requires blockDim.x == NUM_BUY_OPTIONS and blockDim.x a power of two.
__global__ void KernelMarketplace(unsigned int *total_buy_options, unsigned int *best_buy_options)
{
__shared__ unsigned int tmp_best_buy_options[NUM_BUY_OPTIONS * ELEMENTS_PER_BUY_OPTION];
unsigned int thread_id = threadIdx.x;
unsigned int shared_thread_buy_option = thread_id * ELEMENTS_PER_BUY_OPTION;
unsigned int global_thread_buy_option = ( blockIdx.x * blockDim.x + thread_id ) * ELEMENTS_PER_BUY_OPTION;
// Stage this block's options into shared memory (one pair per thread).
tmp_best_buy_options[shared_thread_buy_option + STORE_ID_OFFSET] = total_buy_options[global_thread_buy_option + STORE_ID_OFFSET];
tmp_best_buy_options[shared_thread_buy_option + PRICE_OFFSET] = total_buy_options[global_thread_buy_option + PRICE_OFFSET];
__syncthreads();
// Tree reduction with doubling stride: at each step, thread t (t % stride
// == 0) keeps the cheaper of its pair and the pair "stride" elements away.
// The interleaved (id, price) layout makes the partner offset equal to
// "stride" array slots.
for (unsigned int stride = 2; stride <= blockDim.x; stride *= 2)
{
if (thread_id % stride == 0)
{
unsigned int next_buy_option_position = shared_thread_buy_option + stride;
if (tmp_best_buy_options[shared_thread_buy_option + PRICE_OFFSET] > tmp_best_buy_options[next_buy_option_position + PRICE_OFFSET])
{
tmp_best_buy_options[shared_thread_buy_option + STORE_ID_OFFSET] = tmp_best_buy_options[next_buy_option_position + STORE_ID_OFFSET];
tmp_best_buy_options[shared_thread_buy_option + PRICE_OFFSET] = tmp_best_buy_options[next_buy_option_position + PRICE_OFFSET];
}
}
// Barrier outside the divergent branch: every thread reaches it.
__syncthreads();
}
// Thread 0 publishes the block's (cheapest) winner.
if (thread_id == 0)
{
best_buy_options[blockIdx.x * ELEMENTS_PER_BUY_OPTION + STORE_ID_OFFSET] = tmp_best_buy_options[STORE_ID_OFFSET];
best_buy_options[blockIdx.x * ELEMENTS_PER_BUY_OPTION + PRICE_OFFSET] = tmp_best_buy_options[PRICE_OFFSET];
}
}
// Benchmark driver: generate random buy options, find the cheapest option
// per product on the GPU (one block per product), validate against a CPU
// recomputation, then time the sequential version for comparison.
int main(int argc, char** argv)
{
    // Buy options in host and device
    unsigned int *host_all_products_buy_options = (unsigned int *) malloc( NUM_PRODUCTS * NUM_BUY_OPTIONS * ELEMENTS_PER_BUY_OPTION * sizeof(unsigned int) );
    unsigned int *best_buy_options = (unsigned int *) malloc( NUM_PRODUCTS * ELEMENTS_PER_BUY_OPTION * sizeof(unsigned int) );
    unsigned int *seq_best_buy_options = (unsigned int *) malloc( NUM_PRODUCTS * ELEMENTS_PER_BUY_OPTION * sizeof(unsigned int) );
    unsigned int *device_all_products_buy_options;
    unsigned int *device_best_buy_options;
    // Sizes in bytes.
    unsigned int buy_option_size = ELEMENTS_PER_BUY_OPTION * sizeof(unsigned int);
    unsigned int num_total_buy_options = NUM_PRODUCTS * NUM_BUY_OPTIONS;
    unsigned int total_buy_options_size = num_total_buy_options * buy_option_size;
    unsigned int best_buy_options_size = NUM_PRODUCTS * buy_option_size;
    // Benchmarking events (GPU run and sequential run).
    float elapsed_time;
    cudaEvent_t start;
    cudaEvent_t stop;
    float seq_elapsed_time;
    cudaEvent_t seq_start;
    cudaEvent_t seq_stop;
    initAllProductsBuyOptions(host_all_products_buy_options);
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventCreate(&seq_start);
    cudaEventCreate(&seq_stop);
    // Device allocations and upload.
    cudaMalloc( (unsigned int**) &device_all_products_buy_options, total_buy_options_size );
    cudaMalloc( (unsigned int**) &device_best_buy_options, best_buy_options_size );
    cudaMemcpy(device_all_products_buy_options, host_all_products_buy_options, total_buy_options_size, cudaMemcpyHostToDevice);
    cudaEventRecord(start, 0);
    // One block per product; all threads of a block reduce that product's options.
    KernelMarketplace<<<NUM_PRODUCTS, NUM_THREADS>>>(device_all_products_buy_options, device_best_buy_options);
    // Blocking copy of the per-product winners back to the host.
    // NOTE(review): this copy sits inside the timed region, so elapsed_time
    // includes the device-to-host transfer, not just the kernel.
    cudaMemcpy(best_buy_options, device_best_buy_options, best_buy_options_size, cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    // Release device memory.
    cudaFree(device_all_products_buy_options);
    cudaFree(device_best_buy_options);
    cudaEventElapsedTime(&elapsed_time, start, stop);
    printf("\nKERNEL MARKETPLACE\n");
    printf("Number buy options per product: %d\n", NUM_BUY_OPTIONS);
    printf("Number of blocks (products): %d\n", NUM_PRODUCTS);
    printf("Number of Threads: %d\n", NUM_THREADS);
    printf("Vector Size: %d\n", num_total_buy_options);
    printf("Total time %4.6f milseg\n", elapsed_time);
    printf("Bandwidth %4.3f GB/s\n", (num_total_buy_options * ELEMENTS_PER_BUY_OPTION * sizeof(unsigned int)) / (1000000 * elapsed_time));
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    if (DEBUG_LEVEL >= 2)
    {
        printAllProductsAllBuyOptions(host_all_products_buy_options);
    }
    if (DEBUG_LEVEL >= 1)
    {
        printBestBuyOptions(best_buy_options);
    }
    if ( areResultsValid( host_all_products_buy_options, best_buy_options ) )
    {
        printf ("TEST PASS\n");
    }
    else
    {
        printf ("TEST FAIL\n");
    }
    // Time the sequential CPU version for comparison.
    cudaEventRecord(seq_start, 0);
    getBestBuyOptions(host_all_products_buy_options, seq_best_buy_options);
    cudaEventRecord(seq_stop, 0);
    cudaEventSynchronize(seq_stop);
    cudaEventElapsedTime(&seq_elapsed_time, seq_start, seq_stop);
    printf("\nSEQUENCIAL\n");
    printf("Elapsed time %4.6f milseg\n", seq_elapsed_time);
    cudaEventDestroy(seq_start);
    cudaEventDestroy(seq_stop);
    // Fix: the original leaked all three host buffers and fell off the end
    // of main without an explicit return.
    free(host_all_products_buy_options);
    free(best_buy_options);
    free(seq_best_buy_options);
    return 0;
}
// Populate every product's buy-option table: option k of a product belongs
// to store k (unique stores per product), with a pseudo-random price in
// [1, 99999].
void initAllProductsBuyOptions(unsigned int *all_products_buy_options)
{
    for(unsigned int product = 0; product < NUM_PRODUCTS; ++product)
    {
        unsigned int base = product * NUM_BUY_OPTIONS * ELEMENTS_PER_BUY_OPTION;
        for(unsigned int opt = 0; opt < NUM_BUY_OPTIONS * ELEMENTS_PER_BUY_OPTION; opt += ELEMENTS_PER_BUY_OPTION)
        {
            // Store id mirrors the option index so no store appears twice
            // for the same product.
            all_products_buy_options[base + opt + STORE_ID_OFFSET] = opt/2;
            // Random price in [1, 99999].
            all_products_buy_options[base + opt + PRICE_OFFSET] = rand() % 99999 + 1;
        }
    }
}
// Compare the device's per-product winners against a CPU recomputation.
// Only prices are compared: when several stores share the minimum price,
// device and host may legitimately pick different stores.
// Returns true iff every product's best price matches.
bool areResultsValid(unsigned int *all_products_buy_options, unsigned int *best_buy_options_by_device)
{
    unsigned int *best_buy_options_by_host = (unsigned int *) malloc( NUM_PRODUCTS * ELEMENTS_PER_BUY_OPTION * sizeof(unsigned int) );
    getBestBuyOptions(all_products_buy_options, best_buy_options_by_host);
    if (DEBUG_LEVEL >= 1)
    {
        printBestBuyOptions(best_buy_options_by_host);
    }
    for (unsigned int product_iteration = 0; product_iteration < NUM_PRODUCTS * ELEMENTS_PER_BUY_OPTION; product_iteration += ELEMENTS_PER_BUY_OPTION)
    {
        unsigned int current_product_store_position = product_iteration + STORE_ID_OFFSET;
        unsigned int current_product_price_position = product_iteration + PRICE_OFFSET;
        unsigned int best_store_by_device = best_buy_options_by_device[current_product_store_position];
        unsigned int best_price_by_device = best_buy_options_by_device[current_product_price_position];
        unsigned int best_store_by_host = best_buy_options_by_host[current_product_store_position];
        unsigned int best_price_by_host = best_buy_options_by_host[current_product_price_position];
        if (best_price_by_device != best_price_by_host)
        {
            if (DEBUG_LEVEL >= 1)
            {
                printf("FAILED IN product: %d\n", product_iteration);
                printf("\tbest_store_by_device: %d\n", best_store_by_device);
                printf("\tbest_store_by_host: %d\n", best_store_by_host);
                printf("\tbest_price_by_device: %d\n", best_price_by_device);
                printf("\tbest_price_by_host: %d\n", best_price_by_host);
            }
            free(best_buy_options_by_host);  // fix: original leaked on failure
            return false;
        }
    }
    free(best_buy_options_by_host);  // fix: original leaked on success too
    return true;
}
// Debug dump: print every (store_id, price) pair for every product.
void printAllProductsAllBuyOptions(unsigned int *all_products_buy_options)
{
    cout << "All products buy options:" << endl;
    for (unsigned int product = 0; product < NUM_PRODUCTS; ++product){
        cout << endl << "\tproduct_id: " << product << endl;
        unsigned int base = product * NUM_BUY_OPTIONS * 2;
        for (unsigned int opt = 0; opt < NUM_BUY_OPTIONS*2; opt += 2){
            cout << "Buy option:" << endl;
            cout << "\tstore_id: " << all_products_buy_options[base + opt] << endl;
            cout << "\tprice: " << all_products_buy_options[base + opt + 1] << endl;
        }
    }
}
// Sequential reference: for each product, scan its NUM_BUY_OPTIONS options
// and record the (store, price) pair with the lowest price.
void getBestBuyOptions(unsigned int *all_products_buy_options, unsigned int *best_buy_options)
{
    for(unsigned int product = 0; product < NUM_PRODUCTS; ++product)
    {
        unsigned int base = product * NUM_BUY_OPTIONS * ELEMENTS_PER_BUY_OPTION;
        // Seed the running minimum with the product's first option.
        unsigned int best_store = all_products_buy_options[base + STORE_ID_OFFSET];
        unsigned int best_price = all_products_buy_options[base + PRICE_OFFSET];
        for(unsigned int off = ELEMENTS_PER_BUY_OPTION; off < NUM_BUY_OPTIONS * ELEMENTS_PER_BUY_OPTION; off += ELEMENTS_PER_BUY_OPTION)
        {
            unsigned int candidate_price = all_products_buy_options[base + off + PRICE_OFFSET];
            if (candidate_price < best_price)
            {
                best_store = all_products_buy_options[base + off + STORE_ID_OFFSET];
                best_price = candidate_price;
            }
        }
        best_buy_options[product * ELEMENTS_PER_BUY_OPTION + STORE_ID_OFFSET] = best_store;
        best_buy_options[product * ELEMENTS_PER_BUY_OPTION + PRICE_OFFSET] = best_price;
    }
}
// Print the winning (store, price) pair chosen for each product.
void printBestBuyOptions(unsigned int *best_buy_options)
{
    cout << endl <<"Best products buy options:" << endl;
    for (unsigned int product = 0; product < NUM_PRODUCTS; ++product)
    {
        const unsigned int base = product * ELEMENTS_PER_BUY_OPTION;
        cout << "Best buy option for product_id: " << product << endl;
        cout << "\tstore_id: " << best_buy_options[base + STORE_ID_OFFSET] << endl;
        cout << "\tprice: " << best_buy_options[base + PRICE_OFFSET] << endl;
    }
}
|
12,383 | /**********************************************************************
* DESCRIPTION:
* Serial Concurrent Wave Equation - C Version
* This program implements the concurrent wave equation
*********************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define MAXPOINTS 1000000
#define MAXSTEPS 1000000
#define MINPOINTS 20
#define PI 3.14159265
#define BLOCKSIZE 128
void check_param(void);
void init_line(void);
void update (void);
void printfinal (void);
int nsteps, /* number of time steps */
tpoints, /* total points along string */
rcode; /* generic return code */
float values[MAXPOINTS+2], /* values at time t */
oldval[MAXPOINTS+2], /* values at time (t-dt) */
newval[MAXPOINTS+2]; /* values at time (t+dt) */
/**********************************************************************
* Checks input values from parameters
*********************************************************************/
/* Interactively validate the global tpoints / nsteps values, prompting on
 * stdin until both fall in their allowed ranges. Exits on read failure. */
void check_param(void)
{
    char tchar[20];

    /* check number of points */
    while ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)) {
        printf("Enter number of points along vibrating string [%d-%d]: "
             ,MINPOINTS, MAXPOINTS);
        /* BUG FIX: "%s" had no field width, so a long token overflowed the
         * 20-byte buffer; %19s bounds the read. Also bail out on EOF instead
         * of looping forever on a dead stdin. */
        if (scanf("%19s", tchar) != 1)
            exit(EXIT_FAILURE);
        tpoints = atoi(tchar);
        if ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS))
            printf("Invalid. Please enter value between %d and %d\n",
                 MINPOINTS, MAXPOINTS);
    }
    /* check number of time steps */
    while ((nsteps < 1) || (nsteps > MAXSTEPS)) {
        printf("Enter number of time steps [1-%d]: ", MAXSTEPS);
        if (scanf("%19s", tchar) != 1)
            exit(EXIT_FAILURE);
        nsteps = atoi(tchar);
        if ((nsteps < 1) || (nsteps > MAXSTEPS))
            printf("Invalid. Please enter value between 1 and %d\n", MAXSTEPS);
    }
    printf("Using points = %d, steps = %d\n", tpoints, nsteps);
}
/**********************************************************************
* Update all values along line a specified number of times
*********************************************************************/
// Device wave update: each thread owns one string point k (1-based) and
// time-steps it locally for nsteps iterations, then writes its final
// displacement to values_d[k]. Endpoints (k==1, k==tpoints) are pinned to 0.
__global__ void update (float *values_d, int tpoints, int nsteps)
{
    int i;
    // Global 1-based point index for this thread.
    int k = 1+threadIdx.x + blockIdx.x * blockDim.x;
    if(k <= tpoints){
        float values_t;   // displacement at time t
        float newval_t;   // displacement at time t+dt
        float oldval_t;   // displacement at time t-dt
        float x, fac , tmp ;
        // Initial condition: one sine period along the normalized string.
        fac = 2.0 * PI;
        tmp = tpoints - 1;
        x = (float)(k-1)/tmp ;
        values_t = sin(fac * x);
        // Zero initial velocity: previous value equals current value.
        oldval_t = values_t;
        for (i = 1; i <= nsteps ; i++) {
            if ((k == 1) || (k == tpoints ))
                newval_t = 0.0f;   // fixed string endpoints
            else
                // Leapfrog step; NOTE(review): the 0.09 coefficient is
                // hard-coded — presumably dtime^2*tau/(dx^2) from the serial
                // original; confirm before changing time/space resolution.
                newval_t = (2.0f * values_t) - oldval_t + ( 0.09f * ( -2.0f * values_t ));
            oldval_t = values_t;
            values_t = newval_t;
        }
        values_d[k] = values_t;
    }
}
/**********************************************************************
* Print final results
*********************************************************************/
/* Print the final displacement of all tpoints string points, ten per line. */
void printfinal()
{
    int idx = 1;
    while (idx <= tpoints) {
        printf("%6.4f ", values[idx]);
        if ((idx % 10) == 0)
            printf("\n");
        ++idx;
    }
}
/**********************************************************************
* Main program
*********************************************************************/
/* Entry point: parse tpoints/nsteps from argv, run the wave kernel once
 * (each thread integrates its own point), copy back and print results. */
int main(int argc, char *argv[])
{
    /* BUG FIX: argv[1]/argv[2] were dereferenced unconditionally, crashing
     * when the program was started without arguments. */
    if (argc < 3) {
        fprintf(stderr, "Usage: %s <tpoints> <nsteps>\n", argv[0]);
        return 1;
    }
    sscanf(argv[1],"%d",&tpoints);
    sscanf(argv[2],"%d",&nsteps);
    check_param();
    printf("Initializing points on the line...\n");
    printf("Updating all points for all time steps...\n");
    float *values_d;
    /* +1 element: index 0 is unused, kernel writes slots 1..tpoints. */
    int size = (1+tpoints)*sizeof(float);
    cudaMalloc((void**)&values_d,size);
    update<<<(tpoints+BLOCKSIZE-1)/BLOCKSIZE, BLOCKSIZE>>>(values_d, tpoints, nsteps);
    /* Blocking copy: also synchronizes with the kernel above. */
    cudaMemcpy(values, values_d, size, cudaMemcpyDeviceToHost);
    printf("Printing final results...\n");
    printfinal();
    printf("\nDone.\n\n");
    cudaFree(values_d);
    return 0;
}
|
12,384 | // Gaurav Sheni
// CSC 391
// September 16, 2015
// Project 1
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
//declaring kernel call
// Forward declaration (kept for parity with the original layout).
__global__ void decrement(char* line, char* answer);
// Each thread decodes one character by shifting it down one code point
// (simple Caesar-style decode). One thread per input byte.
__global__ void decrement(char *current, char* answer){
    const int idx = threadIdx.x;
    answer[idx] = (char)((int)current[idx] - 1);
}
// Read the file named on the command line, decode it on the GPU by
// decrementing every byte, and print the decoded message.
int main ( int argc, char *argv[] )
{
    // Require exactly one argument: the input file path.
    if (argc != 2){
        printf ("Incorrect number of command line arugments.\r\n");
        //exit with 1 because exit(1) indicates that there were an error
        exit(1);
    }
    FILE *file = fopen(argv[1], "r");
    char* line = NULL;
    int lengthOfFile;
    if ( file != NULL ){
        // Determine the file size by seeking to its end.
        fseek(file, 0, SEEK_END);
        lengthOfFile = ftell(file);
        // +1 for the NUL terminator.
        line = (char *) malloc(lengthOfFile* sizeof(char) + 1 ) ;
        fseek (file , 0 , SEEK_SET);
        // BUG FIX: the original pointed `line` at a block-scoped stack array
        // (dangling pointer after the block ends), leaked the malloc'd buffer,
        // and only kept the LAST line of the file while still launching
        // lengthOfFile threads. Read the whole file into the heap buffer.
        size_t nread = fread(line, 1, (size_t)lengthOfFile, file);
        line[nread] = '\0';
        fclose ( file );
    }
    else{
        printf ("File could not be opened.\r\n");
        printf ("File may not exist or the command line arugment is incorrectly named.\r\n");
        //exit with 1 because exit(1) indicates that there were an error
        exit(1);
    }
    // BUG FIX: heap buffer with one extra byte so the decoded message can be
    // NUL-terminated — the original printed an unterminated array with %s,
    // which is undefined behavior.
    char *answer = (char *) malloc(lengthOfFile + 1);
    char *dev_line;
    char *dev_answer;
    int size = lengthOfFile * sizeof(char);
    cudaMalloc((void**)&dev_line, size);
    cudaMalloc((void**)&dev_answer, size);
    cudaMemcpy(dev_line, line, size, cudaMemcpyHostToDevice);
    // One thread per byte of input.
    decrement<<< 1, lengthOfFile >>>(dev_line, dev_answer);
    cudaThreadSynchronize();
    cudaMemcpy(answer, dev_answer, size, cudaMemcpyDeviceToHost);
    answer[lengthOfFile] = '\0';
    cudaFree(dev_line);
    cudaFree(dev_answer);
    printf("Decoded --- Message is: \r\n%s",answer);
    free(line);
    free(answer);
    exit (0);
}
|
12,385 | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
#define NXPROB 20480 /* x dimension of problem grid */
#define NYPROB 32768 /* y dimension of problem grid */
#define STEPS 500 /* number of time steps */
#define MAXWORKER 8 /* maximum number of worker tasks */
#define MINWORKER 3 /* minimum number of worker tasks */
#define BEGIN 1 /* message tag */
#define LTAG 2 /* message tag */
#define RTAG 3 /* message tag */
#define NONE 0 /* indicates no neighbor */
#define DONE 4 /* message tag */
#define MASTER 0 /* taskid of first process */
#define BLOCK_H 10
#define BLOCK_V 8
#define THREADS 32
struct Parms {
float cx;
float cy;
} parms = {0.1, 0.1};
/**************************************************************************
* subroutine update
****************************************************************************/
/* Host reference stencil: one diffusion step computing u2 from u1 over rows
 * [start, end] and interior columns 1..ny-2, using the global `parms`
 * diffusion coefficients. Row-major layout with row length ny. */
void update(int start, int end, int ny, float *u1, float *u2)
{
    for (int ix = start; ix <= end; ix++) {
        for (int iy = 1; iy <= ny-2; iy++) {
            u2[ix*ny+iy] = u1[ix*ny+iy] +
                parms.cx * (u1[(ix+1)*ny+iy] +
                            u1[(ix-1)*ny+iy] -
                            2.0 * u1[ix*ny+iy]) +
                parms.cy * (u1[ix*ny+iy+1] +
                            u1[ix*ny+iy-1] -
                            2.0 * u1[ix*ny+iy]);
        }
    }
}
/*****************************************************************************
* subroutine inidat
*****************************************************************************/
/* Initialize the nx*ny grid with the classic heat-demo pattern:
 * u[ix][iy] = (ix*(nx-ix-1)*iy*(ny-iy-1)) mod 1000. Borders get 0. */
void inidat(int nx, int ny, float *u) {
    for (int ix = 0; ix < nx; ix++) {
        for (int iy = 0; iy < ny; iy++) {
            u[ix*ny + iy] = (float)(ix * (nx - ix - 1) * iy * (ny - iy - 1) % 1000);
        }
    }
}
/**************************************************************************
* subroutine prtdat
**************************************************************************/
/* Write the nx*ny grid to file `fnam`: y iterates from top (ny-1) down to 0,
 * one output line per y, x-values space-separated. */
void prtdat(int nx, int ny, float *u1, const char *fnam) {
    FILE *fp = fopen(fnam, "w");
    for (int iy = ny - 1; iy >= 0; iy--) {
        for (int ix = 0; ix < nx; ix++) {
            fprintf(fp, "%6.1f", u1[ix*ny + iy]);
            fprintf(fp, (ix == nx - 1) ? "\n" : " ");
        }
    }
    fclose(fp);
}
// One diffusion step on the device: thread (ix, iy) updates interior cell
// u1[ix][iy] from u0 and its 4 neighbours. Expects a 2D launch covering at
// least the interior (NXPROB-2) x (NYPROB-2) cells.
__global__ void cuda_update(float *u0, float *u1, struct Parms parms)
{
    int ix, iy;
    // +1 skips the fixed boundary row/column.
    ix = blockIdx.x * blockDim.x + threadIdx.x + 1;
    iy = blockIdx.y * blockDim.y + threadIdx.y + 1;
    // BUG FIX: the original guard (ix + iy < NXPROB + NYPROB - 2) let threads
    // with ix >= NXPROB-1 or iy >= NYPROB-1 through, so the ix+1 / iy+1
    // neighbour accesses ran past the end of the arrays. Each coordinate must
    // be bounded independently to the interior.
    if (ix <= NXPROB - 2 && iy <= NYPROB - 2)
    {
        *(u1+ix*NYPROB+iy) = *(u0+ix*NYPROB+iy) +
                parms.cx * (*(u0+(ix+1)*NYPROB+iy) +
                *(u0+(ix-1)*NYPROB+iy) -
                2.0 * *(u0+ix*NYPROB+iy)) +
                parms.cy * (*(u0+ix*NYPROB+iy+1) +
                *(u0+ix*NYPROB+iy-1) -
                2.0 * *(u0+ix*NYPROB+iy));
    }
}
// Trivial elementwise vector add; only used by main() to query a reasonable
// occupancy-derived block size.
__global__ void MyKernel(int *a, int *b, int *c, int N)
{
    const int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i >= N)
        return;
    c[i] = a[i] + b[i];
}
// Driver: initialize the grid, run STEPS diffusion steps on the GPU
// (ping-ponging between two buffers), and report the elapsed time.
int main (int argc, char *argv[])
{
    int i;
    float *u;
    float *cuda_u0, *cuda_u1;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float ms = 0.0f;
    int block_size;
    int min_grid, grid;
    // Occupancy query kept from the original (informational only).
    //http://devblogs.nvidia.com/parallelforall/cuda-pro-tip-occupancy-api-simplifies-launch-configuration/
    cudaOccupancyMaxPotentialBlockSize(&min_grid, &block_size, MyKernel, 0, NXPROB*NYPROB);
    grid = (NXPROB*NYPROB + block_size - 1) / block_size;
    // BUG FIX: cuda_update derives 2D (ix, iy) indices from blockIdx.y /
    // threadIdx.y, but was launched with a 1D <<<grid, block_size>>>
    // configuration, so iy was always 1 and ix ran far past NXPROB. Launch a
    // proper 2D grid covering the interior instead. (The original dimBlocks /
    // dimThreads were computed but never used — and had threads per block far
    // above the 1024 limit.)
    dim3 dimBlocks(16, 16);
    dim3 dimGrid(((NXPROB - 2) + dimBlocks.x - 1) / dimBlocks.x,
                 ((NYPROB - 2) + dimBlocks.y - 1) / dimBlocks.y);
    //malloc host
    u = (float*)malloc(NXPROB*NYPROB*sizeof(float));
    //malloc device
    cudaMalloc((void**)&cuda_u0, (NXPROB*NYPROB*sizeof(float)));
    cudaMalloc((void**)&cuda_u1, (NXPROB*NYPROB*sizeof(float)));
    printf("Grid size: X= %d Y= %d Time steps= %d\n",NXPROB,NYPROB,STEPS);
    inidat(NXPROB, NYPROB, u); //initialize
    //prtdat(NXPROB, NYPROB, u, "initial.dat"); //print
    //copy from host to device
    cudaMemcpy(cuda_u0, u, (NXPROB*NYPROB*sizeof(float)), cudaMemcpyHostToDevice);
    cudaMemcpy(cuda_u1, u, (NXPROB*NYPROB*sizeof(float)), cudaMemcpyHostToDevice);
    cudaEventRecord(start, 0);
    for (i = 0; i < STEPS; i+=2)
    {
        // Ping-pong: u0 -> u1, then u1 -> u0.
        cuda_update<<<dimGrid, dimBlocks>>>(cuda_u0, cuda_u1, parms);
        cuda_update<<<dimGrid, dimBlocks>>>(cuda_u1, cuda_u0, parms);
    }
    // cudaThreadSynchronize is deprecated; use the modern equivalent.
    cudaDeviceSynchronize();
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&ms, start, stop);
    //copy from device to host
    cudaMemcpy(u, cuda_u1, (NXPROB*NYPROB*sizeof(float)), cudaMemcpyDeviceToHost);
    //prtdat(NXPROB, NYPROB, u, "final.dat"); //print
    printf("Time: %f ms\n", ms);
    cudaFree(cuda_u0);
    cudaFree(cuda_u1);
    free(u);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
|
12,386 | #include "includes.h"
// SSSP relaxation step: when a vertex's proposed distance (update_dists)
// improves on its current distance, adopt it and flag the vertex in `mask`
// for the next iteration; then mirror dists back into update_dists.
__global__ void update_dists_kernel(const int * beg_pos, const int * adj_list, const int* weights, bool * mask, int* dists, int* update_dists, const int num_vtx) {
    const int vtx = blockIdx.x * blockDim.x + threadIdx.x;
    if (vtx >= num_vtx)
        return;
    const int proposed = update_dists[vtx];
    if (proposed < dists[vtx]) {
        dists[vtx] = proposed;
        mask[vtx] = true;
    }
    update_dists[vtx] = dists[vtx];
}
12,387 | /* Looooong compile time.
*
* Authors: Luc Grosheintz <forbugrep@zoho.com>
* Date: 2015-03-17
*/
#include "call_back.cuh"
// Compiler stress kernel: 135 straight-line calls to S::evil(x). The
// repetition is deliberate (see the file header: "Looooong compile time");
// do NOT fold these into a loop — that would defeat the test's purpose.
__global__
void foo(double x) {
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    S::evil(x);
    printf("%e\n", x);
}
// Launch the stress kernel once with a single thread.
int main(int argc, char *argv[]){
    const double seed = 0.2;   // value forwarded to the device
    foo<<<1, 1>>>(seed);
    return 0;
}
|
12,388 | #include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
// Code Generated with GCDObsidian
// Generated by GCDObsidian. Each block handles 32 consecutive ints.
// The load index ((tid & 0xFFFFFFF0) | (15 - (tid & 15))) keeps the upper
// bits of tid and mirrors the low 4 bits, i.e. it reverses the element order
// inside each group of 16 elements before writing back in natural order.
__global__ void two(int *input0,int *result0){
    unsigned int tid = threadIdx.x;
    unsigned int bid = blockIdx.x;
    // Dynamically sized shared buffer (32 ints supplied at launch).
    extern __shared__ unsigned char sbase[];
    ((int *)sbase)[tid] = input0[((bid*32)+((tid&4294967280)|(15-(tid&15))))];
    __syncthreads();
    result0[((bid*32)+tid)] = ((int *)sbase)[tid];
}
// Host driver: feed 0..31 through the `two` kernel and print the result.
int main(int argc, char **argv){
    int values[32];
    int result[32];
    int *dvalues = NULL;
    int *dresult = NULL;
    // Generate input data: the identity sequence 0..31.
    for (int i = 0; i < 32; ++i)
        values[i] = i;
    const size_t bytes = sizeof(int) * 32;
    cudaMalloc((void**)&dvalues, bytes);
    cudaMalloc((void**)&dresult, bytes);
    cudaMemcpy(dvalues, values, bytes, cudaMemcpyHostToDevice);
    // One block of 32 threads with 32 ints of dynamic shared memory.
    two<<<1, 32, 32 * sizeof(int)>>>(dvalues, dresult);
    cudaMemcpy(result, dresult, bytes, cudaMemcpyDeviceToHost);
    cudaFree(dvalues);
    cudaFree(dresult);
    // Show results.
    for (int i = 0; i < 32; ++i)
        printf("%d ", result[i]);
}
|
12,389 | #include <stdio.h>
//mdp:
// Print device properties
// Pretty-print the most commonly useful fields of a cudaDeviceProp.
void printDevProp(cudaDeviceProp devProp)
{
    printf("Major revision number:         %d\n",  devProp.major);
    printf("Minor revision number:         %d\n",  devProp.minor);
    printf("Name:                          %s\n",  devProp.name);
    printf("Total global memory:           %zu\n",  devProp.totalGlobalMem);
    printf("Total shared memory per block: %zu\n",  devProp.sharedMemPerBlock);
    printf("Total registers per block:     %d\n",  devProp.regsPerBlock);
    printf("Warp size:                     %d\n",  devProp.warpSize);
    printf("Maximum memory pitch:          %zu\n",  devProp.memPitch);
    printf("Maximum threads per block:     %d\n",  devProp.maxThreadsPerBlock);
    // Per-axis limits (x, y, z).
    for (int axis = 0; axis < 3; ++axis)
        printf("Maximum dimension %d of block:  %d\n", axis, devProp.maxThreadsDim[axis]);
    for (int axis = 0; axis < 3; ++axis)
        printf("Maximum dimension %d of grid:   %d\n", axis, devProp.maxGridSize[axis]);
    printf("Clock rate:                    %d\n",  devProp.clockRate);
    printf("Total constant memory:         %zu\n",  devProp.totalConstMem);
    printf("Texture alignment:             %zu\n",  devProp.textureAlignment);
    printf("Concurrent copy and execution: %s\n",  (devProp.deviceOverlap ? "Yes" : "No"));
    printf("Number of multiprocessors:     %d\n",  devProp.multiProcessorCount);
    printf("Kernel execution timeout:      %s\n",  (devProp.kernelExecTimeoutEnabled ? "Yes" : "No"));
}
// Enumerate all CUDA devices and print their properties, then wait for a key.
int main()
{
    int devCount = 0;
    cudaGetDeviceCount(&devCount);
    printf("CUDA Device Query...\n");
    printf("There are %d CUDA devices.\n", devCount);
    // Iterate through devices.
    for (int dev = 0; dev < devCount; ++dev)
    {
        printf("\nCUDA Device #%d\n", dev);
        cudaDeviceProp props;
        cudaGetDeviceProperties(&props, dev);
        printDevProp(props);
    }
    printf("\nPress any key to exit...");
    char c;
    scanf("%c", &c);
    return 0;
}
/*
Quelques questions à se poser :
+ Quelle est la compute capability de la carte graphique ?
+ Quelles sont les dimensions maximales d'un bloc (en X, en Y, en Z)?
+ Supposons que nous lançons une grille de blocs unidimensionelle (seulement sur X).
Si la dimension maximale de la grille est de 65535 sur notre matériel et celle d'un
bloc de 1024, quel est le nombre maximal de threads que l'on peut lancer sur notre GPU?
-1024*65535
+ Sous quelles conditions un programmeur pourrait choisir de ne pas utiliser ce nombre
maximum de threads?
- On n'en a pas forcément besoin d'autant
- Si ça ne tombe pas juste
+ D'après vous, qu'est-ce qui peut empêcher un programme de lancer le nombre maximal
de threads sur un GPU?
+ Le parallélisme dynamique est-il supporté sur votre carte graphique?
- yes (Titan et Titan X, 970M également)
*/
|
12,390 | extern "C"{
// Morphological filter over a float image with a rectangular
// erodeElementWidth x erodeElementHeight structuring element.
// NOTE(review): this kernel takes the neighbourhood *maximum* (>=), which for
// grayscale images is conventionally dilation, not erosion — presumably the
// image uses an inverted (background-bright) convention; confirm with callers.
__global__ void erode(float *dataIn, float *dataOut, int erodeElementWidth,int erodeElementHeight, int imgWidth, int imgHeight)
{
    // x index within the grid
    int xIndex = threadIdx.x + blockIdx.x * blockDim.x;
    // y index within the grid
    int yIndex = threadIdx.y + blockIdx.y * blockDim.y;
    int elementWidth = erodeElementWidth;
    int elementHeight = erodeElementHeight;
    int halfEW = elementWidth / 2;
    int halfEH = elementHeight / 2;
    // Initialise the output pixel with its input value (border pixels keep it).
    dataOut[yIndex * imgWidth + xIndex] = dataIn[yIndex * imgWidth + xIndex];
    // Guard against reading outside the image borders.
    if (xIndex > halfEW && xIndex < imgWidth - halfEW && yIndex > halfEH && yIndex < imgHeight - halfEH)
    {
        // Take the extreme value over the structuring-element window.
        for (int i = -halfEH; i < halfEH + 1; i++)
        {
            for (int j = -halfEW; j < halfEW + 1; j++)
            {
                if (dataIn[(i + yIndex) * imgWidth + xIndex + j] >= dataOut[yIndex * imgWidth + xIndex])
                {
                    dataOut[yIndex * imgWidth + xIndex] = dataIn[(i + yIndex) * imgWidth + xIndex + j];
                }
            }
        }
    }
}
// Counterpart to `erode` above: same windowed scan but keeping the
// neighbourhood *minimum* (<).
// NOTE(review): the original comment labelled this "erosion" too; for the
// standard grayscale convention min-filtering IS erosion, so the two kernel
// names appear swapped — confirm intended convention before renaming.
__global__ void dilate(float *dataIn, float *dataOut, int erodeElementWidth,int erodeElementHeight, int imgWidth, int imgHeight)
{
    // x index within the grid
    int xIndex = threadIdx.x + blockIdx.x * blockDim.x;
    // y index within the grid
    int yIndex = threadIdx.y + blockIdx.y * blockDim.y;
    int elementWidth = erodeElementWidth;
    int elementHeight = erodeElementHeight;
    int halfEW = elementWidth / 2;
    int halfEH = elementHeight / 2;
    // Initialise the output pixel with its input value (border pixels keep it).
    dataOut[yIndex * imgWidth + xIndex] = dataIn[yIndex * imgWidth + xIndex];;
    // Guard against reading outside the image borders.
    if (xIndex > halfEW && xIndex < imgWidth - halfEW && yIndex > halfEH && yIndex < imgHeight - halfEH)
    {
        for (int i = -halfEH; i < halfEH + 1; i++)
        {
            for (int j = -halfEW; j < halfEW + 1; j++)
            {
                if (dataIn[(i + yIndex) * imgWidth + xIndex + j] < dataOut[yIndex * imgWidth + xIndex])
                {
                    dataOut[yIndex * imgWidth + xIndex] = dataIn[(i + yIndex) * imgWidth + xIndex + j];
                }
            }
        }
    }
}
} |
12,391 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
// Probe kernel: thread 0 zeroes the block-shared accumulators, every thread
// atomically adds (tx+1) into `temp`, accumulates (partial - temp) into
// `tmp1`, and thread 0 publishes tmp1 to output[blockIdx.x].
// NOTE(review): there is no __syncthreads() between the zeroing and the
// atomicAdds (one is commented out below), so threads can race the
// initialisation and the observed `temp` value per thread is order-dependent.
// The result is timing-dependent — this looks like a deliberate experiment on
// atomic/shared-memory ordering; confirm intent before reusing.
__global__ void test(float* output)
{
    __shared__ float temp, tmp1;   // block-shared accumulators
    float partial, cc;
    int tx = threadIdx.x;
    if(tx ==0){
        temp = 0.0;
        tmp1 =0.0;
    }
    partial = tx+1;
    //__syncthreads();
    //for(int i=0; i<2; i++){
    //	temp[blockIdx.x] += float(i);
    //}
    atomicAdd(&temp,partial);
    //atomicAdd(&(output[1]),partial);
    // Difference between this thread's contribution and the (racy) running sum.
    cc = partial - temp;
    atomicAdd(&tmp1,cc);
    if(tx ==0){
        output[blockIdx.x] = tmp1;
    }
    //output[blockIdx.x] = temp[blockIdx.x];
    //output[tx] = 0.0;
    //output[tx] = 0.0;
}
// Host wrapper: run the probe kernel with 2 blocks of 10 threads each.
void launch(float* output)
{
    test<<<2, 10>>>(output);
}
// Run the probe kernel and print the two per-block results.
int main()
{
    float *hostOut = (float*)malloc(2 * sizeof(float));
    float *devOut;
    cudaMalloc((void**)&devOut, 2 * sizeof(float));
    launch(devOut);
    // Blocking copy also synchronizes with the kernel.
    cudaMemcpy(hostOut, devOut, 2 * sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(devOut);
    for (int i = 0; i < 2; i++) {
        printf("the result is: %f\t", hostOut[i]);
    }
    free(hostOut);
    return 0;
}
12,392 | #include<stdio.h>
// Add 2 to the single integer pointed to by `a` (device memory).
__global__ void add2(int *a)
{
    *a += 2;
}
// Round-trip demo: send 5 to the device, add 2 in a kernel, print 7.
int main( void )
{
    int *data_h = (int *)malloc(sizeof(int));
    int *data_d = NULL;
    cudaMalloc((void**)&data_d, sizeof(int));
    *data_h = 5;
    cudaMemcpy(data_d, data_h, sizeof(int), cudaMemcpyHostToDevice);
    add2<<<1, 1>>>(data_d);
    cudaMemcpy(data_h, data_d, sizeof(int), cudaMemcpyDeviceToHost);
    printf("data: %d\n", *data_h);
    cudaFree(data_d);
    free(data_h);
    return 0;
}
//Res = 7
|
12,393 | #include <cstdio>
#include <cmath>
#include <vector>
#include <chrono>
#include <stdlib.h>
using namespace std;
// N x N matmul: one block per output element C[row][col]; each of the N
// threads contributes one A[row][k]*B[k][col] product via atomicAdd.
// Assumes C is zero-initialized before launch.
__global__ void matrix(int N, float *A, float *B, float* C){
    const int row = blockIdx.x / N;
    const int col = blockIdx.x % N;
    const int k = threadIdx.x;
    atomicAdd(&C[N*row + col], A[N*row + k] * B[N*k + col]);
}
// Benchmark the atomicAdd-based matmul kernel and verify against a CPU
// triple loop (error accumulated as sum of |residuals|).
int main(int argc, char** argv) {
    const int N = 256;
    float *A;
    float *B;
    float *C;
    cudaMallocManaged(&A, N*N*sizeof(float));
    cudaMallocManaged(&B, N*N*sizeof(float));
    cudaMallocManaged(&C, N*N*sizeof(float));
    for (int i=0; i<N; i++) {
        for (int j=0; j<N; j++) {
            A[N*i+j] = drand48();
            B[N*i+j] = drand48();
            // BUG FIX: the kernel accumulates into C with atomicAdd, but
            // cudaMallocManaged does not guarantee zeroed memory — C must be
            // explicitly zero-initialized or the result (and the error check)
            // starts from garbage.
            C[N*i+j] = 0.0f;
        }
    }
    auto tic = chrono::steady_clock::now();
    matrix<<<N*N,N>>>(N,A,B,C);
    cudaDeviceSynchronize();
    auto toc = chrono::steady_clock::now();
    // Subtract the CPU reference in place; C should end up ~0 everywhere.
    for (int i=0; i<N; i++)
        for (int j=0; j<N; j++)
            for (int k=0; k<N; k++)
                C[N*i+j] -= A[N*i+k] * B[N*k+j];
    double err = 0;
    for (int i=0; i<N; i++)
        for (int j=0; j<N; j++)
            err += fabs(C[N*i+j]);
    double time = chrono::duration<double>(toc-tic).count();
    printf("N    : %d\n",N);
    printf("total: %lf s (%lf GFlops)\n",time,2.*N*N*N/time/1e9);
    printf("error: %lf\n",err/N/N);
    // Release managed allocations.
    cudaFree(A);
    cudaFree(B);
    cudaFree(C);
}
|
12,394 | #include "./test_utilities.cuh"
// Fetch an environment variable by name; throws TestException when unset.
std::string get_env_var(std::string const &key) {
  if (char *value = getenv(key.c_str())) {
    return std::string(value);
  }
  throw TestException(key + " is not defined.");
}
// Build a Probe from the TEST_* environment variables (throws via
// get_env_var if any of them is missing).
Probe probe_from_env() {
  const int channels = std::stoi(get_env_var("TEST_NCHANNELS"));
  const int active = std::stoi(get_env_var("TEST_NACTIVE"));
  const int groups = std::stoi(get_env_var("TEST_NGROUPS"));
  const double rate_hz = std::stod(get_env_var("TEST_SRATE_HZ"));
  return make_probe(channels, active, groups, rate_hz);
}
12,395 | // richu shaji abraham richursa
#include<iostream>
#include<stdio.h>
using namespace std;
// Radix-sort predicate: returns 1 when `value`'s `bit` matches the requested
// state (bitset == 1 -> bit must be set, otherwise bit must be clear), else 0.
__device__ int function(int value , int bit ,int bitset)
{
    const bool bitIsSet = (value & bit) != 0;
    if (bitset == 1)
    {
        return bitIsSet ? 1 : 0;
    }
    return bitIsSet ? 0 : 1;
}
// Fill d_predicateArrry[i] with the 0/1 predicate of d_array[i] (see
// `function` above). One thread per element, guarded at the tail.
__global__ void predicateDevice(int *d_array , int *d_predicateArrry , int d_numberOfElements,int bit,int bitset)
{
    const int idx = threadIdx.x + blockIdx.x*blockDim.x;
    if (idx >= d_numberOfElements)
        return;
    d_predicateArrry[idx] = function(d_array[idx], bit, bitset);
}
// Compaction write: each element whose predicate is 1 is written to slot
// (inclusive_scan - 1 + offset) of the output, preserving relative order.
__global__ void scatter(int *d_array , int *d_scanArray , int *d_predicateArrry,int * d_scatteredArray ,int d_numberOfElements,int offset)
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= d_numberOfElements)
        return;
    if (d_predicateArrry[idx] == 1)
    {
        d_scatteredArray[d_scanArray[idx] - 1 + offset] = d_array[idx];
    }
}
// One Hillis-Steele scan step: d_tmpArray[i] = d_array[i] + d_array[i - moveIndex]
// (plain copy when the left neighbour does not exist). Double-buffered by the
// host wrapper, so reads and writes never alias.
__global__ void hillisSteeleScanDevice(int *d_array , int numberOfElements, int *d_tmpArray,int moveIndex)
{
    int index = threadIdx.x + blockDim.x * blockIdx.x;
    // BUG FIX: the guard was `index > numberOfElements`, which let the thread
    // with index == numberOfElements write one element past the end of both
    // arrays. The valid range is [0, numberOfElements).
    if(index >= numberOfElements)
    {
        return;
    }
    d_tmpArray[index] = d_array[index];
    if(index - moveIndex >=0)
    {
        d_tmpArray[index] = d_tmpArray[index] +d_array[index - moveIndex];
    }
}
// Inclusive scan of a device array via repeated Hillis-Steele steps with two
// ping-pong buffers. Returns a freshly allocated device array holding the
// scan; the CALLER owns (and must cudaFree) the returned pointer. The input
// d_scanArray is left untouched.
int* hillisSteeleScanHost(int *d_scanArray,int numberOfElements)
{
    int *d_tmpArray;
    int *d_tmpArray1;
    cudaMalloc(&d_tmpArray1,sizeof(int)*numberOfElements);
    cudaMalloc(&d_tmpArray,sizeof(int)*numberOfElements);
    cudaMemcpy(d_tmpArray1,d_scanArray,sizeof(int)*numberOfElements,cudaMemcpyDeviceToDevice);
    int j,k=0;
    // Doubling stride each pass; k tracks which buffer holds the latest data.
    for(j=1;j<numberOfElements;j= j*2,k++)
    {
        if(k%2 == 0)
        {
            hillisSteeleScanDevice<<<1600,500>>>(d_tmpArray1,numberOfElements,d_tmpArray, j);
            cudaDeviceSynchronize();
        }
        else
        {
            hillisSteeleScanDevice<<<1600,500>>>(d_tmpArray,numberOfElements,d_tmpArray1, j);
            cudaDeviceSynchronize();
        }
    }
    cudaDeviceSynchronize();
    // BUG FIX: the buffer that does not hold the final result was leaked on
    // every call; release it before returning the other one.
    if(k%2 == 0)
    {
        cudaFree(d_tmpArray);
        return d_tmpArray1;
    }
    else
    {
        cudaFree(d_tmpArray1);
        return d_tmpArray;
    }
}
// Debug helper: single-threaded dump of a device int array to stdout.
__global__ void print(int *d_predicateArrry,int numberOfElements)
{
    for (int i = 0; i < numberOfElements; ++i)
        printf("index = %d value = %d\n",i,d_predicateArrry[i]);
}
// Stable split of d_array by `bit`: elements with the bit clear come first,
// elements with the bit set follow (one LSD radix-sort pass). Returns a new
// device array owned by the caller; the input array is NOT freed here.
int *compact(int *d_array,int numberOfElements,int bit)
{
    int offset;
    int *d_predicateArrry;
    cudaMalloc((void**)&d_predicateArrry,sizeof(int)*numberOfElements);
    // Pass 1: bit == 0 elements scatter to the front.
    predicateDevice<<<1600,500>>>(d_array,d_predicateArrry,numberOfElements,bit,0);
    int *d_scanArray;
    d_scanArray = hillisSteeleScanHost(d_predicateArrry,numberOfElements);
    int *d_scatteredArray;
    cudaMalloc((void**)&d_scatteredArray,sizeof(int)*numberOfElements);
    scatter<<<1600,500>>>(d_array,d_scanArray,d_predicateArrry,d_scatteredArray, numberOfElements,0);
    // Last scan value = count of bit==0 elements = offset for pass 2.
    cudaMemcpy(&offset,d_scanArray+numberOfElements-1,sizeof(int),cudaMemcpyDeviceToHost);
    cudaFree(d_scanArray);  // BUG FIX: first scan buffer was leaked
    // Pass 2: bit == 1 elements scatter after them.
    predicateDevice<<<1600,500>>>(d_array,d_predicateArrry,numberOfElements,bit,1);
    d_scanArray = hillisSteeleScanHost(d_predicateArrry,numberOfElements);
    scatter<<<1600,500>>>(d_array,d_scanArray,d_predicateArrry,d_scatteredArray, numberOfElements,offset);
    cudaFree(d_scanArray);       // BUG FIX: second scan buffer was leaked
    cudaFree(d_predicateArrry);  // BUG FIX: predicate buffer was leaked
    return d_scatteredArray;
}
int offset;
// Extract the elements whose `bit` matches `bitset` into a new, exactly-sized
// device array (caller owns it). Side effect: sets the file-global `offset`
// to the number of extracted elements — radixSort depends on this.
int *positivenegativesplit(int *d_array,int numberOfElements,int bit,int bitset)
{
    int *d_predicateArrry;
    cudaMalloc((void**)&d_predicateArrry,sizeof(int)*numberOfElements);
    predicateDevice<<<1600,500>>>(d_array,d_predicateArrry,numberOfElements,bit,bitset);
    int *d_scanArray;
    d_scanArray = hillisSteeleScanHost(d_predicateArrry,numberOfElements);
    int *d_scatteredArray;
    // Count of matching elements (global, consumed by the caller).
    cudaMemcpy(&offset,d_scanArray+numberOfElements-1,sizeof(int),cudaMemcpyDeviceToHost);
    cudaMalloc((void**)&d_scatteredArray,sizeof(int)*offset);
    scatter<<<1600,500>>>(d_array,d_scanArray,d_predicateArrry,d_scatteredArray, numberOfElements,0);
    cudaFree(d_scanArray);       // BUG FIX: scan buffer was leaked
    cudaFree(d_predicateArrry);  // BUG FIX: predicate buffer was leaked
    return d_scatteredArray;
}
// Sort a device int array ascending: split negatives (sign bit set) from
// positives, LSD-radix-sort each half over all 32 bits, then concatenate
// negatives before positives back into d_array.
// NOTE(review): correctness hinges on the file-global `offset` side effect of
// positivenegativesplit — after the second split it holds the POSITIVE count,
// which the final memcpys rely on. The calls must not be reordered.
int * radixSort(int *d_array , int numberOfElements)
{
    int bit;
    // Negative values first (sign bit == 1); `offset` = negative count here.
    int *d_negativeArray = positivenegativesplit(d_array,numberOfElements,1L<<31,1);
    for(int i=0;i<sizeof(int)*8;i++)
    {
        bit = 1<<i;
        d_negativeArray = compact(d_negativeArray,offset,bit);
    }
    // Positive values; `offset` now becomes the positive count.
    int *d_postiveArray = positivenegativesplit(d_array,numberOfElements,1L<<31,0);
    for(int i=0;i<sizeof(int)*8;i++)
    {
        bit = 1<<i;
        d_postiveArray = compact(d_postiveArray,offset,bit);
    }
    // Reassemble: (numberOfElements - offset) negatives, then offset positives.
    cudaMemcpy(d_array,d_negativeArray,sizeof(int)*(numberOfElements-offset),cudaMemcpyDeviceToDevice);
    cudaMemcpy(d_array+(numberOfElements-offset),d_postiveArray,sizeof(int)*offset,cudaMemcpyDeviceToDevice);
    return d_array;
}
// Read N integers from stdin, radix-sort them on the GPU, print the result.
int main()
{
    cout<<"enter the number of elements \n";
    int numberOfElements;
    cin>>numberOfElements;
    int *h_array = new int[numberOfElements];
    for(int i=0;i<numberOfElements;i++)
    {
        cin>>h_array[i];
    }
    int *d_array;
    cudaMalloc((void**)&d_array ,sizeof(int)*numberOfElements);
    cudaMemcpy(d_array,h_array,sizeof(int)*numberOfElements,cudaMemcpyHostToDevice);
    d_array = radixSort(d_array, numberOfElements);
    cudaMemcpy(h_array,d_array,sizeof(int)*numberOfElements,cudaMemcpyDeviceToHost);
    for(int i=0;i<numberOfElements;i++)
    {
        cout<<h_array[i]<<"\n";
    }
    // BUG FIX: release host and device memory (both were leaked) and return
    // an explicit status.
    delete[] h_array;
    cudaFree(d_array);
    return 0;
}
|
12,396 | #include <stdio.h>
#include <curand.h>
#include <curand_kernel.h>
#include <math.h>
#include <assert.h>
#define MIN 2
#define MAX 7
#define ITER 10000000
// Seed one curand state per thread with a fixed seed (1234) so that runs are
// reproducible; each thread gets its own subsequence.
__global__ void setup_kernel(curandState *state){
    const int id = threadIdx.x + blockDim.x * blockIdx.x;
    curand_init(1234, id, 0, &state[id]);
}
// Draw n integers uniformly in [min_rand_int[idx], max_rand_int[idx]] from
// this thread's curand state and histogram them into `result`, indexed
// relative to the minimum.
__global__ void generate_kernel(curandState *my_curandstate, const unsigned int n, const unsigned *max_rand_int, const unsigned *min_rand_int, unsigned int *result){
    const int idx = threadIdx.x + blockDim.x*blockIdx.x;
    for (int drawn = 0; drawn < n; ++drawn) {
        // Map a uniform (0,1] float onto the integer range; the 0.999999
        // fudge makes the top bin reachable after truncation.
        float sample = curand_uniform(my_curandstate+idx);
        sample *= (max_rand_int[idx] - min_rand_int[idx]+0.999999);
        sample += min_rand_int[idx];
        const int value = (int)truncf(sample);
        assert(value <= max_rand_int[idx]);
        assert(value >= min_rand_int[idx]);
        result[value-min_rand_int[idx]]++;
    }
}
// Histogram ITER uniform draws in [MIN, MAX] produced on the device and
// print the per-bin counts.
int main(){
    curandState *d_state;
    cudaMalloc(&d_state, sizeof(curandState));
    unsigned *d_result, *h_result;
    unsigned *d_max_rand_int, *h_max_rand_int, *d_min_rand_int, *h_min_rand_int;
    cudaMalloc(&d_result, (MAX-MIN+1) * sizeof(unsigned));
    h_result = (unsigned *)malloc((MAX-MIN+1)*sizeof(unsigned));
    cudaMalloc(&d_max_rand_int, sizeof(unsigned));
    h_max_rand_int = (unsigned *)malloc(sizeof(unsigned));
    cudaMalloc(&d_min_rand_int, sizeof(unsigned));
    h_min_rand_int = (unsigned *)malloc(sizeof(unsigned));
    // Zero the device histogram before accumulation.
    cudaMemset(d_result, 0, (MAX-MIN+1)*sizeof(unsigned));
    setup_kernel<<<1,1>>>(d_state);
    *h_max_rand_int = MAX;
    *h_min_rand_int = MIN;
    cudaMemcpy(d_max_rand_int, h_max_rand_int, sizeof(unsigned), cudaMemcpyHostToDevice);
    cudaMemcpy(d_min_rand_int, h_min_rand_int, sizeof(unsigned), cudaMemcpyHostToDevice);
    generate_kernel<<<1,1>>>(d_state, ITER, d_max_rand_int, d_min_rand_int, d_result);
    cudaMemcpy(h_result, d_result, (MAX-MIN+1) * sizeof(unsigned), cudaMemcpyDeviceToHost);
    printf("Bin:    Count: \n");
    for (int i = MIN; i <= MAX; i++)
        printf("%d    %d\n", i, h_result[i-MIN]);
    // BUG FIX: every host and device allocation was leaked.
    free(h_result);
    free(h_max_rand_int);
    free(h_min_rand_int);
    cudaFree(d_state);
    cudaFree(d_result);
    cudaFree(d_max_rand_int);
    cudaFree(d_min_rand_int);
    return 0;
}
12,397 | #include "includes.h"
#define BLOCK_SIZE 512
#define BLOCK_SIZE_HOUGH 360
#define STEP_SIZE 5
#define NUMBER_OF_STEPS 360/STEP_SIZE
// Circ mask kernel storage
__constant__ int maskKernelX[NUMBER_OF_STEPS];
__constant__ int maskKernelY[NUMBER_OF_STEPS];
// Function to set precalculated relative coordinates for circle boundary coordinates
// Linear intensity remap of a width*height float image:
// out = clamp((in - lowin) * scale + lowout, 0, 1).
// One thread per pixel, BLOCK_SIZE threads per block.
__global__ void AdjustImageIntensityKernel(float *imgOut, float *imgIn, int width, int height, float lowin, float lowout, float scale)
{
    __shared__ float bufData[BLOCK_SIZE];
    // Global pixel index of this thread.
    const int index = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    // BUG FIX: the original loaded imgIn[index] into shared memory BEFORE the
    // bounds check, so threads in the last block read past the end of the
    // image. All accesses are now inside the guard.
    if (index < (height*width)) {
        bufData[threadIdx.x] = imgIn[index];
        // Remapped level for this pixel.
        float tempLevel = ( bufData[threadIdx.x] - lowin)*scale + lowout;
        // Clamp to the [0, 1] output range.
        if (tempLevel < 0) {
            bufData[threadIdx.x] = 0;
        }
        else if (tempLevel > 1) {
            bufData[threadIdx.x] = 1;
        }
        else {
            bufData[threadIdx.x] = tempLevel;
        }
        // Write data back.
        imgOut[index] = bufData[threadIdx.x];
    }
    // NOTE(review): each thread only touches its own shared slot, so this
    // barrier is not needed for correctness; retained from the original.
    __syncthreads();
}
12,398 | /*
* Project: LevenbergMarquardtLeastSquaresFitting
*
* File: lmmin.c
*
* Contents: Levenberg-Marquardt core implementation,
* and simplified user interface.
*
* Authors: Burton S. Garbow, Kenneth E. Hillstrom, Jorge J. More
* (lmdif and other routines from the public-domain library
* netlib::minpack, Argonne National Laboratories, March 1980);
* Steve Moshier (initial C translation);
* Joachim Wuttke (conversion into C++ compatible ANSI style,
* corrections, comments, wrappers, hosting).
*
* Homepage: www.messen-und-deuten.de/lmfit
*
* Licence: Public domain.
*
* Make: For instance: gcc -c lmmin.c; ar rc liblmmin.a lmmin.o
*/
#include <stdlib.h>
#include <math.h>
#include <float.h>
#include "lmmin.cuh"
/* *************************** simple macros ******************************* */
#define MIN(a,b) (((a)<=(b)) ? (a) : (b))
#define MAX(a,b) (((a)>=(b)) ? (a) : (b))
#define SQR(x) ((x)*(x))
#define CID(i) ((i) * blockDim.x + threadIdx.x)
#define CID_BASE(i) ((i) * blockDim.x)
/* ******************* fitting a 2D symmetric gaussian ********************* */
// 2D symmetric Gaussian for this thread's parameter set:
//   par[2] * exp(-0.5 * (((x-par[0])/par[3])^2 + ((y-par[1])/par[3])^2)) + par[4]
// i.e. par[0..1] = center, par[2] = amplitude, par[3] = sigma,
// par[4] = background. CID() interleaves each thread's parameters in memory.
__device__ FLOAT gaussian(FLOAT x, FLOAT y, FLOAT *par)
{
    return par[CID(2)] * exp(-0.5f * ( SQR((x-par[CID(0)])/par[CID(3)]) + SQR((y-par[CID(1)])/par[CID(3)]) ) ) + par[CID(4)];
}
// Transformation of function parameters to limit their range: squaring
// forces amplitude, sigma and background to be non-negative while the fitter
// works in an unconstrained space; the centers pass through unchanged.
__device__ void partransf(FLOAT *parin, FLOAT *parout)
{
    parout[CID(0)] = parin[CID(0)]; // x center stays the same
    parout[CID(1)] = parin[CID(1)]; // y center stays the same
    parout[CID(2)] = SQR(parin[CID(2)]); // amplitude >= 0
    parout[CID(3)] = SQR(parin[CID(3)]); // sigma >= 0
    parout[CID(4)] = SQR(parin[CID(4)]); // background >= 0
}
// Inverse transformation of function parameters: map constrained values back
// to the unconstrained fitting space (sqrt undoes the squaring in partransf;
// centers pass through unchanged).
__device__ void parinvtransf(FLOAT *parin, FLOAT *parout)
{
    parout[CID(0)] = parin[CID(0)];
    parout[CID(1)] = parin[CID(1)];
    parout[CID(2)] = sqrt(parin[CID(2)]);
    parout[CID(3)] = sqrt(parin[CID(3)]);
    parout[CID(4)] = sqrt(parin[CID(4)]);
}
// Residual vector F = Y - gaussian(x, y, p) over the (2*boxsize+1)^2 pixel
// grid, with x and y each running over [-boxsize, +boxsize]. The transformed
// parameters `p` live at the start of the dynamic shared memory block
// (allocation layout is set up in lm_lmdif).
__device__ void evaluate(FLOAT *par, int boxsize, FLOAT *fvec, FLOAT *Y) // X is 'wired' in here [-boxsize:+boxsize]
{
    extern __shared__ FLOAT p[]; // `p` is at the begining of the shared memory address space (see `lm_lmdiff`)
    // transform function parameters to limit their range
    partransf(par, p);
    // compute the difference " F = y - fun(X, par) " for all data points
    for(int x = -boxsize, idx = 0; x <= +boxsize; x++)
        for(int y = -boxsize; y <= +boxsize; y++, idx++)
            fvec[CID(idx)] = Y[CID(idx)] - gaussian((FLOAT)x, (FLOAT)y, p); // SQR?
}
/* ************************** implementation ******************************* */
__device__ void lm_qrfac(int m, int n, FLOAT *a, int pivot, int *ipvt, FLOAT *rdiag, FLOAT *acnorm, FLOAT *wa);
__device__ void lm_qrsolv(int n, FLOAT *r, int ldr, int *ipvt, FLOAT *diag, FLOAT *qtb, FLOAT *x, FLOAT *sdiag, FLOAT *wa);
__device__ void lm_lmpar(int n, FLOAT *r, int ldr, int *ipvt, FLOAT *diag, FLOAT *qtb, FLOAT delta, FLOAT *par, FLOAT *x, FLOAT *sdiag, FLOAT *wa1, FLOAT *wa2);
__device__ FLOAT lm_enorm(int, FLOAT *);
/***** the low-level legacy interface for full control. *****/
/*
 * lm_lmdif: minimize the sum of squares of m nonlinear functions in n
 * variables by a modification of the Levenberg-Marquardt algorithm.
 * CUDA port of the public-domain MINPACK routine lmdif; the Jacobian is
 * approximated by forward differences.
 *
 * One independent fit runs per GPU thread; each thread owns an interleaved
 * slice of the dynamic shared memory (see the CID macro), so no
 * __syncthreads() is required anywhere in this routine.
 *
 * Parameters:
 *   TI_MAX  - number of active threads (fits); excess threads return at once.
 *   boxsize - half-width of the data window; m = (2*boxsize+1)^2 data points.
 *   g_dataY - global-memory input data, one DATAY_SIZE slice per thread.
 *   n       - number of fit parameters; must satisfy n <= m.
 *   g_dataA - global-memory parameter vectors: initial estimate on input,
 *             final estimate written back on every termination path.
 *   ftol    - relative error desired in the sum of squares.
 *   xtol    - relative error desired in the approximate solution.
 *   gtol    - orthogonality desired between fvec and the Jacobian columns.
 *   maxfev  - maximum number of calls to evaluate().
 *   epsfcn  - assumed relative error of the function values (controls the
 *             forward-difference step length).
 *   mode    - 1: scale variables internally; 2: scaling given by diag,
 *             which must then contain positive entries.
 *   factor  - initial step bound factor, typically 100.
 *
 * Termination status (MINPACK `info` codes, kept in a local variable):
 *   0 invalid input, 1 ftol satisfied, 2 xtol satisfied, 3 both,
 *   4 gtol satisfied, 5 maxfev reached, 6/7/8 ftol/xtol/gtol too small.
 */
__device__ void
lm_lmdif(int TI_MAX, int boxsize, FLOAT *g_dataY, int n, FLOAT *g_dataA, FLOAT ftol,
FLOAT xtol, FLOAT gtol, int maxfev, FLOAT epsfcn, int mode, FLOAT factor)
{
    // TI = ThreadIndex; all fits are independent of each other
    int TI = blockIdx.x * blockDim.x + threadIdx.x;
    if(TI >= TI_MAX) return;
    int m = SQR(1+2*boxsize);   // number of data points in the box
    // the following two are here because of the *_SIZE macros in lmmin.cuh
    int n_params = n;
    int n_input_data = m;
    extern __shared__ FLOAT memory[];
    // Carve the dynamic shared memory into per-block arrays; the initial
    // blockDim.x*DATAA_SIZE floats are the scratch array `p` used by evaluate().
    FLOAT *dataY = memory + blockDim.x*DATAA_SIZE;
    FLOAT *x    = dataY + blockDim.x*DATAY_SIZE;
    FLOAT *fvec = x    + blockDim.x*DATAA_SIZE;
    FLOAT *diag = fvec + blockDim.x* FVEC_SIZE;
    FLOAT *fjac = diag + blockDim.x* DIAG_SIZE;
    FLOAT *qtf  = fjac + blockDim.x* FJAC_SIZE;
    FLOAT *wa1  = qtf  + blockDim.x* QTF_SIZE;
    FLOAT *wa2  = wa1  + blockDim.x* WA1_SIZE;
    FLOAT *wa3  = wa2  + blockDim.x* WA2_SIZE;
    FLOAT *wa4  = wa3  + blockDim.x* WA3_SIZE;
    int *ipvt = (int *)(wa4 + blockDim.x*WA4_SIZE);
    // copy the input data from the slow global memory to the much faster shared memory
    for(int i = 0; i < m; i++)
        dataY[CID(i)] = g_dataY[blockIdx.x*blockDim.x*DATAY_SIZE + CID(i)];
    for(int i = 0; i < n; i++)
        x[CID(i)] = g_dataA[blockIdx.x*blockDim.x*DATAA_SIZE + CID(i)];
    int i, iter, j;
    FLOAT actred, delta, dirder, eps, fnorm, fnorm1, gnorm, par, pnorm,
        prered, ratio, step, sum, temp, temp1, temp2, temp3, xnorm;
    FLOAT p1 = 0.1;
    FLOAT p0001 = 1.0e-4;
    // NOTE(review): in the original lmdif a user-supplied evaluate() could set
    // *info negative to abort; this port's evaluate() cannot, so the
    // `info < 0` checks below are never taken. Kept for fidelity to MINPACK.
    int info = 0;
    int nfev = 0;  /* function evaluation counter */
    iter = 1;      /* outer loop counter */
    par = 0;       /* levenberg-marquardt parameter */
    delta = 0;     /* to prevent a warning (initialization within if-clause) */
    xnorm = 0;     /* ditto */
    temp = MAX(epsfcn, LM_MACHEP);
    eps = sqrt(temp); /* for calculating the Jacobian by forward differences */
    /*** lmdif: check input parameters for errors. ***/
    if ((n <= 0) || (m < n) || (ftol < 0.) || (xtol < 0.) || (gtol < 0.) || (maxfev <= 0) || (factor <= 0.))
    {
        info = 0; // invalid parameter
        return;
    }
    if (mode == 2) { /* scaling by diag[] */
        for (j = 0; j < n; j++) { /* check for nonpositive elements */
            if (diag[CID(j)] <= 0.0) {
                info = 0; // invalid parameter
                return;
            }
        }
    }
    /*** lmdif: evaluate function at starting point and calculate norm. ***/
    info = 0;
    evaluate(x, boxsize, fvec, dataY); ++nfev;
    if (info < 0) return;
    fnorm = lm_enorm(m, fvec);
    /*** lmdif: the outer loop. ***/
    do {
        /*** outer: calculate the jacobian matrix by forward differences. ***/
        for (j = 0; j < n; j++) {
            temp = x[CID(j)];
            step = eps * fabs(temp);
            if (step == 0.)
                step = eps;
            x[CID(j)] = temp + step;
            info = 0;
            evaluate(x, boxsize, wa4, dataY);
            if (info < 0) return; /* user requested break */
            for (i = 0; i < m; i++) /* changed in 2.3, Mark Bydder */
                fjac[CID(j * m + i)] = (wa4[CID(i)] - fvec[CID(i)]) / (x[CID(j)] - temp);
            x[CID(j)] = temp;
        }
        /*** outer: compute the qr factorization of the jacobian. ***/
        lm_qrfac(m, n, fjac, 1, ipvt, wa1, wa2, wa3);
        if (iter == 1) { /* first iteration */
            if (mode != 2) {
                /* diag := norms of the columns of the initial jacobian */
                for (j = 0; j < n; j++) {
                    diag[CID(j)] = wa2[CID(j)];
                    if (wa2[CID(j)] == 0.)
                        diag[CID(j)] = 1.;
                }
            }
            /* use diag to scale x, then calculate the norm */
            for (j = 0; j < n; j++)
                wa3[CID(j)] = diag[CID(j)] * x[CID(j)];
            xnorm = lm_enorm(n, wa3);
            /* initialize the step bound delta. */
            delta = factor * xnorm;
            if (delta == 0.)
                delta = factor;
        }
        /*** outer: form (q transpose)*fvec and store first n components in qtf. ***/
        for (i = 0; i < m; i++)
            wa4[CID(i)] = fvec[CID(i)];
        for (j = 0; j < n; j++) {
            temp3 = fjac[CID(j * m + j)];
            if (temp3 != 0.) {
                sum = 0;
                for (i = j; i < m; i++)
                    sum += fjac[CID(j * m + i)] * wa4[CID(i)];
                temp = -sum / temp3;
                for (i = j; i < m; i++)
                    wa4[CID(i)] += fjac[CID(j * m + i)] * temp;
            }
            fjac[CID(j * m + j)] = wa1[CID(j)];
            qtf[CID(j)] = wa4[CID(j)];
        }
        /** outer: compute norm of scaled gradient and test for convergence. ***/
        gnorm = 0;
        if (fnorm != 0) {
            for (j = 0; j < n; j++) {
                if (wa2[CID(ipvt[CID(j)])] == 0)
                    continue;
                sum = 0.;
                for (i = 0; i <= j; i++)
                    sum += fjac[CID(j * m + i)] * qtf[CID(i)] / fnorm;
                gnorm = MAX(gnorm, fabs(sum / wa2[CID(ipvt[CID(j)])]));
            }
        }
        if (gnorm <= gtol) {
            info = 4;
            /* FIX: persist the current estimate before returning, exactly as
               the other termination paths below do; previously this exit path
               returned without the write-back and the fitted parameters were
               silently lost. */
            for(int i = 0; i < n; i++)
                g_dataA[blockIdx.x*blockDim.x*DATAA_SIZE + CID(i)] = x[CID(i)];
            return;
        }
        /*** outer: rescale if necessary. ***/
        if (mode != 2) {
            for (j = 0; j < n; j++)
                diag[CID(j)] = MAX(diag[CID(j)], wa2[CID(j)]);
        }
        /*** the inner loop. ***/
        do {
            /*** inner: determine the levenberg-marquardt parameter. ***/
            lm_lmpar(n, fjac, m, ipvt, diag, qtf, delta, &par, wa1, wa2, wa3, wa4);
            /*** inner: store the direction p and x + p; calculate the norm of p. ***/
            for (j = 0; j < n; j++) {
                wa1[CID(j)] = -wa1[CID(j)];
                wa2[CID(j)] = x[CID(j)] + wa1[CID(j)];
                wa3[CID(j)] = diag[CID(j)] * wa1[CID(j)];
            }
            pnorm = lm_enorm(n, wa3);
            /*** inner: on the first iteration, adjust the initial step bound. ***/
            if (nfev <= 1 + n)
                delta = MIN(delta, pnorm);
            /* evaluate the function at x + p and calculate its norm. */
            info = 0;
            evaluate(wa2, boxsize, wa4, dataY); ++nfev;
            if (info < 0) return; /* user requested break. */
            fnorm1 = lm_enorm(m, wa4);
            /*** inner: compute the scaled actual reduction. ***/
            if (p1 * fnorm1 < fnorm)
                actred = 1 - SQR(fnorm1 / fnorm);
            else
                actred = -1;
            /*** inner: compute the scaled predicted reduction and
                 the scaled directional derivative. ***/
            for (j = 0; j < n; j++) {
                wa3[CID(j)] = 0;
                for (i = 0; i <= j; i++)
                    wa3[CID(i)] += fjac[CID(j * m + i)] * wa1[CID(ipvt[CID(j)])];
            }
            temp1 = lm_enorm(n, wa3) / fnorm;
            temp2 = sqrt(par) * pnorm / fnorm;
            prered = SQR(temp1) + 2 * SQR(temp2);
            dirder = -(SQR(temp1) + SQR(temp2));
            /*** inner: compute the ratio of the actual to the predicted reduction. ***/
            ratio = prered != 0 ? actred / prered : 0;
            /*** inner: update the step bound. ***/
            if (ratio <= 0.25) {
                if (actred >= 0.)
                    temp = 0.5;
                else
                    /* NOTE(review): classic MINPACK uses 0.5, not 0.55, in this
                       damping term -- verify the deviation is intentional. */
                    temp = 0.5 * dirder / (dirder + 0.55 * actred);
                if (p1 * fnorm1 >= fnorm || temp < p1)
                    temp = p1;
                delta = temp * MIN(delta, pnorm / p1);
                par /= temp;
            } else if (par == 0. || ratio >= 0.75) {
                delta = pnorm / 0.5;
                par *= 0.5;
            }
            /*** inner: test for successful iteration. ***/
            if (ratio >= p0001) {
                /* yes, success: update x, fvec, and their norms. */
                for (j = 0; j < n; j++) {
                    x[CID(j)] = wa2[CID(j)];
                    wa2[CID(j)] = diag[CID(j)] * x[CID(j)];
                }
                for (i = 0; i < m; i++)
                    fvec[CID(i)] = wa4[CID(i)];
                xnorm = lm_enorm(n, wa2);
                fnorm = fnorm1;
                iter++;
            }
            /*** inner: tests for convergence ( otherwise info = 1, 2, or 3 ). ***/
            info = 0; /* do not terminate (unless overwritten by nonzero) */
            if (fabs(actred) <= ftol && prered <= ftol && 0.5 * ratio <= 1)
                info = 1;
            if (delta <= xtol * xnorm)
                info += 2;
            if (info != 0)
            {
                /* save the results back into global memory so the host can read them */
                for(int i = 0; i < n; i++)
                    g_dataA[blockIdx.x*blockDim.x*DATAA_SIZE + CID(i)] = x[CID(i)];
                return;
            }
            /*** inner: tests for termination and stringent tolerances. ***/
            if (nfev >= maxfev)
                info = 5;
            if (fabs(actred) <= LM_MACHEP && prered <= LM_MACHEP && 0.5 * ratio <= 1)
                info = 6;
            if (delta <= LM_MACHEP * xnorm)
                info = 7;
            if (gnorm <= LM_MACHEP)
                info = 8;
            if (info != 0)
            {
                /* save the results back into global memory so the host can read them */
                for(int i = 0; i < n; i++)
                    g_dataA[blockIdx.x*blockDim.x*DATAA_SIZE + CID(i)] = x[CID(i)];
                return;
            }
            /*** inner: end of the loop. repeat if iteration unsuccessful. ***/
        } while (ratio < p0001);
        /*** outer: end of the loop. ***/
    } while (1);
} /*** lm_lmdif. ***/
__device__ void lm_lmpar(int n, FLOAT *r, int ldr, int *ipvt, FLOAT *diag,
                         FLOAT *qtb, FLOAT delta, FLOAT *par, FLOAT *x,
                         FLOAT *sdiag, FLOAT *wa1, FLOAT *wa2)
{
    /* Given an m by n matrix a, an n by n nonsingular diagonal
     * matrix d, an m-vector b, and a positive number delta,
     * the problem is to determine a value for the parameter
     * par such that if x solves the system
     *
     *        a*x = b  and  sqrt(par)*d*x = 0
     *
     * in the least squares sense, and dxnorm is the euclidean
     * norm of d*x, then either par=0 and (dxnorm-delta) < 0.1*delta,
     * or par>0 and abs(dxnorm-delta) < 0.1*delta.
     *
     * This subroutine completes the solution of the problem
     * if it is provided with the necessary information from the
     * qr factorization, with column pivoting, of a. That is, if
     * a*p = q*r, where p is a permutation matrix, q has orthogonal
     * columns, and r is an upper triangular matrix with diagonal
     * elements of nonincreasing magnitude, then lmpar expects
     * the full upper triangle of r, the permutation matrix p,
     * and the first n components of (q transpose)*b. On output
     * lmpar also provides an upper triangular matrix s such that
     *
     *         t    t                 t
     *        p *(a *a + par*d*d)*p = s *s.
     *
     * s is employed within lmpar and may be of separate interest.
     *
     * Only a few iterations are generally needed for convergence
     * of the algorithm. If, however, the limit of 10 iterations
     * is reached, then the output par will contain the best
     * value obtained so far.
     *
     * parameters:
     *
     *  n is a positive integer input variable set to the order of r.
     *
     *  r is an n by n array. on input the full upper triangle
     *    must contain the full upper triangle of the matrix r.
     *    on output the full upper triangle is unaltered, and the
     *    strict lower triangle contains the strict upper triangle
     *    (transposed) of the upper triangular matrix s.
     *
     *  ldr is a positive integer input variable not less than n
     *    which specifies the leading dimension of the array r.
     *
     *  ipvt is an integer input array of length n which defines the
     *    permutation matrix p such that a*p = q*r. column j of p
     *    is column ipvt(j) of the identity matrix.
     *
     *  diag is an input array of length n which must contain the
     *    diagonal elements of the matrix d.
     *
     *  qtb is an input array of length n which must contain the first
     *    n elements of the vector (q transpose)*b.
     *
     *  delta is a positive input variable which specifies an upper
     *    bound on the euclidean norm of d*x.
     *
     *  par is a nonnegative variable. on input par contains an
     *    initial estimate of the levenberg-marquardt parameter.
     *    on output par contains the final estimate.
     *
     *  x is an output array of length n which contains the least
     *    squares solution of the system a*x = b, sqrt(par)*d*x = 0,
     *    for the output par.
     *
     *  sdiag is an output array of length n which contains the
     *    diagonal elements of the upper triangular matrix s.
     *
     *  wa1 and wa2 are work arrays of length n.
     *
     */
    int i, iter, j, nsing;
    FLOAT dxnorm, fp, fp_old, gnorm, parc, parl, paru;
    FLOAT sum, temp;
    FLOAT p1 = 0.1;
    /*** lmpar: compute and store in x the gauss-newton direction. if the
         jacobian is rank-deficient, obtain a least squares solution. ***/
    /* nsing becomes the index of the first zero diagonal element of r
       (i.e. the numerical rank), or stays n for a full-rank matrix. */
    nsing = n;
    for (j = 0; j < n; j++) {
        wa1[CID(j)] = qtb[CID(j)];
        if (r[CID(j * ldr + j)] == 0 && nsing == n)
            nsing = j;
        if (nsing < n)
            wa1[CID(j)] = 0;
    }
    /* back-substitution through the upper triangle of r */
    for (j = nsing - 1; j >= 0; j--) {
        wa1[CID(j)] = wa1[CID(j)] / r[CID(j + ldr * j)];
        temp = wa1[CID(j)];
        for (i = 0; i < j; i++)
            wa1[CID(i)] -= r[CID(j * ldr + i)] * temp;
    }
    /* undo the column permutation */
    for (j = 0; j < n; j++)
        x[CID(ipvt[CID(j)])] = wa1[CID(j)];
    /*** lmpar: initialize the iteration counter, evaluate the function at the
         origin, and test for acceptance of the gauss-newton direction. ***/
    iter = 0;
    for (j = 0; j < n; j++)
        wa2[CID(j)] = diag[CID(j)] * x[CID(j)];
    dxnorm = lm_enorm(n, wa2);
    fp = dxnorm - delta;
    if (fp <= p1 * delta) {
        *par = 0;
        return;
    }
    /*** lmpar: if the jacobian is not rank deficient, the newton
         step provides a lower bound, parl, for the zero of
         the function. otherwise set this bound to zero. ***/
    parl = 0;
    if (nsing >= n) {
        for (j = 0; j < n; j++)
            wa1[CID(j)] = diag[CID(ipvt[CID(j)])] * wa2[CID(ipvt[CID(j)])] / dxnorm;
        for (j = 0; j < n; j++) {
            sum = 0.;
            for (i = 0; i < j; i++)
                sum += r[CID(j * ldr + i)] * wa1[CID(i)];
            wa1[CID(j)] = (wa1[CID(j)] - sum) / r[CID(j + ldr * j)];
        }
        temp = lm_enorm(n, wa1);
        parl = fp / delta / temp / temp;
    }
    /*** lmpar: calculate an upper bound, paru, for the zero of the function. ***/
    for (j = 0; j < n; j++) {
        sum = 0;
        for (i = 0; i <= j; i++)
            sum += r[CID(j * ldr + i)] * qtb[CID(i)];
        wa1[CID(j)] = sum / diag[CID(ipvt[CID(j)])];
    }
    gnorm = lm_enorm(n, wa1);
    paru = gnorm / delta;
    if (paru == 0.)
        paru = LM_DWARF / MIN(delta, p1);
    /*** lmpar: if the input par lies outside of the interval (parl,paru),
         set par to the closer endpoint. ***/
    *par = MAX(*par, parl);
    *par = MIN(*par, paru);
    if (*par == 0.)
        *par = gnorm / dxnorm;
    /*** lmpar: iterate. ***/
    for (;; iter++) {
        /** evaluate the function at the current value of par. **/
        if (*par == 0.)
            *par = MAX(LM_DWARF, 0.001 * paru);
        temp = sqrt(*par);
        for (j = 0; j < n; j++)
            wa1[CID(j)] = temp * diag[CID(j)];
        lm_qrsolv(n, r, ldr, ipvt, wa1, qtb, x, sdiag, wa2);
        for (j = 0; j < n; j++)
            wa2[CID(j)] = diag[CID(j)] * x[CID(j)];
        dxnorm = lm_enorm(n, wa2);
        fp_old = fp;
        fp = dxnorm - delta;
        /** if the function is small enough, accept the current value
            of par. Also test for the exceptional cases where parl
            is zero or the number of iterations has reached 10. **/
        if (fabs(fp) <= p1 * delta
            || (parl == 0. && fp <= fp_old && fp_old < 0.)
            || iter == 10)
            break; /* the only exit from the iteration. */
        /** compute the Newton correction. **/
        for (j = 0; j < n; j++)
            wa1[CID(j)] = diag[CID(ipvt[CID(j)])] * wa2[CID(ipvt[CID(j)])] / dxnorm;
        for (j = 0; j < n; j++) {
            wa1[CID(j)] = wa1[CID(j)] / sdiag[CID(j)];
            for (i = j + 1; i < n; i++)
                wa1[CID(i)] -= r[CID(j * ldr + i)] * wa1[CID(j)];
        }
        temp = lm_enorm(n, wa1);
        parc = fp / delta / temp / temp;
        /** depending on the sign of the function, update parl or paru. **/
        if (fp > 0)
            parl = MAX(parl, *par);
        else if (fp < 0)
            paru = MIN(paru, *par);
        /* the case fp==0 is precluded by the break condition */
        /** compute an improved estimate for par. **/
        *par = MAX(parl, *par + parc);
    }
} /*** lm_lmpar. ***/
__device__
void lm_qrfac(int m, int n, FLOAT *a, int pivot, int *ipvt,
              FLOAT *rdiag, FLOAT *acnorm, FLOAT *wa)
{
    /*
     * This subroutine uses householder transformations with column
     * pivoting (optional) to compute a qr factorization of the
     * m by n matrix a. That is, qrfac determines an orthogonal
     * matrix q, a permutation matrix p, and an upper trapezoidal
     * matrix r with diagonal elements of nonincreasing magnitude,
     * such that a*p = q*r. The householder transformation for
     * column k, k = 1,2,...,min(m,n), is of the form
     *
     *                    t
     *        i - (1/u(k))*u*u
     *
     * where u has zeroes in the first k-1 positions. The form of
     * this transformation and the method of pivoting first
     * appeared in the corresponding linpack subroutine.
     *
     * Parameters:
     *
     *  m is a positive integer input variable set to the number
     *    of rows of a.
     *
     *  n is a positive integer input variable set to the number
     *    of columns of a.
     *
     *  a is an m by n array. On input a contains the matrix for
     *    which the qr factorization is to be computed. On output
     *    the strict upper trapezoidal part of a contains the strict
     *    upper trapezoidal part of r, and the lower trapezoidal
     *    part of a contains a factored form of q (the non-trivial
     *    elements of the u vectors described above).
     *
     *  pivot is a logical input variable. If pivot is set true,
     *    then column pivoting is enforced. If pivot is set false,
     *    then no column pivoting is done.
     *
     *  ipvt is an integer output array of length lipvt. This array
     *    defines the permutation matrix p such that a*p = q*r.
     *    Column j of p is column ipvt(j) of the identity matrix.
     *    If pivot is false, ipvt is not referenced.
     *
     *  rdiag is an output array of length n which contains the
     *    diagonal elements of r.
     *
     *  acnorm is an output array of length n which contains the
     *    norms of the corresponding columns of the input matrix a.
     *    If this information is not needed, then acnorm can coincide
     *    with rdiag.
     *
     *  wa is a work array of length n. If pivot is false, then wa
     *    can coincide with rdiag.
     *
     */
    int i, j, k, kmax, minmn;
    FLOAT ajnorm, sum, temp;
    FLOAT p05 = 0.05;
    /*** qrfac: compute initial column norms and initialize several arrays. ***/
    for (j = 0; j < n; j++) {
        /* CID_BASE (not CID) here: lm_enorm applies the per-thread CID
           offset internally, so only the base offset is passed. */
        acnorm[CID(j)] = lm_enorm(m, &a[CID_BASE(j * m)]);
        rdiag[CID(j)] = acnorm[CID(j)];
        wa[CID(j)] = rdiag[CID(j)];
        if (pivot)
            ipvt[CID(j)] = j;
    }
    /*** qrfac: reduce a to r with householder transformations. ***/
    minmn = MIN(m, n);
    for (j = 0; j < minmn; j++) {
        if (!pivot)
            goto pivot_ok;
        /** bring the column of largest norm into the pivot position. **/
        kmax = j;
        for (k = j + 1; k < n; k++)
            if (rdiag[CID(k)] > rdiag[CID(kmax)])
                kmax = k;
        if (kmax == j)
            goto pivot_ok;
        /* swap columns j and kmax, together with their bookkeeping */
        for (i = 0; i < m; i++) {
            temp = a[CID(j * m + i)];
            a[CID(j * m + i)] = a[CID(kmax * m + i)];
            a[CID(kmax * m + i)] = temp;
        }
        rdiag[CID(kmax)] = rdiag[CID(j)];
        wa[CID(kmax)] = wa[CID(j)];
        k = ipvt[CID(j)];
        ipvt[CID(j)] = ipvt[CID(kmax)];
        ipvt[CID(kmax)] = k;
    pivot_ok:
        /** compute the Householder transformation to reduce the
            j-th column of a to a multiple of the j-th unit vector. **/
        ajnorm = lm_enorm(m - j, &a[CID_BASE(j * m + j)]);
        if (ajnorm == 0.) {
            rdiag[CID(j)] = 0;
            continue;
        }
        if (a[CID(j * m + j)] < 0.)
            ajnorm = -ajnorm;
        for (i = j; i < m; i++)
            a[CID(j * m + i)] /= ajnorm;
        a[CID(j * m + j)] += 1;
        /** apply the transformation to the remaining columns
            and update the norms. **/
        for (k = j + 1; k < n; k++) {
            sum = 0;
            for (i = j; i < m; i++)
                sum += a[CID(j * m + i)] * a[CID(k * m + i)];
            temp = sum / a[CID(j + m * j)];
            for (i = j; i < m; i++)
                a[CID(k * m + i)] -= temp * a[CID(j * m + i)];
            if (pivot && rdiag[CID(k)] != 0.) {
                temp = a[CID(m * k + j)] / rdiag[CID(k)];
                temp = MAX(0., 1 - temp * temp);
                rdiag[CID(k)] *= sqrt(temp);
                temp = rdiag[CID(k)] / wa[CID(k)];
                /* recompute the norm directly if cancellation made the
                   downdated value unreliable */
                if (p05 * SQR(temp) <= LM_MACHEP) {
                    rdiag[CID(k)] = lm_enorm(m - j - 1, &a[CID_BASE(m * k + j + 1)]);
                    wa[CID(k)] = rdiag[CID(k)];
                }
            }
        }
        rdiag[CID(j)] = -ajnorm;
    }
}
__device__
void lm_qrsolv(int n, FLOAT *r, int ldr, int *ipvt, FLOAT *diag,
               FLOAT *qtb, FLOAT *x, FLOAT *sdiag, FLOAT *wa)
{
    /*
     * Given an m by n matrix a, an n by n diagonal matrix d,
     * and an m-vector b, the problem is to determine an x which
     * solves the system
     *
     *        a*x = b  and  d*x = 0
     *
     * in the least squares sense.
     *
     * This subroutine completes the solution of the problem
     * if it is provided with the necessary information from the
     * qr factorization, with column pivoting, of a. That is, if
     * a*p = q*r, where p is a permutation matrix, q has orthogonal
     * columns, and r is an upper triangular matrix with diagonal
     * elements of nonincreasing magnitude, then qrsolv expects
     * the full upper triangle of r, the permutation matrix p,
     * and the first n components of (q transpose)*b. The system
     * a*x = b, d*x = 0, is then equivalent to
     *
     *               t      t
     *        r*z = q *b,  p *d*p*z = 0,
     *
     * where x = p*z. If this system does not have full rank,
     * then a least squares solution is obtained. On output qrsolv
     * also provides an upper triangular matrix s such that
     *
     *         t    t              t
     *        p *(a *a + d*d)*p = s *s.
     *
     * s is computed within qrsolv and may be of separate interest.
     *
     * Parameters
     *
     *  n is a positive integer input variable set to the order of r.
     *
     *  r is an n by n array. On input the full upper triangle
     *    must contain the full upper triangle of the matrix r.
     *    On output the full upper triangle is unaltered, and the
     *    strict lower triangle contains the strict upper triangle
     *    (transposed) of the upper triangular matrix s.
     *
     *  ldr is a positive integer input variable not less than n
     *    which specifies the leading dimension of the array r.
     *
     *  ipvt is an integer input array of length n which defines the
     *    permutation matrix p such that a*p = q*r. Column j of p
     *    is column ipvt(j) of the identity matrix.
     *
     *  diag is an input array of length n which must contain the
     *    diagonal elements of the matrix d.
     *
     *  qtb is an input array of length n which must contain the first
     *    n elements of the vector (q transpose)*b.
     *
     *  x is an output array of length n which contains the least
     *    squares solution of the system a*x = b, d*x = 0.
     *
     *  sdiag is an output array of length n which contains the
     *    diagonal elements of the upper triangular matrix s.
     *
     *  wa is a work array of length n.
     *
     */
    int i, kk, j, k, nsing;
    FLOAT qtbpj, sum, temp;
    FLOAT _sin, _cos, _tan, _cot; /* local variables, not functions */
    /*** qrsolv: copy r and (q transpose)*b to preserve input and initialize s.
         in particular, save the diagonal elements of r in x. ***/
    for (j = 0; j < n; j++) {
        for (i = j; i < n; i++)
            r[CID(j * ldr + i)] = r[CID(i * ldr + j)];
        x[CID(j)] = r[CID(j * ldr + j)];
        wa[CID(j)] = qtb[CID(j)];
    }
    /*** qrsolv: eliminate the diagonal matrix d using a givens rotation. ***/
    for (j = 0; j < n; j++) {
        /*** qrsolv: prepare the row of d to be eliminated, locating the
             diagonal element using p from the qr factorization. ***/
        if (diag[CID(ipvt[CID(j)])] == 0.)
            goto L90;
        for (k = j; k < n; k++)
            sdiag[CID(k)] = 0.;
        sdiag[CID(j)] = diag[CID(ipvt[CID(j)])];
        /*** qrsolv: the transformations to eliminate the row of d modify only
             a single element of (q transpose)*b beyond the first n, which is
             initially zero. ***/
        qtbpj = 0.;
        for (k = j; k < n; k++) {
            /** determine a givens rotation which eliminates the
                appropriate element in the current row of d. **/
            if (sdiag[CID(k)] == 0.)
                continue;
            kk = k + ldr * k;
            /* the two branches pick the numerically stable formulation
               depending on which of the two magnitudes dominates */
            if (fabs(r[CID(kk)]) < fabs(sdiag[CID(k)])) {
                _cot = r[CID(kk)] / sdiag[CID(k)];
                _sin = 1 / sqrt(1 + SQR(_cot));
                _cos = _sin * _cot;
            } else {
                _tan = sdiag[CID(k)] / r[CID(kk)];
                _cos = 1 / sqrt(1 + SQR(_tan));
                _sin = _cos * _tan;
            }
            /** compute the modified diagonal element of r and
                the modified element of ((q transpose)*b,0). **/
            r[CID(kk)] = _cos * r[CID(kk)] + _sin * sdiag[CID(k)];
            temp = _cos * wa[CID(k)] + _sin * qtbpj;
            qtbpj = -_sin * wa[CID(k)] + _cos * qtbpj;
            wa[CID(k)] = temp;
            /** accumulate the tranformation in the row of s. **/
            for (i = k + 1; i < n; i++) {
                temp = _cos * r[CID(k * ldr + i)] + _sin * sdiag[CID(i)];
                sdiag[CID(i)] = -_sin * r[CID(k * ldr + i)] + _cos * sdiag[CID(i)];
                r[CID(k * ldr + i)] = temp;
            }
        }
    L90:
        /** store the diagonal element of s and restore
            the corresponding diagonal element of r. **/
        sdiag[CID(j)] = r[CID(j * ldr + j)];
        r[CID(j * ldr + j)] = x[CID(j)];
    }
    /*** qrsolv: solve the triangular system for z. if the system is
         singular, then obtain a least squares solution. ***/
    nsing = n;
    for (j = 0; j < n; j++) {
        if (sdiag[CID(j)] == 0. && nsing == n)
            nsing = j;
        if (nsing < n)
            wa[CID(j)] = 0;
    }
    /* back-substitution through the triangular factor s */
    for (j = nsing - 1; j >= 0; j--) {
        sum = 0;
        for (i = j + 1; i < nsing; i++)
            sum += r[CID(j * ldr + i)] * wa[CID(i)];
        wa[CID(j)] = (wa[CID(j)] - sum) / sdiag[CID(j)];
    }
    /*** qrsolv: permute the components of z back to components of x. ***/
    for (j = 0; j < n; j++)
        x[CID(ipvt[CID(j)])] = wa[CID(j)];
} /*** lm_qrsolv. ***/
__device__ FLOAT lm_enorm(int n, FLOAT *x)
{
    /* Given an n-vector x, this function calculates the
     * euclidean norm of x.
     *
     * The euclidean norm is computed by accumulating the sum of
     * squares in three different sums. The sums of squares for the
     * small and large components are scaled so that no overflows
     * occur. Non-destructive underflows are permitted. Underflows
     * and overflows do not occur in the computation of the unscaled
     * sum of squares for the intermediate components.
     * The definitions of small, intermediate and large components
     * depend on two constants, LM_SQRT_DWARF and LM_SQRT_GIANT. The main
     * restrictions on these constants are that LM_SQRT_DWARF**2 not
     * underflow and LM_SQRT_GIANT**2 not overflow.
     *
     * Note: elements are accessed through the per-thread CID() stride,
     * so callers pass a base pointer obtained with CID_BASE().
     *
     * Parameters
     *
     *  n is a positive integer input variable.
     *
     *  x is an input array of length n.
     */
    int i;
    /* s1/x1max: large components, s2: intermediate, s3/x3max: small */
    FLOAT agiant, s1, s2, s3, xabs, x1max, x3max, temp;
    s1 = 0;
    s2 = 0;
    s3 = 0;
    x1max = 0;
    x3max = 0;
    agiant = LM_SQRT_GIANT / ((FLOAT) n);
    /** sum squares. **/
    for (i = 0; i < n; i++) {
        xabs = fabs(x[CID(i)]);
        if (xabs > LM_SQRT_DWARF && xabs < agiant) {
            /* sum for intermediate components. */
            s2 += xabs * xabs;
            continue;
        }
        if (xabs > LM_SQRT_DWARF) {
            /* sum for large components, scaled by the running maximum
               so the squares cannot overflow. */
            if (xabs > x1max) {
                temp = x1max / xabs;
                s1 = 1 + s1 * SQR(temp);
                x1max = xabs;
            } else {
                temp = xabs / x1max;
                s1 += SQR(temp);
            }
            continue;
        }
        /* sum for small components, scaled analogously against underflow. */
        if (xabs > x3max) {
            temp = x3max / xabs;
            s3 = 1 + s3 * SQR(temp);
            x3max = xabs;
        } else {
            if (xabs != 0.) {
                temp = xabs / x3max;
                s3 += SQR(temp);
            }
        }
    }
    /** calculation of norm. **/
    if (s1 != 0)
        return x1max * sqrt(s1 + (s2 / x1max) / x1max);
    if (s2 != 0) {
        if (s2 >= x3max)
            return sqrt(s2 * (1 + (x3max / s2) * (x3max * s3)));
        else
            return sqrt(x3max * ((s2 / x3max) + (x3max * s3)));
    }
    return x3max * sqrt(s3);
} /*** lm_enorm. ***/
//
// C wrapper around our template kernel
//
// C-linkage kernel wrapper so the fitter can be launched by name (e.g. via
// the driver API); simply forwards all arguments to the device routine.
// Launch with enough dynamic shared memory for lm_lmdif's per-thread arrays.
// (A stray markdown-table character after the closing brace was removed.)
extern "C" __global__ void
lmmin(int nthreads, int boxsize, FLOAT *dataY, int nparams, FLOAT *params, FLOAT ftol, FLOAT xtol, FLOAT gtol, int maxfev, FLOAT epsfcn, int mode, FLOAT factor)
{
    lm_lmdif(nthreads, boxsize, dataY, nparams, params, ftol, xtol, gtol, maxfev, epsfcn, mode, factor);
}
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/fill.h>
// Set every element of a device vector to zero.
// Uses thrust::fill with the explicit device execution policy, so the work
// runs on the GPU (default stream). (Trailing table-residue line removed.)
void fill_zero(thrust::device_vector<int>& v) {
    thrust::fill(thrust::device, v.begin(), v.end(), 0);
}
#include <stdio.h>
// Print one greeting per GPU thread, showing how the flat global thread
// index decomposes into blockIdx.x * blockDim.x + threadIdx.x.
__global__ void hello_kernel()
{
    int tid  = threadIdx.x;        // thread index within its block
    int bid  = blockIdx.x;         // block index within the grid
    int bdim = blockDim.x;         // number of threads per block
    int gid  = bid * bdim + tid;   // flat global thread index
    printf("Hello from GPU thread %d = %d * %d + %d\n", gid, bid, bdim, tid);
}
// Launch the hello kernel on 3 blocks of 32 threads and wait for it.
// Returns 0 on success, 1 if any CUDA error occurred.
int main()
{
    int numThreadsInBlock = 32;
    int numBlocks = 3;
    hello_kernel<<<numBlocks, numThreadsInBlock>>>();
    // A kernel launch does not report errors directly: check for launch
    // errors first, then synchronize so the device printf output is flushed
    // and any asynchronous execution error is surfaced.
    cudaError_t err = cudaGetLastError();
    if (err == cudaSuccess)
        err = cudaDeviceSynchronize();
    if (err != cudaSuccess) {
        fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err));
        return 1;
    }
    return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.