serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
18,801 |
// Babak Poursartip
// 09/29/2020
// section 2: video 24
/*
- If each warp is not fully occupant, that would be a waste of resources.
- We need to calculate the occupancy of SM which is equal to:
occupancy = active warps/max warps
* max warps can be obtained from the device manual.
* active warps needs to be calculated: it is equal to the min of warps
obtained based on the size of register and shared memory. Refer to the slides
2-6
- To get the value of the shared memory and register, use the following command:
nvcc --ptxas-options=-v -o 4_occupancy_test.out 4_occupancy_test.cu
- Warp allocation granularity means that warps are allocated in multiples of a
number. For example, multiples of 4.
- Use the excel sheet to calculate the CUDA occupancy.
*/
#include <iostream>
// Occupancy demo kernel: the eight live locals force several values to be
// held in registers per thread so register pressure can be inspected with
// --ptxas-options=-v.
// NOTE(review): there is no bounds check on gid; the launch in main()
// divides evenly, but callers must ensure gridDim*blockDim covers results.
__global__ void occupancy_test(int *results) {
  int gid = blockDim.x * blockIdx.x + threadIdx.x;
  int r1 = 1, r2 = 2, r3 = 3, r4 = 4;
  int r5 = 5, r6 = 6, r7 = 7, r8 = 8;
  int sum = r8 + r7 + r6 + r5;
  sum += r4 + r3 + r2 + r1;
  results[gid] = sum;
}
// Driver: zero-fills a 64K-element device array and launches occupancy_test
// with 128-thread blocks (size is a multiple of 128, so no tail threads).
int main() {
  int size = 1 << 16;  // 65536 elements
  printf(" size: %d \n", size);
  int byte_size = size * sizeof(int);
  int *d_results;
  cudaMalloc((void **)&d_results, byte_size);
  cudaMemset(d_results, 0, byte_size);
  dim3 block(128);
  dim3 grid((size + block.x - 1) / block.x);  // ceil-divide
  printf(" grids: %d, block: %d \n", grid.x, block.x);
  occupancy_test<<<grid, block>>>(d_results);
  // Surface launch-configuration errors before waiting for completion.
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess)
    printf("kernel launch failed: %s\n", cudaGetErrorString(err));
  cudaDeviceSynchronize();
  cudaFree(d_results);  // fix: the original leaked the device buffer
  return 0;
}
18,802 | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* Code to simulate a GPU workload for lab assignment in [A2] Task Mapping on Soft Heterogeneous Systems.
* Workload consists of a the Black-Scholes kernel taken from NVIDIA SDK 10.1
*
* Computation is done on the GPU when the user selects a core attached to a GPU; otherwise the code is run on
* the CPU. GPU version of the code is expected to run faster.
*
* @author: Apan Qasem <apan@txstate.edu>
* @date: 04/02/20
*
* @update: 03/12/21
*/
#include<cstdio>
#include<sys/time.h>
///////////////////////////////////////////////////////////////////////////////
// Polynomial approximation of cumulative normal distribution function
///////////////////////////////////////////////////////////////////////////////
// Five-term polynomial approximation of the standard normal CDF
// (Abramowitz & Stegun style), evaluated in double precision on the host.
static double CND(double d)
{
    const double a1 = 0.31938153;
    const double a2 = -0.356563782;
    const double a3 = 1.781477937;
    const double a4 = -1.821255978;
    const double a5 = 1.330274429;
    const double rsqrt2pi = 0.39894228040143267793994605993438;  // 1/sqrt(2*pi)

    const double k = 1.0 / (1.0 + 0.2316419 * fabs(d));
    const double poly = K_POLY(k);
    double cnd = rsqrt2pi * exp(-0.5 * d * d) * poly;
#undef K_POLY
    // The polynomial approximates the tail; mirror it for positive d.
    return (d > 0) ? 1.0 - cnd : cnd;
}
///////////////////////////////////////////////////////////////////////////////
// Black-Scholes formula for both call and put
///////////////////////////////////////////////////////////////////////////////
// Price one European call/put pair with the closed-form Black-Scholes
// formula. Inputs arrive as float, the math runs in double for accuracy,
// and results are narrowed back to float via the out-parameters.
static void BlackScholesBodyCPU(
    float &callResult,
    float &putResult,
    float Sf, //Stock price
    float Xf, //Option strike
    float Tf, //Option years
    float Rf, //Riskless rate
    float Vf //Volatility rate
)
{
    const double S = Sf;
    const double X = Xf;
    const double T = Tf;
    const double R = Rf;
    const double V = Vf;

    const double sqrtT = sqrt(T);
    const double d1 = (log(S / X) + (R + 0.5 * V * V) * T) / (V * sqrtT);
    const double d2 = d1 - V * sqrtT;
    const double cndD1 = CND(d1);
    const double cndD2 = CND(d2);

    // Both legs share the discount factor exp(-R*T).
    const double expRT = exp(- R * T);
    callResult = (float)(S * cndD1 - X * expRT * cndD2);
    putResult = (float)(X * expRT * (1.0 - cndD2) - S * (1.0 - cndD1));
}
////////////////////////////////////////////////////////////////////////////////
// Process an array of optN options
////////////////////////////////////////////////////////////////////////////////
// Host driver: prices optN options by applying the scalar Black-Scholes
// body element-by-element over the input arrays.
void BlackScholesCPU(
    float *h_CallResult,
    float *h_PutResult,
    float *h_StockPrice,
    float *h_OptionStrike,
    float *h_OptionYears,
    float Riskfree,
    float Volatility,
    int optN
)
{
    for (int opt = 0; opt < optN; opt++) {
        BlackScholesBodyCPU(h_CallResult[opt], h_PutResult[opt],
                            h_StockPrice[opt], h_OptionStrike[opt],
                            h_OptionYears[opt], Riskfree, Volatility);
    }
}
// extern "C" void BlackScholesCPU(
// float *h_CallResult,
// float *h_PutResult,
// float *h_StockPrice,
// float *h_OptionStrike,
// float *h_OptionYears,
// float Riskfree,
// float Volatility,
// int optN
// );
////////////////////////////////////////////////////////////////////////////////
// Process an array of OptN options on GPU
////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
// Polynomial approximation of cumulative normal distribution function
///////////////////////////////////////////////////////////////////////////////
// Device-side single-precision CND approximation; same polynomial as the
// host CND() but in float, using the fast __expf intrinsic.
__device__ inline float cndGPU(float d)
{
    const float a1 = 0.31938153f;
    const float a2 = -0.356563782f;
    const float a3 = 1.781477937f;
    const float a4 = -1.821255978f;
    const float a5 = 1.330274429f;
    const float rsqrt2pi = 0.39894228040143267793994605993438f;  // 1/sqrt(2*pi)

    const float k = 1.0f / (1.0f + 0.2316419f * fabsf(d));
    const float poly = k * (a1 + k * (a2 + k * (a3 + k * (a4 + k * a5))));
    float cnd = rsqrt2pi * __expf(- 0.5f * d * d) * poly;
    return (d > 0) ? 1.0f - cnd : cnd;
}
///////////////////////////////////////////////////////////////////////////////
// Black-Scholes formula for both call and put
///////////////////////////////////////////////////////////////////////////////
// Device-side Black-Scholes pricing of one call/put pair, entirely in
// float; uses the fast __logf/__expf intrinsics.
__device__ inline void BlackScholesBodyGPU(
    float &CallResult,
    float &PutResult,
    float S, //Stock price
    float X, //Option strike
    float T, //Option years
    float R, //Riskless rate
    float V //Volatility rate
)
{
    const float sqrtT = sqrtf(T);
    const float d1 = (__logf(S / X) + (R + 0.5f * V * V) * T) / (V * sqrtT);
    const float d2 = d1 - V * sqrtT;
    const float CNDD1 = cndGPU(d1);
    const float CNDD2 = cndGPU(d2);
    // Both legs share the discount factor exp(-R*T).
    const float expRT = __expf(- R * T);
    CallResult = S * CNDD1 - X * expRT * CNDD2;
    PutResult = X * expRT * (1.0f - CNDD2) - S * (1.0f - CNDD1);
}
////////////////////////////////////////////////////////////////////////////////
//Process an array of optN options on GPU
////////////////////////////////////////////////////////////////////////////////
// One thread prices one option; the grid tail is guarded, so any grid that
// covers optN elements is valid. Consecutive threads touch consecutive
// array elements, so the global loads/stores coalesce.
__global__ void BlackScholesGPU(
    float *d_CallResult,
    float *d_PutResult,
    float *d_StockPrice,
    float *d_OptionStrike,
    float *d_OptionYears,
    float Riskfree,
    float Volatility,
    int optN
)
{
    const int opt = blockDim.x * blockIdx.x + threadIdx.x;
    if (opt >= optN)
        return;
    BlackScholesBodyGPU(d_CallResult[opt], d_PutResult[opt],
                        d_StockPrice[opt], d_OptionStrike[opt],
                        d_OptionYears[opt], Riskfree, Volatility);
}
////////////////////////////////////////////////////////////////////////////////
// Helper function, returning uniformly distributed
// random float in [low, high] range
////////////////////////////////////////////////////////////////////////////////
// Uniformly distributed random float in [low, high], driven by rand().
float RandFloat(float low, float high)
{
    const float t = (float)rand() / (float)RAND_MAX;  // t in [0, 1]
    return (1.0f - t) * low + t * high;               // linear blend
}
////////////////////////////////////////////////////////////////////////////////
// Data configuration
////////////////////////////////////////////////////////////////////////////////
const int NUM_ITERATIONS = 512;
const float RISKFREE = 0.02f;
const float VOLATILITY = 0.30f;
#define DIV_UP(a, b) ( ((a) + (b) - 1) / (b) )
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
// Usage: ./blackscholes <options> <gpu>
//   gpu != 0 : run NUM_ITERATIONS GPU pricing passes and time them;
//   gpu == 0 : run one CPU pricing pass and time it.
int main(int argc, char **argv) {
    if (argc < 3) {
        fprintf(stderr, "usage: ./blackscholes options GPU\n");
        exit(0);
    }
    unsigned options = atoi(argv[1]);
    int options_size = options * sizeof(float);
    unsigned gpu = atoi(argv[2]);

    // Host-side result and input buffers.
    float *h_CallResultCPU = (float *)malloc(options_size);
    float *h_PutResultCPU = (float *)malloc(options_size);
    float *h_CallResultGPU = (float *)malloc(options_size);
    float *h_PutResultGPU = (float *)malloc(options_size);
    float *h_StockPrice = (float *)malloc(options_size);
    float *h_OptionStrike = (float *)malloc(options_size);
    float *h_OptionYears = (float *)malloc(options_size);

    // Device-side buffers.
    float *d_CallResult, *d_PutResult;
    float *d_StockPrice, *d_OptionStrike, *d_OptionYears;
    cudaMalloc((void **)&d_CallResult, options_size);
    cudaMalloc((void **)&d_PutResult, options_size);
    cudaMalloc((void **)&d_StockPrice, options_size);
    cudaMalloc((void **)&d_OptionStrike, options_size);
    cudaMalloc((void **)&d_OptionYears, options_size);

    // Generate a reproducible option set (fixed seed).
    srand(5347);
    for (unsigned i = 0; i < options; i++) {
        h_CallResultCPU[i] = 0.0f;
        h_PutResultCPU[i] = -1.0f;
        h_StockPrice[i] = RandFloat(5.0f, 30.0f);
        h_OptionStrike[i] = RandFloat(1.0f, 100.0f);
        h_OptionYears[i] = RandFloat(0.25f, 10.0f);
    }

    // Copy options data to GPU memory for further processing.
    cudaMemcpy(d_StockPrice, h_StockPrice, options_size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_OptionStrike, h_OptionStrike, options_size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_OptionYears, h_OptionYears, options_size, cudaMemcpyHostToDevice);
    cudaDeviceSynchronize();

    timeval starttime, endtime;
    double runtime;
    if (gpu) {
        gettimeofday(&starttime, NULL);
        for (int i = 0; i < NUM_ITERATIONS; i++) {
            BlackScholesGPU<<<DIV_UP(options, 128), 128>>>(
                d_CallResult, d_PutResult,
                d_StockPrice, d_OptionStrike, d_OptionYears,
                RISKFREE, VOLATILITY, options);
        }
        cudaDeviceSynchronize();  // kernels are async; wait before stopping the clock
        gettimeofday(&endtime, NULL);
    }
    else {
        gettimeofday(&starttime, NULL);
        //Calculate options values on CPU
        BlackScholesCPU(h_CallResultCPU, h_PutResultCPU,
                        h_StockPrice, h_OptionStrike, h_OptionYears,
                        RISKFREE, VOLATILITY, options);
        gettimeofday(&endtime, NULL);
    }

    // Read back GPU results (meaningful only when the GPU path ran).
    cudaMemcpy(h_CallResultGPU, d_CallResult, options_size, cudaMemcpyDeviceToHost);
    cudaMemcpy(h_PutResultGPU, d_PutResult, options_size, cudaMemcpyDeviceToHost);

    runtime = endtime.tv_sec + endtime.tv_usec / 1000000.0 - (starttime.tv_sec + starttime.tv_usec / 1000000.0);
    fprintf(stdout, "\033[1;32m[wk3] compute time = %.3f s\n\033[0m", runtime);
#ifdef VERIFY
    // NOTE(review): assumes options > 3145 -- confirm before enabling VERIFY.
    printf("%3.5f,%3.5f\n", h_CallResultGPU[2047],h_PutResultGPU[3145]);
#endif

    // Fix: the original compared h_CallResultCPU against h_CallResultGPU and
    // exited EXIT_FAILURE on mismatch, but each run computes only ONE of the
    // two result sets (the other keeps its initialization / uninitialized
    // values), so the check always reported a spurious failure. Validation
    // was already marked "not used"; skip it and exit successfully.

    cudaFree(d_OptionYears);
    cudaFree(d_OptionStrike);
    cudaFree(d_StockPrice);
    cudaFree(d_PutResult);
    cudaFree(d_CallResult);
    free(h_OptionYears);
    free(h_OptionStrike);
    free(h_StockPrice);
    free(h_PutResultGPU);
    free(h_CallResultGPU);
    free(h_PutResultCPU);
    free(h_CallResultCPU);
    exit(EXIT_SUCCESS);
}
|
18,803 | #include <stdio.h>
#include <cuda_runtime.h>
__constant__ int test_arr_d[5];
__constant__ int a;
// Each of the launched threads prints its own slot of the __constant__
// array test_arr_d (launched as <<<1,5>>> from main).
__global__ void print()
{
    const int id = threadIdx.x;
    printf("%d: %d\n", id, test_arr_d[id]);
    __syncthreads(); // barrier kept from the original; no data is exchanged
}
// Copies a 5-element array into __constant__ memory, then launches one
// block of 5 threads to print it.
int main()
{
    int test_arr_h[5] = {1, 2, 3, 4, 5};
    // Fix: the original stored the cudaError_t but never inspected it, so a
    // failed copy would silently print stale constant memory. Also pass the
    // array itself (decays to int*) instead of &test_arr_h, whose type only
    // matched by address coincidence.
    cudaError_t result = cudaMemcpyToSymbolAsync(test_arr_d, test_arr_h,
                                                 5 * sizeof(int), 0,
                                                 cudaMemcpyHostToDevice);
    if (result != cudaSuccess) {
        printf("cudaMemcpyToSymbolAsync failed: %s\n", cudaGetErrorString(result));
        return 1;
    }
    // Copy and kernel are on the default stream, so they execute in order.
    print<<<1,5>>>();
    cudaDeviceSynchronize();
    return 0;
}
|
18,804 | #include "cudaStepper.cuh"
#include <stdio.h>
// One block per neuron (launched <<<numNeurons, 1>>>; blockIdx.x selects
// the neuron). Computes the weighted input sum for the neuron, squashes it
// through a logistic sigmoid, and takes one explicit-Euler step of the
// firing-rate update; sampled neurons also record their new rate.
__global__ void stepper(
    float* d_firingRate,
    float* d_newFiringRate,
    float* d_connMatrix,
    int* d_sampleNeuronIndexes,
    float* d_biasVec,
    float* d_samples,
    float* stepSize,
    int* numNeurons) {
    const int neuron = blockIdx.x;
    const int n = *numNeurons;

    // Row `neuron` of the connection matrix dotted with the current rates.
    float weightedInput = 0;
    for (int j = 0; j < n; j++) {
        weightedInput += d_firingRate[j] * d_connMatrix[neuron * n + j];
    }
    weightedInput += d_biasVec[neuron];

    // Logistic sigmoid (double-precision exp, as in the original), then the
    // Euler step: new = rate + (sigmoid - rate) * dt.
    float delta = 1 / (1 + exp(-weightedInput));
    delta -= d_firingRate[neuron];
    d_newFiringRate[neuron] = d_firingRate[neuron] + (delta * (*stepSize));

    // -1 marks "not sampled"; otherwise the value is the sample-buffer slot.
    const int sampleSlot = d_sampleNeuronIndexes[neuron];
    if (sampleSlot > -1) {
        d_samples[sampleSlot] = d_newFiringRate[neuron];
    }
}
namespace NNet {
// Integrates the firing-rate network for numSteps Euler steps on the GPU
// and returns, for each step (plus the initial state), the rates of the
// sampled neurons: (numSteps + 1) rows of sampleNeurons.size() floats.
// The caller owns the returned rows and the row-pointer array.
// NOTE(review): biasVec is overwritten each step with |sin(t)|, mirroring
// the original behaviour -- the caller's buffer is mutated.
float** stepSys(
    int numSteps,
    int numNeurons,
    float** connMatrix,
    float* biasVec,
    float* startRate,
    std::vector<int> sampleNeurons,
    float stepSize
) {
    const int numSamples = (int)sampleNeurons.size();

    // Per-neuron flag array: -1 = not sampled, otherwise the index of this
    // neuron's slot in the per-step sample buffer.
    int* sampleNeuronFlags = new int[numNeurons];
    for (int i = 0; i < numNeurons; i++) {
        sampleNeuronFlags[i] = -1;
    }
    float** sampleRates = new float* [numSteps + 1];
    float* sampleLayer0 = new float[numSamples];
    for (int s = 0; s < numSamples; s++) {
        sampleNeuronFlags[sampleNeurons[s]] = s;
        sampleLayer0[s] = startRate[sampleNeurons[s]];
    }
    sampleRates[0] = sampleLayer0;

    int* d_sampleNeuronIndexes;
    int size = sizeof(int) * numNeurons;
    cudaMalloc((void**)&d_sampleNeuronIndexes, size);
    cudaMemcpy(d_sampleNeuronIndexes, sampleNeuronFlags, size, cudaMemcpyHostToDevice);

    float* d_samples;
    cudaMalloc((void**)&d_samples, numSamples * (int)sizeof(float));

    // Flatten the row-major connection matrix for the device.
    float* connMatrix1D = new float[numNeurons * numNeurons];
    for (int i = 0; i < numNeurons; i++) {
        for (int j = 0; j < numNeurons; j++) {
            connMatrix1D[i * numNeurons + j] = connMatrix[i][j];
        }
    }
    float* d_connMatrix;
    size = sizeof(float) * numNeurons * numNeurons;
    cudaMalloc((void**)&d_connMatrix, size);
    cudaMemcpy(d_connMatrix, connMatrix1D, size, cudaMemcpyHostToDevice);

    float* d_biasVec; float* d_firingRate; float* d_newFiringRate;
    size = sizeof(float) * numNeurons;
    cudaMalloc((void**)&d_biasVec, size);
    cudaMalloc((void**)&d_firingRate, size);
    cudaMalloc((void**)&d_newFiringRate, size);
    cudaMemcpy(d_biasVec, biasVec, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_firingRate, startRate, size, cudaMemcpyHostToDevice);

    // Scalar parameters are passed through device memory (kernel signature
    // takes pointers).
    int* d_numNeur;
    cudaMalloc((void**)&d_numNeur, sizeof(int));
    cudaMemcpy(d_numNeur, &numNeurons, sizeof(int), cudaMemcpyHostToDevice);
    float* d_stepSize;
    cudaMalloc((void**)&d_stepSize, sizeof(float));
    cudaMemcpy(d_stepSize, &stepSize, sizeof(float), cudaMemcpyHostToDevice);

    const int sizeSample = sizeof(float) * numSamples;
    const int sizeUpdate = sizeof(float) * numNeurons;

    // Step the system; every neuron's bias is driven by |sin(t)|.
    float time = 0.0;
    for (int step = 0; step < numSteps; step++) {
        float sint = sin(time);
        // Fix: loop variable renamed -- the original shadowed the outer `i`.
        for (int neuron = 0; neuron < numNeurons; neuron++) {
            biasVec[neuron] = abs(sint);
        }
        time += stepSize;
        cudaMemcpy(d_biasVec, biasVec, sizeUpdate, cudaMemcpyHostToDevice);
        stepper<<<numNeurons, 1>>>(d_firingRate, d_newFiringRate, d_connMatrix,
                                   d_sampleNeuronIndexes, d_biasVec, d_samples,
                                   d_stepSize, d_numNeur);
        // D2H copy blocks until the kernel has finished on the default stream.
        float* sampleLayer = new float[numSamples];
        cudaMemcpy(sampleLayer, d_samples, sizeSample, cudaMemcpyDeviceToHost);
        sampleRates[step + 1] = sampleLayer;
        cudaMemcpy(d_firingRate, d_newFiringRate, sizeUpdate, cudaMemcpyDeviceToDevice);
    }

    // Fix: the original leaked every allocation below.
    delete[] sampleNeuronFlags;
    delete[] connMatrix1D;
    cudaFree(d_sampleNeuronIndexes);
    cudaFree(d_samples);
    cudaFree(d_connMatrix);
    cudaFree(d_biasVec);
    cudaFree(d_firingRate);
    cudaFree(d_newFiringRate);
    cudaFree(d_numNeur);
    cudaFree(d_stepSize);
    return sampleRates;
}
}
|
18,805 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <fstream>
#include <vector>
#include <string>
#include <sstream>
using namespace std;
#define gpuErrchk(ans){gpuAssert((ans), __FILE__, __LINE__);}
// Report a failed CUDA call with file/line context; optionally terminate
// the process with the error code (used via the gpuErrchk macro).
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
    if (code == cudaSuccess)
        return;
    fprintf(stderr, "GPUassert : %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// One thread per lag tau: corr[tau] = mean over the N - tau valid origins
// of the dot product p(t) . p(t + tau), using the x/y/z dipole components.
__global__ void dipoleCorrelation(double *px, double *py, double *pz, double *corr, int N)
{
    const int tau = threadIdx.x + blockDim.x * blockIdx.x;
    if (tau < N)
    {
        double acc = 0;
        for (int t = 0; t < N - tau; ++t)
        {
            acc += px[t] * px[t + tau];
            acc += py[t] * py[t + tau];
            acc += pz[t] * pz[t + tau];
        }
        corr[tau] = acc / (N - tau);
    }
    __syncthreads(); // kept from the original; no shared state is exchanged
}
// Reads time and dipole components (columns 0, 8, 9, 10) from a .stat
// file, computes the dipole autocorrelation on the GPU, and writes
// "<time>\t<corr>" rows to the output file.
int main()
{
    string line, word;
    const int pos(8); // column index of the x dipole component
    vector< double > dipole_x, dipole_y, dipole_z;
    vector< double > t;
    const string fileName = "Platinum_nanosphere_run2.stat";
    const string fileOut = "CorrfuncCuda.wcorr";
    ifstream file;
    file.open(fileName, ios::in);
    if(!file)
    {
        cout<<"Error in opening file"<<endl;
        return -1;
    }
    // Fix: loop on getline() itself -- the original `while(!file.eof())`
    // pattern processed the last line after a failed read.
    while(getline(file, line))
    {
        int i = 0;
        stringstream is(line);
        while( is >> word )
        {
            // Skip comment lines marked with # or ##.
            if (word.compare("#") == 0 || word.compare("##") == 0 ) break;
            if(i == 0) t.push_back(stod(word));
            if(i == pos) dipole_x.push_back(stod(word));
            if(i == pos + 1) dipole_y.push_back(stod(word));
            if(i == pos + 2) dipole_z.push_back(stod(word));
            i++;
        }
    }
    file.close();
    cout<<"Dipole vector list created"<<endl;

    ofstream outfile;
    outfile.open(fileOut);
    int N = dipole_x.size();
    double *xcomp_dipole = &dipole_x[0]; //convert dipole_x vector to array
    double *ycomp_dipole = &dipole_y[0];
    double *zcomp_dipole = &dipole_z[0];
    double *xcomp_dipole_d, *ycomp_dipole_d, *zcomp_dipole_d;
    double *corr_h, *corr_d;
    corr_h = (double*)malloc(N*sizeof(double));
    cout<<"Finding the correlation funciton"<<endl;

    gpuErrchk(cudaMalloc((void**)&xcomp_dipole_d, N * sizeof(double)));
    gpuErrchk(cudaMalloc((void**)&ycomp_dipole_d, N * sizeof(double)));
    gpuErrchk(cudaMalloc((void**)&zcomp_dipole_d, N * sizeof(double)));
    gpuErrchk(cudaMalloc((void**)&corr_d, N * sizeof(double)));
    gpuErrchk(cudaMemcpy(xcomp_dipole_d, xcomp_dipole, N * sizeof(double), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(ycomp_dipole_d, ycomp_dipole, N * sizeof(double), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(zcomp_dipole_d, zcomp_dipole, N * sizeof(double), cudaMemcpyHostToDevice));
    // (Fix: dropped the original upload of the uninitialized corr_h buffer;
    // the kernel overwrites every corr_d element it uses.)

    int number_of_blocks = ( N/1024 ) + 1; // covers N lags at 1024 threads/block
    dipoleCorrelation<<<number_of_blocks,1024>>> (xcomp_dipole_d, ycomp_dipole_d, zcomp_dipole_d, corr_d, N);
    gpuErrchk(cudaDeviceSynchronize());
    gpuErrchk(cudaMemcpy(corr_h, corr_d, N * sizeof(double), cudaMemcpyDeviceToHost));

    outfile<<"## charge velocity autocorrelation function"<<endl;
    outfile<<"# time(tau)\t wcorr"<<endl;
    for(int count= 0; count < N ; ++count )
    {
        outfile << t[count] << "\t" << corr_h[count]<<endl;
    }
    outfile.close();

    // Fix: corr_h came from malloc(), so it must be released with free();
    // the original `delete [] corr_h` is undefined behaviour.
    free(corr_h);
    corr_h = NULL;
    // Fix: the original leaked the three component buffers.
    gpuErrchk(cudaFree(xcomp_dipole_d));
    gpuErrchk(cudaFree(ycomp_dipole_d));
    gpuErrchk(cudaFree(zcomp_dipole_d));
    gpuErrchk(cudaFree(corr_d));
}
|
18,806 | #include <cuda.h>
#include <chrono>
#include <cstdlib>
#include <iostream>
// Naive out-of-place transpose of a row-major N x N matrix: thread
// (col, row) writes AT[col][row] = A[row][col]; out-of-range threads idle.
__global__ void transposeKernel(const double* A, double* AT, int N) {
    const int col = blockDim.x * blockIdx.x + threadIdx.x;
    const int row = blockDim.y * blockIdx.y + threadIdx.y;
    if (col < N && row < N)
        AT[col * N + row] = A[row * N + col];
}
// Print an N x N row-major matrix, one row per line (leading newline per
// row, trailing space after each element, as in the original).
void displayMatrix(double* A, int N) {
    for (int r = 0; r < N; r++) {
        std::cout << "\n";
        for (int c = 0; c < N; c++)
            std::cout << A[r * N + c] << " ";
    }
    std::cout << "\n";
}
// Transposes an N x N matrix on the CPU and on the GPU, reports both
// timings, and verifies the GPU result element-by-element.
// Usage: ./transpose N
int main(int argc, char** argv) {
    if (argc == 2) {
        cudaEvent_t start, stop;
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
        int N = atoi(argv[1]);
        const int BLOCK_SIZE = 32;
        int grid_size = (N - 1) / BLOCK_SIZE + 1;  // ceil(N / BLOCK_SIZE)
        dim3 Grids(grid_size, grid_size);
        dim3 Blocks(BLOCK_SIZE, BLOCK_SIZE);
        size_t size = N * N * sizeof(double);
        double* h_A = (double*)malloc(size);
        if (h_A == NULL) {
            std::cerr << "Failed to allocate memory for h_A!\n";
            return 1;
        }
        double* h_AT = (double*)malloc(size);
        if (h_AT == NULL) {
            std::cerr << "Failed to allocate memory for h_B!\n";
            free(h_A);  // fix: release the first buffer on partial failure
            return 2;
        }
        for (int i = 0; i < N * N; i++) {
            h_A[i] = i % 1024;
        }
        // CPU reference transpose (walks A column-by-column), timed with chrono.
        int i = 0, k = 0;
        auto begin = std::chrono::high_resolution_clock::now();
        while (i < N * N) {
            for (int j = k; j < N * N; j += N) {
                h_AT[i++] = h_A[j];
            }
            k++;
        }
        auto end = std::chrono::high_resolution_clock::now();
        std::chrono::duration<double, std::milli> cputime = end - begin;
        std::cout << "CPU Elapsed Time: " << cputime.count() << " ms" << std::endl;
        double* d_A = NULL;
        double* d_AT = NULL;
        cudaMalloc((void**)&d_A, size);
        cudaMalloc((void**)&d_AT, size);
        cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
        cudaEventRecord(start);
        transposeKernel<<<Grids, Blocks>>>(d_A, d_AT, N);
        cudaEventRecord(stop);
        // Fix: the stop event must complete before the timer is read. The
        // original called cudaEventElapsedTime right after recording stop,
        // which fails with cudaErrorNotReady and reports a stale time.
        cudaEventSynchronize(stop);
        float gpuTime = 0;
        cudaEventElapsedTime(&gpuTime, start, stop);
        std::cout << "GPU Elapsed Time: " << gpuTime << " ms\n";
        cudaMemcpy(h_AT, d_AT, size, cudaMemcpyDeviceToHost);
        displayMatrix(h_AT, N);
        for (int r = 0; r < N; r++)
            for (int c = 0; c < N; c++) {
                if (h_A[r * N + c] != h_AT[c * N + r]) {
                    std::cout << "TEST FAILED...\n";
                    return 3;
                }
            }
        std::cout << "TEST PASSED!\n";
        free(h_A);
        free(h_AT);
        cudaFree(d_A);
        cudaFree(d_AT);
        cudaEventDestroy(start);  // fix: events were never destroyed
        cudaEventDestroy(stop);
    }
    return 0;
}
|
18,807 | #include<iostream>
#include <stdint.h>
#include<stdio.h>
#include<fstream>
#include <stdlib.h>
#include <malloc.h>
#include <string.h>
#include <sstream>
using namespace std;
#define REPEAT 1
#define STRIDE 1
#define CACHELINE 8
#define ALLIGNMENT 64
typedef unsigned long long Dtype;
__global__ void VecAdd(Dtype** A, int* N, unsigned long long* d_time, Dtype* xj, Dtype* xi);
// Greatest common factor via the Euclidean algorithm (iterative form of
// the original recursion gcf(a, b) = a == 0 ? b : gcf(b % a, a)).
int gcf(int a, int b)
{
    while (a != 0)
    {
        int r = b % a;
        b = a;
        a = r;
    }
    return b;
}
// Pointer-chase experiment driver: builds a ring of device addresses with
// the given stride inside d_A, launches VecAdd with <noofthreads> threads,
// and appends "<threads> <time>" to the result file.
// Usage: ./prog <N> <stride> <noofthreads>
int main(int argc, char* argv[])
{
    if(argc != 4)
    {
        std::cout << "Wrong number of argument!! Exiting program !!!";
        return 0;
    }
    int N = atoi(argv[1]);
    int stride = atoi(argv[2]);
    int noofthreads = atoi(argv[3]);

    unsigned long long *d_time, h_time;
    Dtype *xj, *xi;
    Dtype *h_A, **d_A;
    int *d_N;

    std::ofstream fp;
    string dpath = "/home/hpc/ihpc/ihpc002h/gpu-exp/Master-thesis/exp4-thread/data/result.txt";
    fp.open(dpath.c_str(), std::ofstream::app);

    h_A = (Dtype*)memalign(ALLIGNMENT, (N+2)*sizeof(Dtype));
    cudaMalloc(&d_A, (N+2)*sizeof(Dtype));
    cudaMalloc(&d_time, sizeof(unsigned long long));
    cudaMalloc(&xj, sizeof(Dtype));
    cudaMalloc(&xi, sizeof(Dtype));
    cudaMalloc(&d_N, sizeof(int));

    // Each slot holds the DEVICE address of the slot `stride` ahead
    // (mod N), so the kernel can chase pointers through the array.
    for(int i = 0; i < N; i++)
    {
        h_A[i] = ((Dtype)(uintptr_t)d_A) + ( (i + stride) % N)*sizeof(Dtype);
    }
    h_A[N] = 0;
    h_A[N+1] = 0;

    cudaMemcpy(d_A, h_A, (N+2)*sizeof(Dtype), cudaMemcpyHostToDevice );
    cudaMemcpy(d_N, &N, sizeof(int), cudaMemcpyHostToDevice );

    VecAdd<<<1,noofthreads>>>(d_A, d_N, d_time, xj, xi);
    // Fix: wait for the kernel before reading its result (the original
    // copied first and synchronized afterwards), and copy
    // sizeof(unsigned long long) rather than the unrelated sizeof(double).
    cudaDeviceSynchronize();
    cudaMemcpy(&h_time, d_time, sizeof(unsigned long long), cudaMemcpyDeviceToHost);

    fp << noofthreads << " " << h_time << std::endl;

    // Fix: the original leaked d_time, xj, xi and d_N.
    cudaFree(d_A);
    cudaFree(d_time);
    cudaFree(xj);
    cudaFree(xi);
    cudaFree(d_N);
    free(h_A);
    fp.close();
}
|
18,808 | #include "stdio.h"
#define COLUMNS 3
#define ROWS 2
// Elementwise matrix add on flattened row-major arrays. Launched with a
// COLUMNS x ROWS grid of single-thread blocks: blockIdx picks the element.
__global__ void matadd(int *a, int *b, int *c)
{
    const int col = blockIdx.x;
    const int row = blockIdx.y;
    const int idx = (COLUMNS * row) + col;
    c[idx] = a[idx] + b[idx];
}
/* ------------- COMPUTATION DONE ON GPU ----------------------------*/
// Adds two ROWS x COLUMNS matrices on the GPU (a = all 1s, b = all 3s) and
// prints the result computed back on the host.
int main()
{
    int a[ROWS][COLUMNS], b[ROWS][COLUMNS], c[ROWS][COLUMNS];
    int *dev_a, *dev_b, *dev_c;
    const int bytes = ROWS * COLUMNS * sizeof(int);
    cudaMalloc((void **) &dev_a, bytes);
    cudaMalloc((void **) &dev_b, bytes);
    cudaMalloc((void **) &dev_c, bytes);
    for (int i = 0; i < ROWS; i++) // Fill Arrays
        for (int j = 0; j < COLUMNS; j++)
        {
            a[i][j] = 1;
            b[i][j] = 3;
        }
    cudaMemcpy(dev_a, a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, bytes, cudaMemcpyHostToDevice);
    // One single-thread block per matrix element.
    dim3 grid(COLUMNS, ROWS);
    matadd<<<grid, 1>>>(dev_a, dev_b, dev_c);
    // Blocking copy also synchronizes with the kernel on the default stream.
    cudaMemcpy(c, dev_c, bytes, cudaMemcpyDeviceToHost);
    for (int i = 0; i < ROWS; i++) // Output Arrays
    {
        for (int j = 0; j < COLUMNS; j++)
        {
            printf("[%d][%d]=%d ",i,j,c[i][j]);
        }
        printf("\n");
    }
    // Fix: the original leaked all three device buffers.
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return 0;
}
|
18,809 | // nvcc -ccbin "D:\Program Files (x86)\Microsoft Visual Studio 11.0\VC\bin" piCalculate.cu -o piCalculate.exe
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#include <cuda_profiler_api.h>
#define MAX_CUDA_BLOCKS 65535
#define MAX_CUDA_THREADS 1024
#define PI 3.141592653
/* Kernel : cuda_calc_pi_step1
 * Per-block reduction of the "point inside circle of radius 0.5"
 * indicator. Each thread tests one (x, y) sample, then the block sums the
 * indicators in shared memory (blockDim.x ints, passed at launch) and
 * writes its partial count to circle[blockIdx.x].
 */
__global__ void cuda_calc_pi_step1(int n, int *circle, float *x, float *y)
{
    extern __shared__ int sdata[];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Fix: the original read x[i]/y[i] BEFORE checking i < n -- an
    // out-of-bounds read whenever the grid overshoots the sample count.
    int t = 0;
    if (i < n)
    {
        float c = (x[i] * x[i]) + (y[i] * y[i]);
        if (c < 0.25f) t = 1;
    }
    sdata[tid] = t;
    __syncthreads();
    // Fix: ceil-halving reduction is correct for ANY blockDim.x; the
    // original s >>= 1 loop silently dropped elements when the block size
    // was not a power of two (possible since threads = samples < 1024).
    for (unsigned int len = blockDim.x; len > 1; )
    {
        unsigned int half = (len + 1) / 2;
        if (tid < len / 2) sdata[tid] += sdata[tid + half];
        __syncthreads();
        len = half;
    }
    // write result for this block to global mem
    if (tid == 0){
        circle[blockIdx.x] = sdata[0];
    }
}
/* Kernel : cuda_calc_pi_step2
 * Folds the per-block counts produced by step1 into circle[0]. Launched as
 * a single block of n threads with n ints of dynamic shared memory.
 */
__global__ void cuda_calc_pi_step2(int n, int *circle){
    extern __shared__ int sdata[];
    unsigned int tid = threadIdx.x;
    sdata[tid] = circle[tid];
    __syncthreads();
    // Fix: the original interleaved (tid % 2s) reduction read
    // sdata[tid + s] past the end of shared memory whenever blockDim.x was
    // not a power of two; this ceil-halving loop is safe for any size.
    for (unsigned int len = blockDim.x; len > 1; )
    {
        unsigned int half = (len + 1) / 2;
        if (tid < len / 2) sdata[tid] += sdata[tid + half];
        __syncthreads();
        len = half;
    }
    // write result for this block to global mem
    if (tid == 0){
        circle[blockIdx.x] = sdata[0];
    }
}
/* Function : generate_random_numbers
* Generates n random numbers for both x and y on the host
*/
/* Fill x[0..n) and y[0..n) with uniform pseudo-random values in
 * [-0.5, 0.5], reseeding rand() from the current time.
 */
void generate_random_numbers(int n, float *x, float *y)
{
    srand(time(NULL));
    for (int i = 0; i < n; i++)
    {
        float u = (float)rand() / RAND_MAX;  // first draw -> x
        float v = (float)rand() / RAND_MAX;  // second draw -> y
        x[i] = u - 0.5f;
        y[i] = v - 0.5f;
    }
}
/* Function : calculate_pi_monte_carlo
* Calculates pi on the host by using the monte carlo method, by using a set of
* random points within a 2R square about point (0,0) we can calculate pi by
* calculating the ratio of points within the a circle with radius R starting
* from point (0,0) compared to that of the square. This is done on the host.
*/
/* Host Monte-Carlo estimate of pi: 4 times the fraction of the n points
 * that fall strictly inside the circle of radius 0.5 centred on (0, 0).
 */
float calculate_pi_monte_carlo(int n, float *x, float *y)
{
    int inside = 0;
    for (int i = 0; i < n; i++)
    {
        // r^2 < 0.5^2 <=> the point is inside the circle.
        if (pow(x[i], 2) + pow(y[i], 2) < pow(0.5f, 2))
            inside++;
    }
    return (4.0f * inside) / n;
}
/* Function : gpu_calc_pi_monte_carlo
 * Estimates pi on the GPU: generates `samples` random points on the host,
 * copies them to the device, counts in-circle points with a two-step
 * shared-memory reduction, and returns 4 * inside / samples.
 * Returns 0.0 when `samples` exceeds what a single step2 launch can reduce.
 */
double gpu_calc_pi_monte_carlo(int samples){
int threads, blocks;
//You can only do so many samples in an execution due to limitations of the card
// (step2 reduces with ONE block, so at most MAX_CUDA_THREADS partial
// counts of MAX_CUDA_THREADS samples each can be handled).
if(samples > (MAX_CUDA_THREADS*MAX_CUDA_THREADS)){
printf("Too many samples\n");
return 0.0f;
}
// Block size: all samples in one block if they fit, else full 1024 blocks.
threads = (samples < MAX_CUDA_THREADS) ? samples : MAX_CUDA_THREADS;
// Ceil-divide so every sample is covered.
blocks = ((samples-1)/MAX_CUDA_THREADS)+1;
int *d_circle_count, *circle_count;
float *x, *y, *d_x, *d_y;
double pi;
// Allocate memory for our random numbers
x = (float *)malloc(samples * sizeof(float));
y = (float *)malloc(samples * sizeof(float));
circle_count = (int *)malloc(blocks * sizeof(int));
// Allocate memory on our GPU
cudaMalloc(&d_x, samples * sizeof(float));
cudaMalloc(&d_y, samples * sizeof(float));
cudaMalloc(&d_circle_count,blocks * sizeof(int));
generate_random_numbers(samples, x, y);
cudaMemcpy(d_x, x, samples * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, samples * sizeof(float), cudaMemcpyHostToDevice);
// Step 1: per-block in-circle counts (threads ints of shared memory).
cuda_calc_pi_step1<<<blocks,threads,threads*sizeof(int)>>>(samples, d_circle_count, d_x, d_y);
// Step 2: a single block folds the per-block counts into d_circle_count[0].
cuda_calc_pi_step2<<<1,blocks,blocks*sizeof(int)>>>(blocks, d_circle_count);
// Blocking copy; also synchronizes with the kernels on the default stream.
cudaMemcpy(circle_count, d_circle_count, blocks * sizeof(int), cudaMemcpyDeviceToHost);
pi = 4.0 * (double)circle_count[0] / (double)samples;
// Free memory
free(x);
free(y);
free(circle_count);
cudaFree(d_x);
cudaFree(d_y);
cudaFree(d_circle_count);
return pi;
}
// Repeatedly estimates pi with ~1M samples per pass, keeps a running
// average, prints the estimate and relative error every 50 passes, and
// profiles passes 0 through 100.
int main(void)
{
    const long int N = 1048576;  // samples per pass
    double pi = 0.0;
    int it;
    for (int i = 0; i < 10024; i++) {
        if (i == 0) cudaProfilerStart();
        pi += gpu_calc_pi_monte_carlo(N);
        it = i + 1;
        if (i % 50 == 1) {
            printf("Samples/1 mill : %d, ", it);
            printf("Pi Estimated : %f, ", pi / it);
            printf("Error : %f\n", (PI - (pi / it)) / PI);
        }
        if (i == 100) cudaProfilerStop();
    }
    pi /= 10024;
    printf("Pi is estimated to be %f\n", pi);
}
18,810 | // simple increment kernel
#include <cuda.h>
#include <stdio.h>
//TODO: increment kernel
// Adds 2.0f to the pointed-to value (note: despite the name, the step is
// 2.0f, matching the original behaviour).
__global__
void increment(float *val) {
    *val = *val + 2.0f;
}
// Round-trips one float through the GPU: copy in, add 2 with a 1x1 kernel
// launch, copy the result back and print it.
int main(void)
{
    float *device_pointer;
    float input = 40.0f;
    // print original value
    printf("Input: %f\n", input);
    // allocate device memory
    cudaMalloc(&device_pointer, sizeof(float));
    // memcpy to device
    cudaMemcpy(device_pointer, &input, sizeof(float), cudaMemcpyHostToDevice);
    // launch the increment kernel
    increment<<<1, 1>>>(device_pointer);
    // memcpy results back to host (blocking, so it also waits for the kernel)
    cudaMemcpy(&input, device_pointer, sizeof(float), cudaMemcpyDeviceToHost);
    // print new value
    printf("New Input: %f\n", input);
    cudaFree(device_pointer);  // fix: the original leaked the device allocation
    return 0;
}
|
18,811 | #include <iostream>
#include <cassert>
#include <chrono>
using namespace std;
// M x K and K x N
constexpr long M = 128;
constexpr long K = 128;
constexpr long N = 128;
// Reference host implementation: result (M x N) = mat1 (M x K) * mat2
// (K x N), all row-major; M/K/N are the file-level constexpr dimensions.
void MatmulOnCPU(double* mat1, double* mat2, double* result) {
    for (int row = 0; row < M; ++row) {
        for (int col = 0; col < N; ++col) {
            double acc = 0;
            for (int k = 0; k < K; ++k)
                acc += mat1[row * K + k] * mat2[k * N + col];
            result[row * N + col] = acc;
        }
    }
}
__global__ void MatmulKernel(double* mat1, double* mat2, double* result);
// GPU wrapper: copies the operands to the device, launches one
// single-thread block per output element (M x N grid), and copies the
// result back. The blocking D2H copy doubles as the kernel synchronize.
void MatmulOnGPU(double* mat1, double* mat2, double* result) {
    double *d_mat1, *d_mat2, *d_result;
    const size_t bytes1 = M * K * sizeof(double);
    const size_t bytes2 = K * N * sizeof(double);
    const size_t bytesR = M * N * sizeof(double);
    cudaMalloc(&d_mat1, bytes1);
    cudaMalloc(&d_mat2, bytes2);
    cudaMalloc(&d_result, bytesR);
    cudaMemcpy(d_mat1, mat1, bytes1, cudaMemcpyHostToDevice);
    cudaMemcpy(d_mat2, mat2, bytes2, cudaMemcpyHostToDevice);
    dim3 dimBlock(1, 1);
    dim3 dimGrid(M, N);
    MatmulKernel <<<dimGrid, dimBlock>>> (d_mat1, d_mat2, d_result);
    cudaMemcpy(result, d_result, bytesR, cudaMemcpyDeviceToHost);
    cudaFree(d_mat1);
    cudaFree(d_mat2);
    cudaFree(d_result);
}
// One block computes one output element: row blockIdx.x of mat1 dotted
// with column blockIdx.y of mat2 (K-length inner product).
__global__ void MatmulKernel(double* mat1_cuda, double* mat2_cuda, double* result_cuda) {
    const int row = blockIdx.x;
    const int col = blockIdx.y;
    double acc = 0;
    for (int k = 0; k < K; k++)
        acc += mat1_cuda[row * K + k] * mat2_cuda[k * N + col];
    result_cuda[row * N + col] = acc;
}
// Fills two matrices, multiplies them on CPU and GPU, compares the
// results with a 1e-4 tolerance, and reports timings and speedup.
int main() {
    auto mat1 = new double[M * K] {0};
    auto mat2 = new double[K * N] {0};
    for (int i = 0; i < M * K; i++) {
        mat1[i] = i;
    }
    for (int i = 0; i < K * N; i++) {
        mat2[i] = i;
    }
    auto PCPU = new double[M * N] {0};
    auto PGPU = new double[M * N] {0};
    chrono::system_clock::time_point begin, end;
    begin = chrono::system_clock::now();
    MatmulOnCPU(mat1, mat2, PCPU);
    end = chrono::system_clock::now();
    auto cpu_duration = chrono::duration_cast<chrono::microseconds>(end - begin).count();
    begin = chrono::system_clock::now();
    MatmulOnGPU(mat1, mat2, PGPU);
    end = chrono::system_clock::now();
    auto gpu_duration = chrono::duration_cast<chrono::microseconds>(end - begin).count();
#ifdef DEBUG
    printf("/\n");
    for (int i = 0; i < M; i++) {
        printf("|\t");
        for (int j = 0; j < K; j++) {
            printf("%.2lf\t", mat1[i * K + j]);
        }
        printf("\t|\n");
    }
    printf("\\\n");
    printf("/\n");
    for (int i = 0; i < K; i++) {
        printf("|\t");
        for (int j = 0; j < N; j++) {
            printf("%.2lf\t", mat2[i * N + j]);
        }
        printf("\t|\n");
    }
    printf("\\\n");
    printf("/\n");
    for (int i = 0; i < M; i++) {
        printf("|\t");
        for (int j = 0; j < N; j++) {
            printf("%.2lf\t", PCPU[i * N + j]);
        }
        printf("\t|\n");
    }
    printf("\\\n");
    printf("/\n");
    for (int i = 0; i < M; i++) {
        printf("|\t");
        for (int j = 0; j < N; j++) {
            printf("%.2lf\t", PGPU[i * N + j]);
        }
        printf("\t|\n");
    }
    printf("\\\n");
#endif
    bool correct = true;
    for (long i = 0; i < M * N; i++) {
        if (abs(PCPU[i] - PGPU[i]) > 1e-4) {
            correct = false;
            // Fix: i / N and i % N are long -> %ld (the original %d is
            // undefined behaviour on LP64 platforms).
            printf("at [%ld, %ld], %lf -- %lf -- %lf\n", i / N, i % N, PCPU[i], PGPU[i], PCPU[i] - PGPU[i]);
            break;
        }
    }
    printf("=====================Summary=======================\n");
    // Fix: M, K, N are constexpr long -> %ld, not %d.
    printf("mat(%ldx%ld) x mat(%ldx%ld)\n", M, K, K, N);
    if (correct) {
        printf("\033[1;32mThe result is correct!\033[0m\n");
    }
    else {
        printf("\033[1;31mThe result is wrong!\033[0m\n");
    }
    printf("cpu:\t %lld us\n", cpu_duration);
    printf("gpu:\t %lld us\n", gpu_duration);
    printf("speedup:\t %lf\n", cpu_duration / (double)gpu_duration);
    printf("===================================================\n");
    // Fix: release the heap buffers (the original leaked all four).
    delete[] mat1;
    delete[] mat2;
    delete[] PCPU;
    delete[] PGPU;
}
18,812 | /*
* =====================================================================================
*
* Filename: lud.cu
*
* Description: The main wrapper for the suite
*
* Version: 1.0
* Created: 10/22/2009 08:40:34 PM
* Revision: none
* Compiler: gcc
*
* Author: Liang Wang (lw2aw), lw2aw@virginia.edu
* Company: CS@UVa
*
* =====================================================================================
*/
#include <assert.h>
#include <cuda.h>
#include <getopt.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <time.h>
#include <unistd.h>
#include <sys/time.h>
/////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////
// Common
/////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////
/* Uniform pseudo-random float in [0, 1). */
#define GET_RAND_FP ((float)rand() / ((float)(RAND_MAX) + (float)(1)))
#define MIN(i, j) ((i) < (j) ? (i) : (j))
/* Generic success/failure return code used by the helpers below. */
typedef enum _FUNC_RETURN_CODE { RET_SUCCESS, RET_FAILURE } func_ret_t;
/* Simple wall-clock stopwatch built on gettimeofday(). */
typedef struct __stopwatch_t{
  struct timeval begin;
  struct timeval end;
}stopwatch;
/* Clear both timestamps and record the start time. No-op on NULL. */
void stopwatch_start(stopwatch *sw) {
  if (sw == NULL)
    return;
  bzero(&sw->begin, sizeof(struct timeval));
  bzero(&sw->end, sizeof(struct timeval));
  gettimeofday(&sw->begin, NULL);
}
/* Record the stop time. No-op on NULL. */
void stopwatch_stop(stopwatch *sw) {
  if (sw == NULL)
    return;
  gettimeofday(&sw->end, NULL);
}
/* Elapsed seconds between start and stop; 0 for NULL. */
double get_interval_by_sec(stopwatch *sw) {
  if (sw == NULL)
    return 0;
  double secs = (double)(sw->end.tv_sec - sw->begin.tv_sec);
  double usecs = (double)(sw->end.tv_usec - sw->begin.tv_usec);
  return secs + usecs / 1000000;
}
/* Elapsed microseconds between start and stop; 0 for NULL. */
int get_interval_by_usec(stopwatch *sw) {
  if (sw == NULL)
    return 0;
  return (int)((sw->end.tv_sec - sw->begin.tv_sec) * 1000000 +
               (sw->end.tv_usec - sw->begin.tv_usec));
}
/*
 * Read a size x size float matrix from a text file.
 * Format: the dimension on the first line, then whitespace-separated
 * floats. On success stores the heap buffer in *mp (caller frees) and the
 * dimension in *size_p.
 * Improvement: fscanf results are now checked, so a malformed or
 * truncated file returns RET_FAILURE instead of leaving garbage behind.
 */
func_ret_t create_matrix_from_file(float **mp, const char *filename,
                                   int *size_p) {
  int i, j, size;
  float *m;
  FILE *fp = NULL;
  fp = fopen(filename, "rb");
  if (fp == NULL) {
    return RET_FAILURE;
  }
  /* Reject an unparsable or non-positive dimension header. */
  if (fscanf(fp, "%d\n", &size) != 1 || size <= 0) {
    fclose(fp);
    return RET_FAILURE;
  }
  m = (float *)malloc(sizeof(float) * size * size);
  if (m == NULL) {
    fclose(fp);
    return RET_FAILURE;
  }
  for (i = 0; i < size; i++) {
    for (j = 0; j < size; j++) {
      /* Bail out cleanly on truncated matrix data. */
      if (fscanf(fp, "%f ", m + i * size + j) != 1) {
        free(m);
        fclose(fp);
        return RET_FAILURE;
      }
    }
  }
  fclose(fp);
  *size_p = size;
  *mp = m;
  return RET_SUCCESS;
}
/*
 * Build a random LU-decomposable matrix: generate unit-lower-triangular L
 * and upper-triangular U with random entries, then return m = L * U in
 * *mp (caller frees).
 *
 * Fixes vs. the original: the output buffer `m` was never allocated
 * (write through an uninitialized pointer), and the product used '='
 * instead of '+=', so the result was not actually L * U.
 */
func_ret_t create_matrix_from_random(float **mp, int size) {
  float *l, *u, *m;
  int i, j, k;
  srand(time(NULL));
  l = (float *)malloc(size * size * sizeof(float));
  if (l == NULL)
    return RET_FAILURE;
  u = (float *)malloc(size * size * sizeof(float));
  if (u == NULL) {
    free(l);
    return RET_FAILURE;
  }
  /* calloc: the accumulation below needs a zeroed product matrix. */
  m = (float *)calloc((size_t)size * size, sizeof(float));
  if (m == NULL) {
    free(l);
    free(u);
    return RET_FAILURE;
  }
  /* L: random strictly-lower entries, 1 on the diagonal, 0 above. */
  for (i = 0; i < size; i++) {
    for (j = 0; j < size; j++) {
      if (i > j) {
        l[i * size + j] = GET_RAND_FP;
      } else if (i == j) {
        l[i * size + j] = 1;
      } else {
        l[i * size + j] = 0;
      }
    }
  }
  /* U is stored transposed: u[j * size + i] holds U(i, j). */
  for (j = 0; j < size; j++) {
    for (i = 0; i < size; i++) {
      if (i > j) {
        u[j * size + i] = 0;
      } else {
        u[j * size + i] = GET_RAND_FP;
      }
    }
  }
  /* m = L * U; only k <= min(i, j) terms are non-zero by construction. */
  for (i = 0; i < size; i++) {
    for (j = 0; j < size; j++) {
      for (k = 0; k <= MIN(i, j); k++)
        m[i * size + j] += l[i * size + k] * u[j * size + k];
    }
  }
  free(l);
  free(u);
  *mp = m;
  return RET_SUCCESS;
}
/*
 * Dense product: output = inputa * inputb (all size x size, row-major).
 * Fix: the original assigned with '=' inside the k loop, overwriting the
 * partial sums so only the k = size-1 term survived; output is now
 * cleared once and accumulated with '+='.
 */
void matrix_multiply(float *inputa, float *inputb, float *output, int size) {
  int i, j, k;
  for (i = 0; i < size * size; i++)
    output[i] = 0;
  /* i-k-j order keeps the inner loop contiguous in both output and inputb. */
  for (i = 0; i < size; i++)
    for (k = 0; k < size; k++)
      for (j = 0; j < size; j++)
        output[i * size + j] += inputa[i * size + k] * inputb[k * size + j];
}
/* Verify that lu is a valid LU factorization of m: rebuild L*U (both
 * factors packed in lu, L with an implicit unit diagonal) and compare
 * element-wise against m with a 1e-4 absolute tolerance. Prints every
 * mismatching element and returns RET_FAILURE if any is found. */
func_ret_t lud_verify(float *m, float *lu, int matrix_dim) {
  int i, j, k;
  float *tmp = (float *)malloc(matrix_dim * matrix_dim * sizeof(float));
  /* tmp = L * U; only k <= min(i, j) contributes for triangular factors. */
  for (i = 0; i < matrix_dim; i++)
    for (j = 0; j < matrix_dim; j++) {
      float sum = 0;
      float l, u;
      for (k = 0; k <= MIN(i, j); k++) {
        if (i == k)
          l = 1; /* L's unit diagonal is implicit in the packed storage */
        else
          l = lu[i * matrix_dim + k];
        u = lu[k * matrix_dim + j];
        sum += l * u;
      }
      tmp[i * matrix_dim + j] = sum;
    }
  func_ret_t ret = RET_SUCCESS;
  for (i = 0; i < matrix_dim; i++) {
    for (j = 0; j < matrix_dim; j++) {
      /* Report all mismatches rather than stopping at the first. */
      if (fabs(m[i * matrix_dim + j] - tmp[i * matrix_dim + j]) > 0.0001) {
        ret = RET_FAILURE;
        printf("dismatch at (%d, %d): (o)%f (n)%f\n", i, j,
               m[i * matrix_dim + j], tmp[i * matrix_dim + j]);
      }
    }
  }
  free(tmp);
  return ret;
}
/* Allocate a fresh matrix_dim x matrix_dim buffer, copy src into it and
 * hand it back through *dst. Caller owns (and frees) the copy. */
void matrix_duplicate(float *src, float **dst, int matrix_dim) {
  int bytes = matrix_dim * matrix_dim * sizeof(float);
  float *copy = (float *)malloc(bytes);
  memcpy(copy, src, bytes);
  *dst = copy;
}
/* Print a matrix_dim x matrix_dim row-major matrix to stdout,
 * one row per line, "%f " per element. */
void print_matrix(float *m, int matrix_dim) {
  int r, c;
  for (r = 0; r < matrix_dim; r++) {
    for (c = 0; c < matrix_dim; c++)
      printf("%f ", m[r * matrix_dim + c]);
    printf("\n");
  }
}
// Generate well-conditioned matrix internally by Ke Wang 2013/08/07 22:20:06
/*
 * Build a symmetric test matrix m[i][j] = 10 * exp(lamda * |i - j|) and
 * return it through *mp (caller frees).
 * Improvement: the coefficient table is heap-allocated; the original used
 * a variable-length stack array, which overflows the stack for large
 * sizes.
 */
func_ret_t create_matrix(float **mp, int size) {
  float *m;
  int i, j;
  float lamda = -0.001;
  float coe_i = 0.0;
  /* coe[size-1+d] == coe[size-1-d] == 10 * exp(lamda * d) */
  float *coe = (float *)malloc((2 * (size_t)size - 1) * sizeof(float));
  if (coe == NULL) {
    return RET_FAILURE;
  }
  for (i = 0; i < size; i++) {
    coe_i = 10 * exp(lamda * i);
    j = size - 1 + i;
    coe[j] = coe_i;
    j = size - 1 - i;
    coe[j] = coe_i;
  }
  m = (float *)malloc(sizeof(float) * size * size);
  if (m == NULL) {
    free(coe);
    return RET_FAILURE;
  }
  for (i = 0; i < size; i++) {
    for (j = 0; j < size; j++) {
      m[i * size + j] = coe[size - 1 - i + j];
    }
  }
  free(coe);
  *mp = m;
  return RET_SUCCESS;
}
/////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////
#ifdef RD_WG_SIZE_0_0
#define BLOCK_SIZE RD_WG_SIZE_0_0
#elif defined(RD_WG_SIZE_0)
#define BLOCK_SIZE RD_WG_SIZE_0
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE RD_WG_SIZE
#else
#define BLOCK_SIZE 16
#endif
static int do_verify = 0;
static struct option long_options[] = {
/* name, has_arg, flag, val */
{"input", 1, NULL, 'i'},
{"size", 1, NULL, 's'},
{"verify", 0, NULL, 'v'},
{0, 0, 0, 0}};
#ifdef RD_WG_SIZE_0_0
#define BLOCK_SIZE RD_WG_SIZE_0_0
#elif defined(RD_WG_SIZE_0)
#define BLOCK_SIZE RD_WG_SIZE_0
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE RD_WG_SIZE
#else
#define BLOCK_SIZE 16
#endif
/*
 * Factor the BLOCK_SIZE x BLOCK_SIZE tile whose top-left corner is at
 * (offset, offset) of the matrix_dim x matrix_dim matrix m, in place.
 * Launched as <<<1, BLOCK_SIZE>>> (see lud_cuda): thread t owns column t
 * of the tile while it sits in shared memory.
 */
__global__ void lud_diagonal(float *m, int matrix_dim, int offset) {
  int i, j;
  __shared__ float shadow[BLOCK_SIZE][BLOCK_SIZE];
  /* Stage the tile into shared memory, one row per loop iteration. */
  int array_offset = offset * matrix_dim + offset;
  for (i = 0; i < BLOCK_SIZE; i++) {
    shadow[i][threadIdx.x] = m[array_offset + threadIdx.x];
    array_offset += matrix_dim;
  }
  __syncthreads();
  for (i = 0; i < BLOCK_SIZE - 1; i++) {
    if (threadIdx.x > i) {
      /* Column i of L: eliminate, then divide by the pivot shadow[i][i]. */
      for (j = 0; j < i; j++)
        shadow[threadIdx.x][i] -= shadow[threadIdx.x][j] * shadow[j][i];
      shadow[threadIdx.x][i] /= shadow[i][i];
    }
    __syncthreads();  /* barriers are outside the divergent ifs — all threads reach them */
    if (threadIdx.x > i) {
      /* Update row i+1 using the multipliers just computed. */
      for (j = 0; j < i + 1; j++)
        shadow[i + 1][threadIdx.x] -= shadow[i + 1][j] * shadow[j][threadIdx.x];
    }
    __syncthreads();
  }
  /*
  The first row is not modified, it
  is no need to write it back to the
  global memory
  */
  array_offset = (offset + 1) * matrix_dim + offset;
  for (i = 1; i < BLOCK_SIZE; i++) {
    m[array_offset + threadIdx.x] = shadow[i][threadIdx.x];
    array_offset += matrix_dim;
  }
}
/*
 * Update the perimeter tiles for the current elimination step: the tiles
 * to the right of the diagonal tile (row strip) and the tiles below it
 * (column strip). Launched with BLOCK_SIZE * 2 threads per block and one
 * block per tile pair (see lud_cuda): threads [0, BLOCK_SIZE) work on a
 * row tile, threads [BLOCK_SIZE, 2*BLOCK_SIZE) on a column tile;
 * blockIdx.x + 1 selects the tile along the strip.
 */
__global__ void lud_perimeter(float *m, int matrix_dim, int offset) {
  __shared__ float dia[BLOCK_SIZE][BLOCK_SIZE];
  __shared__ float peri_row[BLOCK_SIZE][BLOCK_SIZE];
  __shared__ float peri_col[BLOCK_SIZE][BLOCK_SIZE];
  int i, j, array_offset;
  int idx;
  /* Load phase: each half of the block loads half of the diagonal tile
     plus its own perimeter tile into shared memory. */
  if (threadIdx.x < BLOCK_SIZE) {
    idx = threadIdx.x;
    /* first half: top half of the diagonal tile ... */
    array_offset = offset * matrix_dim + offset;
    for (i = 0; i < BLOCK_SIZE / 2; i++) {
      dia[i][idx] = m[array_offset + idx];
      array_offset += matrix_dim;
    }
    /* ... and the row-perimeter tile selected by blockIdx.x */
    array_offset = offset * matrix_dim + offset;
    for (i = 0; i < BLOCK_SIZE; i++) {
      peri_row[i][idx] = m[array_offset + (blockIdx.x + 1) * BLOCK_SIZE + idx];
      array_offset += matrix_dim;
    }
  } else {
    idx = threadIdx.x - BLOCK_SIZE;
    /* second half: bottom half of the diagonal tile ... */
    array_offset = (offset + BLOCK_SIZE / 2) * matrix_dim + offset;
    for (i = BLOCK_SIZE / 2; i < BLOCK_SIZE; i++) {
      dia[i][idx] = m[array_offset + idx];
      array_offset += matrix_dim;
    }
    /* ... and the column-perimeter tile selected by blockIdx.x */
    array_offset =
        (offset + (blockIdx.x + 1) * BLOCK_SIZE) * matrix_dim + offset;
    for (i = 0; i < BLOCK_SIZE; i++) {
      peri_col[i][idx] = m[array_offset + idx];
      array_offset += matrix_dim;
    }
  }
  __syncthreads();
  /* Compute phase: forward-substitute with the diagonal tile's factors. */
  if (threadIdx.x < BLOCK_SIZE) { // peri-row
    idx = threadIdx.x;
    for (i = 1; i < BLOCK_SIZE; i++) {
      for (j = 0; j < i; j++)
        peri_row[i][idx] -= dia[i][j] * peri_row[j][idx];
    }
  } else { // peri-col
    idx = threadIdx.x - BLOCK_SIZE;
    for (i = 0; i < BLOCK_SIZE; i++) {
      for (j = 0; j < i; j++)
        peri_col[idx][i] -= peri_col[idx][j] * dia[j][i];
      peri_col[idx][i] /= dia[i][i];
    }
  }
  __syncthreads();
  /* Write-back phase: the row tile skips its unchanged first row. */
  if (threadIdx.x < BLOCK_SIZE) { // peri-row
    idx = threadIdx.x;
    array_offset = (offset + 1) * matrix_dim + offset;
    for (i = 1; i < BLOCK_SIZE; i++) {
      m[array_offset + (blockIdx.x + 1) * BLOCK_SIZE + idx] = peri_row[i][idx];
      array_offset += matrix_dim;
    }
  } else { // peri-col
    idx = threadIdx.x - BLOCK_SIZE;
    array_offset =
        (offset + (blockIdx.x + 1) * BLOCK_SIZE) * matrix_dim + offset;
    for (i = 0; i < BLOCK_SIZE; i++) {
      m[array_offset + idx] = peri_col[i][idx];
      array_offset += matrix_dim;
    }
  }
}
/*
 * Trailing-submatrix update: each block subtracts peri_col * peri_row
 * from one interior BLOCK_SIZE x BLOCK_SIZE tile. Launched with a 2-D
 * grid and BLOCK_SIZE x BLOCK_SIZE blocks (see lud_cuda); block (bx, by)
 * updates the tile at (offset + (by+1)*B, offset + (bx+1)*B).
 */
__global__ void lud_internal(float *m, int matrix_dim, int offset) {
  __shared__ float peri_row[BLOCK_SIZE][BLOCK_SIZE];
  __shared__ float peri_col[BLOCK_SIZE][BLOCK_SIZE];
  int i;
  float sum;
  int global_row_id = offset + (blockIdx.y + 1) * BLOCK_SIZE;
  int global_col_id = offset + (blockIdx.x + 1) * BLOCK_SIZE;
  /* Stage the perimeter row tile above and the perimeter column tile to
     the left of this block's tile. */
  peri_row[threadIdx.y][threadIdx.x] =
      m[(offset + threadIdx.y) * matrix_dim + global_col_id + threadIdx.x];
  peri_col[threadIdx.y][threadIdx.x] =
      m[(global_row_id + threadIdx.y) * matrix_dim + offset + threadIdx.x];
  __syncthreads();
  /* Rank-BLOCK_SIZE update of one output element per thread. */
  sum = 0;
  for (i = 0; i < BLOCK_SIZE; i++)
    sum += peri_col[threadIdx.y][i] * peri_row[i][threadIdx.x];
  m[(global_row_id + threadIdx.y) * matrix_dim + global_col_id + threadIdx.x] -=
      sum;
}
/*
 * Host driver for the blocked, in-place LU decomposition (no pivoting) of
 * the matrix_dim x matrix_dim device matrix m. For each BLOCK_SIZE step:
 * factor the diagonal tile, update the perimeter row/column tiles, then
 * update the trailing submatrix; finish with the final diagonal tile.
 * The loop structure appears to assume matrix_dim is a multiple of
 * BLOCK_SIZE.
 * Fix: dropped an unused host-side debug buffer the original allocated
 * and leaked on every call.
 */
void lud_cuda(float *m, int matrix_dim) {
  int i = 0;
  dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
  for (i = 0; i < matrix_dim - BLOCK_SIZE; i += BLOCK_SIZE) {
    lud_diagonal<<<1, BLOCK_SIZE>>>(m, matrix_dim, i);
    lud_perimeter<<<(matrix_dim - i) / BLOCK_SIZE - 1, BLOCK_SIZE * 2>>>(
        m, matrix_dim, i);
    dim3 dimGrid((matrix_dim - i) / BLOCK_SIZE - 1,
                 (matrix_dim - i) / BLOCK_SIZE - 1);
    lud_internal<<<dimGrid, dimBlock>>>(m, matrix_dim, i);
  }
  lud_diagonal<<<1, BLOCK_SIZE>>>(m, matrix_dim, i);
}
/* Entry point: parse -i <file> / -s <size> / -v, build or load the input
 * matrix, run the CUDA LU decomposition (timing includes the transfers),
 * and optionally verify the factors against a saved copy. */
int main(int argc, char *argv[]) {
  printf("WG size of kernel = %d X %d\n", BLOCK_SIZE, BLOCK_SIZE);
  int matrix_dim = 32; /* default matrix_dim */
  int opt, option_index = 0;
  func_ret_t ret;
  const char *input_file = NULL;
  float *m, *d_m, *mm;
  stopwatch sw;
  while ((opt = getopt_long(argc, argv, "::vs:i:", long_options,
                            &option_index)) != -1) {
    switch (opt) {
    case 'i':
      input_file = optarg;
      break;
    case 'v':
      do_verify = 1;
      break;
    case 's':
      matrix_dim = atoi(optarg);
      printf("Generate input matrix internally, size =%d\n", matrix_dim);
      // fprintf(stderr, "Currently not supported, use -i instead\n");
      // fprintf(stderr, "Usage: %s [-v] [-s matrix_size|-i input_file]\n",
      // argv[0]); exit(EXIT_FAILURE);
      break;
    case '?':
      fprintf(stderr, "invalid option\n");
      break;
    case ':':
      fprintf(stderr, "missing argument\n");
      break;
    default:
      fprintf(stderr, "Usage: %s [-v] [-s matrix_size|-i input_file]\n",
              argv[0]);
      exit(EXIT_FAILURE);
    }
  }
  /* Reject stray positional arguments or a bare invocation. */
  if ((optind < argc) || (optind == 1)) {
    fprintf(stderr, "Usage: %s [-v] [-s matrix_size|-i input_file]\n", argv[0]);
    exit(EXIT_FAILURE);
  }
  if (input_file) {
    printf("Reading matrix from file %s\n", input_file);
    ret = create_matrix_from_file(&m, input_file, &matrix_dim);
    if (ret != RET_SUCCESS) {
      m = NULL;
      fprintf(stderr, "error create matrix from file %s\n", input_file);
      exit(EXIT_FAILURE);
    }
  } else if (matrix_dim) {
    printf("Creating matrix internally size=%d\n", matrix_dim);
    ret = create_matrix(&m, matrix_dim);
    if (ret != RET_SUCCESS) {
      m = NULL;
      fprintf(stderr, "error create matrix internally size=%d\n", matrix_dim);
      exit(EXIT_FAILURE);
    }
  }
  else {
    printf("No input file specified!\n");
    exit(EXIT_FAILURE);
  }
  if (do_verify) {
    printf("Before LUD\n");
    // print_matrix(m, matrix_dim);
    /* keep a pristine copy so lud_verify can compare against L*U */
    matrix_duplicate(m, &mm, matrix_dim);
  }
  cudaMalloc((void **)&d_m, matrix_dim * matrix_dim * sizeof(float));
  /* beginning of timing point */
  stopwatch_start(&sw);
  cudaMemcpy(d_m, m, matrix_dim * matrix_dim * sizeof(float),
             cudaMemcpyHostToDevice);
  lud_cuda(d_m, matrix_dim);
  /* NOTE(review): no cudaGetLastError() around the kernels — launch
   * failures would only show up as wrong results in lud_verify. */
  cudaMemcpy(m, d_m, matrix_dim * matrix_dim * sizeof(float),
             cudaMemcpyDeviceToHost);
  /* end of timing point (the blocking memcpy synchronizes the device) */
  stopwatch_stop(&sw);
  printf("Time consumed(ms): %lf\n", 1000 * get_interval_by_sec(&sw));
  cudaFree(d_m);
  if (do_verify) {
    printf("After LUD\n");
    // print_matrix(m, matrix_dim);
    printf(">>>Verify<<<<\n");
    lud_verify(mm, m, matrix_dim);
    free(mm);
  }
  free(m);
  return EXIT_SUCCESS;
} /* ---------- end of function main ---------- */
|
18,813 | #include "includes.h"
// Adds an assisted-excitation term to every channel of the activation
// map: output[xy + size*(c + channels*b)] += alpha * gt_gpu[i] * a_avg_gpu[i]
// for each channel c. One thread per flat (batch, spatial) index i, where
// size is the spatial extent of one channel and b < batches guards the tail.
// NOTE(review): gt_gpu and a_avg_gpu are indexed by the flat (batch, xy)
// index, so the same weight is applied to all channels — presumably they
// are laid out as [batch][xy]; confirm against the caller.
__global__ void assisted_activation_kernel(float alpha, float *output, float *gt_gpu, float *a_avg_gpu, int size, int channels, int batches)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int xy = i % size;
    int b = i / size;
    if (b < batches) {
        for (int c = 0; c < channels; ++c) {
            output[xy + size*(c + channels*b)] += alpha * gt_gpu[i] * a_avg_gpu[i];
            //output[xy + size*(c + channels*b)] += gt_gpu[i] * a_avg_gpu[i];
            //output[xy + size*(c + channels*b)] += gt_gpu[i] * output[xy + size*(c + channels*b)];
            //output[xy + size*(c + channels*b)] = a_avg_gpu[i];
        }
    }
}
18,814 | #include "includes.h"
// Row-wise argmax: one block per row (blockIdx.x); a single sequential
// scan over the row's cols scores writes the winning column index (as a
// float) to out[row].
// Fix: maxInd was uninitialized, so a row whose scores were all <= the
// -99999 sentinel (or cols == 0) wrote an indeterminate value; it now
// defaults to column 0.
// NOTE(review): the index is returned through a float, which is exact
// only for cols < 2^24.
__global__ void gArgmax(float* out, const float* data, size_t rows, size_t cols) {
  size_t row = blockIdx.x;
  size_t startInd = row * cols;
  float maxScore = -99999;
  size_t maxInd = 0;
  for(size_t col = 0; col < cols; ++col) {
    size_t ind = startInd + col;
    float score = data[ind];
    if(score > maxScore) {
      maxScore = score;
      maxInd = col;
    }
  }
  out[row] = maxInd;
}
18,815 | #include "includes.h"
#define WEIGHTSUM 273   // normalization constant — presumably a blur kernel weight sum used elsewhere; confirm
#define BLOCK_SIZE 16   // thread-block side length
// Heatmap buffers with their row pitches in bytes (the kernel below
// indexes them as pitched allocations — presumably cudaMallocPitch;
// TODO confirm at the allocation site, which is not in this file chunk).
int * heatmap;
size_t heatmap_pitch;
int * scaled_heatmap;
size_t scaled_heatmap_pitch;
int * blurred_heatmap;
size_t blurred_heatmap_pitch;
// Per-agent desired x/y coordinates (d_ prefix suggests device copies — confirm).
float* d_desiredPositionX;
float* d_desiredPositionY;
// One thread per heatmap cell (x, y): fades the existing heat by 20%,
// then bumps (by 40, saturating at 255) every cell that is the desired
// destination of some agent. desiredAgentsX/Y hold the n agents' target
// coordinates; launch must supply 2 * n * sizeof(float) dynamic shared
// memory for the staged copy.
// NOTE(review): no bounds guard on x/y — assumes the grid exactly tiles
// the pitched heatmap; confirm the launch dimensions.
// NOTE(review): scaled_heatmap / scaled_heatmap_pitch are unused here.
__global__ void computeHeatmap(float* desiredAgentsX, float* desiredAgentsY, int n, int* heatmap, size_t heatmap_pitch, int* scaled_heatmap, size_t scaled_heatmap_pitch) {
    // Block row and column
    int blockRow = blockIdx.y;
    int blockCol = blockIdx.x;
    // Thread row and column block
    int row = threadIdx.y;
    int col = threadIdx.x;
    // x, y coordinate
    int x = blockCol * blockDim.x + col;
    int y = blockRow * blockDim.y + row;
    // fade heatmap (pitched row addressing: pitch is in bytes)
    int* heatPoint = (int*)((char*)heatmap + y * heatmap_pitch) + x;
    *heatPoint = (int)round((*heatPoint) * 0.80);
    // pull desiredAgentxX and Y array from global to shared memory, only 1 thread will do it
    // (serial copy by thread (0,0) of each block; X values in [0, n), Y in [n, 2n))
    extern __shared__ float desiredPosition[];
    if (row == 0 && col == 0) {
        for (int i = 0; i < n; i++) {
            desiredPosition[i] = desiredAgentsX[i];
            desiredPosition[i + n] = desiredAgentsY[i];
        }
    }
    __syncthreads();
    // Count how many agents want to go to each location
    for (int i = 0; i < n; i++) {
        int desiredX = (int)desiredPosition[i];
        int desiredY = (int)desiredPosition[i + n];
        if (x == desiredX && y == desiredY) {
            // intensify heat for better color results
            if ((*heatPoint) + 40 <= 255) {
                *heatPoint += 40;
            }
        }
    }
}
18,816 | #include "includes.h"
// Element-wise integer addition with one block per element: block i
// computes c[i] = a[i] + b[i]. Each parallel invocation is a block; the
// set of blocks is the grid, and blockIdx.x selects this block's element.
__global__ void addKernel(int *a, int *b, int *c)
{
    int i = blockIdx.x;
    c[i] = a[i] + b[i];
}
18,817 | #include <stdlib.h>
#include <stdio.h>
#include <curand.h>
#include <time.h>
#include <iostream>
#include <string>
#include <fstream>
using namespace std;
#define CURAND_CALL(x) do { if((x)!=CURAND_STATUS_SUCCESS) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
/*
 * Generate an L x L lattice of host-side random 32-bit integers with
 * cuRAND, dump them to data.txt, and optionally print them.
 * Usage: prog [L] [print]
 *
 * Fixes: curandGenerate takes a COUNT of 32-bit values, not bytes —
 * passing `size` (bytes) overflowed h_data four-fold; `print` was read
 * uninitialized when argc <= 2; cuRAND calls now go through the
 * previously unused CURAND_CALL macro; malloc is checked.
 */
int main(int argc, char* argv[]) {
  int *h_data, L, tam, print = 0;
  size_t size;
  curandGenerator_t gen;
  L = 10;
  if (argc > 1)
    L = atoi(argv[1]);
  if (argc > 2)
    print = atoi(argv[2]);
  tam = L * L;
  size = tam * sizeof(int);
  // Allocate memory for the vector on host memory.
  h_data = (int*) malloc(size);
  if (h_data == NULL) {
    printf("host allocation failed\n");
    return EXIT_FAILURE;
  }
  for (int i = 0; i < tam; i++)
    h_data[i] = 0;
  CURAND_CALL(curandCreateGeneratorHost(&gen, CURAND_RNG_PSEUDO_DEFAULT));
  CURAND_CALL(curandSetPseudoRandomGeneratorSeed(gen, time(NULL)));
  // tam = number of 32-bit values to generate (NOT bytes)
  CURAND_CALL(curandGenerate(gen, (unsigned int *)h_data, tam));
  ofstream out("data.txt");
  if (print)
    printf("\n\n");
  for (int i = 0; i < tam; i++)
  {
    if (print)
      if (i % L == 0)
        printf("\n");
    out << h_data[i] << " ";
    if (print)
      printf(" %u", h_data[i]);
  }
  if (print)
    printf("\n\n");
  curandDestroyGenerator(gen);
  out.close();
  /* Free host memory */
  free(h_data);
  return 0;
} /* main */
|
18,818 | #include "includes.h"
/* TODO: Your code here */
/* all your GPU kernel code, e.g. matrix_softmax_cross_entropy_kernel */
// y = inputs[0], y_ = inputs[1]
// np.mean(-np.sum(y_ * np.log(softmax(y)), axis=1), keepdims=True)
// Mean softmax cross-entropy over all nrow rows of input_a (logits)
// against input_b (labels): each thread computes one row's max-shifted
// softmax loss into dynamic shared memory, then thread (0,0) averages.
// Launch must supply at least nrow * sizeof(float) dynamic shared memory.
// NOTE(review): loss_per_row is per-BLOCK shared memory, yet it is
// indexed by the global row id and the final reduction walks all nrow
// entries — this is only correct for a single-block launch; confirm.
// NOTE(review): threads with y >= nrow return before the __syncthreads()
// below, so a launch with surplus threads hits a divergent barrier.
__global__ void matrix_softmax_cross_entropy_kernel(int nrow, int ncol, const float *input_a, const float *input_b, float *output) {
  // Dynamic shared memory, size provided at kernel launch.
  extern __shared__ float loss_per_row[];
  // Two dimensional thread blocks.
  int y = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x
      + threadIdx.x;
  if (y >= nrow) {
    return;
  }
  // Advance both pointers to this thread's row.
  input_a += y * ncol;
  input_b += y * ncol;
  float maxval = *input_a;
  // Find max for a row.
  for (int x = 1; x < ncol; ++x) {
    maxval = max(maxval, input_a[x]);
  }
  // Deduct by max for a row, and raise to exp.
  float sum = 0;
  for (int x = 0; x < ncol; ++x) {
    sum += exp(input_a[x] - maxval);
  }
  // Compute per-row loss: -sum(label * log(softmax(logit))).
  float loss = 0;
  for (int x = 0; x < ncol; ++x) {
    loss -= input_b[x] * log(exp(input_a[x] - maxval) / sum);
  }
  loss_per_row[y] = loss;
  __syncthreads();
  // Compute reduce_mean across rows.
  float mean_loss = 0;
  // Use a single thread to reduce mean across rows.
  if ((threadIdx.x == 0) && (threadIdx.y == 0)) {
    for (int i = 0; i < nrow; ++i) {
      mean_loss += loss_per_row[i];
    }
    mean_loss /= nrow;
    output[0] = mean_loss;
  }
}
18,819 | #include <stdio.h>
#include <iostream>
#include <stdlib.h>
#include <assert.h>
#include <time.h>
#define R 3
// Naive 1D sum stencil: block b's 2R+1 threads each atomicAdd one input
// element in_arr[b + t] into out_arr[b], so out_arr[b] accumulates the
// sum of the window starting at b.
// NOTE(review): out_arr is accumulated into, never assigned — it must be
// zeroed before launch; verify the caller memsets the device buffer.
__global__ void oneD_stencil_naive(int *in_arr, int *out_arr) {
  int in_index = blockIdx.x + threadIdx.x;
  int out_index = blockIdx.x;
  // guaranteed to be performed without interference from other threads
  atomicAdd(out_arr+out_index, in_arr[in_index]);
  __syncthreads();  // no-op at kernel end; nothing follows the barrier
}
// Run the naive 1D sum stencil on a fixed 13-element input and print the
// windowed sums. Fixes: the device output buffer is now zeroed before the
// kernel launch (cudaMalloc does not initialize memory, and the kernel
// accumulates with atomicAdd, so results were garbage), and the host
// output buffer is freed.
int main(void) {
  int device;
  cudaGetDevice(&device);
  struct cudaDeviceProp props;
  cudaGetDeviceProperties(&props, device);
  printf("Using %s.\n\n", props.name);
  // host copies of input and output array
  int arr_in[13] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13};
  int in_elements = 13;
  int in_size = in_elements*sizeof(int);
  int *arr_out;
  // each output element sums a (2R+1)-wide input window
  int out_elements = in_elements-(2*R+1)+1;
  int out_size = out_elements*sizeof(int);
  arr_out = (int *)malloc(out_size);
  int *d_arr_in, *d_arr_out; // device copies of input and output array
  // Allocate space for device copies
  cudaMalloc((void **)&d_arr_in, in_size);
  cudaMalloc((void **)&d_arr_out, out_size);
  // Copy inputs to device
  cudaMemcpy(d_arr_in, arr_in, in_size, cudaMemcpyHostToDevice);
  // The kernel accumulates via atomicAdd, so the output must start at 0.
  cudaMemset(d_arr_out, 0, out_size);
  // Launch stencil() kernel on GPU: one block per output element,
  // one thread per window entry
  int n_blocks = out_elements;
  int n_threads_per_block = 2*R+1;
  oneD_stencil_naive<<<n_blocks, n_threads_per_block>>>(d_arr_in, d_arr_out);
  // Copy result back to host (blocking copy also synchronizes)
  cudaMemcpy(arr_out, d_arr_out, out_size, cudaMemcpyDeviceToHost);
  std::cout << "[";
  for(int i=0; i<n_blocks; ++i){
    std::cout << arr_out[i] << ", ";
  }
  std::cout << "]" << std::endl;
  // Cleanup
  cudaFree(d_arr_in); cudaFree(d_arr_out);
  free(arr_out);
  return 0;
}
18,820 | #include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <cuda.h>
// Render one Mandelbrot pixel per thread into an RGB byte buffer
// (3 bytes per pixel, grayscale by escape iteration, black inside the set).
// The view spans x in [-2, 1.25], y in [-1.25, 1.25].
// Fix: y_origin divided by width instead of height; identical for the
// square images the caller uses, but wrong for any other aspect ratio.
__global__ void render(char *out, int width, int height) {
  int index = 3 * (blockIdx.x * blockDim.x + threadIdx.x);
  int x_dim = (index / 3) % width, y_dim = (index / 3) / width;
  float x_origin = ((float) x_dim/width)*3.25 - 2;
  float y_origin = ((float) y_dim/height)*2.5 - 1.25;
  float x = 0.0;
  float y = 0.0;
  // Standard escape-time iteration: z <- z^2 + c until |z|^2 > 4.
  int iteration = 0;
  int scale = 8;
  int max_iteration = 256 * scale;
  while(x*x + y*y <= 4 && iteration < max_iteration) {
    float xtemp = x*x - y*y + x_origin;
    y = 2*x*y + y_origin;
    x = xtemp;
    iteration++;
  }
  if(iteration == max_iteration) {
    // Never escaped: inside the set, paint black.
    out[index] = 0;
    out[index + 1] = 0;
    out[index + 2] = 0;
  } else {
    // Grayscale by escape speed; iteration/scale fits in [0, 255].
    out[index] = iteration / scale;
    out[index + 1] = iteration / scale;
    out[index + 2] = iteration / scale;
  }
}
// Render a width x height Mandelbrot image on the GPU into an RGB byte
// buffer, copy it back to the host, and release both buffers. Assumes
// width * height is divisible by the block size (64).
void runCUDA(int width, int height)
{
  // Three bytes per pixel: red, green, blue.
  size_t buffer_size = sizeof(char) * width * height * 3;
  char *image;
  cudaMalloc((void **) &image, buffer_size);
  char *host_image = (char *) malloc(buffer_size);
  // One thread per pixel, 64 threads per block.
  dim3 blockDim(64, 1, 1);
  dim3 gridDim(width * height / blockDim.x, 1, 1);
  render<<< gridDim, blockDim, 0 >>>(image, width, height);
  cudaMemcpy(host_image, image, buffer_size, cudaMemcpyDeviceToHost);
  // PPM dump kept for debugging, disabled as in the original:
  /*printf("P3\n%d %d\n255\n", width, height);
  for (int row = 0; row < height; row++) {
    for (int col = 0; col < width; col++) {
      for (int i = 0; i < 3; i++) {
        unsigned char c = host_image[(row * width + col) * 3 + i];
        printf("%d ", c);
      }
    }
    printf("\n");
  }*/
  cudaFree(image);
  free(host_image);
}
// Render a fixed 1024 x 1024 image; CLI arguments are ignored.
int main(int argc, const char * argv[]) {
  const int side = 1024;
  runCUDA(side, side);
  return 0;
}
|
18,821 | #include "includes.h"
// Parallel Cyclic Reduction solver for batches of tridiagonal systems
// (a: sub-diagonal, b: diagonal, c: super-diagonal, y: right-hand side,
// n: system size). One thread per row; blockDim.x / n systems are packed
// per block, and the solution overwrites y in global memory.
// Shared memory layout per system: four float arrays of n (a, b, c, y)
// plus an int index array of n — 5 * n * 4 bytes per system of dynamic
// shared memory must be supplied at launch.
// NOTE(review): EPS is not defined in this chunk — presumably it comes
// from includes.h; confirm.
__global__ void pcr_k(float* a, float* b, float* c, float* y, int n) {
  // Identifies the thread working within a group
  int tidx = threadIdx.x % n;
  // Identifies the data concerned by the computations
  int Qt = (threadIdx.x - tidx) / n;
  // The global memory access index
  int gb_index_x = Qt + blockIdx.x * (blockDim.x / n);
  // Local integers
  int i, nt, lL, d, tL, tR;
  // Local floats
  float aL, bL, cL, yL, aLp, bLp, cLp, yLp;
  // Shared memory
  extern __shared__ float sAds[];
  // nt: offset of this system's 5-array slab within shared memory
  nt = 5 * Qt * n;
  // d: destination slot for the odd/even permutation after each reduction
  d = (n / 2 + (n % 2)) * (tidx % 2) + (int)tidx / 2;
  float* sa = (float*)&sAds[nt];
  float* sb = (float*)&sa[n];
  float* sc = (float*)&sb[n];
  float* sy = (float*)&sc[n];
  int* sl = (int*)&sy[n];
  // Stage this system's coefficients and row labels into shared memory.
  sa[tidx] = a[gb_index_x * n + tidx];
  sb[tidx] = b[gb_index_x * n + tidx];
  sc[tidx] = c[gb_index_x * n + tidx];
  sy[tidx] = y[gb_index_x * n + tidx];
  sl[tidx] = tidx;
  __syncthreads();
  //Left/Right indices of the reduction
  tL = tidx - 1;
  if (tL < 0) tL = 0;
  tR = tidx + 1;
  if (tR >= n) tR = 0;
  for (i = 0; i < (int)log2((float)n) + 1; i++) {
    lL = (int)sl[tidx];
    aL = sa[tidx];
    bL = sb[tidx];
    cL = sc[tidx];
    yL = sy[tidx];
    bLp = sb[tL];
    //Reduction phase: eliminate this row's couplings to its neighbors
    if (fabsf(aL) > EPS) {
      aLp = sa[tL];
      cLp = sc[tL];
      yLp = sy[tL];
      float temp1 = aL / bLp;
      bL -= cLp * temp1;
      yL -= yLp * temp1;
      aL = -aLp * temp1;
    }
    aLp = sa[tR];
    bLp = sb[tR];
    cLp = sc[tR];
    float temp2 = cL / bLp;
    if (fabsf(aLp) > EPS) {
      yLp = sy[tR];
      bL -= aLp * temp2;
      yL -= yLp * temp2;
      cL = -cLp * temp2;
    }
    __syncthreads();
    //Permutation phase: interleave odd/even rows for the next round
    //(skipped on the final iteration so the registers keep the result)
    if (i < (int)log2((float)n)) {
      sa[d] = aL;
      sb[d] = bL;
      sc[d] = cL;
      sy[d] = yL;
      sl[d] = (int)lL;
      __syncthreads();
    }
  }
  // Fully reduced 1x1 system: solve and scatter back via the row labels.
  sy[(int)tidx] = yL / bL;
  __syncthreads();
  y[gb_index_x * n + sl[tidx]] = sy[tidx];
}
18,822 | #include <iostream>
#include <time.h>
#include <stdio.h>
// For the CUDA runtime routines
#include <cuda_runtime.h>
//fill a vector with pseudo-random integer values in [0, 100)
void initVec(float *a, int N)
{
  int i = 0;
  while (i < N)
  {
    a[i] = rand() % 100;
    ++i;
  }
}
//fill every element of a vector with the given value
void initWith(float num, float *a, int N)
{
  int idx = N;
  while (idx-- > 0)
  {
    a[idx] = num;
  }
}
//dot product of two vectors on the CPU, accumulated into `result`.
//Fix: `result` was taken by value, so the accumulated sum was silently
//discarded; it is now a reference (call sites are source-compatible).
void dotVectorsCPU(float &result, float *a, float *b, int N)
{
  for(int i = 0; i < N; i++)
  {
    result = result + a[i] * b[i];
  }
}
//dotting your vectors on gpu
__global__
void dotVectorsGpu(float result, float *a, float *b, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < N; i += stride)
{
result = result + a[i] * b[i];
}
}
//element-wise vector addition on the CPU: result[i] = a[i] + b[i]
void addVectorsCPU(float *result, float *a, float *b, int N)
{
  int idx = 0;
  while (idx < N)
  {
    result[idx] = a[idx] + b[idx];
    ++idx;
  }
}
//element-wise vector addition on the GPU: result[i] = a[i] + b[i],
//covered with a grid-stride loop so any launch configuration works
__global__
void addVectorsGpu(float *result, float *a, float *b, int N)
{
  int step = blockDim.x * gridDim.x;
  for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < N; idx += step)
  {
    result[idx] = a[idx] + b[idx];
  }
}
//element-wise vector subtraction on the CPU: result[i] = a[i] - b[i]
void subVectorsCPU(float *result, float *a, float *b, int N)
{
  for (int idx = N - 1; idx >= 0; --idx)
  {
    result[idx] = a[idx] - b[idx];
  }
}
//element-wise vector subtraction on the GPU: result[i] = a[i] - b[i],
//covered with a grid-stride loop so any launch configuration works
__global__
void subVectorsGpu(float *result, float *a, float *b, int N)
{
  int step = blockDim.x * gridDim.x;
  for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < N; idx += step)
  {
    result[idx] = a[idx] - b[idx];
  }
}
//Prompt on stdin until the user answers y/Y (returns 1) or n/N (returns 0).
bool ask_repeat()
{
  char decision;
  while(1)
  {
    printf("\nDo You want to improve your score? (y/n)");
    printf("\n");
    // leading space in " %c" skips whitespace left over in stdin
    scanf(" %c", &decision);
    if(decision == 'y' || decision == 'Y')
      return 1;
    else if(decision == 'n' || decision == 'N')
      return 0;
    else
      printf("\nWrong answer! Give y or n!");
  }
}
// Interactive benchmark: reads a vector length and an operation (* + -),
// times the chosen operation on the CPU and on a <<<1,1>>> GPU launch,
// then repeatedly re-times the GPU version with user-supplied launch
// configurations. Vectors live in unified memory and are prefetched
// between host and device around each phase.
int main()
{
  //device's variables
  int deviceId;
  //int numberOfSMs;
  cudaGetDevice(&deviceId);
  //cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId);
  //timing variables
  clock_t start, end;
  double cpu_time_used;
  float el_time; // 1x1 kernel
  cudaEvent_t start_gpu, stop_gpu;
  //take value N of vectors' length
  int N;
  printf("Give the length of vectors you want to work with:");
  scanf("%d", &N);
  size_t size = N * sizeof(float);
  float *a;
  float *b;
  // NOTE(review): c is a plain host float passed BY VALUE to
  // dotVectorsGpu and dotVectorsCPU below, so neither dot product is
  // ever observable in c — the '*' path only measures launch overhead.
  float c = 0.0f;
  float *d;
  //allocate data (unified memory, visible to both host and device)
  cudaMallocManaged(&a, size);
  cudaMallocManaged(&b, size);
  //cudaMallocManaged(&c, size);
  cudaMemPrefetchAsync(a, size, cudaCpuDeviceId);
  cudaMemPrefetchAsync(b, size, cudaCpuDeviceId);
  //initWith(0, c, N);
  //size_t threadsPerBlock;
  //size_t numberOfBlocks;
  //generate 2 vectors of length N
  initVec(a, N);
  initVec(b, N);
  // migrate the freshly filled operands to the GPU ahead of the launches
  cudaMemPrefetchAsync(a, size, deviceId);
  cudaMemPrefetchAsync(b, size, deviceId);
  //switch statement for different operations
  char operat;
  printf("Choose mathematical operations:\n");
  printf("Type '*' for dotting vectors\n");
  printf("Type '+' for adding vectors\n");
  printf("Type '-' for subtracting vectors\n");
  scanf(" %c", &operat);
  switch(operat)
  {
    case '*':
      //dot vec on cpu
      start = clock();
      dotVectorsCPU(c, a, b, N);
      //give time of execution
      end = clock();
      cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
      printf("\nTime elapsed dotting vectors on CPU is: %f", cpu_time_used);
      //dot vecs on 1x1 kernel
      cudaEventCreate(&start_gpu);
      cudaEventCreate(&stop_gpu);
      cudaEventRecord(start_gpu, 0);
      c = 0;
      dotVectorsGpu<<<1,1>>>(c, a, b, N);
      //give time of execution
      cudaDeviceSynchronize();
      cudaEventRecord(stop_gpu,0);
      cudaEventSynchronize(stop_gpu);
      cudaEventElapsedTime(&el_time, start_gpu, stop_gpu);
      printf("\nTime elapsed on single-threaded vector dotting: %f \n", el_time);
      break;
    /////////
    case '+':
      // result buffer only needed for + and -, allocated lazily here
      cudaMallocManaged(&d, size);
      cudaMemPrefetchAsync(d, size, cudaCpuDeviceId);
      initWith(0,d,N);
      cudaMemPrefetchAsync(d, size, deviceId);
      //add vec on cpu
      start = clock();
      addVectorsCPU(d, a, b, N);
      //give time of execution
      end = clock();
      cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
      printf("\nTime elapsed adding vectors on CPU is: %f", cpu_time_used);
      //add vecs on 1x1 kernel
      cudaEventCreate(&start_gpu);
      cudaEventCreate(&stop_gpu);
      cudaEventRecord(start_gpu, 0);
      initWith(0,d,N);
      addVectorsGpu<<<1,1>>>(d, a, b, N);
      //give time of execution
      cudaDeviceSynchronize();
      cudaEventRecord(stop_gpu,0);
      cudaEventSynchronize(stop_gpu);
      cudaEventElapsedTime(&el_time, start_gpu, stop_gpu);
      printf("\nTime elapsed on single-threaded vector addition: %f \n", el_time);
      break;
    /////////
    case '-':
      cudaMallocManaged(&d, size);
      cudaMemPrefetchAsync(d, size, cudaCpuDeviceId);
      initWith(0,d,N);
      cudaMemPrefetchAsync(d, size, deviceId);
      //sub vec on cpu
      start = clock();
      subVectorsCPU(d, a, b, N);
      //give time of execution
      end = clock();
      cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
      printf("\nTime elapsed subtracting vectors on CPU is: %f", cpu_time_used);
      //sub vecs on 1x1 kernel
      cudaEventCreate(&start_gpu);
      cudaEventCreate(&stop_gpu);
      cudaEventRecord(start_gpu, 0);
      initWith(0,d,N);
      subVectorsGpu<<<1,1>>>(d, a, b, N);
      //give time of execution
      cudaDeviceSynchronize();
      cudaEventRecord(stop_gpu,0);
      cudaEventSynchronize(stop_gpu);
      cudaEventElapsedTime(&el_time, start_gpu, stop_gpu);
      printf("\nTime elapsed on single-threaded vector subtracting: %f \n", el_time);
      break;
    // operator doesn't match any case constant +, -, *,
    default:
      printf("Error! operator is not correct\n");
      //printf("Operat = %c",operat);
      printf("Quiting...");
      return 0;
  }
  // tuning loop: re-run the chosen GPU operation with user-picked launch
  // configurations until the user declines
  bool repeat = 1;
  while (repeat)
  {
    //take size of grid and etc
    int user_block;
    printf("\nGive the size of block you want to use:");
    scanf("%d", &user_block);
    int user_thread;
    printf("Give the number of threads per block you want to use:");
    scanf("%d", &user_thread);
    //timing variables
    float el_time2;
    cudaEvent_t start_gpu2, stop_gpu2;
    switch(operat)
    {
      case '*':
        cudaEventCreate(&start_gpu2);
        cudaEventCreate(&stop_gpu2);
        cudaEventRecord(start_gpu2, 0);
        //dot vecs on got sizes kernel
        c=0;
        dotVectorsGpu<<<user_block,user_thread>>>(c, a, b, N);
        //give time of execution
        cudaEventRecord(stop_gpu2,0);
        cudaEventSynchronize(stop_gpu2);
        cudaEventElapsedTime(&el_time2, start_gpu2, stop_gpu2);
        printf("\nTime elapsed on your size vectors dot product: %f\n", el_time2);
        //print all execution times in table
        cudaDeviceSynchronize();
        //ask if user want to boost the score
        repeat = ask_repeat();
        break;
      /////////
      case '+':
        cudaEventCreate(&start_gpu2);
        cudaEventCreate(&stop_gpu2);
        cudaEventRecord(start_gpu2, 0);
        //add vecs on got sizes kernel
        initWith(0,d,N);
        addVectorsGpu<<<user_block,user_thread>>>(d, a, b, N);
        //give time of execution
        cudaEventRecord(stop_gpu2,0);
        cudaEventSynchronize(stop_gpu2);
        cudaEventElapsedTime(&el_time2, start_gpu2, stop_gpu2);
        printf("\nTime elapsed on your size vectors addition: %f\n", el_time2);
        //print all execution times in table
        cudaDeviceSynchronize();
        //ask if user want to boost the score
        repeat = ask_repeat();
        break;
      /////////
      case '-':
        cudaEventCreate(&start_gpu2);
        cudaEventCreate(&stop_gpu2);
        cudaEventRecord(start_gpu2, 0);
        //sub vecs on got sizes kernel
        initWith(0,d,N);
        subVectorsGpu<<<user_block,user_thread>>>(d, a, b, N);
        //give time of execution
        cudaEventRecord(stop_gpu2,0);
        cudaEventSynchronize(stop_gpu2);
        cudaEventElapsedTime(&el_time2, start_gpu2, stop_gpu2);
        printf("\nTime elapsed on your size vectors subtraction: %f\n", el_time2);
        //print all execution times in table
        cudaDeviceSynchronize();
        //ask if user want to boost the score
        repeat = ask_repeat();
        break;
      ////////
      default:
        printf("Error!\n");
        printf("Quiting...");
        return 0;
    }
  }
  // NOTE(review): a, b and (for +/-) d are never cudaFree'd before exit.
  printf("Quiting...");
  return 0;
}
|
18,823 | #include<stdio.h>
#include "cuda_runtime.h"
// Enumerate all CUDA devices and print their key properties.
int main()
{
int deviceCount = 0;
cudaGetDeviceCount(&deviceCount);
printf("device count is: %d\n", deviceCount);
for (int dev = 0; dev < deviceCount; dev++)
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
printf("\ndevice name is %d %s:\n", dev, deviceProp.name);
// FIX: totalGlobalMem is a size_t — %u truncates it on 64-bit hosts; use %zu.
printf("total amount global memory is: %zu bytes\n", deviceProp.totalGlobalMem);
// FIX: the original divided by 1024 for "M" and 1024*1024 for "G",
// i.e. it printed KB and MB under the wrong labels.
printf("total amount global memory is: %zu M\n", deviceProp.totalGlobalMem / (1024 * 1024));
printf("total amount global memory is: %zu G\n", deviceProp.totalGlobalMem / (1024 * 1024 * 1024));
printf("number of multiprocessor is: %d\n", deviceProp.multiProcessorCount);
printf("max number of thread per block %d\n", deviceProp.maxThreadsPerBlock);
printf("max size of each dimension %d, %d, %d \n", deviceProp.maxThreadsDim[0],
deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]);
printf("\n");
}
}
|
18,824 | #include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<string.h>
// Kernel: initialize the n x n distance matrix for all-pairs shortest paths.
// Launched as <<<1, dim3(n, n, 1)>>> — one thread per cell, so n*n must not
// exceed the per-block thread limit (1024), i.e. n <= 31 in practice.
__global__ void setMax(long* d_adj, int n){
int x = threadIdx.x;
int y = threadIdx.y;
int pos = (x * n) + y; // row-major cell (x, y)
if(x == y)
d_adj[pos] = 0; //Diagonal elements: distance to self is zero
else
d_adj[pos] = __INT_MAX__; //Others: INT_MAX used as the "infinity" sentinel
}
// Floyd–Warshall relaxation for pivot vertex k, one thread per cell (x, y):
// d[x][y] = min(d[x][y], d[x][k] + d[k][y]).
// Single-block launch; the barrier ensures every thread has read its row-k
// and column-k values before any thread (in row/column k) overwrites them.
// The sum is computed in long, so adding two INT_MAX sentinels cannot
// overflow on platforms with 64-bit long.
__global__ void compute(long *d_ad, int k, int n){
int x = threadIdx.x;
int y = threadIdx.y;
int w_pos = (x * n) + y;   // cell this thread may update
int r_pos1 = (x * n) + k;  // d[x][k]
int r_pos2 = (k * n) + y;  // d[k][y]
long s = d_ad[r_pos1] + d_ad[r_pos2];
__syncthreads();
if(s < d_ad[w_pos])
d_ad[w_pos] = s;
}
// Host driver: interactive Floyd–Warshall all-pairs shortest paths.
// Limitation (unchanged from the original): kernels use a single block of
// n x n threads, so n*n must be <= 1024.
int main(int argc, char** argv){
int i, j, k, n;
printf("Enter the number of vertices : \n");
scanf("%d", &n);
long h_adj[n * n];  // host adjacency/distance matrix (VLA)
long* d_adj;
// FIX: element type is long, not long* — the original allocated
// n*n*sizeof(long*), which happens to match on LP64 but is wrong on any
// platform where sizeof(long) != sizeof(long*).
cudaMalloc((void**)&d_adj, n * n * sizeof(long));
setMax<<<1, dim3(n, n, 1)>>>(d_adj, n);
// The blocking memcpy also synchronizes with the kernel above.
cudaMemcpy(h_adj, d_adj, n * n * sizeof(long), cudaMemcpyDeviceToHost);
cudaFree(d_adj);
// Interactive entry of directed edges, 1-based vertex numbering.
while(1){
printf("Click 1 to enter edge and 0 to finish.\n");
scanf("%d", &k);
if(!k)
break;
int s, d, w;
printf("Enter start and end of edge in 1-ordering : \n");
scanf("%d %d", &s, &d);
if(s == d){
printf("Invalid edge.\n");
continue;
}
if(s > n || s < 1 || d > n || d < 1){
printf("Invalid edge.\n");
continue;
}
printf("Enter edge weight : \n");
scanf("%d", &w);
if(w < 0){
printf("Invalid edge weight.\n");
continue;
}
int pos = ((s - 1) * n) + (d - 1);
h_adj[pos] = w;
}
cudaDeviceSynchronize();
long* d_ad;
cudaMalloc((void**)&d_ad, n * n * sizeof(long));  // FIX: sizeof(long), see above
cudaMemcpy(d_ad, h_adj, n * n * sizeof(long), cudaMemcpyHostToDevice);
// One relaxation pass per pivot vertex.
for(k = 0; k < n; k++)
compute<<<1, dim3(n, n, 1)>>>(d_ad, k, n);
cudaMemcpy(h_adj, d_ad, n * n * sizeof(long), cudaMemcpyDeviceToHost);
// Print the distance matrix (INT_MAX means "unreachable").
for(i = 0;i < n; i++){
for(j = 0;j < n; j++){
int pos = (i * n) + j;
printf("%ld ", h_adj[pos]);
}
printf("\n");
}
cudaFree(d_ad);
return 0;
}
|
18,825 | #include <stdio.h>
#include <cuda_runtime.h>
#define CHECK(call) { \
const cudaError_t error = call; \
if (error != cudaSuccess) { \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code: %d, reason: %s\n", error, cudaGetErrorString(error)); \
exit(1); \
} \
} \
#define RADIUS 4
#define BLOCKSIZE 128
// coeffecient
#define a0 0.00000f
#define a1 0.80000f
#define a2 -0.20000f
#define a3 0.03809f
#define a4 -0.00357f
// constant memory
__constant__ float coef[RADIUS + 1];
void initialData(float *ip, const int size);
void stencilHost(float *in, float *out, int isize);
void checkResult(float *hostRef, float *gpuRef, const int size);
__global__ void stencilGPU(float *in, float *out, const int n);
// Driver: run the 1-D stencil on CPU and GPU and compare the results.
int main(int argc, char **argv) {
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
// set up data size; buffers carry a RADIUS halo on each side
int isize = 1 << 24;
size_t nBytes = (isize + 2 * RADIUS) * sizeof(float);
printf("array size: %d\n", isize);
// allocate host memory
float *h_in = (float *)malloc(nBytes);
float *h_out = (float *)malloc(nBytes);
float *h_out_gpu = (float *)malloc(nBytes);
initialData(h_in, isize + 2 * RADIUS);
// compute reference result on CPU
stencilHost(h_in, h_out, isize);
// allocate device memory
float *d_in, *d_out;
CHECK(cudaMalloc((float**)&d_in, nBytes));
CHECK(cudaMalloc((float**)&d_out, nBytes));
CHECK(cudaMemcpy(d_in, h_in, nBytes, cudaMemcpyHostToDevice));
// upload the stencil coefficients to constant memory
const float h_coef[] = {a0, a1, a2, a3, a4};
CHECK(cudaMemcpyToSymbol(coef, h_coef, (RADIUS + 1) * sizeof(float)));
// launch CUDA kernel; cap the grid at the device limit — the kernel's
// grid-stride loop covers any remainder. (Reuses deviceProp instead of
// querying the device a second time as the original did.)
dim3 block(BLOCKSIZE);
dim3 grid(deviceProp.maxGridSize[0] < isize / block.x ? deviceProp.maxGridSize[0] : isize / block.x);
printf("(grid, block) %d,%d \n", grid.x, block.x);
// Pointers are offset by RADIUS so index 0 in the kernel is the first
// interior point.
stencilGPU<<<grid, block>>>(d_in + RADIUS, d_out + RADIUS, isize);
// FIX: launches do not return errors — check for launch failures explicitly.
CHECK(cudaGetLastError());
CHECK(cudaMemcpy(h_out_gpu, d_out, nBytes, cudaMemcpyDeviceToHost));
checkResult(h_out, h_out_gpu, isize);
// free memory
CHECK(cudaFree(d_in));
CHECK(cudaFree(d_out));
free(h_in);
free(h_out);
free(h_out_gpu);
// reset device
CHECK(cudaDeviceReset());
return 0;
}
/**********CUDA kernels**********/
/**********CUDA kernels**********/
// Grid-stride 8th-order 1-D finite-difference stencil using a shared-memory
// tile with RADIUS-wide halos. `in`/`out` are pre-offset by RADIUS by the
// caller, so in[idx - RADIUS] is valid for idx >= 0. Assumes n is a multiple
// of BLOCKSIZE (true for 1 << 24), so every tile iteration is full.
__global__ void stencilGPU(float *in, float *out, const int n) {
__shared__ float smem[BLOCKSIZE + 2 * RADIUS]; // tile + left/right halos
int idx = blockIdx.x * blockDim.x + threadIdx.x; // global element index
while (idx < n) {
int sidx = threadIdx.x + RADIUS; // this thread's slot in the tile
smem[sidx] = in[idx];            // interior point
if (threadIdx.x < RADIUS) {      // first RADIUS threads load both halos
smem[sidx - RADIUS] = in[idx - RADIUS];
smem[sidx + BLOCKSIZE] = in[idx + BLOCKSIZE];
}
__syncthreads(); // tile fully loaded before anyone reads it
float tmp = coef[0];
#pragma unroll
for (int i = 1; i <= RADIUS; i++) {
tmp += coef[i] * (smem[sidx + i] - smem[sidx - i]);
}
out[idx] = tmp;
// FIX: barrier before the next grid-stride iteration. Without it, a fast
// thread could start overwriting smem with the next tile while a slower
// thread is still reading the current one (shared-memory data race).
__syncthreads();
idx += gridDim.x * blockDim.x;
}
}
/**********host functions**********/
// Fill ip[0..size) with pseudo-random floats in [0.0, 2.55].
void initialData(float *ip, const int size) {
for (int k = 0; k != size; ++k) {
int byte = rand() & 0xFF;     // low 8 bits: 0..255
ip[k] = (float)byte / 100.0f; // scale into [0, 2.55]
}
}
// CPU reference for the 8th-order 1-D stencil; uses coefficient macros
// a0..a4 and halo width RADIUS defined at the top of this file.
// NOTE: the loop bound is `<= isize`, matching the original exactly
// (it produces isize - RADIUS + 1 output samples).
void stencilHost(float *in, float *out, int isize)
{
for (int idx = RADIUS; idx <= isize; idx++) {
float acc = a0;
acc += a1 * (in[idx + 1] - in[idx - 1]);
acc += a2 * (in[idx + 2] - in[idx - 2]);
acc += a3 * (in[idx + 3] - in[idx - 3]);
acc += a4 * (in[idx + 4] - in[idx - 4]);
out[idx] = acc;
}
}
// Compare host and GPU results element-wise over [RADIUS, size) and report
// the first mismatch beyond epsilon.
// BUG FIX: the original called abs(), which resolves to the integer overload
// in this translation unit — any difference smaller than 1.0 truncated to 0,
// so the check could never fire. The absolute value is now computed in
// floating point.
void checkResult(float *hostRef, float *gpuRef, const int size) {
double epsilon = 1.0E-6;
for (int i = RADIUS; i < size; i++) {
double diff = hostRef[i] - gpuRef[i];
if (diff < 0) diff = -diff;
if (diff > epsilon) {
printf("different on %dth element: host %f gpu %f\n", i, hostRef[i], gpuRef[i]);
break;
}
}
}
|
18,826 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
// Element-wise vector addition on the host: C[i] = A[i] + B[i] for i in [0, N).
void sumArrayOnHost(float *A, float *B, float *C, const int N)
{
int k = 0;
while (k < N)
{
C[k] = A[k] + B[k];
++k;
}
}
// Fill ip[0..size) with pseudo-random floats in [0.0, 25.5].
// FIX: seed the RNG only once per process. The original called
// srand(time(...)) on every invocation, so two calls within the same
// second (the normal case for h_A and h_B in main) produced identical
// arrays.
void initialData(float *ip, int size)
{
static int seeded = 0;
if (!seeded)
{
time_t t;
srand((unsigned int)time(&t));
seeded = 1;
}
for (int i=0; i<size; i++)
{
ip[i] = (float)(rand() & 0xFF )/10.0f;
}
}
// Host-only driver: build two random vectors, add them on the CPU, clean up.
int main(int argc, char **argv)
{
const int nElem = 1024;
const size_t nBytes = nElem * sizeof(float);

// Allocate the three host vectors.
float *h_A = (float *)malloc(nBytes);
float *h_B = (float *)malloc(nBytes);
float *h_C = (float *)malloc(nBytes);

// Populate the inputs, then compute C = A + B.
initialData(h_A, nElem);
initialData(h_B, nElem);
sumArrayOnHost(h_A, h_B, h_C, nElem);

free(h_A);
free(h_B);
free(h_C);

printf("Done! \n");
return 0;
}
|
18,827 | /*
Compute potential energy for a system of particles
Miguel Aragon Calvo Apr/2010
"This software contains source code provided by NVIDIA Corporation."
"Glue c code based on galaxy collision demo"
History:
- 10/05/2010 First working implementation
- 01/06/2010 Add softening
*/
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#include <unistd.h>
#include <stdio.h>
#include <math.h>
#define BLOCKDIM 256
__constant__ float softeningSquared;
// Macros to simplify shared memory addressing
#define SX(i) sharedPos[i+blockDim.x*threadIdx.y]
//==================================
// Modified function to compute reduced potential G=1, M=1.
//==================================
//==================================
// Accumulate the softened pair potential of bodies i and j into invSum
// (reduced units, G = 1). Reads the __constant__ softeningSquared that
// set_softening() uploads; zero separation (self-interaction) is skipped.
//==================================
__device__ float bodyBodyInteraction(float invSum, float4 pos_j, float4 pos_i) {
//--- Separation along each axis
float dx = pos_i.x - pos_j.x;
float dy = pos_i.y - pos_j.y;
float dz = pos_i.z - pos_j.z;
//--- Squared distance; softening is added inside the sqrt below
float distSqr = dx*dx + dy*dy + dz*dz;
//--- Avoid itself
if (distSqr != 0) {
invSum += (pos_i.w * pos_j.w) / sqrtf(distSqr + softeningSquared);
}
return invSum;
}
//==================================
// This is the "tile_calculation" function from the GPUG3 article.
//==================================
//==================================
// Accumulate the potential contribution of one shared-memory tile of bodies
// onto myPos. SX(i) indexes this thread-row's slice of sharedPos (see macro
// above). NOTE: the 4-way manual unroll assumes blockDim.x is a multiple of
// 4; otherwise the last pass reads beyond the tile.
//==================================
__device__ float tile_potential(float4 myPos, float pot) {
extern __shared__ float4 sharedPos[];
unsigned long i = 0;
//--- Here we unroll the loop: LOOP_UNROLL = 4
for (unsigned int counter = 0; counter < blockDim.x; ) {
pot = bodyBodyInteraction(pot, SX(i++), myPos);
pot = bodyBodyInteraction(pot, SX(i++), myPos);
pot = bodyBodyInteraction(pot, SX(i++), myPos);
pot = bodyBodyInteraction(pot, SX(i++), myPos);
counter += 4;
}
return pot;
}
//==================================
// WRAP is used to force each block to start working on a different
// chunk (and wrap around back to the beginning of the array) so that
// not all multiprocessors try to read the same memory locations at once.
//==================================
#define WRAP(x,m) (((x)<m)?(x):(x-m)) // Mod without divide, works on values from 0 up to 2m
// Sum the potential on bodyPos over all numBodies, tiling the position array
// through shared memory. numBodies must be a multiple of p*q (the host pads
// to BLOCKSIZE in init_nbody).
__device__ float computePotential(float4 bodyPos, float4* positions, int numBodies){
extern __shared__ float4 sharedPos[];
float pot = 0.0f;
int p = blockDim.x;  // tile width (threads per row)
int q = blockDim.y;  // tile height
int n = numBodies;
int numTiles = n / (p * q);
for (int tile = blockIdx.y; tile < numTiles + blockIdx.y; tile++) {
// Stage one tile of positions; WRAP staggers the starting tile per block
// so blocks do not all hit the same memory locations at once.
sharedPos[threadIdx.x+blockDim.x*threadIdx.y] = positions[WRAP(blockIdx.x + tile, gridDim.x) * p + threadIdx.x];
__syncthreads();
// FIX: re-enabled the tile accumulation. With this call commented out
// (as in the original), the function always returned 0 and the staged
// shared-memory data was never used.
pot = tile_potential(bodyPos, pot);
__syncthreads();
}
return pot;
}
// Kernel: compute the potential for each body and store it in poten[].
// One thread per body; the host pads numBodies to a multiple of BLOCKSIZE
// so every thread has a valid index.
__global__ void integrateBodies(float4* Pos, float* poten, int numBodies){
//--- Global index of this thread's body (__mul24: 24-bit multiply)
int index = __mul24(blockIdx.x,blockDim.x) + threadIdx.x;
float4 pos_i = Pos[index];
//--- Accumulate this body's potential over all others
float pot = computePotential(pos_i, Pos, numBodies);
//--- Put potential in fourth field (mass) — kept disabled
//Pos[index].w = pot;
poten[index] = pot;
}
//============================================================
//
//============================================================
//--- Memory buffers on the GPU
float4 *pos1;
float *pote;
int old_buf;
int np;
int np_nopad;
//==============================================
//--- Interface routines...
//==============================================
extern "C"
{
#define BLOCKSIZE 256
// Minimal liveness check for the extern "C" interface.
void iniciar(){
printf("test\n");
}
//==================================
/* Allocate GPU memory and set initial positions */
//==================================
//==================================
/* Allocate GPU memory and upload initial positions/masses.
   The particle count is padded up to a multiple of BLOCKSIZE with
   zero-mass particles so kernels always launch full blocks. */
//==================================
void init_nbody(int _n, float *_pos, float *_mass){
int i;
// Pad with zero mass particles if not a multiple of blocksize
np= (_n/BLOCKSIZE)*BLOCKSIZE;
if(np<_n){np = np + BLOCKSIZE;}
// Allocate GPU arrays
cudaMalloc((void **) &pos1, sizeof(float4)*np);
cudaMalloc((void **) &pote, sizeof(float) *np);
// Stage initial conditions as float4 (xyz = position, w = mass)
float *posbuf = (float *) malloc(4*sizeof(float)*np);
float *potbuf = (float *) malloc( sizeof(float)*np);
for(i=0; i<_n; i++){
posbuf[4*i+0] = _pos[3*i+0];
posbuf[4*i+1] = _pos[3*i+1];
posbuf[4*i+2] = _pos[3*i+2];
posbuf[4*i+3] = _mass[i];
potbuf[i] = 0.0f;
}
// Pad particles (zero position, zero mass)
for(i=_n; i<np; i++){
posbuf[4*i+0] = 0.0;
posbuf[4*i+1] = 0.0;
posbuf[4*i+2] = 0.0;
posbuf[4*i+3] = 0.0;
potbuf[i] = 0.0;
}
//Copy to GPU
old_buf = 1;
cudaMemcpy(pos1, posbuf, sizeof(float4)*np, cudaMemcpyHostToDevice);
cudaMemcpy(pote, potbuf, sizeof(float )*np, cudaMemcpyHostToDevice);
np_nopad = _n;
// FIX: release the host staging buffers — the original leaked both on
// every call.
free(posbuf);
free(potbuf);
}
cudaEvent_t evt;
int underway = 0;
//==================================
/* Do the actual potential */
//==================================
//==================================
/* Launch the potential kernel asynchronously. Dynamic shared memory holds
   one float4 per thread (4 floats * BLOCKSIZE). An event is recorded after
   the launch so nbody_finished() can poll for completion. */
//==================================
void compute_potential(void) {
/* Execute the kernel: np/BLOCKSIZE blocks of BLOCKSIZE threads
   (np was padded to a multiple of BLOCKSIZE in init_nbody) */
dim3 Dg(np/BLOCKSIZE);
dim3 Db(BLOCKSIZE);
size_t Ns = 4 * sizeof(float) * BLOCKSIZE;
cudaEventCreate(&evt);
integrateBodies <<< Dg, Db, Ns >>> (pos1, pote, np);
cudaEventRecord(evt, 0);
underway = 1;
}
//==================================
/* Check whether the calculation is done */
//==================================
//==================================
/* Poll the completion event: returns 1 (and destroys the event) once the
   kernel has finished, 0 while it is still running. */
//==================================
int nbody_finished() {
if(cudaEventQuery(evt) != cudaErrorNotReady){
cudaEventDestroy(evt);
underway = 0;
return 1;
}
return 0;
}
//==================================
/* Shut down and deallocate */
//==================================
//==================================
/* Release the GPU buffers, first waiting for any in-flight kernel.
   (`dump` is kept for interface compatibility; it is not used.) */
//==================================
void dealloc_nbody(int dump){
if(underway==1){
while(nbody_finished()==0){ /* spin until the kernel completes */ }
}
cudaFree(pos1);
cudaFree(pote);
}
//==================================
/* Set softening */
//==================================
//==================================
/* Upload the squared Plummer softening used by bodyBodyInteraction.
   FIX: pass the __constant__ symbol itself instead of its name as a
   string — the string overload of cudaMemcpyToSymbol was removed in
   CUDA 5.0, so the original call fails (cudaErrorInvalidSymbol) on any
   modern toolkit. */
//==================================
void set_softening(float eps)
{
float eps2 = eps*eps;
cudaMemcpyToSymbol(softeningSquared, &eps2, sizeof(float), 0, cudaMemcpyHostToDevice);
}
//==================================
//--- Retrieve positions
//==================================
//==================================
//--- Copy per-particle positions/masses (float4) back into caller's buffer,
//--- waiting for any in-flight kernel first. Only the real (unpadded)
//--- particles are returned.
//==================================
void get_positions(float *buf){
if(underway==1)
while(nbody_finished()==0){}
float *staging = (float *) malloc(4*sizeof(float)*np);
cudaMemcpy(staging, pos1, sizeof(float)*4*np, cudaMemcpyDeviceToHost);
for(int i = 0; i < np_nopad; i++){
for(int c = 0; c < 4; c++)
buf[4*i+c] = staging[4*i+c];
}
free(staging);
}
//==================================
//--- Retrieve potential
//==================================
//==================================
//--- Copy per-particle potentials back into caller's buffer, waiting for
//--- any in-flight kernel first; only unpadded entries are returned.
//==================================
void get_potential(float *buf){
//--- Wait until the computation has finished
if(underway==1)
while(nbody_finished()==0){}
float *staging = (float *) malloc(sizeof(float)*np);
cudaMemcpy(staging, pote, sizeof(float)*np, cudaMemcpyDeviceToHost);
for(int i = 0; i < np_nopad; i++)
buf[i] = staging[i];
free(staging);
}
} //--- end extern "C"
|
18,828 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define MAX_LEN 256
/* Lab 7: Programs on strings
* Q1: Write a CUDA program to count the number of times
* a given word is repeated in a sentence. (Use atomic function)
*/
__constant__ int len_sentence;
__constant__ int len_word;
// Kernel: each thread tests whether `word` occurs at one starting offset in
// `sentence` (thread index == offset) and atomically bumps the match count.
// The host launches exactly len_sentence - len_word + 1 threads, so the
// comparison never reads past the end of the sentence. String lengths come
// from __constant__ memory (len_sentence, len_word).
__global__ void countWord(char *sentence, char *word, int *occurences){
int indx = threadIdx.x;
int flag = 0; // set to 1 on the first mismatching character
for(int i = 0; i < len_word; i++){
if(word[i] != sentence[i + indx]){
flag = 1;
break;
}
}
if(flag == 0)
atomicAdd(occurences, 1); // full match at this offset
}
// Count occurrences of a word inside a sentence on the GPU, one thread per
// candidate starting offset.
int main() {
// host copies of sentence and word strings
char sentence[MAX_LEN] = "This is a input sentence string string input string";
char word[MAX_LEN] = "string";
// NOTE(review): strlen is declared in <string.h>, which this file does not
// include directly — it currently compiles only via transitive includes.
int len_sentence_h = strlen(sentence);
int len_word_h = strlen(word);
// number of occurences of 'word' in 'string'
int occurences = 0;
// device copies of variables
char *d_sentence, *d_word;
int *d_occurences;
size_t sizeSentence = len_sentence_h * sizeof(char);
size_t sizeWord = len_word_h * sizeof(char);
// Allocate space for device copies of variables
cudaMalloc((void **)&d_sentence, sizeSentence);
cudaMalloc((void **)&d_word, sizeWord);
cudaMalloc((void **)&d_occurences, sizeof(int));
// Copy string lengths to constant memory on device
cudaMemcpyToSymbol(len_sentence, &len_sentence_h, sizeof(int));
cudaMemcpyToSymbol(len_word, &len_word_h, sizeof(int));
// Copy inputs to device
cudaMemcpy(d_sentence, sentence, sizeSentence, cudaMemcpyHostToDevice);
cudaMemcpy(d_word, word, sizeWord, cudaMemcpyHostToDevice);
cudaMemcpy(d_occurences, &occurences, sizeof(int), cudaMemcpyHostToDevice);
cudaError err;
// A thread for each possible starting index of 'word'
int num_threads = len_sentence_h - len_word_h + 1;
countWord<<<1, num_threads>>>(d_sentence, d_word, d_occurences);
// FIX: kernel launches do not return errors — check explicitly (the
// original never detected a failed launch, e.g. num_threads > 1024).
err = cudaGetLastError();
if(err != cudaSuccess)
printf("CUDA error launching kernel: %s\n", cudaGetErrorString(err));
// Retrieve result (blocking memcpy also synchronizes with the kernel)
err = cudaMemcpy(&occurences, d_occurences, sizeof(int), cudaMemcpyDeviceToHost);
if(err != cudaSuccess)
printf("CUDA error copying to Host: %s\n", cudaGetErrorString(err));
printf("Found %s in sentence %d times\n", word, occurences);
// Free resources
cudaFree(d_sentence);
cudaFree(d_word);
cudaFree(d_occurences);
return 0;
}
|
18,829 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <stdio.h>
// Interleaved-addressing sum reduction (the classic "reduce1" teaching
// kernel). Each block sums blockDim.x inputs into g_odata[blockIdx.x].
// Requirements: blockDim.x is a power of two, the launch provides
// blockDim.x * sizeof(int) bytes of dynamic shared memory, and the grid
// covers exactly the input (no bounds check on g_idata[i]).
// The tid % (2*s) test is known to cause heavy warp divergence.
__global__ void reduce1(int *g_idata, int *g_odata) {
extern __shared__ int sdata[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
sdata[tid] = g_idata[i]; // stage one element per thread
__syncthreads();
// pairwise tree reduction with stride doubling each pass
for(unsigned int s = 1; s < blockDim.x; s *= 2) {
if(tid % (2*s) ==0) {
sdata[tid] += sdata[tid+s];
}
__syncthreads(); // barrier outside the divergent branch (all threads reach it)
}
// thread 0 publishes the block's partial sum
if(tid == 0) {
g_odata[blockIdx.x] = sdata[0];
}
}
// Debug-only kernel: prints from the device; both parameters and the shared
// declaration are unused.
__global__ void test(int *g_idata, int *g_odata) {
extern __shared__ int sdata[];
printf("hello world\n");
}
// Driver: sum 0..31 with a single-block reduction and print the result (496).
int main() {
// Host buffers: a = input values, b = per-block partial sums.
int * a = (int*)malloc(sizeof(int)*32);
int * b = (int*)malloc(sizeof(int)*32);
for(int i = 0; i < 32; i++) {
a[i] = i;
}
int *a_dev;
int *b_dev;
cudaSetDevice(0);
cudaMalloc(&a_dev, sizeof(int)*32);
cudaMalloc(&b_dev, sizeof(int)*32);
cudaMemcpy(a_dev, a, sizeof(int)*32, cudaMemcpyHostToDevice);
// FIX: the third launch parameter is the dynamic shared-memory size in
// BYTES. The original passed 32 (bytes), but reduce1 indexes 32 ints
// (128 bytes) of extern shared memory, overrunning the allocation.
reduce1 <<<1, 32, 32 * sizeof(int)>>> (a_dev, b_dev);
cudaMemcpy(b, b_dev, sizeof(int)*32, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
printf("%d\n", b[0]); // expect 0+1+...+31 = 496
cudaFree(a_dev);
cudaFree(b_dev);
free(a);
free(b);
}
|
18,830 | #include "includes.h"
// "Same"-size 2-D convolution: one thread per output element, with the
// kernel flipped in both axes (true convolution, not correlation) and
// out-of-range taps skipped (zero padding).
// NOTE(review): data/output are indexed as [col * H + row] (column-major
// with H rows), while the filter is indexed kernel[i * kH + j] with i
// ranging over kW — for non-square filters this implies kH is the filter's
// leading dimension; confirm against the caller's layout.
__global__ void convn_same_kernel(float *output, float *data, float *kernel, const int H, const int W, const int kH, const int kW) {
// Matrix index: x = row (0..H), y = column (0..W)
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= H || y >= W)
return;
// Center offsets so the output is the same size as the input
const int i0 = kW / 2, j0 = kH / 2;
float sum = 0;
for (int i = 0; i < kW; ++i) {
for(int j = 0; j < kH; ++j) {
int ii = y - i + i0;
int jj = x - j + j0;
// skip taps that fall outside the image (implicit zero padding)
if ( ii < 0 || ii >= W || jj < 0 || jj >= H )
continue;
sum += kernel[ i * kH + j ] * data[ ii * H + jj ];
}
}
output[y * H + x] = sum;
}
18,831 | // GPU with parallelization version
// Parallelization is implemented with CUDA
#include <iostream>
#include <cstdlib>
#include <ctime>
#include <cmath>
#include <cuda.h>
using namespace std;
// __global__ means the function runs on GPU, and called from CPU (in this case the function is called by main(), which runs on CPU)
// Kernel: n x n integer matrix multiply, one thread per output ROW.
// Accumulates with += , so matrix_res must be zero-initialized by the
// caller before launch (main copies a zeroed buffer in).
__global__
void multiply_matrix(int n, int* matrixA, int* matrixB, int* matrix_res) {
int line_num = blockIdx.x * blockDim.x + threadIdx.x; // row handled by this thread
if (line_num < n) { // guard the ragged last block
for (int i = 0; i < n; i ++) {
for (int j = 0; j < n; j ++) {
//
// matrix_res[line_num][i] = sum(j) matrixA[line_num][j] * matrixB[j][i]
//
matrix_res[line_num * n + i] += matrixA[line_num * n + j] * matrixB[j * n + i];
}
}
}
}
// Driver: multiply two random n x n matrices on the GPU and time it.
// Usage: prog <n>
int main(int argc, char const *argv[])
{
if (argc < 2) {
cout << "Wrong number of arguments!!" << endl;
return -1;
}
int n = atoi(argv[1]);
//
// Generate two matrices of size n*n with random ints in [1, 512] and a
// zero matrix for the result. Matrices are flattened row-major:
// matrix[i * n + j] is the (i, j) element.
//
int *matrixA = new int[n*n];
int *matrixB = new int[n*n];
int *matrix_res = new int[n*n];
int *dMatrixA;
int *dMatrixB;
int *dMatrix_res;
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
matrixA[i * n + j] = (rand() % 512) + 1;
matrixB[i * n + j] = (rand() % 512) + 1;
matrix_res[i * n + j] = 0;
}
}
//
// Multiply matrixA * matrixB into matrix_res and time the GPU path.
//
clock_t start = clock();
//
// Allocate device-visible buffers (managed memory) and copy the inputs
// and the zeroed result (the kernel accumulates with +=).
//
cudaMallocManaged(&dMatrixA, n * n * sizeof(int));
cudaMallocManaged(&dMatrixB, n * n * sizeof(int));
cudaMallocManaged(&dMatrix_res, n * n * sizeof(int));
cudaMemcpy(dMatrixA, matrixA, n * n * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dMatrixB, matrixB, n * n * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dMatrix_res, matrix_res, n * n * sizeof(int), cudaMemcpyHostToDevice);
//
// Launch one thread per row (256 threads per block, ceil-divided grid)
// and wait for completion.
//
multiply_matrix<<< ((n + 255) / 256) , 256>>> (n, dMatrixA, dMatrixB, dMatrix_res);
// FIX: kernel launches do not return errors — detect a failed launch
// instead of silently reporting a bogus timing and zero result.
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
cout << "Kernel launch failed: " << cudaGetErrorString(err) << endl;
return -1;
}
cudaDeviceSynchronize();
cudaMemcpy(matrix_res, dMatrix_res, n * n * sizeof(int), cudaMemcpyDeviceToHost);
cout << "It takes " << (clock() - start) / (double) CLOCKS_PER_SEC << " seconds to multiply two matrices with size " << n << " * " << n << endl;
//
// Clean up device and host memory.
//
cudaFree(dMatrixA);
cudaFree(dMatrixB);
cudaFree(dMatrix_res);
delete [] matrixA;
delete [] matrixB;
delete [] matrix_res;
return 0;
}
|
18,832 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>
// Kernel: find the maximum of array[0..n) via grid-stride per-thread maxima,
// a shared-memory tree reduction per block, and one atomicMax per block.
// Requirements: blockDim.x == 1024 (the cache size) and a power of two for
// the halving reduction; *max must be pre-initialized to 0 (unsigned, so 0
// is the identity). The `mutex` parameter is unused — kept for interface
// compatibility with the caller.
__global__ void findMaxKernel(unsigned int *array, unsigned int *max, int *mutex, unsigned int n)
{
unsigned int index = threadIdx.x + blockIdx.x*blockDim.x;
unsigned int stride = gridDim.x*blockDim.x;
unsigned int offset = 0;
__shared__ unsigned int cache[1024];
// grid-stride pass: each thread folds its strided elements into temp
unsigned int temp = 0;
while(index + offset < n){
temp = (temp > array[index + offset]) ? temp : array[index+offset];
offset += stride;
}
cache[threadIdx.x] = temp;
__syncthreads();
// reduction: halve the active range each pass (barrier reached by all threads)
unsigned int i = blockDim.x/2;
while(i != 0){
if(threadIdx.x < i){
cache[threadIdx.x] = (cache[threadIdx.x] > cache[threadIdx.x + i]) ? cache[threadIdx.x] : cache[threadIdx.x + i];
}
__syncthreads();
i /= 2;
}
// one atomic per block publishes the block maximum
if(threadIdx.x == 0){
atomicMax(max, cache[0]);
}
}
// Host wrapper: copy num[0..size) to the device, run findMaxKernel, and
// return the maximum element.
// FIX: the original malloc'd a one-element h_max buffer and never freed it
// (a leak on every call); a stack variable suffices.
unsigned int getmaxcu(unsigned int num[], unsigned int size)
{
unsigned int *d_num;
unsigned int h_max = 0; // result landing spot on the host stack
unsigned int *d_max;
int *d_mutex; // required by the kernel signature (unused there)
// Allocate device memory and zero the result/mutex words
cudaMalloc((void**)&d_num, size*sizeof(unsigned int));
cudaMalloc((void**)&d_max, sizeof(unsigned int));
cudaMalloc((void**)&d_mutex, sizeof(int));
cudaMemset(d_max, 0, sizeof(unsigned int));
cudaMemset(d_mutex, 0, sizeof(int));
// Copy from host to device
cudaMemcpy(d_num, num, size*sizeof(unsigned int), cudaMemcpyHostToDevice);
// call kernel (1024 threads per block matches the kernel's cache size)
dim3 gridSize = 256;
dim3 blockSize = 1024;
findMaxKernel<<< gridSize, blockSize >>>(d_num, d_max, d_mutex, size);
// Copy from device to host (blocking memcpy synchronizes with the kernel)
cudaMemcpy(&h_max, d_max, sizeof(unsigned int), cudaMemcpyDeviceToHost);
// free device memory
cudaFree(d_num);
cudaFree(d_max);
cudaFree(d_mutex);
return h_max;
}
// Driver: fill an array of the requested size with random numbers and print
// its maximum as computed on the GPU. Usage: maxseq <num>
int main(int argc, char *argv[])
{
unsigned int size = 0;  // The size of the array
unsigned int i;         // loop index
unsigned int * numbers; // pointer to the array
if(argc !=2)
{
printf("usage: maxseq num\n");
printf("num = size of the array\n");
exit(1);
}
size = atol(argv[1]);
// FIX: reject size == 0 — the original fell through to rand() % size,
// which is a division by zero (undefined behavior).
if(size == 0)
{
printf("num must be a positive integer\n");
exit(1);
}
numbers = (unsigned int *)malloc(size * sizeof(unsigned int));
if( !numbers )
{
printf("Unable to allocate mem for an array of size %u\n", size);
exit(1);
}
srand(time(NULL)); // setting a seed for the random number generator
// Fill-up the array with random numbers from 0 to size-1
for( i = 0; i < size; i++)
numbers[i] = rand() % size;
printf(" The maximum number in the array is: %u\n", getmaxcu(numbers, size));
free(numbers);
exit(0);
}
|
18,833 | // Listing 5.1: dd_1d_global/main.cpp
#include <iostream>
#include <fstream>
#include <cuda_runtime.h>
#define TPB 64 // thread per block
// Second-derivative kernel: d_out[i] = (d_in[i+1] + d_in[i-1] - 2*d_in[i]) / h^2.
// One thread per sample; the two boundary samples are set to 0 since the
// centered difference needs both neighbors.
__global__
void ddKernel(float *d_out, const float *d_in, int size, float h)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
// FIX: guard must be >= — the original `i > size` let thread i == size
// through, reading d_in[size+1] and writing d_out[size] out of bounds.
if (i >= size) return;
if (i == 0 || i == size-1) {
d_out[i] = 0;
return;
}
d_out[i] = (d_in[i+1] + d_in[i-1] - 2.0f*d_in[i])/(h*h);
}
// Host wrapper: copy `in` to the device, launch ddKernel over n samples,
// and copy the second derivative back into `out`.
void ddParallel(float *out, const float *in, int n, float h)
{
// Device buffers for the input samples and the derivative output.
float *d_in = 0;
float *d_out = 0;
const size_t bytes = n * sizeof(float);
cudaMalloc(&d_out, bytes);
cudaMalloc(&d_in, bytes);
cudaMemcpy(d_in, in, bytes, cudaMemcpyHostToDevice);

// Ceil-divide so every sample gets a thread.
const int blocks = (n + TPB - 1) / TPB;
ddKernel<<<blocks, TPB>>>(d_out, d_in, n, h);

// The blocking copy also synchronizes with the kernel.
cudaMemcpy(out, d_out, bytes, cudaMemcpyDeviceToHost);
cudaFree(d_out);
cudaFree(d_in);
}
// Driver: differentiate u = sin(x) on [0, 2*pi) with the GPU stencil and
// dump (x, u, u'', u + u'') to results.csv. Since u'' = -sin(x), the last
// column should be ~0 away from the endpoints.
int main()
{
const float PI = 3.1415926;
const int N = 150;
const float h = 2*PI/N; // grid spacing

float x[N] = {0.0f};
float u[N] = {0.0f};
float result_parallel[N] = {0.0f};

// Sample the input function on the grid.
for (int k = 0; k < N; k++) {
x[k] = k * (2 * PI / N);
u[k] = sinf(x[k]);
}

ddParallel(result_parallel, u, N, h);

// Columns: x[i], u[i], d2u/dx2[i], u[i] + d2u/dx2[i].
std::ofstream outfile;
outfile.open("results.csv");
for (int k = 0; k < N; k++) {
outfile << x[k] << ", " << u[k] << ", "
<< result_parallel[k] << ", " << result_parallel[k] + u[k] << "\n";
}
outfile.close();

std::cout << "dd_1d_global\n";
}
|
18,834 | #include "includes.h"
// Kernel: scatter columns of `in` (rows x colsIn) into `out` (rows x colsOut),
// placing input column i at output column targetColIdx[i].
// Work distribution: blocks stride over rows (blockIdx.x + multiples of
// gridDim.x) and threads stride over input columns within a row, so any
// grid/block size covers the full matrix.
// NOTE(review): targetColIdx entries are assumed to be < colsOut — not
// checked here; confirm against the caller.
__global__ void gPasteCols(float* out, const float* in, size_t rows, size_t colsOut, const size_t* targetColIdx, size_t colsIn) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x; // row handled by this block in this pass
if(j < rows) {
const float* rowIn = in + j * colsIn;
float* rowOut = out + j * colsOut;
for(int tid = 0; tid < colsIn; tid += blockDim.x) {
int i = tid + threadIdx.x; // input column handled by this thread
if(i < colsIn)
rowOut[targetColIdx[i]] = rowIn[i];
}
}
}
}
18,835 | // #include "RayMarchSampler.h"
// #include <crt/host_defines.h>
// #include "float3Extension.h"
// #include "math.h"
// #include <vector_functions.hpp>
// #include "float4x4.h"
//
// // #include "Ray.h"
// // #include "Camera.h"
// // #include "float3Extension.h"
// // #include <curand_discrete2.h>
// // #include <device_launch_parameters.h>
// // #include <curand_kernel.h>
// // #include <cstdio>
// // #include "RayHit.h"
// // #include "Objects.h"
// // #include "float3x3.h"
// // #include "Float2Byte.h"
// // #include "float2Extension.h"
// namespace RayMatch
// {
// #define EPSILON 0.001f
// #define MAX_DST 200
// #define MAX_STEP_COUNT 250
//
// struct Data
// {
// float power;
// float darkness;
// float blackAndWhite;
// float3 colourAMix;
// float3 colourBMix;
// float4x4 _CameraToWorld;
// float4x4 _CameraInverseProjection;
// float3 _LightDirection;
//
// };
//
// struct Ray {
// float3 origin;
// float3 direction;
// };
// Ray CreateRay(float3 origin, float3 direction) {
// Ray ray;
// ray.origin = origin;
// ray.direction = direction;
// return ray;
// }
// Ray CreateCameraRay(float2 uv) {
// float3 origin = mul(_CameraToWorld, float4(0, 0, 0, 1)).xyz;
// float3 direction = mul(_CameraInverseProjection, float4(uv, 0, 1)).xyz;
// direction = mul(_CameraToWorld, float4(direction, 0)).xyz;
// direction = normalize(direction);
// return CreateRay(origin, direction);
// }
//
// float2 SceneInfo(float3 position,Data data) {
// auto z = position;
// float dr = 1.0;
// float r = 0.0;
// auto iterations = 0;
//
// for (auto i = 0; i < 15; i++) {
// iterations = i;
// r = Float3::Length (z);
//
// if (r > 2) {
// break;
// }
//
// // convert to polar coordinates
// float theta = acos(z.z / r);
// float phi = atan2(z.y, z.x);
// dr = pow(r, data.power - 1.0) * data.power * dr + 1.0;
//
// // scale and rotate the point
// const float zr = pow(r, data.power);
// theta = theta * data.power;
// phi = phi * data.power;
//
// // convert back to cartesian coordinates
// z = zr * make_float3(sin(theta) * cos(phi), sin(phi) * sin(theta), cos(theta));
// z =z+ position;
// }
// float dst = 0.5 * log(r) * r / dr;
// return make_float2(iterations, dst * 1);
// }
//
// float3 EstimateNormal(float3 p,Data data) {
// float x = SceneInfo(make_float3(p.x + EPSILON, p.y, p.z), data).y - SceneInfo(make_float3(p.x - EPSILON, p.y, p.z), data).y;
// float y = SceneInfo(make_float3(p.x, p.y + EPSILON, p.z), data).y - SceneInfo(make_float3(p.x, p.y - EPSILON, p.z), data).y;
// float z = SceneInfo(make_float3(p.x, p.y, p.z + EPSILON), data).y - SceneInfo(make_float3(p.x, p.y, p.z - EPSILON), data).y;
// return Float3::UnitVector(make_float3(x, y, z));
// }
//
// __global__ void RayMatchSampler()
// {
// int width, height;
// float2 uv = id.xy / make_float2(width, height);
//
// auto data = Data();
//
// // Background gradient
// float4 result = lerp(make_float4(51, 3, 20, 1), make_float4(16, 6, 28, 1), uv.y) / 255.0;
//
// // Raymarching:
// Ray ray = CreateCameraRay(uv * 2 - 1);
// float rayDst = 0;
// int marchSteps = 0;
//
// while (rayDst < MAX_DST && marchSteps < MAX_STEP_COUNT) {
// marchSteps++;
// float2 sceneInfo = SceneInfo(ray.origin,data);
// float dst = sceneInfo.y;
//
// // Ray has hit a surface
// if (dst <= EPSILON) {
// float escapeIterations = sceneInfo.x;
// float3 normal = EstimateNormal(ray.origin - ray.direction * EPSILON * 2);
//
// float colourA = saturate(Float3::Dot(normal * .5 + .5, -data._LightDirection));
// float colourB = saturate(escapeIterations / 16.0);
// float3 colourMix = saturate(colourA * data.colourAMix + colourB * data.colourBMix);
//
// result = make_float4(colourMix.x,colourMix.y, colourMix.z, 1);
// break;
// }
// ray.origin = ray.origin+ ray.direction * dst;
// rayDst += dst;
// }
//
// float rim = marchSteps / data.darkness;
// //Destination[id.xy] = lerp(result, 1, blackAndWhite) * rim;
// }
//
// } |
18,836 | #define THREADS_PER_BLOCK 128
#include <cmath>
#include <chrono>
#include <cstring>
#include <fstream>
#include <iostream>
#include <stdexcept>
#include "tiffio.h"
// saves TIFF file from data in `raster`
// saves TIFF file from data in `raster` (w x h matrix of packed 8-bit ABGR)
// Throws std::runtime_error if the output file cannot be opened.
void save_tiff(const char *fname, uint32 *raster, uint32 w, uint32 h) {
	TIFF *tif = TIFFOpen(fname, "w");
	// FIX: the original tested `raster` here, so a failed TIFFOpen
	// (tif == NULL) fell through and crashed inside TIFFSetField.
	if (! tif) {
		throw std::runtime_error("Could not open output file");
	}
	TIFFSetField(tif, TIFFTAG_IMAGEWIDTH, w);
	TIFFSetField(tif, TIFFTAG_IMAGELENGTH, h);
	TIFFSetField(tif, TIFFTAG_SAMPLESPERPIXEL, 4);   // RGBA
	TIFFSetField(tif, TIFFTAG_BITSPERSAMPLE, 8);
	TIFFSetField(tif, TIFFTAG_COMPRESSION, COMPRESSION_DEFLATE);
	TIFFSetField(tif, TIFFTAG_ORIENTATION, ORIENTATION_TOPLEFT);
	TIFFSetField(tif, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_RGB);
	TIFFSetField(tif, TIFFTAG_PLANARCONFIG, PLANARCONFIG_CONTIG);
	TIFFWriteEncodedStrip(tif, 0, raster, w*h*4);   // 4 bytes per pixel
	TIFFClose(tif);
}
// loads image data from `fname` (allocating dynamic memory)
// *w and *h are updated with the image dimensions
// raster is a matrix flattened into an array using row-major order
// every uint32 in the array is 4 bytes, enconding 8-bit packed ABGR
// A: transparency attribute (can be ignored)
// B: blue pixel
// G: green pixel
// R: red pixel
// Loads image data from `fname` (allocating dynamic memory with _TIFFmalloc).
// *w and *h are updated with the image dimensions.
// The returned raster is a row-major flattened matrix; each uint32 packs
// 8-bit ABGR (A = transparency, can be ignored).
// Throws std::runtime_error on open, allocation, or decode failure.
uint32 *load_tiff(const char *fname, uint32 *w, uint32 *h) {
  TIFF *tif = TIFFOpen(fname, "r");
  if (! tif) {
    throw std::runtime_error("Could not open input file");
  }
  TIFFGetField(tif, TIFFTAG_IMAGEWIDTH, w);
  TIFFGetField(tif, TIFFTAG_IMAGELENGTH, h);
  uint32 *raster = (uint32 *) _TIFFmalloc(*w * *h * sizeof (uint32));
  if (! raster) {
    TIFFClose(tif);
    throw std::runtime_error("Memory allocation error");
  }
  if (! TIFFReadRGBAImageOriented(tif, *w, *h, raster, ORIENTATION_TOPLEFT, 0)) {
    // BUG FIX: `raster` was leaked on this error path; release it before
    // throwing.
    _TIFFfree(raster);
    TIFFClose(tif);
    throw std::runtime_error("Could not read raster from TIFF image");
  }
  TIFFClose(tif);
  return raster;
}
// Clamps *val in place to the displayable channel range [0, 255].
void clamp(float *val) {
  if (*val < 0) {
    *val = 0;
  } else if (*val > 255) {
    *val = 255;
  }
}
// Device-side clamp of *val into [0, 255] (mirror of the host `clamp`).
__device__ void cuda_clamp(float *val) {
  float x = *val;
  *val = (x < 0) ? 0 : ((x > 255) ? 255 : x);
}
// Sequential (CPU) convolution of `raster` (w x h, packed ABGR) with a square
// filter of f_len coefficients (so the kernel is d x d with d = sqrt(f_len)).
// Border pixels within d/2 of any edge are left untouched.
// to get RGB values from a pixel, you can either use bitwise masks
// or rely on the following macros:
//   TIFFGetR(raster[i]) red
//   TIFFGetG(raster[i]) green
//   TIFFGetB(raster[i]) blue
//   TIFFGetA(raster[i]) this value should be ignored
// each pixel stores values in the order ABGR
void filter_image_seq(uint32 *raster, uint32 w, uint32 h, const float *filter, int f_len) {
  // Work on a snapshot so already-filtered pixels do not feed later ones.
  uint32 *copy = new uint32[w*h];
  std::memcpy(copy, raster, sizeof(uint32)*w*h);
  uint32 d = (uint32) std::sqrt(f_len);   // filter side length
  uint32 idx, pixel;
  uint32 st = d / 2;                      // border margin skipped on each side
  uint32 end_w = w - d/2;
  uint32 end_h = h - d/2;
  float sumR, sumG, sumB;
  // applies filter to every interior pixel
  for (uint32 i = st ; i < end_h ; i++) {
    for (uint32 j = st ; j < end_w ; j++) {
      sumR = sumG = sumB = 0;
      for (uint32 k = 0 ; k < d ; k ++) {
        // idx walks row k of the d x d window centred on (i, j)
        idx = (i-st+k)*w + (j-st);
        for (uint32 l = 0 ; l < d ; l++) {
          pixel = copy[idx++];
          sumR += (filter[k*d + l] * TIFFGetR(pixel));
          sumG += (filter[k*d + l] * TIFFGetG(pixel));
          sumB += (filter[k*d + l] * TIFFGetB(pixel));
        }
      }
      // Keep channels in [0, 255] before repacking.
      clamp(&sumR);
      clamp(&sumG);
      clamp(&sumB);
      // Repack ABGR, preserving the original alpha byte.
      raster[i*w + j] = TIFFGetA(raster[i*w + j]) << 24 | ((uint32) sumB << 16) | ((uint32) sumG << 8) | ((uint32) sumR);
    }
  }
  delete [] copy;
}
// GPU convolution kernel: each thread filters the interior pixels it owns in a
// 2D grid-stride pattern. `copy` is the untouched input snapshot, `raster` the
// output image; d = sqrt(f_len), st = d/2, [st, end_w) x [st, end_h) is the
// interior region.
// Launch: 2D grid of 2D blocks (any size; the stride loop covers the rest).
__global__ void filter_image_cuda(uint32 *raster, uint32 *copy, uint32 w, uint32 h, const float *filter, int f_len, uint32 d, uint32 st, uint32 end_w, uint32 end_h) {
  uint32 start_i = (blockIdx.y * blockDim.y) + threadIdx.y + st;
  uint32 start_j = (blockIdx.x * blockDim.x) + threadIdx.x + st;
  // BUG FIX: the loops below previously incremented i and j by 1, so every
  // thread redundantly recomputed a whole sub-rectangle (thread (0,0) did the
  // entire image). A proper grid-stride assigns each pixel to exactly one
  // thread; the produced values are unchanged.
  uint32 stride_i = gridDim.y * blockDim.y;
  uint32 stride_j = gridDim.x * blockDim.x;
  uint32 idx, pixel;
  float sumR, sumG, sumB;
  // applies filter
  for (uint32 i = start_i ; i < end_h ; i += stride_i) {
    for (uint32 j = start_j ; j < end_w ; j += stride_j) {
      sumR = sumG = sumB = 0;
      for (uint32 k = 0 ; k < d ; k ++) {
        idx = (i-st+k)*w + (j-st);          // row k of the window around (i, j)
        for (uint32 l = 0 ; l < d ; l++) {
          pixel = copy[idx++];
          sumR += (filter[k*d + l] * TIFFGetR(pixel));
          sumG += (filter[k*d + l] * TIFFGetG(pixel));
          sumB += (filter[k*d + l] * TIFFGetB(pixel));
        }
      }
      cuda_clamp(&sumR);
      cuda_clamp(&sumG);
      cuda_clamp(&sumB);
      // Repack ABGR, preserving the alpha byte already present in raster.
      raster[i*w + j] = TIFFGetA(raster[i*w + j]) << 24 | ((uint32) sumB << 16) | ((uint32) sumG << 8) | ((uint32) sumR);
    }
  }
}
// Parallel (GPU) version of filter_image_seq. Copies the image and filter to
// the device, launches filter_image_cuda on an n_blocks x n_blocks grid of
// n_threads x n_threads blocks, and copies the result back into `raster`.
void filter_image_par(uint32 *raster, uint32 w, uint32 h, const float *filter, int f_len, int n_threads, int n_blocks) {
  // Filter geometry (same math as the sequential version).
  uint32 d = (uint32) std::sqrt(f_len);
  uint32 st = d / 2;
  uint32 end_w = w - d/2;
  uint32 end_h = h - d/2;
  uint32 n = w*h;
  // Launch configuration.
  dim3 threadsPerBlock(n_threads, n_threads, 1);
  dim3 numBlocks(n_blocks, n_blocks, 1);
  // Device buffers.
  uint32 *copy_in;
  uint32 *raster_out;
  float *filter_in;
  // variable to check for CUDA errors
  cudaError_t status;
  // choose GPU to run
  status = cudaSetDevice(0);
  if (status != cudaSuccess) std::cerr << "cudaSetDevice failed!" << std::endl;
  // allocate space for the arrays in the GPU
  // BUG FIX: this line was garbled to "cudaMalloc(©_in, ...)" (mojibake of
  // "&copy_in"), which does not compile.
  status = cudaMalloc(&copy_in, sizeof(uint32) * n);
  if (status != cudaSuccess) std::cerr << "cudaMalloc (copy_in) failed!" << std::endl;
  status = cudaMalloc(&raster_out, sizeof(uint32) * n);
  if (status != cudaSuccess) std::cerr << "cudaMalloc (raster_out) failed!" << std::endl;
  status = cudaMalloc(&filter_in, sizeof(float) * f_len);
  if (status != cudaSuccess) std::cerr << "cudaMalloc (filter) failed!" << std::endl;
  // transfer data from CPU to GPU
  status = cudaMemcpy(copy_in, raster, sizeof(uint32) * n, cudaMemcpyHostToDevice);
  if (status != cudaSuccess) std::cerr << "cudaMemcpy H2D failed! - copy" << std::endl;
  status = cudaMemcpy(raster_out, raster, sizeof(uint32) * n, cudaMemcpyHostToDevice);
  if (status != cudaSuccess) std::cerr << "cudaMemcpy H2D failed! - raster" << std::endl;
  status = cudaMemcpy(filter_in, filter, sizeof(float) * f_len, cudaMemcpyHostToDevice);
  if (status != cudaSuccess) std::cerr << "cudaMemcpy H2D failed! - filter" << std::endl;
  // Do the work in the GPU
  filter_image_cuda<<<numBlocks,threadsPerBlock>>>(raster_out, copy_in, w, h, filter_in, f_len, d, st, end_w, end_h);
  // Catch launch-configuration errors immediately...
  status = cudaGetLastError();
  if (status != cudaSuccess) std::cerr << "kernel launch failed: " << cudaGetErrorString(status) << std::endl;
  // ...then wait for the kernel and catch asynchronous execution errors.
  // (cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
  // supported equivalent.)
  status = cudaDeviceSynchronize();
  if (status != cudaSuccess) std::cerr << "error code " << status << " returned after kernel!" << std::endl;
  // transfer results from GPU to CPU
  status = cudaMemcpy(raster, raster_out, sizeof(uint32) * n, cudaMemcpyDeviceToHost);
  if (status != cudaSuccess) std::cerr << "cudaMemcpy D2H failed! - final" << std::endl;
  // Free device memory.
  cudaFree(copy_in);
  cudaFree(raster_out);
  cudaFree(filter_in);
}
// Reads a filter file: first an integer count *n, then *n float coefficients.
// Allocates the coefficient array with new[] (caller frees with delete[]).
// Throws std::runtime_error if the file cannot be opened.
float *load_filter(const char *fname, int *n) {
  std::ifstream in(fname);
  if (! in) {
    throw std::runtime_error("Could not open filter file");
  }
  in >> *n;
  float *coeffs = new float[*n];
  for (int k = 0 ; k < *n ; k++) {
    in >> coeffs[k];
  }
  in.close();
  return coeffs;
}
// Entry point: loads an image and a filter, runs the sequential or parallel
// convolution (timed), and writes the filtered image back out.
int main(int argc, char* argv[]) {
  if (argc != 7) {
    // BUG FIX: the usage line omitted <n_threads> and <n_blocks> even though
    // six arguments are mandatory (argc must be 7).
    std::cout << "Usage:\t./filter <in_fname> <out_fname> <filter_fname> <algo> <n_threads> <n_blocks>" << std::endl;
    std::cout << "<in_fname> path to the input image" << std::endl;
    std::cout << "<out_fname> path to the output image" << std::endl;
    std::cout << "<filter_fname> path to the filter file" << std::endl;
    std::cout << "<algo> whether to use the sequential (seq) or parallel algorithm (par)" << std::endl;
    std::cout << "<n_threads> number of threads to use (Ex: enter 5 for 25 threads/block)" << std::endl;
    std::cout << "<n_blocks> number of blocks to use [Ex: enter 2 for 4 blocks]" << std::endl;
    // Signal the usage error to the shell (previously returned 0).
    return 1;
  }
  uint32 width, height;
  int n_threads = std::stoi(argv[5]);
  int n_blocks = std::stoi(argv[6]);
  // loads the filter (load_filter allocates with new[])
  int f_len;
  float *filter = load_filter(argv[3], &f_len);
  // loads image bytes; load_tiff allocates the raster dynamically
  uint32 *image = load_tiff(argv[1], &width, &height);
  // measure time of the chosen algorithm only (I/O excluded)
  auto start = std::chrono::high_resolution_clock::now();
  if (! std::strcmp(argv[4], "seq")) {
    filter_image_seq(image, width, height, filter, f_len);
  } else if (! std::strcmp(argv[4], "par")) {
    filter_image_par(image, width, height, filter, f_len, n_threads, n_blocks);
  } else {
    // Unknown algorithm name: warn instead of silently writing the input back.
    std::cerr << "Unknown <algo> '" << argv[4] << "'; expected 'seq' or 'par'" << std::endl;
  }
  auto end = std::chrono::high_resolution_clock::now();
  std::chrono::duration<double> diff = end - start;
  std::cout << diff.count() << std::endl;
  // save new file with filtered image
  save_tiff(argv[2], image, width, height);
  // frees memory allocated by load_filter and load_tiff
  delete [] filter;
  _TIFFfree(image);
  return 0;
}
#include "includes.h"
// Computes C = A * B where A is numARows x numAColumns, B is
// numBRows x numBColumns, and C is numCRows x numCColumns (all row-major).
// Precondition: numAColumns == numBRows, numCRows == numARows,
// numCColumns == numBColumns.
// Launch with a 2D grid covering C: thread (x, y) writes C[row=y][col=x];
// out-of-range threads exit via the bounds guard.
__global__ void matrixMultiply(float *A, float *B, float *C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) {
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  if (row < numCRows && col < numCColumns) {
    float acc = 0.0f;
    // Dot product of row `row` of A with column `col` of B.
    for (int k = 0; k < numAColumns; ++k) {
      acc += A[row * numAColumns + k] * B[k * numBColumns + col];
    }
    C[row * numCColumns + col] = acc;
  }
}
/*
Copyright (c) 2013-2015, Gregory P. Meyer
University of Illinois Board of Trustees
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holder(s) nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
namespace dip {

// Prints a human-readable CUDA error (with the given file and line) to stderr
// when `result` is not cudaSuccess; silent otherwise. Does not abort --
// callers continue after a reported error.
void CUDAError(cudaError_t result, char *file, int line) {
  if (result != cudaSuccess) {
    fprintf(stderr,"CUDA Error: %s %s %d\n", cudaGetErrorString(result),
            file, line);
  }
}

} // namespace dip
|
// Softmax cross-entropy loss, forward pass.
// One thread per (h, w, n) position of the bottom blob; the softmax is taken
// over the third dimension (bs[2] channels). Each thread atomically adds its
// per-position loss into *loss.
// bs = bottom size {h, w, c, n, total-label-count}; ls = label size.
// NOTE(review): the caller is expected to divide the accumulated loss by
// bs[0]*bs[1]*bs[3] (see trailing comment) -- confirm against the host code.
__global__ void SoftMaxLossForward(const float* bottom_data, const int* bs, const float* label, const float* label_weight,
const int* ls, const float threshold, const int label_start, bool hasLabel_weight, float* loss) {
  // bs = bottomSize
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index >= bs[0]*bs[1]*1*bs[3]) return;   // one thread per (h, w, n)
  // get current index, [h,w,'i',n]
  int h = index % bs[0];
  int w = (index / bs[0]) % bs[1];
  int n = index / bs[0] / bs[1];
  int ind_in_btm = 0;
  int label_ind = 0;
  if (ls[0]*ls[1]*ls[2]*ls[3] == bs[4]) {
    // One label per sample: skip samples whose label is below label_start
    // (treated as "ignore").
    if (label[n] < label_start) return;
    label_ind = n;
  }else{ //btm size 0,2,3 == label size 0,2,3, and ls[2] = 1
    label_ind = n*bs[1]*bs[0] + w*bs[0] + h;
  }
  // Offset of the ground-truth channel for this (h, w, n) position.
  ind_in_btm = n*bs[2]*bs[1]*bs[0] + ((int)label[label_ind]-label_start)*bs[1]*bs[0] + w*bs[0] + h;
  // Max over the channel dimension, for numerically stable softmax.
  float maxValueInThirdDim = bottom_data[n*bs[2]*bs[1]*bs[0] + w*bs[0] + h];
  for(int i=0; i<bs[2]; i++){
    maxValueInThirdDim = max(maxValueInThirdDim, bottom_data[n*bs[2]*bs[1]*bs[0] + i*bs[1]*bs[0] + w*bs[0] + h]);
  }
  // Softmax denominator: sum of shifted exponentials over channels.
  float y = 0;
  for(int i=0; i<bs[2]; i++){
    y += exp(bottom_data[n*bs[2]*bs[1]*bs[0] + i*bs[1]*bs[0] + w*bs[0] + h] - maxValueInThirdDim); // sum third dim
  }
  // Softmax probability of the ground-truth channel.
  y = exp(bottom_data[ind_in_btm]-maxValueInThirdDim)/y;
  // Accumulate -log(p), clipped at `threshold` to avoid -log(0); optionally
  // weighted per label.
  if (hasLabel_weight){
    atomicAdd(loss, -label_weight[label_ind]*log(max(y, threshold)));
  }else{
    atomicAdd(loss, -log(max(y, threshold)));
  }
  //the output must divide bs[0]*bs[1]*bs[3]
}
// Softmax cross-entropy loss, backward pass.
// One thread per element of the bottom blob; writes the gradient
// (softmax(x) - 1 at the ground-truth channel, softmax(x) elsewhere), scaled
// by top_diff and normalized by bs[0]*bs[1]*bs[3], into bottom_diff.
// label_ind == -1 marks an "ignored" label (label < label_start).
__global__ void SoftMaxLossBackward(const float* bottom_data, const int* bs, const float* label, const float* label_weight,
const int* ls, const float top_diff, const float threshold, const int label_start, bool hasLabel_weight, float* bottom_diff) {
  // bs = bottomSize
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  int bs_len = bs[0]*bs[1]*bs[2]*bs[3];
  if (index >= bs_len) return;
  // get current index, [h,w,c,n]
  int h = index % bs[0];
  int w = (index / bs[0]) % bs[1];
  int c = (index / bs[0] / bs[1]) % bs[2];
  int n = index / bs[0] / bs[1] / bs[2];
  int ind_in_btm = 0;
  int label_ind = 0;
  if (ls[0]*ls[1]*ls[2]*ls[3] == bs[4]) {
    // One label per sample; -1 flags an ignored sample.
    if (label[n] < label_start) {
      label_ind = -1;
    }else{
      label_ind = n;
    };
  }else{ //btm size 0,2,3 == label size 0,2,3, and ls[2] = 1
    label_ind = n*bs[1]*bs[0] + w*bs[0] + h;
  }
  // Offset of the ground-truth channel, or -1 when the label is ignored.
  if (label_ind == -1)
    ind_in_btm = -1;
  else
    ind_in_btm = n*bs[2]*bs[1]*bs[0] + ((int)label[label_ind]-label_start)*bs[1]*bs[0] + w*bs[0] + h;
  // Max over the channel dimension for numerical stability.
  float maxValueInThirdDim = bottom_data[index]; // get current value first
  for(int i=0; i<bs[2]; i++){
    maxValueInThirdDim = max(maxValueInThirdDim, bottom_data[n*bs[2]*bs[1]*bs[0] + i*bs[1]*bs[0] + w*bs[0] + h]);
  }
  // Softmax denominator (clipped by `threshold` below).
  float y = 0;
  for(int i=0; i<bs[2]; i++){
    y += exp(bottom_data[n*bs[2]*bs[1]*bs[0] + i*bs[1]*bs[0] + w*bs[0] + h] - maxValueInThirdDim); // sum third dim
  }
  // Gradient: softmax probability, minus 1 at the ground-truth channel.
  y = exp(bottom_data[index]-maxValueInThirdDim)/max(y, threshold);
  if (ind_in_btm == index) y = y-1.0;
  // Scale by top_diff / (h*w*n), with optional per-label weight.
  if (hasLabel_weight){
    if (label_ind == -1)
      bottom_diff[index] = y*top_diff/bs[0]/bs[1]/bs[3];
    else
      bottom_diff[index] = label_weight[label_ind]*y*top_diff/bs[0]/bs[1]/bs[3];
  }else{
    bottom_diff[index] = y*top_diff/bs[0]/bs[1]/bs[3];
  }
}
//pass
//--gridDim=[4,1,1] --blockDim=[512,1,1]
// Fills od[] with `value`, one element per thread.
// NOTE(review): there is no bounds guard -- the launch configuration
// (gridDim * blockDim) is assumed to match the array length exactly.
__global__ void
initValue(float *od, float value)
{
    // flat global index of this thread's write into global memory
    unsigned int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    od[idx] = value;
    // block-wide barrier retained from the original decomposition step
    __syncthreads();
}
|
#include <stdio.h>
#define SIZE 1024
// Element-wise vector addition: c[i] = a[i] + b[i] for i < n.
// Generalized to multi-block launches: the index now includes blockIdx.x,
// which is backward compatible with the existing single-block <<<1, SIZE>>>
// launch (blockIdx.x == 0 there).
__global__ void VectorAdd( int * a, int *b, int* c, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if(i < n)
        c[i] = a[i] + b[i];
}
// Demo driver: adds two SIZE-element vectors on the GPU and prints the first
// ten results.
int main()
{
    const size_t bytes = SIZE * sizeof(int);

    // Host buffers.
    int *h_a = (int *)malloc(bytes);
    int *h_b = (int *)malloc(bytes);
    int *h_c = (int *)malloc(bytes);

    // Device buffers.
    int *d_a, *d_b, *d_c;
    cudaMalloc(&d_a, bytes);
    cudaMalloc(&d_b, bytes);
    cudaMalloc(&d_c, bytes);

    // Initialize inputs: a[i] = b[i] = i, output cleared.
    for (int i = 0; i < SIZE; ++i) {
        h_a[i] = i;
        h_b[i] = i;
        h_c[i] = 0;
    }

    // Stage inputs on the device (d_c is seeded from h_b, as in the original).
    cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_c, h_b, bytes, cudaMemcpyHostToDevice);

    // One block of SIZE threads computes the whole sum.
    VectorAdd<<< 1, SIZE>>>(d_a, d_b, d_c, SIZE);

    // Fetch the result back to the host.
    cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);

    for (int i = 0; i < 10; ++i) {
        printf("c[%d] = %d \n", i, h_c[i]);
    }

    free(h_a);
    free(h_b);
    free(h_c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
#include "includes.h"
// Block-wise AND-reduction of per-node convergence flags.
// Each CUDA block handles one active tile (looked up through ActiveList) and
// writes a single bool: true iff every node in the tile has converged.
__global__ void run_reduction(bool *con, bool *blockCon,int* ActiveList, int nActiveBlock, int* blockSizes)
{
  int list_idx = blockIdx.y*gridDim.x + blockIdx.x;
  int maxblocksize = blockDim.x;   // stride of the per-tile flag array
  int tx = threadIdx.x;
  int block_idx = ActiveList[list_idx];
  int blocksize = blockSizes[block_idx];
  __shared__ bool s_block_conv;
  // FIX: previously every thread wrote `s_block_conv = true`, a multi-writer
  // race on shared memory (benign in value, but flagged by racecheck).
  // A single writer plus the barrier below gives the same result cleanly.
  if(tx == 0)
    s_block_conv = true;
  __syncthreads();
  if(tx < blocksize)
  {
    // Any unconverged node clears the flag (all writers store `false`,
    // so concurrent stores agree).
    if(!con[maxblocksize*block_idx+tx])
      s_block_conv = false;
  }
  __syncthreads();
  if(tx == 0)
  {
    blockCon[block_idx] = s_block_conv; // active list is negation of tile convergence (active = not converged)
  }
}
/*
* CUDA Peer to Peer Example
*/
#include<cuda.h>
#include<stdio.h>
#include<sys/time.h>
#define SIZE 1048576
#define THREADS_PER_BLOCK 256
#define FLOAT(t) ((float)(t).tv_sec+((float)(t).tv_usec)/1000000)
#define CHECK_RUN( errorDescription ) { cudaError_t cerror; \
if( (cerror = cudaGetLastError()) != cudaSuccess ){ \
printf("execution aborted (function : %s, file : %s, line : %d) :\n %s -> %s\n", \
__func__, __FILE__, __LINE__, \
errorDescription, \
cudaGetErrorString(cerror) ); \
return 0; } }
int run_copy_without_gpudirect(float *ha, float *tmp, float *hb, float *g0, float *g1, int size);
int run_copy_with_gpudirect(float *ha, float *tmp, float *hb, float *g0, float *g1, int size);
void check_results(float *a);
__global__ void kernel_gpu0 (float *g0);
__global__ void kernel_gpu1 (float *g1);
int threadsPerBlock, blocksPerGrid;
// Peer-to-peer demo driver: allocates a buffer on each of two GPUs with peer
// access enabled, times a two-kernel pipeline first staged through the host
// and then using GPUDirect, verifies results, and cleans up.
// CHECK_RUN returns 0 from main on the first CUDA error it observes.
int main()
{
    struct timeval before, after, t1, t2;
    int size = SIZE*sizeof(float);
    threadsPerBlock = THREADS_PER_BLOCK;
    blocksPerGrid = SIZE / threadsPerBlock;   // SIZE is a multiple of 256
    // Allocate memory on the host
    float *ha, *hb, *tmp;
    ha = (float*)malloc(size);
    hb = (float*)malloc(size);
    tmp = (float*)malloc(size);
    // Initialize the data (ha drives the pipeline; hb receives the result)
    int i;
    for(i=0; i<SIZE; i++)
    {
        ha[i] = 0;
        hb[i] = 0;
        tmp[i] = 0;
    }
    // Allocate memory on both the devices and enable peer access
    float *g0, *g1;
    cudaSetDevice(0);
    CHECK_RUN("Set Device");
    // NOTE(review): cudaSetDevice(0) is called twice in a row here --
    // harmless, but the second call looks like it was meant for device 1.
    cudaSetDevice(0);
    CHECK_RUN("Set Device");
    cudaDeviceEnablePeerAccess(1, 0); // PeerGPU, flags
    CHECK_RUN("Enable Peer Access");
    cudaMalloc(&g0, size);
    CHECK_RUN("Alloc g0");
    cudaSetDevice(1);
    CHECK_RUN("Set Device");
    cudaDeviceEnablePeerAccess(0, 0); // PeerGPU, flags
    CHECK_RUN("Enable Peer Access");
    cudaMalloc(&g1, size);
    CHECK_RUN("Alloc g1");
    // Copy Data Without GPUdirect (staged through host memory)
    cudaSetDevice(0);
    CHECK_RUN("Set Device");
    gettimeofday(&before, NULL);
    run_copy_without_gpudirect(ha, tmp, hb, g0, g1, size);
    gettimeofday(&after, NULL);
    timersub(&after, &before, &t1);
    printf("Time without GPUdirect: %0.6f ms\n", FLOAT(t1)*1000);
    // Check results (also resets hb to 0 for the next run)
    check_results(hb);
    // Copy Data With GPUdirect (direct device-to-device transfer)
    cudaSetDevice(0);
    CHECK_RUN("Set Device");
    gettimeofday(&before, NULL);
    run_copy_with_gpudirect(ha, tmp, hb, g0, g1, size);
    gettimeofday(&after, NULL);
    timersub(&after, &before, &t2);
    printf("Time with GPUdirect: %0.6f ms\n", FLOAT(t2)*1000);
    // Check results
    check_results(hb);
    // Free host memory
    free(ha);
    free(hb);
    free(tmp);
    // Free device memory and disable peer access (per device)
    cudaSetDevice(0);
    CHECK_RUN("Set Device");
    cudaDeviceDisablePeerAccess(1);
    CHECK_RUN("Disable Peer Access");
    cudaFree(g0);
    CHECK_RUN("Free g0");
    cudaSetDevice(1);
    CHECK_RUN("Set Device");
    cudaDeviceDisablePeerAccess(0);
    CHECK_RUN("Disable Peer Access");
    cudaFree(g1);
    CHECK_RUN("Free g1");
    return 0;
}
// Runs the two-kernel pipeline WITHOUT GPUDirect: the intermediate result is
// staged through host memory (`tmp`) between GPU 0 and GPU 1.
// ha -> g0, kernel_gpu0(g0), g0 -> tmp -> g1, kernel_gpu1(g1), g1 -> hb.
// Returns 0 (CHECK_RUN also returns 0 early on any CUDA error).
int run_copy_without_gpudirect(float *ha, float *tmp, float *hb, float *g0, float *g1, int size)
{
    cudaSetDevice(0);
    CHECK_RUN("Set Device 0");
    cudaMemcpy(g0, ha, size, cudaMemcpyHostToDevice);
    CHECK_RUN("Copy ha -> g0");
    kernel_gpu0<<<blocksPerGrid, threadsPerBlock>>>(g0);
    CHECK_RUN("Launch kernel_gpu0");
    // Stage the intermediate result through the host (no peer copy).
    cudaMemcpy(tmp, g0, size, cudaMemcpyDeviceToHost);
    CHECK_RUN("Copy g0 -> tmp");
    cudaSetDevice(1);
    CHECK_RUN("Set Device 1");
    cudaMemcpy(g1, tmp, size, cudaMemcpyHostToDevice);
    CHECK_RUN("Copy tmp -> g1");
    kernel_gpu1<<<blocksPerGrid, threadsPerBlock>>>(g1);
    CHECK_RUN("Launch kernel_gpu1");
    cudaMemcpy(hb, g1, size, cudaMemcpyDeviceToHost);
    CHECK_RUN("Copy g1 -> hb");
    return 0;
}
// Runs the two-kernel pipeline WITH GPUDirect: the intermediate result moves
// directly from GPU 0 to GPU 1 via cudaMemcpyPeer (peer access was enabled in
// main), skipping the host bounce buffer.
// Returns 0 (CHECK_RUN also returns 0 early on any CUDA error).
int run_copy_with_gpudirect(float *ha, float *tmp, float *hb, float *g0, float *g1, int size)
{
    cudaSetDevice(0);
    CHECK_RUN("Set Device 0");
    cudaMemcpy(g0, ha, size, cudaMemcpyHostToDevice);
    CHECK_RUN("Copy ha -> g0");
    kernel_gpu0<<<blocksPerGrid, threadsPerBlock>>>(g0);
    CHECK_RUN("Launch kernel_gpu0");
    // Direct device-to-device transfer: dst g1 on device 1, src g0 on device 0.
    cudaMemcpyPeer(g1, 1, g0, 0, size);
    CHECK_RUN("Peer copy g0 -> g1");
    cudaSetDevice(1);
    CHECK_RUN("Set Device 1");
    kernel_gpu1<<<blocksPerGrid, threadsPerBlock>>>(g1);
    CHECK_RUN("Launch kernel_gpu1");
    cudaMemcpy(hb, g1, size, cudaMemcpyDeviceToHost);
    CHECK_RUN("Copy g1 -> hb");
    return 0;
}
// Stage 1: increments each element that is still 0 (expected after init),
// producing all-ones.
__global__
void kernel_gpu0 (float *g0)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= SIZE)
        return;
    if (g0[idx] == 0)
        g0[idx] += 1;
}
// Stage 2: increments each element that is 1 (output of kernel_gpu0),
// producing all-twos.
__global__
void kernel_gpu1 (float *g1)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= SIZE)
        return;
    if (g1[idx] == 1)
        g1[idx] += 1;
}
// Verifies the pipeline output: every element must equal 2. Prints a
// pass/fail message and zeroes the buffer for the next run (only when the
// check passes, matching the original early-return on failure).
void check_results(float *hb)
{
    for (int i = 0; i < SIZE; ++i)
    {
        if (hb[i] != 2)
        {
            printf("Test Result Failed\n");
            return;
        }
        hb[i] = 0;   // reset so the buffer can be reused
    }
    printf("Test Result Successful\n");
}
|
/*
This version is "NO Streaming" version.
12/16 Try streaming!
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_profiler_api.h>
#include <time.h>
// #define TIME
// #define CUDA_NVPROF
const int BLOCKING_FACTOR = 32; // 32, 16, 8, 4, 2
const int INF = ((1 << 30) - 1);
// Global var stored in Data Section.
// const int V = 40010;
void input(char* inFileName);
void output(char* outFileName);
void print_ans(int num_V, char* ans_file);
void block_FW(int B);
void block_FW_Large_N(int B);
int ceil(int a, int b);
// void cal(int n, int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height);
// Shared memory: For each block, each thread brings d[i][j] to s[i][j] !
//
// extern __shared__ int S[];
// Flattened offset of element (i, j) within the matrixIdx-th N x N matrix
// of a contiguous array of row-major matrices.
__device__ inline int Addr(int matrixIdx, int i, int j, int N){
  int matrixBase = N * N * matrixIdx;
  int rowBase = i * N;
  return matrixBase + rowBase + j;
}
// W: width, H: height
// __device__ inline int Addr2(int matrixIdx, int i, int j, int W, int H){
// return( W*H*matrixIdx + i*W + j);
// }
// TODO: Bank Conflict!
// TRY pahse1: Let thread(Idx.x, Idx.y) access in diagonally! Same WARP NO bank conflict.
// PHASE 1 : ONE Block do k iterations with B*B threads.
// __global__ void cal(int* device_Dist, int n, int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height){
// Phase-1 kernel of blocked Floyd-Warshall: ONE B x B block relaxes the pivot
// tile (block_start_y, block_start_x) through B iterations entirely in shared
// memory. Launch: <<<1, dim3(B, B)>>> with B <= 32 (static 32*32 smem tile).
__global__ void cal(int* device_Dist, int n, int B, int Round, int block_start_x, int block_start_y){
  __shared__ int S[32*32];
  // Global row/column handled by this thread.
  int i = block_start_y*B + threadIdx.y;
  int j = block_start_x*B + threadIdx.x;
  if(i<n && j<n){
    // Load the pivot tile into shared memory, one element per thread.
    S[ Addr(0,threadIdx.y, threadIdx.x, B) ] = device_Dist[Addr(0,i,j,n)];
    // Relax through the B pivot vertices of this round; the k-loop is
    // inherently sequential (each iteration depends on the previous one).
    for (int iter = 0; iter<B && Round*B+iter <n; iter++){
      // Barrier BEFORE each relaxation so every thread sees the tile state
      // produced by the previous iteration.
      // NOTE(review): __syncthreads() sits inside `if(i<n && j<n)`; this is
      // only safe when the guard is uniform across the block (n a multiple of
      // B, or the pivot tile fully interior) -- confirm for ragged edges.
      __syncthreads();
      if (S[Addr(0, threadIdx.y, iter, B)]+ S[Addr(0, iter, threadIdx.x, B)] < S[Addr(0,threadIdx.y, threadIdx.x, B)] ) {
        S[Addr(0,threadIdx.y, threadIdx.x, B)] = S[Addr(0, threadIdx.y, iter, B)]+ S[Addr(0, iter, threadIdx.x, B)];
      }
    }
    // Write the relaxed tile back to global memory.
    device_Dist[Addr(0,i,j,n)] = S[Addr(0,threadIdx.y, threadIdx.x, B)];
  }// end if(i<n && j<n )
}
// Why cal3 don't need sync_threads() and can perform all correct?
// Each thread do k calculation (O(k))
// __global__ void cal3(int* device_Dist, int n, int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height){
// Phase-2/3 kernel of blocked Floyd-Warshall: each CUDA block relaxes one
// B x B tile against the current round's pivot row tile (S slab 1) and pivot
// column tile (S slab 2), both staged in shared memory. Unlike phase 1, the
// pivot slabs are read-only during the k-loop, so a single barrier after
// loading suffices.
__global__ void cal3(int* device_Dist, int n, int B, int Round, int block_start_x, int block_start_y){
  // Three B x B slabs: 0 = this tile, 1 = pivot-row slab, 2 = pivot-col slab.
  __shared__ int S[32*32*3];
  int i = block_start_y* B + blockIdx.y * B + threadIdx.y;
  int j = block_start_x* B + blockIdx.x * B + threadIdx.x;
  // Stage the pivot slabs (guarded separately: they can be in range even when
  // (i, j) itself is out of range, and vice versa).
  if(i<n && (Round*B + threadIdx.x) <n) S[Addr(1, threadIdx.y, ((Round*B + threadIdx.x)%B), B)] = device_Dist[Addr(0,i,(Round*B + threadIdx.x),n)];
  if(j<n && (Round*B + threadIdx.y)<n) S[Addr(2, ((Round*B + threadIdx.y)%B), threadIdx.x, B)] = device_Dist[Addr(0,(Round*B + threadIdx.y),j,n)];
  if(i<n && j<n){
    // For each thread, calculate one edge.
    S[ Addr(0,threadIdx.y, threadIdx.x, B) ] = device_Dist[Addr(0,i,j,n)];
    // One barrier after loading; the pivot slabs are never written below.
    // NOTE(review): this __syncthreads() is inside `if(i<n && j<n)` -- safe
    // only when the guard is uniform across the block; confirm for edge tiles.
    __syncthreads();
    // Sequential relaxation over this round's B pivot vertices.
    for (int iter = 0; iter<B && Round*B+iter <n; iter++){
      //       (i, k)                          (k, j)                            (i, j)
      if (S[Addr(1, threadIdx.y, iter, B)]+ S[Addr(2, iter, threadIdx.x, B)] < S[Addr(0,threadIdx.y, threadIdx.x, B)] ) {
        S[Addr(0,threadIdx.y, threadIdx.x, B)] = S[Addr(1, threadIdx.y, iter, B)]+ S[Addr(2, iter, threadIdx.x, B)];
      }
    }
    // Write the relaxed tile back.
    device_Dist[Addr(0,i,j,n)] = S[Addr(0,threadIdx.y, threadIdx.x, B)];
  }
}
int n, m;
// static int Dist[V][V];
int* Dist;
// Entry point: reads the binary graph (argv[1]), runs blocked Floyd-Warshall
// on the GPU with blocking factor BLOCKING_FACTOR, and writes the distance
// matrix to argv[2]. With -DTIME, also reports I/O and total wall time using
// CLOCK_MONOTONIC.
int main(int argc, char* argv[]) {
#ifdef TIME
  struct timespec total_starttime;
  struct timespec total_temp;
  struct timespec start;
  struct timespec end;
  struct timespec temp;
  double IO_time=0.0;
  double Total_time = 0.0;
  clock_gettime(CLOCK_MONOTONIC, &total_starttime);
  clock_gettime(CLOCK_MONOTONIC, &start);
#endif
  // Fills globals n, m and the n*n adjacency matrix Dist.
  input(argv[1]);
#ifdef TIME
  // Accumulate input time, borrowing a nanosecond on underflow.
  clock_gettime(CLOCK_MONOTONIC, &end);
  if ((end.tv_nsec - start.tv_nsec) < 0) {
    temp.tv_sec = end.tv_sec-start.tv_sec-1;
    temp.tv_nsec = 1000000000 + end.tv_nsec - start.tv_nsec;
  } else {
    temp.tv_sec = end.tv_sec - start.tv_sec;
    temp.tv_nsec = end.tv_nsec - start.tv_nsec;
  }
  IO_time += temp.tv_sec + (double) temp.tv_nsec / 1000000000.0;
#endif
  // Blocking factor B: B*B threads per block, so B is capped at 32
  // (1024 threads/block hardware limit).
  int B;
  B = BLOCKING_FACTOR;
  block_FW(B);
#ifdef TIME
  clock_gettime(CLOCK_MONOTONIC, &start);
#endif
  // Writes the result matrix (INF-clamped) to the output file.
  output(argv[2]);
#ifdef TIME
  clock_gettime(CLOCK_MONOTONIC, &end);
  // IO Time
  if ((end.tv_nsec - start.tv_nsec) < 0) {
    temp.tv_sec = end.tv_sec-start.tv_sec-1;
    temp.tv_nsec = 1000000000 + end.tv_nsec - start.tv_nsec;
  } else {
    temp.tv_sec = end.tv_sec - start.tv_sec;
    temp.tv_nsec = end.tv_nsec - start.tv_nsec;
  }
  // Total Time
  if ((end.tv_nsec - total_starttime.tv_nsec) < 0) {
    total_temp.tv_sec = end.tv_sec-total_starttime.tv_sec-1;
    total_temp.tv_nsec = 1000000000 + end.tv_nsec - total_starttime.tv_nsec;
  } else {
    total_temp.tv_sec = end.tv_sec - total_starttime.tv_sec;
    total_temp.tv_nsec = end.tv_nsec - total_starttime.tv_nsec;
  }
  IO_time += temp.tv_sec + (double) temp.tv_nsec / 1000000000.0;
  Total_time = total_temp.tv_sec + (double) total_temp.tv_nsec / 1000000000.0;
#endif
#ifdef TIME
  printf("IO Time: %.8f seconds\n", IO_time);
  printf("Total Time: %.8f seconds\n",Total_time);
#endif
  // Optional answer check: print_ans(n, argv[3]);
  return 0;
}
// Reads the binary graph file: two ints (n = #vertices, m = #edges) followed
// by m (src, dst, weight) int triples. Allocates and fills the global n*n
// matrix Dist (0 on the diagonal, INF elsewhere, then the edge weights).
// Exits with an error message on open or read failure (previously unchecked,
// which made a truncated or missing file crash later with garbage data).
void input(char* infile) {
  FILE* file = fopen(infile, "rb");
  if (file == NULL) {
    fprintf(stderr, "Cannot open input file: %s\n", infile);
    exit(1);
  }
  if (fread(&n, sizeof(int), 1, file) != 1 ||   // n = num_vertices
      fread(&m, sizeof(int), 1, file) != 1) {   // m = num_edges
    fprintf(stderr, "Failed to read graph header from %s\n", infile);
    fclose(file);
    exit(1);
  }
  printf("V: %d, E: %d\n",n,m);
  Dist = (int*) malloc(sizeof(int)*n*n);
  // Initialize adjacency matrix: 0 on the diagonal, INF elsewhere.
  for (int i = 0; i < n; ++i) {
    for (int j = 0; j < n; ++j) {
      if (i == j) {
        Dist[i*n+j] = 0;
      } else {
        Dist[i*n+j] = INF;
      }
    }
  }
  // Sequentially read input edges and fill them into the adjacency matrix.
  int pair[3];
  for (int i = 0; i < m; ++i) {
    if (fread(pair, sizeof(int), 3, file) != 3) {
      fprintf(stderr, "Truncated edge list in %s (edge %d of %d)\n", infile, i, m);
      fclose(file);
      exit(1);
    }
    Dist[ pair[0]*n+ pair[1]] = pair[2];
  }
  fclose(file);
}
// Debug helper: compares the computed Dist against a reference answer file
// (n*n ints) and prints every mismatching offset.
// FIX: the answer file was never closed and the reference buffer never freed;
// the FILE* was also used without an open check.
void print_ans(int num_V, char* ans_file){
  FILE* file = fopen(ans_file, "rb");
  if (file == NULL) {
    fprintf(stderr, "Cannot open answer file: %s\n", ans_file);
    return;
  }
  int* Ans = (int*)malloc(sizeof(int)*n*n);
  if (fread(Ans, sizeof(int), n*n, file) != (size_t)(n*n)) {
    fprintf(stderr, "Truncated answer file: %s\n", ans_file);
  }
  for(int i=0; i<num_V*num_V; i++){
    if(Dist[i] != Ans[i]){
      printf("Wrong at offset %d, expected %d but get %d\n", i*4, Ans[i], Dist[i]);
      printf("Fron %d to %d , cost: %d\n", (i/n), (i%n), Ans[i] );
    }
  }
  free(Ans);
  fclose(file);
}
// Writes the n*n distance matrix to outFileName as raw ints, row by row,
// clamping any value above INF down to INF first.
// FIX: opened with "w" (text mode) while using fwrite for binary data --
// on platforms with newline translation this corrupts the output; "wb" is
// the correct mode (identical behavior on POSIX).
void output(char* outFileName) {
  FILE* outfile = fopen(outFileName, "wb");
  for (int i = 0; i < n; ++i) {
    for (int j = 0; j < n; ++j) {
      // Saturate "unreachable" sums that crept above INF during relaxation.
      if (Dist[i*n+j] >= INF) Dist[i*n+j] = INF;
    }
    fwrite(Dist+i*n, sizeof(int), n, outfile);
  }
  fclose(outfile);
}
// Integer ceiling division (for non-negative a and positive b).
int ceil(int a, int b) {
  int biased = a + b - 1;
  return biased / b;
}
// 1204: Idea1 : one stream with 9 serialize kernel launch?
// memory to pass to GPU: B, r, r, r, 1, 1. ALL constant! No memory copy.
// Blocked Floyd-Warshall driver. For each of ceil(n, B) rounds r:
//   phase 1: relax the pivot tile (r, r)          -- `cal`, one block
//   phase 2: relax the pivot row and pivot column -- `cal3`, 1D grids
//   phase 3: relax all remaining tiles            -- `cal3`, 2D grids
// All launches go to the default stream, so the phase ordering within a round
// is preserved without explicit synchronization. With -DTIME, H2D/D2H copy
// and kernel time are measured with CUDA events.
void block_FW(int B) {
  int round = ceil(n, B);
  int *device_Dist;
  cudaMalloc(&device_Dist, n * n* sizeof(unsigned int));
#ifdef TIME
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaEventRecord(start);
#endif
  // Upload the full distance matrix once; all rounds work in device memory.
  cudaMemcpy(device_Dist, Dist, n* n*sizeof(unsigned int), cudaMemcpyHostToDevice);
#ifdef TIME
  cudaEventRecord(stop);
  cudaEventSynchronize(stop); // WAIT until 'stop' complete.
  float Comm_time; // H2D
  cudaEventElapsedTime(&Comm_time, start, stop);
#endif
  // B*B threads per block (threadIdx.x/y each range over [0, B)).
  dim3 num_threads(B,B);
#ifdef TIME
  cudaEvent_t compt_start, compt_stop;
  cudaEventCreate(&compt_start);
  cudaEventCreate(&compt_stop);
  cudaEventRecord(compt_start);
#endif
#ifdef CUDA_NVPROF
  cudaProfilerStart();
#endif
  for (int r = 0; r < round; ++r) {
    fflush(stdout);
    /* Phase 1: the single pivot tile (r, r). */
    cal<<< 1, num_threads , sizeof(int)*B*B*3>>> (device_Dist, n, B, r, r, r);
    /* Phase 2: tiles sharing the pivot's row or column. */
    // Pivot column above the pivot tile (r tiles).
    if(r !=0){
      dim3 nB(1,r);
      cal3<<< nB, num_threads , sizeof(int)*B*B*3>>>(device_Dist, n, B, r, r, 0);
    }
    // Pivot column below the pivot tile.
    if(round -r-1 !=0){
      dim3 nB(1,round - r - 1);
      cal3<<< nB, num_threads , sizeof(int)*B*B*3>>>(device_Dist, n, B, r, r, r + 1);
    }
    // Pivot row left of the pivot tile.
    if(r!=0){
      dim3 nB(r,1);
      cal3<<< nB, num_threads , sizeof(int)*B*B*3 >>>(device_Dist, n, B, r, 0, r);
    }
    // Pivot row right of the pivot tile.
    if(round-r-1 !=0) {
      dim3 nB(round - r - 1,1);
      cal3<<< nB , num_threads, sizeof(int)*B*B*3 >>>(device_Dist, n, B, r, r + 1, r);
    }
    /* Phase 3: the four quadrants of tiles that share neither the pivot's
       row nor its column. */
    // Upper-left quadrant.
    if(r != 0){
      dim3 nB(r,r);
      cal3<<< nB, num_threads , sizeof(int)*B*B*3 >>>(device_Dist, n, B, r, 0, 0);
    }
    // Lower-left quadrant.
    if(r !=0 && (round-r-1) !=0){
      dim3 nB(r,(round-r-1));
      cal3<<< nB, num_threads , sizeof(int)*B*B*3 >>>(device_Dist, n, B, r, 0, r + 1);
    }
    // Upper-right quadrant.
    if(r !=0 && round-r-1 !=0){
      dim3 nB((round-r-1),r);
      cal3<<< nB ,num_threads , sizeof(int)*B*B*3 >>>(device_Dist, n, B, r, r + 1, 0);
    }
    // Lower-right quadrant.
    if(round-r-1 !=0){
      dim3 nB_p3(round - r - 1, round - r - 1);
      cal3<<< nB_p3, num_threads, sizeof(int)*B*B*3 >>>(device_Dist, n, B, r, r + 1, r + 1);
    }
  }
#ifdef CUDA_NVPROF
  cudaProfilerStop();
#endif
#ifdef TIME
  cudaEventRecord(compt_stop);
  cudaEventSynchronize(compt_stop); // WAIT until 'stop' complete.
  float compt_time;
  cudaEventElapsedTime(&compt_time, compt_start, compt_stop);
  printf("Computation Time: %.8f seconds\n",compt_time/1000);
#endif
#ifdef TIME
  cudaEventRecord(start);
#endif
  // Download the final distance matrix back to the host.
  cudaMemcpy(Dist, device_Dist, n * n *sizeof(unsigned int), cudaMemcpyDeviceToHost);
#ifdef TIME
  cudaEventRecord(stop);
  cudaEventSynchronize(stop); // WAIT until 'stop' complete.
  float D2H_Comm_time;
  cudaEventElapsedTime(&D2H_Comm_time, start, stop);
  printf("Memory Copy Time: %.8f seconds\n", (D2H_Comm_time + Comm_time ) /1000);
#endif
}
// // For Large n.: Don't use Synchronize.
// // n > 5000
// void block_FW_Large_N(int B) {
// printf("Blocking factor: %d (num of pixel(adj entries) in a Block)\n",B);
// printf(" %d * %d block\n",B,B);
// int round = ceil(n, B);
// // #ifdef TIME
// // cudaEvent_t start, stop;
// // cudaEventCreate(&start);
// // cudaEventCreate(&stop);
// // cudaEventRecord(start);
// // #endif
// // cudaMemcpy();
// int *device_Dist;
// // cudaMalloc(&device_Dist, V * V* sizeof(unsigned int));
// cudaMalloc(&device_Dist, n * n* sizeof(unsigned int));
// // cudaMemcpy(...) copy source image to device (mask matrix if necessary)
// cudaMemcpy(device_Dist, Dist, n* n*sizeof(unsigned int), cudaMemcpyHostToDevice);
// #ifdef TIME
// cudaEvent_t start, stop;
// cudaEventCreate(&start);
// cudaEventCreate(&stop);
// cudaEventRecord(start);
// #endif
// // printf("Initial matrix: \n");
// // print_ans(n);
// // 2*2 threadIdx.x from 0 to 1, Idx.y from 0 to 1
// dim3 num_threads(B,B);
// /////// CREATE 4 STREAMS ///////////
// const int num_streams = 4;
// cudaStream_t streams[num_streams];
// float *data[num_streams];
// for (int i = 0; i < num_streams; i++) {
// cudaStreamCreate(&streams[i]);
// }
// // cudaDeviceReset();
// for (int r = 0; r < round; ++r) {
// // printf("%d %d\n", r, round);
// fflush(stdout);
// /* Phase 1*/
// // EX: 3*3 Blocks. At iteration k (round r), send D(r,r)
// // cal<<< 1, num_threads , sizeof(int)*B*(B+1)>>> (device_Dist, n, B, r, r, r, 1, 1);
// cal<<< 1, num_threads , sizeof(int)*B*B>>> (device_Dist, n, B, r, r, r, 1, 1);
// // cudaDeviceSynchronize();
// /* Phase 2*/
// ////////////// WIDTH blocks (height == 1) /////////////////
// // if(r !=0){
// // dim3 nB(1,r);
// // cal2<<< nB, num_threads , sizeof(int)*B*B*3>>>(device_Dist, n, B, r, r, 0, r, 1);
// // }
// // if(round -r-1 !=0){
// // dim3 nB(1,round - r - 1);
// // cal2<<< nB, num_threads , sizeof(int)*B*B*3>>>(device_Dist, n, B, r, r, r + 1, round - r - 1, 1);
// // }
// // //////////// HEIGHT blocks (width == 1) /////////////
// // if(r!=0){
// // dim3 nB(r,1);
// // cal2<<< nB, num_threads , sizeof(int)*B*B*3 >>>(device_Dist, n, B, r, 0, r, 1, r);
// // }
// // if(round-r-1 !=0) {
// // dim3 nB(round - r - 1,1);
// // cal2<<< nB , num_threads, sizeof(int)*B*B*3 >>>(device_Dist, n, B, r, r + 1, r, 1, round - r - 1);
// // }
// if(r !=0){
// dim3 nB(1,r);
// cal3<<< nB, num_threads , sizeof(int)*B*B*3,streams[0]>>>(device_Dist, n, B, r, r, 0, r, 1);
// }
// if(round -r-1 !=0){
// dim3 nB(1,round - r - 1);
// cal3<<< nB, num_threads , sizeof(int)*B*B*3,streams[1]>>>(device_Dist, n, B, r, r, r + 1, round - r - 1, 1);
// }
// //////////// HEIGHT blocks (width == 1) /////////////
// if(r!=0){
// dim3 nB(r,1);
// cal3<<< nB, num_threads , sizeof(int)*B*B*3 ,streams[2]>>>(device_Dist, n, B, r, 0, r, 1, r);
// }
// if(round-r-1 !=0) {
// dim3 nB(round - r - 1,1);
// cal3<<< nB , num_threads, sizeof(int)*B*B*3,streams[3] >>>(device_Dist, n, B, r, r + 1, r, 1, round - r - 1);
// }
// // cudaDeviceSynchronize();
// /* Phase 3*/ // => USE 2D block!
// // 計算其他的 block
// // 和pivot block 在 x 軸和 y 軸都沒有交集的 blocks!
// if(r != 0){
// dim3 nB(r,r);
// cal3<<< nB, num_threads , sizeof(int)*B*B*3 ,streams[0] >>>(device_Dist, n, B, r, 0, 0, r, r);
// }
// if(r !=0 && (round-r-1) !=0){
// dim3 nB(r,(round-r-1));
// cal3<<< nB, num_threads , sizeof(int)*B*B*3 ,streams[1] >>>(device_Dist, n, B, r, 0, r + 1, round - r - 1, r);
// }
// if(r !=0 && round-r-1 !=0){
// dim3 nB((round-r-1),r);
// cal3<<< nB ,num_threads , sizeof(int)*B*B*3 ,streams[2] >>>(device_Dist, n, B, r, r + 1, 0, r, round - r - 1);
// }
// if(round-r-1 !=0){
// dim3 nB_p3(round - r - 1, round - r - 1);
// cal3<<< nB_p3, num_threads, sizeof(int)*B*B*3 ,streams[3] >>>(device_Dist, n, B, r, r + 1, r + 1, round - r - 1, round - r - 1);
// }
// // cudaDeviceSynchronize();
// }
// // cudaMemcpy(Dist, device_Dist, n * n *sizeof(unsigned int), cudaMemcpyDeviceToHost);
// #ifdef TIME
// cudaEventRecord(stop);
// cudaEventSynchronize(stop); // WAIT until 'stop' complete.
// float time;
// cudaEventElapsedTime(&time, start, stop);
// // printf("Took %.8f milliseconds",time);
// printf("Took %.8f seconds",time/1000);
// #endif
// cudaMemcpy(Dist, device_Dist, n * n *sizeof(unsigned int), cudaMemcpyDeviceToHost);
// }
|
18,845 | #include "assignmentHPC1.cuh"
#include <iostream>
#include <cstdlib>
using namespace std;
// Runs max/min/sum/standard-deviation benchmarks (CPU vs GPU, implemented in
// assignmentHPC1.cuh) over a 512M-element vector of 1.0 values.
int main() {
    // 512M doubles => ~4 GiB host allocation.
    unsigned int N = 1024*1024*512;
    double *arr_host = (double *)malloc(N * sizeof(double));
    if (arr_host == NULL) {
        // A ~4 GiB request can legitimately fail; report instead of crashing.
        cout << "Host allocation of " << N << " doubles failed" << endl;
        return 1;
    }
    // Constant input keeps the expected results trivial to verify by hand.
    for(unsigned int i = 0; i < N; i++) {
        arr_host[i] = 1 ;//rand()%(1024*1024) + 10;
    }
    cout<<"\n\n---------------------------------- RESULTS --------------------------------------\n"<<endl;
    cout<<"INPUT SIZE "<<endl;
    cout<<"Vector Size : "<<N<<" * "<<1<<endl;
    // Vector Maximum on CPU & GPU
    cout<<"\n\n---------------------------------- MAX\n\n"<<endl;
    find_max(arr_host, N);
    // Vector Minimum on CPU & GPU
    cout<<"\n\n---------------------------------- MIN\n\n"<<endl;
    find_min(arr_host, N);
    // Vector Sum on CPU & GPU
    cout<<"\n\n---------------------------------- SUM\n\n"<<endl;
    find_sum(arr_host, N);
    // Standard Deviation on CPU & GPU
    cout<<"\n\n---------------------------------- SD\n\n"<<endl;
    find_sd(arr_host, N);
    // Fix: the original leaked the host buffer.
    free(arr_host);
    return 0;
}
18,846 | #include <new>
// Trivially-constructible record used to demonstrate placement-new on raw
// device storage; `value` defaults to a recognizable sentinel (0x1234).
struct Foo
{
    int value = 0x1234;
};
// Each thread placement-constructs two Foo objects into its private pair of
// slots in `storage`, publishing a pointer to the first via `initialized`.
__global__ void kernel_independent(Foo* storage, Foo** initialized)
{
    Foo* slot = storage + 2 * threadIdx.x;
    initialized[threadIdx.x] = new (slot) Foo;
    new (slot + 1) Foo;
}
|
18,847 | #include <iostream>
#include <math.h>
// Element-wise vector addition on the host: y[i] = x[i] + y[i] for i in [0, n).
// Fix: the function is declared to return int but had no return statement,
// which is undefined behaviour in C++; it now returns 0 on completion
// (the signature is kept so existing callers still compile).
int add(int n, float *x, float *y)
{
  for (int i = 0; i < n; i++)
    y[i] = x[i] + y[i];
  return 0;
}
// Driver: fills two 1M-element vectors, sums them element-wise on the host,
// then releases the buffers.
int main(void)
{
  const int N = 1 << 20;
  float *x = new float[N];
  float *y = new float[N];
  // Initialize the operands: x = 1.0 and y = 2.0 everywhere.
  for (int i = 0; i < N; ++i)
  {
    x[i] = 1.0f;
    y[i] = 2.0f;
  }
  // Run kernel on 1M elements on GPU
  add(N, x, y);
  // Free mem
  delete[] x;
  delete[] y;
  return 0;
}
18,848 | /*
Faz a soma dos elementos de dois vetores
Exemplifica o uso de cudaMallocHost() para alocar memoria paginada no host e
o uso de cudaFreeHost para desalocar()
Para compilar: nvcc 01-soma-vet-pinned.cu -o 01-soma-vet-pinned
Para executar: ./01-soma-vet-pinned
OBS: os valores de tamanho do vetor e o conteudo do vetor
estao fixos no codigo
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
// Element-wise sum kernel: vetorC = vetorA + vetorB for indices below tam.
__global__ void soma(int *vetorA, int *vetorB,int *vetorC,int tam)
{
    int pos = blockDim.x * blockIdx.x + threadIdx.x;
    if (pos >= tam)
        return; // tail threads of the last block have no element to process
    vetorC[pos] = vetorA[pos] + vetorB[pos];
}
// Host driver: allocates pinned host buffers and device buffers, adds two
// small vectors on the GPU with the `soma` kernel, and prints the result.
int main(int argc,char **argv)
{
    int i,*vetorA,*vetorB,*vetorC,threadsPerBlock,blocksPerGrid;
    int *vetorA_d,*vetorB_d,*vetorC_d;
    int tam= 16; // 5000;
    // Number of threads per block
    threadsPerBlock = 256;
    // Allocate the vectors on the host (pinned / page-locked memory)
    cudaMallocHost((void**)&vetorA,tam*(sizeof(int)));
    cudaMallocHost((void**)&vetorB,tam*(sizeof(int)));
    cudaMallocHost((void**)&vetorC,tam*(sizeof(int)));
    // Allocate the vectors on the device
    cudaMalloc((void**)&vetorA_d,tam*(sizeof(int)));
    cudaMalloc((void**)&vetorB_d,tam*(sizeof(int)));
    cudaMalloc((void**)&vetorC_d,tam*(sizeof(int)));
    // Fill the vectors on the host
    for(i=0;i<tam;i++)
    {
        vetorA[i] = i;
        vetorB[i] = 0; //-i;
    }
    // Number of blocks per grid, rounded up to cover all tam elements
    blocksPerGrid=(tam+threadsPerBlock-1)/threadsPerBlock;
    // Copy the vectors' contents to the device
    cudaMemcpy(vetorA_d,vetorA,tam*(sizeof(int)), cudaMemcpyHostToDevice);
    cudaMemcpy(vetorB_d,vetorB,tam*(sizeof(int)), cudaMemcpyHostToDevice);
    // Launch the kernel with blocksPerGrid blocks of threadsPerBlock threads
    soma <<<blocksPerGrid,threadsPerBlock>>> (vetorA_d,vetorB_d,vetorC_d,tam);
    // Copy the sum back to the host (a blocking copy, so it also waits for
    // the kernel to finish)
    cudaMemcpy(vetorC,vetorC_d,tam*(sizeof(int)), cudaMemcpyDeviceToHost);
    // Print the result on the host
    for(i=0;i<tam;i++)
    {
        printf("%d ",vetorC[i]);
    }
    printf("\n");
    // Free the host vectors
    cudaFreeHost(vetorA);
    cudaFreeHost(vetorB);
    cudaFreeHost(vetorC);
    // Free the device vectors
    cudaFree(vetorA_d);
    cudaFree(vetorB_d);
    cudaFree(vetorC_d);
}
18,849 | #include "includes.h"
// Per-block sum reduction: each thread accumulates a private partial sum over
// arr with a grid-stride walk, the block combines the partials with a
// shared-memory tree reduction, and thread 0 writes the block's total to
// block_sums[blockIdx.x].
// NOTE(review): smem is fixed at 128 entries and the tree halves blockDim.x,
// so this assumes blockDim.x is a power of two and <= 128 — confirm the
// launch configuration.
__global__ void block_sum_kernel(int *arr, int size, int *block_sums) {
    int num_threads = blockDim.x * gridDim.x;
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    // Each thread finds local sum of its assigned area
    int my_sum = 0;
    __shared__ int smem[128];
    while (tid < size) {
        my_sum += arr[tid];
        tid += num_threads;
    }
    smem[threadIdx.x] = my_sum;
    // Barrier then use parallel reduction to get block sum
    __syncthreads();
    for (int i = blockDim.x / 2; i > 0; i /= 2) {
        if (threadIdx.x < i) {
            int temp = smem[threadIdx.x] + smem[threadIdx.x + i];
            smem[threadIdx.x] = temp;
        }
        __syncthreads();
    }
    // Thread 0 publishes the block total to block_sums
    if (threadIdx.x == 0) {
        block_sums[blockIdx.x] = smem[0];
    }
}
18,850 | #include <iostream>
#include <stdlib.h>
#include <math.h>
#include <algorithm>
#include <stdio.h>
#include <fcntl.h>
#include <time.h>
#define NS_PER_SEC (1000*1000*1000)
using namespace std;
int base[12];
int base7[21];
int base8[24];
int base11[33];
int base12[36];
int base13[39];
int base14[42];
// Returns the CLOCK_MONOTONIC time collapsed into a single nanosecond count,
// suitable for measuring elapsed intervals (not wall-clock dates).
inline unsigned long int monotonicTime(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    unsigned long int ns = ts.tv_sec * NS_PER_SEC;
    return ns + ts.tv_nsec;
}
// Fills the global base* lookup tables with hard-coded tile permutations.
// The small tables (3x4, 3x7, 3x8, 3x13) are literal data; the larger ones
// (3x11, 3x12, 3x14) are composed by concatenating two smaller tiles, with
// the second tile's values offset by the cell count of the first.
void loadData()
{
    //base 3*4
    base[0]=1;
    base[3]=4;
    base[6]=7;
    base[9]=10;
    base[1]=8;
    base[4]=11;
    base[7]=2;
    base[10]=5;
    base[2]=3;
    base[5]=6;
    base[8]=9;
    base[11]=12;
    //base 3*7
    base7[0]=1;
    base7[3]=14;
    base7[6]=17;
    base7[9]=20;
    base7[12]=9;
    base7[15]=4;
    base7[18]=7;
    base7[1]=16;
    base7[4]=19;
    base7[7]=12;
    base7[10]=3;
    base7[13]=6;
    base7[16]=21;
    base7[19]=10;
    base7[2]=13;
    base7[5]=2;
    base7[8]=15;
    base7[11]=18;
    base7[14]=11;
    base7[17]=8;
    base7[20]=5;
    //base 3*8
    base8[0]=1;
    base8[3]=16;
    base8[6]=3;
    base8[9]=22;
    base8[12]=19;
    base8[15]=12;
    base8[18]=7;
    base8[21]=10;
    base8[1]=4;
    base8[4]=21;
    base8[7]=18;
    base8[10]=15;
    base8[13]=6;
    base8[16]=9;
    base8[19]=24;
    base8[22]=13;
    base8[2]=17;
    base8[5]=2;
    base8[8]=5;
    base8[11]=20;
    base8[14]=23;
    base8[17]=14;
    base8[20]=11;
    base8[23]=8;
    //base 3*11 = base (3*4) followed by base7 (3*7) shifted by 3*4 cells
    for(int x = 0; x < 33; x++)
    {
        if(x < 12)
            base11[x] = base[x];
        else
            base11[x] = base7[x-12]+3*4;
    }
    //base 3*12 = base (3*4) followed by base8 (3*8) shifted by 3*4 cells
    for(int x = 0; x < 36; x++)
    {
        if(x<12)
            base12[x] = base[x];
        else
            base12[x] = base8[x-12]+3*4;
    }
    //base 3*13
    base13[0]=1;
    base13[3]=4;
    base13[6]=13;
    base13[9]=16;
    base13[12]=21;
    base13[15]=8;
    base13[18]=23;
    base13[21]=18;
    base13[24]=35;
    base13[27]=38;
    base13[30]=27;
    base13[33]=32;
    base13[36]=29;
    base13[1]=12;
    base13[4]=15;
    base13[7]=6;
    base13[10]=3;
    base13[13]=10;
    base13[16]=17;
    base13[19]=20;
    base13[22]=37;
    base13[25]=24;
    base13[28]=33;
    base13[31]=30;
    base13[34]=39;
    base13[37]=26;
    base13[2]=5;
    base13[5]=2;
    base13[8]=11;
    base13[11]=14;
    base13[14]=7;
    base13[17]=22;
    base13[20]=9;
    base13[23]=34;
    base13[26]=19;
    base13[29]=36;
    base13[32]=25;
    base13[35]=28;
    base13[38]=31;
    //base 3*14 = two copies of base7, the second shifted by 3*7 cells
    for(int x = 0; x < 42; x++)
    {
        if(x < 21)
            base14[x] = base7[x];
        else
            base14[x] = base7[x-21]+3*7;
    }
}
// Number of board columns (per 3-row stripe) covered by 3x4 base tiles.
// Boards smaller than 11 use none; otherwise the count is n minus a fixed
// offset chosen by n mod 4.
int blockOfFour(int n)
{
    if (n < 11)
        return 0;
    // Offsets for n % 4 == 0, 1, 2, 3 respectively.
    static const int offsets[4] = {8, 13, 14, 7};
    return n - offsets[n % 4];
}
// Device-side twin of blockOfFour(): same per-residue offset rule, kept
// separate so the host version stays callable without a GPU.
__device__ int gpuBlockOfFour(int n)
{
    if (n < 11)
        return 0;
    // Offsets for n % 4 == 0, 1, 2, 3 respectively.
    const int offsets[4] = {8, 13, 14, 7};
    return n - offsets[n % 4];
}
// Fills `board` (n x n, indexed board[x + n*y]) by stamping pre-computed base
// tiles across 3-column stripes; each even-numbered thread handles one stripe
// pair (its own stripe and the mirrored one at 3 columns to the right).
// NOTE(review): only the n % 3 == 0 case is implemented — other sizes leave
// `board` untouched; confirm callers restrict n accordingly. The base11 and
// base12 parameters are accepted but never read by this kernel.
__global__ void solveBoard(int* base, int* base7, int* base8, int* base11, int* base12, int* base13, int* base14, int* board, int n)
{
    /*for (int i = 0; i < n; i++)
        for (int j = 0; j < n; j++)
            board[i+j*n] = 0;*/
    switch(n % 3)
    {
    case 0: // for all board size that is divisibe by 3
        // BaseOfFour counts whole 3x4 tiles; blockOfFour counts their columns.
        int BaseOfFour = gpuBlockOfFour(n)/4;
        int blockOfFour = gpuBlockOfFour(n);
        for (int x = 0; x < 3; x++)
        {
            for(int y = 0; y < n; y++)
            {
                if(y < gpuBlockOfFour(n))
                {
                    // Column belongs to a repeated 3x4 tile; temp selects
                    // which repetition.
                    int temp = y/4;
                    //for(int i = 0; i < n; i+= 6) // parrallel here if(threadIdx.x%2 == 0) i = threadIdx*6
                    //{
                    if(threadIdx.x % 2 == 0)
                    {
                        int i = threadIdx.x*3;
                        //int stride = threadIdx.x;
                        board[(x+i)+n*y] = base[x+(y%4)*3]+ temp*12 + 3*n*threadIdx.x;
                        if(x+3+i < n)
                            board[(x+3+i)+n*(n-y-1)] = base[x+(y%4)*3]+ temp*12 + 3*n*(threadIdx.x+1);
                    }
                    //}
                }
                else
                {
                    // Remaining columns use the tail tile picked by n mod 4.
                    //for(int i = 0 ; i < n; i+= 6) // parallel here
                    //{
                    if(threadIdx.x % 2 == 0)
                    {
                        int i = threadIdx.x*3;
                        //int stride = i/3;
                        if(n % 4 == 0)
                        {
                            board[(x+i)+n*y] = base8[x+(y-blockOfFour)*3]+BaseOfFour*12 + 3*n*threadIdx.x;
                            board[(x+3+i)+n*(n-y-1)] = base8[x+(y-blockOfFour)*3]+ BaseOfFour*12 + 3*n*(threadIdx.x+1);
                        }
                        if(n % 4 == 1)
                        {
                            board[(x+i)+n*y] = base13[x+(y-blockOfFour)*3]+ BaseOfFour * 12 + 3*n*threadIdx.x;
                            if(x+3+i < n)
                                board[(x+3+i)+ n*(n-y-1)] = base13[x+(y-blockOfFour)*3]+ BaseOfFour * 12 + 3*n*(threadIdx.x+1);
                        }
                        if(n % 4 == 2)
                        {
                            board[(x+i)+ n*y] = base14[x+(y-blockOfFour)*3]+ BaseOfFour * 12 + 3*n*threadIdx.x;
                            if(x+3+i < n)
                                board[(x+3+i)+n*(n-y-1)] = base14[x+(y-blockOfFour)*3]+ BaseOfFour * 12 + 3*n*(threadIdx.x+1);
                        }
                        if(n % 4 == 3)
                        {
                            board[(x+i)+n*y] = base7[x+(y-blockOfFour)*3]+ BaseOfFour * 12 + 3*n*threadIdx.x;
                            if(x+3+i < n)
                                board[(x+3+i)+n*(n-y-1)] = base7[x+(y-blockOfFour)*3]+ BaseOfFour * 12 + 3*n*(threadIdx.x+1);
                        }
                    }
                }
            }
        }
        break;
    }
    /*for (int x = 0; x < n; x++) {
        for (int y = 0; y < n; y++)
            cout << board[x+n*y]<< "\t";
        cout << endl;
    }*/
}
// Reads the board size, runs the GPU tile solver, prints the elapsed time
// and the resulting board.
int main()
{
    loadData();
    // Board size; initialized so a failed read cannot leave it indeterminate.
    int n = 0;
    cout << "Enter size of board:";
    cin >> n;
    if (n <= 0)
    {
        cout << "Board size must be a positive integer" << endl;
        return 1;
    }
    // Fix: the original used a variable-length stack array (int board[n*n]),
    // which is non-standard C++ and overflows the stack for large n.
    int* board = new int[n * n];
    unsigned long int gpuTime = monotonicTime();
    // Device copies of the lookup tables and the output board
    int* gpuBase;
    int* gpuBase7;
    int* gpuBase8;
    int* gpuBase11;
    int* gpuBase12;
    int* gpuBase13;
    int* gpuBase14;
    int* gpuBoard;
    // Allocate
    cudaMalloc(&gpuBase, 12*sizeof(int));
    cudaMalloc(&gpuBase7, 21*sizeof(int));
    cudaMalloc(&gpuBase8, 24*sizeof(int));
    cudaMalloc(&gpuBase11, 33*sizeof(int));
    cudaMalloc(&gpuBase12, 36*sizeof(int));
    cudaMalloc(&gpuBase13, 39*sizeof(int));
    cudaMalloc(&gpuBase14, 42*sizeof(int));
    cudaMalloc(&gpuBoard, n*n*sizeof(int));
    // Copy the host lookup tables to the device
    cudaMemcpy(gpuBase, base, 12*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(gpuBase7, base7, 21*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(gpuBase8, base8, 24*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(gpuBase11, base11, 33*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(gpuBase12, base12, 36*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(gpuBase13, base13, 39*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(gpuBase14, base14, 42*sizeof(int), cudaMemcpyHostToDevice);
    // One thread per 3-column stripe
    int num_threads = n/3; // we want to handle the last thing in stripe
    // Call kernel
    solveBoard<<<1,num_threads>>>(gpuBase,gpuBase7,gpuBase8,gpuBase11,gpuBase12,gpuBase13,gpuBase14,gpuBoard,n);
    // Copy the result back (the blocking copy also waits for the kernel)
    cudaMemcpy(board, gpuBoard, n*n* sizeof(int) , cudaMemcpyDeviceToHost);
    gpuTime = monotonicTime() - gpuTime;
    // Fix: the message claimed "CPU" although this path times the GPU run.
    fprintf(stderr, "Time to perform operation on GPU = %ld ns\n", gpuTime);
    for (int x = 0; x < n; x++) {
        for (int y = 0; y < n; y++)
            cout << board[x+n*y]<< "\t";
        cout << endl;
    }
    // Fix: release device and host memory (the original leaked everything).
    cudaFree(gpuBase);
    cudaFree(gpuBase7);
    cudaFree(gpuBase8);
    cudaFree(gpuBase11);
    cudaFree(gpuBase12);
    cudaFree(gpuBase13);
    cudaFree(gpuBase14);
    cudaFree(gpuBoard);
    delete[] board;
    return 0;
}
|
18,851 | #include "includes.h"
// Include files
// Parameters
#define N_ATOMS 343
#define MASS_ATOM 1.0f
#define time_step 0.01f
#define L 10.5f
#define T 0.728f
#define NUM_STEPS 10000
const int BLOCK_SIZE = 1024;
//const int L = ;
const int scheme = 1; // 0 for explicit, 1 for implicit
/*************************************************************************************************************/
/************* INITIALIZATION CODE **********/
/*************************************************************************************************************/
// Wraps coordinate r back into the periodic box [-L/2, L/2] by shifting it an
// integer number of box lengths toward the origin.
// NOTE(review): the comparison uses L / 2.0 (double) while the shift uses
// L / 2.0f (float) — mixed precision is presumably unintentional but changing
// it would alter rounding at the box edge; confirm before normalizing.
__device__ float PutInBox(float r){
    if (fabs(r) > L / 2.0)
        r += (2 * (r < 0) - 1)*ceil((fabs(r) - L / 2.0f) / L)*L;
    return r;
}
// One thread updates one scalar coordinate: position advances by
// 0.5*a*dt^2 + v*dt (then is wrapped into the box) and velocity by a*dt.
__global__ void kinematics(float* positions, float* force, float* vel, int len){
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    //if (gid == 0){ printf("You have been trolled! \n"); }
    if (gid < len){
        float moved = positions[gid] + 0.5f * force[gid] / MASS_ATOM * time_step*time_step + vel[gid] * time_step;
        positions[gid] = PutInBox(moved);
        vel[gid] += force[gid] / MASS_ATOM * time_step;
    }
}
18,852 | #include "includes.h"
// 270-degree GLCM accumulation: every pixel outside the last row block is
// paired with its vertical neighbour (nx positions later) and the matching
// co-occurrence bin glcm[max*A[idx] + A[idx+nx]] is incremented atomically.
// Fix: the original scanned all nx-1 row intervals per thread (O(nx) work)
// just to decide whether idx falls in rows [0, nx-1); the row index is now
// computed directly. The accumulated histogram is identical.
// NOTE(review): the row bound uses nx rather than ny, i.e. it assumes a
// square image — confirm against callers.
__global__ void glcm_calculation_270(int *A,int *glcm, const int nx, const int ny,int max){
    int ix = threadIdx.x + blockIdx.x* blockDim.x;
    int iy = threadIdx.y + blockIdx.y* blockDim.y;
    unsigned int idx =iy*nx+ix;
    if (nx > 1) {
        unsigned int row = idx / nx;
        if (row < (unsigned int)(nx - 1)) {
            int k = max*A[idx] + A[idx+nx];
            atomicAdd(&glcm[k], 1);
        }
    }
    __syncthreads();
}
18,853 | #include <iostream>
// Adds a and b into c; the store is repeated 100 times to give the kernel
// enough runtime to be visible in stream-overlap experiments.
__global__ void vectorAdd(int *a, int *b, int *c, int n){
    int idx = blockIdx.x*blockDim.x+threadIdx.x;
    if (idx >= n)
        return;
    for (int rep = 0; rep < 100; ++rep)
        c[idx] = a[idx] + b[idx];
}
// Launches three independent vector additions on separate streams over
// managed memory, then computes host-side prefix sums of each result.
int main(void){
    int * a, * b;
    int * r1, * r2, *r3;
    int * temp;
    const int n = 1<<24;
    const int n_s = 3;
    // One stream per independent addition so the kernels may overlap.
    cudaStream_t streams[n_s];
    for(int i=0;i<n_s;i++)
        cudaStreamCreate(&streams[i]);
    cudaMallocManaged(&a, n*sizeof(int));
    cudaMallocManaged(&b, n*sizeof(int));
    cudaMallocManaged(&r1, n*sizeof(int));
    cudaMallocManaged(&r2, n*sizeof(int));
    cudaMallocManaged(&r3, n*sizeof(int));
    // Fix: the original allocated n*sizeof(int) ints — four times the needed
    // memory; n ints are all the prefix-sum scratch requires.
    temp = new int[n];
    for(int i=0;i<n;i++){
        a[i] = 3;
        b[i] = 5;
    }
    int blockSize = 256;
    int numBlocks = n/256;
    vectorAdd<<<numBlocks,blockSize,0,streams[0]>>>(a,a,r1,n);
    vectorAdd<<<numBlocks, blockSize,0,streams[1]>>>(b,b,r2,n);
    vectorAdd<<<numBlocks, blockSize,0,streams[2]>>>(a,b,r3,n);
    // Wait for all three kernels before the host reads the results.
    cudaDeviceSynchronize();
    // Host-side inclusive prefix sums over each result vector.
    temp[0] = r1[0];
    for(int i=1;i<n;i++)
        temp[i] = temp[i-1] + r1[i];
    temp[0] = r2[0];
    for(int i=1;i<n;i++)
        temp[i] = temp[i-1] + r2[i];
    temp[0] = r3[0];
    for(int i=1;i<n;i++)
        temp[i] = temp[i-1] + r3[i];
    // Fix: release the streams (the original leaked them).
    for(int i=0;i<n_s;i++)
        cudaStreamDestroy(streams[i]);
    cudaFree(a);
    cudaFree(b);
    cudaFree(r1);
    cudaFree(r2);
    cudaFree(r3);
    delete [] temp;
    return 0;
}
|
18,854 | /*
* ARQUITECTURA DE COMPUTADORES
* 2º Grado en Ingenieria Informatica
*
* PRACTICA 2: "Suma De Matrices Paralela"
* >> Arreglar for en __global__
* >> Pasar numElem como argumento
*
* AUTOR: Ivanes
*/
///////////////////////////////////////////////////////////////////////////
// Includes
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
// Defines
#define RAN_MIN 1
#define RAN_MAX 9
// Bloques - Hilos
#define MAX_HILOS 10
#define MAX_BLOQUES 512
// Declaracion de funciones
// Prints the CUDA device capabilities and asks the user for the number of
// vector elements; the answer is validated to lie in (0, MAX_HILOS*MAX_BLOQUES].
int numHilos()
{
    int numHilos;
    // Query the thread limits via the CUDA API
    int dev = 0;
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, dev);
    // deviceProp.maxThreadsPerBlock;
    // deviceProp.maxGridSize[0];
    int maxValores = MAX_HILOS*MAX_BLOQUES;
    //
    printf("\n***********************************************************************\n\n");
    printf("> Nombre Dispositivos: %s\n", deviceProp.name);
    printf("> Capacidad de Computo: %d.%d\n", deviceProp.major, deviceProp.minor);
    printf("> Numero de MultiProcesadores: %d \n", deviceProp.multiProcessorCount);
    // NOTE(review): cores-per-SM is hard-coded to 64 (Pascal) — confirm for
    // other architectures.
    printf("> Numero de Nucleos (Arq. PASCAL): %d \n", 64);
    printf("> Maximo de hilos por eje en bloque\n");
    printf(" \t[x -> %d]\n \t[y -> %d]\n \t[z -> %d]\n",deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]);
    printf("> Maximo de bloques por eje\n");
    printf(" \t[x -> %d]\n \t[y -> %d]\n \t[z -> %d]\n",deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]);
    printf("\n***********************************************************************\n");
    printf("\nEl numero maximo de elementos del array es: %d valores\n", maxValores);
    // Keep asking until a count in (0, maxValores] is entered
    do {
        printf("\n\nCuantos elementos quieres que tenga los vectores: ");
        scanf("%d", &numHilos);
        getchar();
    } while ((numHilos > maxValores) || (numHilos <= 0));
    return numHilos;
}
// Builds the element-reversed copy of dev_matriz and the element-wise sum of
// the matrix with its reverse.
// Fix: the launch rounds the block count up, so threads with id >= numElem
// exist but were unguarded — they read dev_matriz with a negative index and
// wrote out of bounds. They now exit early.
__global__
void reverseMatriz(int *dev_matriz, int *dev_matriz_reverse, int *dev_matriz_resultado, int numElem)
{
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    if (id >= numElem)
        return;
    // Mirror element
    dev_matriz_reverse[id] = dev_matriz[numElem - 1 - id];
    // Sum of the matrix and its mirror
    dev_matriz_resultado[id] = dev_matriz[id] + dev_matriz_reverse[id];
}
// MAIN: Rutina principal ejecutada en el host
int main(int argc, char** argv)
{
// Declaracion
int *hst_matriz;
int *hst_matriz_reverse;
int *hst_matriz_resultado;
int *dev_matriz;
int *dev_matriz_reverse;
int *dev_matriz_resultado;
// Saca numero de hilos y pregunta cuantos elementos quiere en el array. Pone el número de bloques a usar, 1 en este caso
int numElem = numHilos();
int numBlock = numElem/MAX_HILOS;
// Obtencion del numero del bloques
if(numElem%MAX_HILOS != 0)
numBlock++;
printf("Lanzamos %d valores en %d bloques de %d hilos", numElem, numBlock, MAX_HILOS);
// Reserva en el host
hst_matriz = (int*)malloc(numElem * sizeof(int));
hst_matriz_reverse = (int*)malloc(numElem * sizeof(int));
hst_matriz_resultado = (int*)malloc(numElem * sizeof(int));
// Reserva en el device
cudaMalloc( &dev_matriz, numElem * sizeof(int));
cudaMalloc( &dev_matriz_reverse, numElem * sizeof(int));
cudaMalloc( &dev_matriz_resultado, numElem * sizeof(int));
// Insertamos valores random en la matriz
srand((int)time(NULL));
for (int i = 0; i < numElem; i++)
{
hst_matriz[i] = RAN_MIN + rand() % RAN_MAX;
}
// Pasamos el array al device y le damos la vuelta
cudaMemcpy(dev_matriz, hst_matriz, numElem * sizeof(int), cudaMemcpyHostToDevice);
reverseMatriz <<< numBlock, MAX_HILOS>>>(dev_matriz, dev_matriz_reverse, dev_matriz_resultado, numElem);
// Check de errores
cudaDeviceSynchronize();
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess)
{
fprintf(stderr, "ERROR: %s\n", cudaGetErrorString(error));
exit(-1);
}
// Pasamos el array inverso a la cpu
cudaMemcpy(hst_matriz_reverse, dev_matriz_reverse, numElem * sizeof(int), cudaMemcpyDeviceToHost);
// Pasamos el resultado a la cpu
cudaMemcpy(hst_matriz_resultado, dev_matriz_resultado, numElem * sizeof(int), cudaMemcpyDeviceToHost);
// Muestra contenido de arrays y resultado
printf("\n\nMatriz: \n");
for (int i = 0; i < numElem; i++)
printf("%d ", hst_matriz[i]);
printf("\n\nMatriz Inversa: \n");
for (int i = 0; i < numElem; i++)
printf("%d ", hst_matriz_reverse[i]);
printf("\n\nMatriz Resultado: \n");
for (int i = 0; i < numElem; i++)
printf("%d ", hst_matriz_resultado[i]);
free(hst_matriz);
free(hst_matriz_reverse);
free(hst_matriz_resultado);
cudaFree(dev_matriz);
cudaFree(dev_matriz_reverse);
cudaFree(dev_matriz_resultado);
// salida
time_t fecha;
time(&fecha);
printf("\n\n***************************************************\n");
printf("Programa ejecutado el: %s\n", ctime(&fecha));
printf("<pulsa [INTRO] para finalizar>");
getchar();
return 0;
}
|
18,855 | /*
This is the function you need to implement. Quick reference:
- input rows: 0 <= y < ny
- input columns: 0 <= x < nx
- element at row y and column x is stored in data[x + y*nx]
- correlation between rows i and row j has to be stored in result[i + j*ny]
- only parts with 0 <= j <= i < ny need to be filled
*/
//#include <cstdlib>
#include <iostream>
#include <cuda_runtime.h>
#include <math.h>
using namespace std;
// One thread computes the Pearson correlation between input rows i and j;
// only the lower triangle (j <= i, both < ny) is produced, other threads
// exit immediately. The result lands in result[i + j*ny].
__global__ void mykernel(float* result, const float* data, int nx, int ny) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int j = threadIdx.y + blockIdx.y * blockDim.y;
    if (i >= ny || j >= ny || j > i)
        return;
    float size = nx;
    // Accumulate the five sufficient statistics in double precision.
    double sumI = 0;
    double sumJ = 0;
    double sumJI = 0;
    double squareSumI = 0;
    double squareSumJ = 0;
    for (int x = 0; x < nx; x++){
        double vi = (double)data[x+i*nx];
        double vj = (double)data[x+j*nx];
        sumI += vi;
        sumJ += vj;
        sumJI += vi * vj;
        squareSumI += vi * vi;
        squareSumJ += vj * vj;
    }
    double denom = (double)sqrt((size * squareSumJ - sumJ * sumJ) * (size * squareSumI - sumI * sumI));
    double corr = (double)(size * sumJI - sumJ * sumI) / denom;
    result[i + j*ny] = (float)corr;
}
// Aborts with a readable message when a CUDA runtime call fails; `context`
// is the stringified call site supplied by the CHECK macro.
static inline void check(cudaError_t err, const char* context) {
    if (err == cudaSuccess)
        return;
    std::cerr << "CUDA error: " << context << ": "
              << cudaGetErrorString(err) << std::endl;
    std::exit(EXIT_FAILURE);
}
#define CHECK(x) check(x, #x)
// Integer ceiling division for non-negative a: smallest k with k*b >= a.
static inline int divup(int a, int b) {
    int biased = a + b - 1;
    return biased / b;
}
// Rounds a up to the next multiple of b.
static inline int roundup(int a, int b) {
    int blocks = divup(a, b);
    return blocks * b;
}
// Computes all pairwise row correlations of `data` (ny rows x nx columns) on
// the GPU, filling the lower triangle result[i + j*ny] for 0 <= j <= i < ny.
void correlate(int ny, int nx, const float *data, float *result) {
    // Device copy of the input matrix
    float* dGPU = NULL;
    CHECK(cudaMalloc((void**)&dGPU, ny * nx * sizeof(float)));
    // Device result matrix, zeroed so the untouched upper triangle reads as 0
    float* rGPU = NULL;
    CHECK(cudaMalloc((void**)&rGPU, ny * ny * sizeof(float)));
    CHECK(cudaMemset(rGPU, 0, ny * ny * sizeof(float)));
    CHECK(cudaMemcpy(dGPU, data, ny * nx * sizeof(float), cudaMemcpyHostToDevice));
    // Run kernel: one thread per (i, j) pair, in 16x16 blocks covering ny x ny
    dim3 dimBlock(16, 16);
    dim3 dimGrid(divup(ny, dimBlock.x), divup(ny, dimBlock.y));
    mykernel<<<dimGrid, dimBlock>>>(rGPU, dGPU, nx, ny);
    CHECK(cudaGetLastError());
    // Copy data back to CPU & release memory
    CHECK(cudaMemcpy(result, rGPU, ny * ny * sizeof(float), cudaMemcpyDeviceToHost));
    CHECK(cudaFree(dGPU));
    CHECK(cudaFree(rGPU));
}
|
18,856 | #include "stdio.h"
#include <stdlib.h>
#define N 10
// Serial vector addition inside a kernel: one thread walks all N elements.
// (Deliberately sequential — the host launches it with <<<1, 1>>>.)
__global__
void add( int *a, int *b, int *c ) {
    for (int tid = 0; tid < N; ++tid) {
        c[tid] = a[tid] + b[tid];
    }
}
// Host driver: fills two N-element vectors with an easily checked pattern
// (a[i] = -i, b[i] = i*i), adds them with a single-thread kernel launch, and
// prints every resulting triple.
int main( void ) {
    const size_t bytes = N * sizeof(int);
    int* h_a = (int*)malloc(bytes);
    int* h_b = (int*)malloc(bytes);
    int* h_c = (int*)malloc(bytes);
    for (int i = 0; i < N; ++i) {
        h_a[i] = -i;
        h_b[i] = i * i;
    }
    int* d_a;
    int* d_b;
    int* d_c;
    cudaMalloc(&d_a, bytes);
    cudaMalloc(&d_b, bytes);
    cudaMalloc(&d_c, bytes);
    cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);
    // The kernel iterates over all N elements itself, so one thread suffices.
    add <<<1, 1>>>(d_a, d_b, d_c);
    cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; ++i) {
        printf( "%d + %d = %d\n", h_a[i], h_b[i], h_c[i] );
    }
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(h_a);
    free(h_b);
    free(h_c);
    return 0;
}
|
18,857 | extern "C"
// Dropout backward pass: result = chain * mask, element-wise. (The mask
// presumably holds the keep pattern saved from the forward pass.)
__global__ void backwardDropoutKernel (int numberEntries, float* chain, float* mask, float* result)
{
    int globalId = blockIdx.x * blockDim.x + threadIdx.x;
    if (globalId >= numberEntries)
        return;
    result[globalId] = chain[globalId] * mask[globalId];
}
18,858 | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>
#include <time.h>
#include<sys/time.h>
//don't forget the time
// Wall-clock time in seconds (microsecond resolution), via gettimeofday.
double cpuSecond() {
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec + (double)now.tv_usec * 1.e-6;
}
// Warm-up kernel: same memory traffic as the math kernels but no branching;
// launched once so later timings exclude one-time startup costs.
// NOTE(review): none of these kernels bounds-check tid, so the grid must
// exactly cover the allocation — true for the default size=64/blocksize=64
// in main(); confirm for other argv configurations.
__global__ void warmup(float *c){// Branch Efficiency = 100.00%
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    float a, b;
    a = b = 0.0f;
    c[tid] = a + b;
}
// Divergent variant: adjacent threads take opposite branches (tid parity),
// so every warp executes both paths under masking.
__global__ void mathKernel1(float *c){// Branch Efficiency = 83.3%
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    float a, b;
    a = b = 0.0f;
    if (tid % 2 == 0) {
        a = 100.0f;
    } else {
        b = 200.0f;
    }
    c[tid] = a + b;
}
// Warp-aligned variant: the condition (tid / warpSize parity) is uniform
// within each warp, so no intra-warp divergence occurs.
__global__ void mathKernel2(float *c){// Branch Efficiency = 100.00%
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    float a, b;
    a = b = 0.0f;
    if ((tid / warpSize) % 2 == 0) {
        a = 100.0f;
    } else {
        b = 200.0f;
    }
    c[tid] = a + b;
}
// Predicate variant: two separate ifs on a precomputed bool, showing how
// compiler predication changes the reported branch efficiency.
__global__ void mathKernel3(float *c) {// Branch Efficiency = 71.43%
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    float ia, ib;
    ia = ib = 0.0f;
    bool ipred = (tid % 2 == 0);
    if (ipred) {
        ia = 100.0f;
    }
    if (!ipred) {
        ib = 200.0f;
    }
    c[tid] = ia + ib;
}
// NOTE(review): byte-for-byte identical to mathKernel3 — the intended fourth
// variant (e.g. a branchless form) appears to be missing; confirm.
__global__ void mathKernel4(float *c) {// Branch Efficiency = 71.43%
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    float ia, ib;
    ia = ib = 0.0f;
    bool ipred = (tid % 2 == 0);
    if (ipred) {
        ia = 100.0f;
    }
    if (!ipred) {
        ib = 200.0f;
    }
    c[tid] = ia + ib;
}
// Benchmarks the branch-divergence demo kernels: optional argv[1]/argv[2]
// override the block size and data size, then each kernel is launched once
// and its wall-clock elapsed time printed.
int main(int argc, char **argv){
    int dev = 0;
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp,dev);
    printf("%s using Device %d: %s\n", argv[0],dev, deviceProp.name);
    double iStart,iElaps;
    // set up data size;
    int size = 64;
    int blocksize =64;
    if(argc > 1) blocksize = atoi(argv[1]);
    if(argc > 2) size = atoi(argv[2]);
    size_t nBytes = size*sizeof(float);
    // set up execution configuration (1-D grid, rounded up to cover size)
    dim3 block(blocksize,1);
    dim3 grid((size+block.x-1)/block.x,1);
    printf("Execution Configure (block %d grid %d)\n",block.x, grid.x);
    // allocate gpu memory
    float *d_C;
    cudaMalloc((float**)&d_C,nBytes);
    // run a warmup kernel to remove one-time startup overhead
    cudaDeviceSynchronize();
    iStart = cpuSecond();
    warmup<<<grid,block>>>(d_C);
    cudaDeviceSynchronize();
    iElaps = cpuSecond() - iStart;
    printf("warmup <<< %4d %4d >>> elapsed %lf sec \n",grid.x,block.x, iElaps);
    // run kernel 1
    cudaDeviceSynchronize();
    iStart = cpuSecond();
    mathKernel1<<<grid, block>>>(d_C);
    cudaDeviceSynchronize();
    iElaps = cpuSecond() - iStart;
    printf("mathKernel1 <<< %4d %4d >>> elapsed %lf sec \n",grid.x,block.x,iElaps );
    // run kernel 2
    iStart = cpuSecond();
    mathKernel2<<<grid, block>>>(d_C);
    cudaDeviceSynchronize();
    iElaps = cpuSecond () - iStart;
    printf("mathKernel2 <<< %4d %4d >>> elapsed %lf sec \n",grid.x,block.x,iElaps );
    // run kernel 3
    iStart = cpuSecond ();
    mathKernel3<<<grid, block>>>(d_C);
    cudaDeviceSynchronize();
    iElaps = cpuSecond () - iStart;
    printf("mathKernel3 <<< %4d %4d >>> elapsed %lf sec \n",grid.x,block.x,iElaps);
    // run kernel 4
    iStart = cpuSecond ();
    mathKernel4<<<grid, block>>>(d_C);
    cudaDeviceSynchronize();
    iElaps = cpuSecond () - iStart;
    printf("mathKernel4 <<< %4d %4d >>> elapsed %lf sec \n",grid.x,block.x,iElaps);
    // free gpu memory and reset device
    cudaFree(d_C);
    cudaDeviceReset();
    return EXIT_SUCCESS;
}
|
18,859 | #include<iostream>
using namespace std;
int n = 100;
// True for any non-zero n (callable from both host and device).
__host__ __device__ bool read(int n) {
    return !(n == 0);
}
// True only when n is exactly zero (callable from both host and device).
__host__ __device__ bool read0(int n) {
    return 0 == n;
}
// Prints one line per thread: "true" when n == 0, "false" when n != 0.
// NOTE(review): the labels look inverted relative to the predicate names
// (read0 succeeding prints "true") — presumably intentional demo output;
// confirm before reusing.
__global__ void test(int n) {
    if(read0(n)) {
        printf("true\n");
    }
    else if(read(n)){
        printf("false\n");
    }
}
// Launches the demo kernel with an 8x2 thread block on the global n, then
// waits so the device printf output is flushed before exit.
int main() {
    dim3 blockShape(8,2);
    test<<<1,blockShape>>>(n);
    cudaDeviceSynchronize();
}
|
18,860 | #include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#include <stdio.h>
#include <ctime>
// Seeds one curand state per element. The seed comes from clock(), so runs
// are not reproducible across invocations.
// NOTE(review): the `offset` parameter is never used — confirm whether it was
// meant to be forwarded as curand_init's offset argument.
__global__ void
setup_random_kernel(curandState *state, int length, int offset) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < length) {
        curand_init((unsigned long long) clock(), idx, 0, &state[idx]);
    }
}
// Draws one uniform double per element into out_array.
// NOTE(review): the advanced localState is NOT written back to state[idx],
// so a second call would reproduce the same numbers — confirm intent.
__global__ void
get_random_array(curandState *state, int length, double *out_array) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < length) {
        curandState localState = state[idx];
        out_array[idx] = curand_uniform_double(&localState);
    }
}
/*
 * PHASE ONE
 */
// Chooses a random block per element and stores its starting offset:
// shift = block_i + (random block in [0, block_b)) * block_n.
__global__ void
phase_one_shift(double *block_b, double *block_i, double *block_n, int *shift, curandState *state, int length) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // Compute block and compute its offset
    if (idx < length) {
        curandState localState = state[idx];
        shift[idx] = (int) __double2int_rn(
                block_i[idx] + __double2int_rd(curand_uniform(&localState) * block_b[idx]) * block_n[idx]);
        state[idx] = localState;
    }
}
// Picks the first node i uniformly inside the block chosen by phase_one_shift.
__global__ void
phase_one_i(int *i, double *block_b, double *block_i, double *block_n, int *shift, curandState *state, int length) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // Choose first node
    if (idx < length) {
        curandState localState = state[idx];
        i[idx] = (int) __double2int_rn(__double2int_rd(curand_uniform(&localState) * block_n[idx]) + shift[idx]);
        state[idx] = localState;
    }
}
// Picks the second node j from the same block without replacement: draws from
// block_n - 1 candidates and skips over i so no self-loop is produced.
__global__ void
phase_one_j(int *i, int *j, double *block_b, double *block_i, double *block_n, int *shift,
            curandState *state, int length) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < length) {
        curandState localState = state[idx];
        // Choose second node
        // "Without replacement"
        j[idx] = (int) __double2int_rn(__double2int_rd(curand_uniform(&localState) * (block_n[idx] - 1)) + shift[idx]);
        // Remove loops
        if (j[idx] >= i[idx]) {
            ++j[idx];
        }
        state[idx] = localState;
    }
}
/*
 * PHASE TWO
 */
// Chooses a random position inside the per-element "fill" region
// [shift_fill, shift_fill + sz_fill).
__global__ void
phase_two_fill(double *phase_two_shift_fill, double *phase_two_sz_fill, double *phase_two_fill,
               curandState *state, int length) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < length) {
        curandState localState = state[idx];
        phase_two_fill[idx] =
                phase_two_shift_fill[idx] + __double2int_rd(curand_uniform(&localState) * phase_two_sz_fill[idx]);
        state[idx] = localState;
    }
}
// Chooses a random position inside the per-element "bulk" region
// [shift_bulk, shift_bulk + sz_bulk).
__global__ void
phase_two_bulk(double *phase_two_shift_bulk, double *phase_two_sz_bulk, double *phase_two_bulk,
               curandState *state, int length) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < length) {
        curandState localState = state[idx];
        phase_two_bulk[idx] =
                phase_two_shift_bulk[idx] + __double2int_rd(curand_uniform(&localState) * phase_two_sz_bulk[idx]);
        state[idx] = localState;
    }
}
// Selects between the fill and bulk candidates: the fill candidate wins with
// probability phase_two_rd_fill[idx].
__global__ void
phase_two_d(double *phase_two_fill, double *phase_two_bulk, int *phase_two, double *phase_two_rd_fill,
            curandState *state, int length) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < length) {
        curandState localState = state[idx];
        if (curand_uniform(&localState) < phase_two_rd_fill[idx]) {
            phase_two[idx] = (int) __double2int_rn(phase_two_fill[idx]);
        } else {
            phase_two[idx] = (int) __double2int_rn(phase_two_bulk[idx]);
        }
        state[idx] = localState;
    }
}
18,861 | /*
Now we make the matrix much bigger
g++ -pg seq_matrix_big_mul.c -o seq_matrix_big_mul
*/
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <time.h>
#define N_THREADS 20
int num_rows_A = 2000; int num_rows_B = 2000; int num_rows_C = 2000;
int num_cols_A = 2000; int num_cols_B = 600; int num_cols_C = 600;
//int num_rows_A = 64; int num_rows_B = 64; int num_rows_C = 64;
//int num_cols_A = 64; int num_cols_B = 64; int num_cols_C = 64;
// I'm forcing a malloc because I want to add the malloc time on the game
float *A = (float*) malloc(sizeof(float) * num_rows_A * num_cols_A);
float *B = (float*) malloc(sizeof(float) * num_rows_B * num_cols_B);
float *C = (float*) malloc(sizeof(float) * num_rows_C * num_cols_C);
float *C_ref = (float*) malloc(sizeof(float) * num_rows_C * num_cols_C);
// Tiled matrix multiply C = A * B.
// Launch with blockDim = (N_THREADS, N_THREADS); assumes num_cols_A and
// num_cols_B are multiples of N_THREADS (true for the sizes used in this file).
__global__ void matrix_2d_mul_float_gpu(float *A, float *B, float *C, int num_rows_A, int num_cols_A, int num_cols_B) {
  // Per-block tiles of A and B staged in shared memory.
  __shared__ float A_tile[N_THREADS][N_THREADS];
  __shared__ float B_tile[N_THREADS][N_THREADS];
  // Block index
  int bx = blockIdx.x; int by = blockIdx.y;
  // Thread index
  int tx = threadIdx.x; int ty = threadIdx.y;
  // Index of the first sub-matrix of A processed by the block
  int aBegin = num_cols_A * N_THREADS * by;
  // Index of the last sub-matrix of A processed by the block
  int aEnd = aBegin + num_cols_A - 1;
  // Index of the first sub-matrix of B processed by the block
  int bBegin = N_THREADS * bx;
  int bStep = N_THREADS * num_cols_B;
  int aStep = N_THREADS;
  float sum = 0;
  for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) {
    A_tile[ty][tx] = A[a + num_cols_A * ty + tx];
    // FIX: load B with tx as the fastest-varying index so adjacent threads in
    // a warp read adjacent global addresses (coalesced). The previous
    // transposed load (B_tile[tx][ty] = B[b + num_cols_B*tx + ty]) produced
    // the exact same tile contents but strided, uncoalesced reads.
    B_tile[ty][tx] = B[b + num_cols_B * ty + tx];
    // Synchronize to make sure the tiles are fully loaded
    __syncthreads();
    for (int k = 0; k < N_THREADS; ++k)
      sum += A_tile[ty][k] * B_tile[k][tx];
    // Wait for all threads to finish using the tiles before overwriting them
    __syncthreads();
  }
  // Each thread writes one element of the C block
  int c = num_cols_B * N_THREADS * by + N_THREADS * bx;
  C[c + num_cols_B * ty + tx] = sum;
}
// CPU reference: C = A * B for flattened row-major matrices.
// A is num_rows_A x num_cols_A, B is num_cols_A x num_cols_B,
// C is num_rows_A x num_cols_B.
void matrix_2d_mul_float(float *A, float *B, float *C, int num_rows_A, int num_cols_A, int num_cols_B) {
  int num_cols_C = num_cols_B;
  // Each (i,k) output cell is independent, so the two outer loops may be
  // collapsed and run in parallel.
  #pragma omp parallel for schedule(dynamic,1) collapse(2)
  // Iterate on each row of A
  for (int i = 0; i < num_rows_A; i++) {
    // Iterate on each column of B
    for (int k = 0; k < num_cols_B; k++) {
      // FIX: 'sum' was a single function-scope variable shared by every
      // OpenMP thread — a data race that corrupts results when compiled with
      // -fopenmp. Declaring it inside the loop body makes it private.
      float sum = 0;
      // Multiply-add the row of A with the column of B
      for (int j = 0; j < num_cols_A; j++) {
        // A[i][j] == A[i*num_cols_A+j], B[j][k] == B[j*num_cols_B+k]
        sum += A[i*num_cols_A+j] * B[j*num_cols_B+k];
      }
      // C[i][k] == C[i*num_cols_C+k]
      C[i*num_cols_C+k] = sum;
    }
  }
}
// Fill vec[0..sizeVec) with pseudo-random integers in [minValue, maxValue],
// stored as floats. Re-seeds rand() from the wall clock on every call.
// FIX: the old expression 'rand() % maxValue + minValue' only produced the
// intended inclusive range when minValue == 1 (the only way it is called in
// this file); for any other minValue it drifted to
// [minValue, maxValue + minValue - 1]. The corrected formula is identical to
// the old behavior for minValue == 1 and correct for any minValue <= maxValue.
void fillRand(float *vec, int minValue, int maxValue, int sizeVec) {
  srand(time(NULL));
  for (int idx = 0; idx < sizeVec; idx++) {
    vec[idx] = rand() % (maxValue - minValue + 1) + minValue;
  }
}
// Benchmark driver: fills A and B with random values, then runs the tiled
// GPU multiply ten times, copying inputs down and the result back each pass.
int main() {
  // Sizes in bytes for the host/device matrices.
  int numBytesA = sizeof(float) * num_rows_A * num_cols_A;
  int numBytesB = sizeof(float) * num_rows_B * num_cols_B;
  // FIX: size the C buffer from the C dimensions; it only equaled numBytesB
  // by coincidence for the current matrix shapes.
  int numBytesC = sizeof(float) * num_rows_C * num_cols_C;
  printf("Size in bytes A: %d\n", numBytesA);
  printf("Size in bytes B: %d\n", numBytesB);
  // Fill input matrices with random values; zero the result.
  fillRand(A, 1, 100, num_rows_A * num_cols_A);
  fillRand(B, 1, 100, num_rows_B * num_cols_B);
  memset(C, 0, numBytesC);
  // Allocate memory on GPU (FIX: canonical (void **) cast instead of (char**)).
  float *device_A_mat; float *device_B_mat; float *device_C_mat;
  cudaMalloc((void **)&device_A_mat, numBytesA);
  cudaMalloc((void **)&device_B_mat, numBytesB);
  cudaMalloc((void **)&device_C_mat, numBytesC);
  // Launch configuration: one thread per output element, ceil-divided grid.
  dim3 dimBlock(N_THREADS, N_THREADS);
  dim3 dimGrid((num_cols_B + dimBlock.x - 1) / dimBlock.x, (num_rows_A + dimBlock.y - 1) / dimBlock.y);
  for (int idxLoop = 0; idxLoop < 10; idxLoop++) {
    // Copy matrices A and B to the GPU.
    cudaMemcpy(device_A_mat, A, numBytesA, cudaMemcpyHostToDevice);
    cudaMemcpy(device_B_mat, B, numBytesB, cudaMemcpyHostToDevice);
    // Launch the kernel.
    matrix_2d_mul_float_gpu<<<dimGrid, dimBlock>>>(device_A_mat, device_B_mat, device_C_mat, num_rows_A, num_cols_A, num_cols_B);
    // FIX: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is
    // the supported replacement and also surfaces kernel execution errors.
    cudaError_t err = cudaDeviceSynchronize();
    if (err != cudaSuccess)
      printf("Kernel error: %s\n", cudaGetErrorString(err));
    // Get the result from the GPU back to the CPU.
    cudaMemcpy(C, device_C_mat, numBytesC, cudaMemcpyDeviceToHost);
    printf("Matrix multiplication done %d\n", idxLoop);
  }
  // Free host memory.
  free(A); free(B); free(C); free(C_ref);
  // Release device memory.
  cudaFree(device_A_mat);
  cudaFree(device_B_mat);
  cudaFree(device_C_mat);
  return 0;
}
|
18,862 | #include "includes.h"
// Memory-test pattern writer: each block fills its BLOCKSIZE chunk with a
// p1,p1,~p1,~p1 word pattern (period 4, repeated four times per 16-word
// group); the walking bit p1 rotates left by one after each group and wraps
// back to 1 when it shifts out. Blocks whose chunk starts at or past
// end_ptr do nothing.
__global__ void kernel_test5_init(char* _ptr, char* end_ptr)
{
    unsigned int* ptr = (unsigned int*) (_ptr + blockIdx.x*BLOCKSIZE);
    if (ptr >= (unsigned int*) end_ptr) {
        return;
    }
    unsigned int p1 = 1;
    for (unsigned int i = 0; i < BLOCKSIZE/sizeof(unsigned int); i += 16) {
        const unsigned int p2 = ~p1;
        // Words j = 0..15 alternate in pairs: p1, p1, p2, p2, p1, p1, ...
        for (unsigned int j = 0; j < 16; ++j) {
            ptr[i + j] = ((j >> 1) & 1u) ? p2 : p1;
        }
        // Advance the walking bit, wrapping after it shifts out of the word.
        p1 <<= 1;
        if (p1 == 0) {
            p1 = 1;
        }
    }
    return;
}
18,863 | #include <iostream>
#include <math.h>
#include <unistd.h>
//#include <memory>
#include <algorithm>
#include <vector>
const std::size_t N = 1 << 20;
__device__ // device-only helper: callable from kernels, not from the host
// Add 1.0f to every element of c[0..n) with a grid-stride traversal, so any
// launch geometry covers the whole array.
void vec_inc(float* const c, const std::size_t n)
{
    const std::size_t stride = blockDim.x * gridDim.x;
    std::size_t i = threadIdx.x + (blockIdx.x * blockDim.x);
    while (i < n)
    {
        c[i] += 1.0f;
        i += stride;
    }
}
__host__ // host-only: cannot be launched as, nor called from, a kernel
// Sequentially add 1.0f to every element of c[0..n).
void vec_inc2(float* const c, const std::size_t n)
{
    for (float* p = c; p != c + n; ++p)
    {
        *p += 1.0f;
    }
}
__global__
// Kernel: c[i] = a[i] + b[i] over n floats (grid-stride), then each result is
// bumped by one via the vec_inc device helper.
void vec_add(float* const c, const float* const a, const float* const b, const std::size_t n)
{
    const std::size_t stride = blockDim.x * gridDim.x;
    for (std::size_t i = threadIdx.x + (blockIdx.x * blockDim.x); i < n; i += stride)
    {
        c[i] = a[i] + b[i];
    }
    vec_inc(c, n);
}
// Demo driver: computes c = a + b + 2 over N floats (vec_add adds one extra
// on the device, vec_inc2 adds one more on the host) using async copies on an
// explicit stream, then counts mismatches.
// NOTE: every failure path prints and returns -__LINE__, so the exit code
// identifies the failing call; restructuring this function changes those codes.
int main(void)
{
    // grids and blocks are topologically laid out similar to the problem
    // for 1D arrays, a grid size of (1,1,1) = 1D, and a block size of (N,1,1) = N, would be enough to fully cover the array
    const dim3 grid_size(1, 1, 1);
    const dim3 block_size(1024, 1, 1);
    std::vector<float> h_a(N);
    std::vector<float> h_b(N);
    std::vector<float> h_c(N);
    std::size_t error_count = 0;
    float* d_a = NULL;
    float* d_b = NULL;
    float* d_c = NULL;
    cudaStream_t stream;
    std::fill(h_a.begin(), h_a.end(), 1.0);
    std::fill(h_b.begin(), h_b.end(), 2.0);
    if (cudaSuccess != cudaStreamCreate(&stream))
    {
        std::cout << __LINE__ << std::endl;
        return -__LINE__;
    }
    if (cudaSuccess != cudaMalloc(&d_a, N * sizeof(h_a[0])))
    {
        std::cout << __LINE__ << std::endl;
        return -__LINE__;
    }
    if (cudaSuccess != cudaMalloc(&d_b, N * sizeof(h_b[0])))
    {
        std::cout << __LINE__ << std::endl;
        return -__LINE__;
    }
    if (cudaSuccess != cudaMalloc(&d_c, N * sizeof(h_c[0])))
    {
        std::cout << __LINE__ << std::endl;
        return -__LINE__;
    }
    // Async copies and the kernel below are ordered on the same stream.
    if (cudaSuccess != cudaMemcpyAsync(d_a, h_a.data(), N * sizeof(h_a[0]), cudaMemcpyHostToDevice, stream))
    {
        std::cout << __LINE__ << std::endl;
        return -__LINE__;
    }
    if (cudaSuccess != cudaMemcpyAsync(d_b, h_b.data(), N * sizeof(h_b[0]), cudaMemcpyHostToDevice, stream))
    {
        std::cout << __LINE__ << std::endl;
        return -__LINE__;
    }
    vec_add<<<grid_size, block_size, 0, stream>>>(d_c, d_a, d_b, N);
    // this is slower to execute since we can just +1 in the prior kernel
    // however we want it in a separate function to keep code clean
    // so its not able to be launched like a kernel regularly can
    //vec_inc<<<grid_size, block_size, 0, stream>>>(d_c, N);
    if (cudaSuccess != cudaMemcpyAsync(h_c.data(), d_c, N * sizeof(h_c[0]), cudaMemcpyDeviceToHost, stream))
    {
        std::cout << __LINE__ << std::endl;
        return -__LINE__;
    }
    // Block until the device-to-host copy has landed before reading h_c.
    if (cudaSuccess != cudaStreamSynchronize(stream))
    {
        std::cout << __LINE__ << std::endl;
        return -__LINE__;
    }
    // now add 1 to everything again, once h_c has been received from the GPU via d_c
    vec_inc2(h_c.data(), h_c.size());
    // Expected per element: a + b + 1 (from vec_add's vec_inc) + 1 (vec_inc2).
    for (std::size_t i = 0; i < N; ++i)
    {
        if (h_a[i] + h_b[i] + 2 != h_c[i])
        {
            //std::cout << i << " " << h_c[i] << std::endl;
            ++ error_count;
        }
    }
    std::cout << error_count << " " << 100.0 * (static_cast<double>(error_count) / N) << "% mismatched" << std::endl << std::flush;
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cudaStreamDestroy(stream);
    return 0;
}
|
18,864 | #include <stdio.h>
#include <chrono>
__global__
// One thread computes one cell of C = A * B for square N x N row-major
// matrices. Supports 2D grids/blocks: the (x, y) thread coordinates are
// flattened, then decomposed into the output row and column.
void multiplyCell(int N, int * a, int * b, int * c){
    // Flatten the 2D thread coordinates into a single element index.
    unsigned int threadx = blockDim.x * blockIdx.x + threadIdx.x;
    unsigned int thready = threadIdx.y + blockIdx.y * blockDim.y;
    unsigned int threadxy = thready * N + threadx;
    // Recover the output cell this thread owns.
    int row = threadxy / N;
    int col = threadxy % N;
    if(row < N && col < N){
        // Dot product of row 'row' of A with column 'col' of B.
        int result = 0;
        for(int i = 0; i < N; i++){
            result += a[row*N+i] * b[i*N+col];
        }
        // FIX: the result was previously stored at c[threadx], which is only
        // correct for a 1-row launch and made threads with equal x overwrite
        // each other on 2D grids. The cell belongs at its (row, col) slot.
        c[row*N+col] = result;
    }
}
// Runs multiplyCell once per launch configuration in 'runs' and reports each
// kernel's execution time. Each run[i] holds
// {gridX, gridY, gridZ, blockX, blockY, blockZ}. The result of the LAST run
// is copied back into c.
void GPUTimedMatrixMultiplication(int N,int * a,int * b, int * c,
    int ** runs, int runsLength){
    // Allocate device buffers.
    int *d_a,*d_b,*d_c;
    int size = N*N*sizeof(int);
    cudaMalloc(&d_a,size);
    cudaMalloc(&d_b,size);
    cudaMalloc(&d_c,size);
    // Transfer inputs to the device once; they are reused by every run.
    cudaMemcpy(d_a,a,size,cudaMemcpyHostToDevice);
    cudaMemcpy(d_b,b,size,cudaMemcpyHostToDevice);
    for(int i=0;i<runsLength;i++){
        int * run = runs[i];
        dim3 blocksPerGrid(run[0],run[1],run[2]);
        dim3 threadsPerBlock(run[3],run[4],run[5]);
        auto start = std::chrono::high_resolution_clock::now();
        multiplyCell<<<blocksPerGrid,threadsPerBlock>>>(N,d_a,d_b,d_c);
        // FIX: kernel launches are asynchronous — without synchronizing, the
        // timer only measured launch overhead, not the kernel itself.
        cudaDeviceSynchronize();
        auto end = std::chrono::high_resolution_clock::now();
        std::chrono::duration<float, std::milli> duration_ms = end - start;
        // FIX: the format string had a stray newline inside "ms", and the
        // "threads"/"blocks" labels were swapped relative to the arguments
        // (run[0..1] is the grid, run[3..4] the block).
        printf("GPU test dimensions threads %d %d blocks %d %d N: %d duration: %f ms\n",
            run[3],run[4],run[0],run[1],N,duration_ms.count());
    }
    // Copy the last run's result back, then release device memory.
    cudaMemcpy(c,d_c,size,cudaMemcpyDeviceToHost);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
}
// Launch multiplyCell once with the geometry described by 'run'
// ({gridX, gridY, gridZ, blockX, blockY, blockZ}) and copy the N x N result
// back into c.
void GPUMatrixMultiplication(int N,int * a,int * b, int * c,
    int * run){
    const int bytes = N*N*sizeof(int);
    // Device buffers for the two inputs and the result.
    int *dev_a, *dev_b, *dev_c;
    cudaMalloc(&dev_a, bytes);
    cudaMalloc(&dev_b, bytes);
    cudaMalloc(&dev_c, bytes);
    cudaMemcpy(dev_a, a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, bytes, cudaMemcpyHostToDevice);
    dim3 grid(run[0], run[1], run[2]);
    dim3 block(run[3], run[4], run[5]);
    multiplyCell<<<grid, block>>>(N, dev_a, dev_b, dev_c);
    // The blocking copy below implicitly waits for the kernel to finish.
    cudaMemcpy(c, dev_c, bytes, cudaMemcpyDeviceToHost);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
}
|
18,865 | extern "C"
// One thread per grounding: decode grounding id 'idx' into per-variable
// values (mixed-radix over d_varDomainSizes, last variable fastest) and
// accumulate each predicate's database index into
// d_dbIndex[idx*totalPreds .. idx*totalPreds + totalPreds).
__global__ void initDbIndexKernel(int totalVars, int totalPreds, int *d_varDomainSizes,
                int *d_predBaseIdx, int *d_predVarMat, int *d_dbIndex,
                long totalGroundings)
{
    long idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx < totalGroundings)
    {
        long baseDbIndex = idx * totalPreds;
        // Start every predicate at its base offset.
        for(int i = 0; i < totalPreds; i++)
            d_dbIndex[baseDbIndex + i] = d_predBaseIdx[i];
        long n = idx;
        for(int i = totalVars-1; i >= 0; i--)
        {
            int domainSize = d_varDomainSizes[i];
            // val = n % domainSize, computed with a single division.
            long temp = n / domainSize;
            int val = n - temp * domainSize;
            n = temp;
            int basePredVarMatIndex = i * totalPreds;
            // Each predicate advances by (coefficient of variable i) * value.
            for(int j = 0; j < totalPreds; j++)
                d_dbIndex[baseDbIndex + j] += d_predVarMat[basePredVarMatIndex + j] * val;
        }
    }
}
// Evaluate one (positive-literal) clause per grounding using the precomputed
// dbIndex table: the grounding's sat flag becomes 1 iff at least one
// literal's interpretation equals its required truth value. Only groundings
// whose d_satArray entry is currently 1 are re-evaluated.
extern "C"
__global__ void evalClauseKernel(int *d_satArray, int **d_interpretation, int *dbIndex,
                int *d_predicates, int *d_valTrue, int totalPreds, long totalGroundings)
{
    long gid = blockIdx.x * blockDim.x + threadIdx.x;
    if(gid >= totalGroundings || d_satArray[gid] != 1)
        return;
    long base = gid * totalPreds;
    int sat = 0;
    for(int p = 0; p < totalPreds; p++)
    {
        int predId = d_predicates[p];
        long interpIdx = dbIndex[base + p];
        if(d_interpretation[predId][interpIdx] == d_valTrue[p])
            sat = 1;
    }
    d_satArray[gid] = sat;
}
// Evaluate one clause per grounding WITHOUT a precomputed index table:
// decode grounding (idx + offset) into variable values stored in d_mem, then
// OR (via max) each literal's truth over the clause, honoring negation.
// Only groundings whose d_satArray entry is 1 are re-evaluated.
extern "C"
__global__ void evalClauseWithoutDbKernel(int totalVars, int totalPreds, int *d_varDomainSizes,
        int *d_predicates, int *d_negated, int *d_predBaseIdx, int *d_valTrue, int *d_predVarMat,
        int *d_satArray, int **d_interpretation, long totalGroundings, long offset, int *d_mem)
{
    long idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx < totalGroundings && d_satArray[idx] == 1)
    {
        // This grounding's scratch slot for decoded variable values.
        int memBase = idx * totalVars;
        long n = idx + offset;
        // Mixed-radix decode of the grounding id (last variable fastest).
        for(int i = totalVars-1; i >= 0; i--)
        {
            int domainSize = d_varDomainSizes[i];
            long temp = n / domainSize;
            int val = n - temp * domainSize;
            n = temp;
            d_mem[memBase + i] = val;
        }
        int sat = 0;
        for(int i = 0; i < totalPreds; i++)
        {
            int predId = d_predicates[i];
            int negated = d_negated[i];
            // DB index = predicate base + sum over vars of coeff * value.
            int dbIndex = d_predBaseIdx[i];
            for(int j = 0; j < totalVars; j++)
                dbIndex += d_mem[memBase + j] * d_predVarMat[j * totalPreds + i];
            if(negated == 0)
                sat = max(sat, d_interpretation[predId][dbIndex] == d_valTrue[i]);
            else
                sat = max(sat, d_interpretation[predId][dbIndex] != d_valTrue[i]);
        }
        d_satArray[idx] = sat;
    }
}
// Evaluate a whole CNF (conjunction of 'totalClauses' disjunctive clauses)
// for each grounding: decode the grounding id into variable values in d_mem,
// then AND (min) over clauses of the OR (max) over each clause's literals,
// honoring negation. Short-circuits once any clause is unsatisfied.
extern "C"
__global__ void evalCNFKernel(int totalVars, int totalClauses, int *totalPredsInClause, int *d_varDomainSizes,
        int **d_predicates, int **d_negated, int **d_predBaseIdx, int **d_valTrue,
        int **d_predVarMat, int *d_satArray, int **d_interpretation, long totalGroundings,
        long offset, int *d_mem)
{
    long idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx < totalGroundings)
    {
        int memBase = idx * totalVars;
        long n = idx + offset;
        // Mixed-radix decode of the grounding id (last variable fastest).
        for(int i = totalVars-1; i >= 0; i--)
        {
            int domainSize = d_varDomainSizes[i];
            long temp = n / domainSize;
            int val = n - temp * domainSize;
            n = temp;
            d_mem[memBase + i] = val;
        }
        int sat = 1;
        for(int c = 0; c < totalClauses; c++) {
            if(sat == 0)
                break;
            int clauseSat = 0;
            int totalPredicates = totalPredsInClause[c];
            for(int i = 0; i < totalPredicates; i++)
            {
                int predId = d_predicates[c][i];
                int negated = d_negated[c][i];
                // DB index = base + sum over vars of coeff * decoded value.
                int dbIndex = d_predBaseIdx[c][i];
                for(int j = 0; j < totalVars; j++)
                    dbIndex += d_mem[memBase + j] * d_predVarMat[c][j * totalPredicates + i];
                if(negated == 0)
                    clauseSat = max(clauseSat, d_interpretation[predId][dbIndex] == d_valTrue[c][i]);
                else
                    clauseSat = max(clauseSat, d_interpretation[predId][dbIndex] != d_valTrue[c][i]);
            }
            sat = min(sat, clauseSat);
        }
        d_satArray[idx] = sat;
    }
}
// Like evalCNFKernel, but evaluates the CNF twice in one pass: once with the
// stored interpretation's old value for ground atom
// (d_predicateId, d_groundId) and once with the proposed new value, writing
// sat(new) - sat(old) (-1, 0, or +1) into d_satArray.
// NOTE(review): groundings where the affected literal occurs before position
// (d_clauseIdx, d_predicateIdx) short-circuit to sat = oldSat = 0 —
// presumably to avoid double counting across kernels; confirm with the
// host-side caller.
extern "C"
__global__ void evalCNFdiffKernel(int totalVars, int totalClauses, int *totalPredsInClause, int *d_varDomainSizes,
        int **d_predicates, int **d_negated, int **d_predBaseIdx, int **d_valTrue,
        int **d_predVarMat, int *d_satArray, int **d_interpretation, long totalGroundings,
        long offset, int *d_mem, int d_predicateId, int d_groundId, int d_oldVal,
        int d_newVal, int d_clauseIdx, int d_predicateIdx)
{
    long idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx < totalGroundings)
    {
        int memBase = idx * totalVars;
        long n = idx + offset;
        // Mixed-radix decode of the grounding id (last variable fastest).
        for(int i = totalVars-1; i >= 0; i--)
        {
            int domainSize = d_varDomainSizes[i];
            long temp = n / domainSize;
            int val = n - temp * domainSize;
            n = temp;
            d_mem[memBase + i] = val;
        }
        int sat = 1;
        int oldSat = 1;
        for(int c = 0; c < totalClauses; c++) {
            if(sat == 0 && oldSat == 0)
                break;
            int clauseSat = 0;
            int oldClauseSat = 0;
            int totalPredicates = totalPredsInClause[c];
            for(int i = 0; i < totalPredicates; i++)
            {
                int predId = d_predicates[c][i];
                int negated = d_negated[c][i];
                int dbIndex = d_predBaseIdx[c][i];
                for(int j = 0; j < totalVars; j++)
                    dbIndex += d_mem[memBase + j] * d_predVarMat[c][j * totalPredicates + i];
                // Is this literal the ground atom being flipped?
                if(predId == d_predicateId && dbIndex == d_groundId) {
                    if((c < d_clauseIdx) || (c == d_clauseIdx && i < d_predicateIdx)) {
                        sat = 0; oldSat = 0;
                        break;
                    } else {
                        // Evaluate with the old and the proposed new value.
                        if(negated == 0) {
                            oldClauseSat = max(oldClauseSat, d_oldVal == d_valTrue[c][i]);
                            clauseSat = max(clauseSat, d_newVal == d_valTrue[c][i]);
                        } else {
                            oldClauseSat = max(oldClauseSat, d_oldVal != d_valTrue[c][i]);
                            clauseSat = max(clauseSat, d_newVal != d_valTrue[c][i]);
                        }
                    }
                } else {
                    // Unaffected literal: old and new evaluations agree.
                    int valMatched = d_interpretation[predId][dbIndex] == d_valTrue[c][i];
                    if(negated == 0) {
                        oldClauseSat = max(oldClauseSat, valMatched);
                        clauseSat = max(clauseSat, valMatched);
                    } else {
                        oldClauseSat = max(oldClauseSat, !valMatched);
                        clauseSat = max(clauseSat, !valMatched);
                    }
                }
            }
            sat = min(sat, clauseSat);
            oldSat = min(oldSat, oldClauseSat);
        }
        d_satArray[idx] = sat - oldSat;
    }
}
/*extern "C"
__global__ void evalClauseWithoutDbKernel(int totalVars, int totalPreds, int *d_varDomainSizes,
int *d_predicates, int *d_predBaseIdx, int *d_valTrue, int *d_predVarMat,
int *d_satArray, int **d_interpretation, long totalGroundings)
{
int dbIndex[5];
long idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < totalGroundings && d_satArray[idx] == 1)
{
//long baseDbIndex = idx * totalPreds;
for(int i = 0; i < totalPreds; i++)
dbIndex[i] = d_predBaseIdx[i];
//d_dbIndex[baseDbIndex + i] = d_predBaseIdx[i];
long n = idx;
for(int i = totalVars-1; i >= 0; i--)
{
int domainSize = d_varDomainSizes[i];
long temp = n / domainSize;
int val = n - temp * domainSize;
n = temp;
int basePredVarMatIndex = i * totalPreds;
for(int j = 0; j < totalPreds; j++)
dbIndex[j] += d_predVarMat[basePredVarMatIndex + j] * val;
//d_dbIndex[baseDbIndex + j] += d_predVarMat[basePredVarMatIndex + j] * val;
}
int sat = 0;
for(int i = 0; i < totalPreds; i++)
{
int predId = d_predicates[i];
long interpretationIdx = dbIndex[i];
sat = max(sat, d_interpretation[predId][interpretationIdx] == d_valTrue[i]);
}
d_satArray[idx] = sat;
}
}*/
|
18,866 | #include<stdio.h>
#include<cuda.h>
// Scatters the m x n input A into the top-left of the (m+1) x (n+1) = p x q
// output B, while atomically accumulating row sums into B's extra last
// column and column sums into B's extra last row. Each thread handles k
// elements strided by (m*n)/k; assumes m*n is divisible by k. The thread
// owning element 0 also seeds the bottom-right cell with INT_MAX for the
// subsequent atomicMin pass (that cell receives no sums).
__global__ void sumRandC(int* A, int* B, int m, int n, int p, int q, int k)
{
    int id=blockIdx.x*blockDim.x + threadIdx.x,idx;
    if(id<((m*n)/k))
    {
        for(int i=0;i<k;i++)
        {
            idx = id+i*((m*n)/k);
            // idx + idx/n skips over the extra column in B's padded layout.
            B[idx+(idx/n)] = A[idx];
            atomicAdd(&B[(((idx/n)+1)*n)+(idx/n)],A[idx]); // Adds elements to the row end
            atomicAdd(&B[(m*n)+m+(idx%n)],A[idx]); // Adds elements to the column end
            if(idx==0)
                B[p*q-1] = INT_MAX;
        }
    }
}
// Second pass: atomicMin-reduce all row-end and column-end sums written by
// sumRandC into B[p*q-1] (which sumRandC pre-seeded with INT_MAX).
// Same id/striding scheme as sumRandC; assumes m*n is divisible by k.
__global__ void findMIn( int* A, int* B, int m, int n, int p, int q, int k)
{
    int id=blockIdx.x*blockDim.x + threadIdx.x,idx;
    if(id<((m*n)/k))
    {
        for(int i=0;i<k;i++)
        {
            idx=id+i*((m*n)/k);
            atomicMin(&B[p*q-1],B[(((idx/n)+1)*n)+(idx/n)]); // Checks minimum of row end elements
            atomicMin(&B[p*q-1],B[(m*n)+m+(idx%n)]); // Checks minimum of column end elements
        }
    }
}
// Third pass: add the minimum stored at B[p*q-1] to every cell of B that is
// NOT in the last row or last column. idx maps the flat input index into the
// padded p x q layout; idx%q == n selects the last column and idx/q == m the
// last row (q == n+1, p == m+1).
__global__ void updateMin( int* A, int* B, int m, int n, int p, int q, int k)
{
    int id=blockIdx.x*blockDim.x + threadIdx.x,idx;
    if(id<((m*n)/k))
    {
        for(int i=0;i<k;i++)
        {
            idx = id+i*((m*n)/k)+((id+i*((m*n)/k))/n);
            if(idx%q!=n && idx/q!=m)
            {
                atomicAdd(&B[idx],B[p*q-1]); // Adds minimum to all the elements not in the last row and column
            }
        }
    }
}
// Reads an M x N matrix and work factor k from stdin, builds the padded
// (M+1) x (N+1) matrix with row/column sums, finds the minimum of those
// sums, adds it to the interior cells, and prints the result.
int main()
{
    int M,N,k;
    scanf( "%d %d %d", &M,&N,&k);
    int *matrix,*matrix1, *hmatrix,*h1matrix;
    cudaMalloc(&matrix, (M) * (N) * sizeof(int));
    cudaMalloc(&matrix1, (M+1) * (N+1) * sizeof(int));
    hmatrix = (int *)malloc(M * N * sizeof(int));
    h1matrix = (int *)malloc((M+1) * (N+1) * sizeof(int));
    for (int ii = 0; ii < M; ++ii)
    {
        for (int jj = 0; jj < N; ++jj)
        {
            scanf("%d",&hmatrix[ii*N+jj]);
        }
    }
    cudaMemcpy(matrix, hmatrix, M * N * sizeof(int), cudaMemcpyHostToDevice);
    // FIX: cudaMalloc does not zero memory, so the row/column sum cells that
    // sumRandC only atomicAdd's into previously started from garbage.
    cudaMemset(matrix1, 0, (M+1) * (N+1) * sizeof(int));
    // Same-stream kernels serialize, so the three passes run in order.
    sumRandC<<<ceil((float)(M*N)/(k*1024)),1024>>>(matrix,matrix1,M,N,M+1,N+1,k);
    findMIn<<<ceil((float)(M*N)/(k*1024)),1024>>>(matrix,matrix1,M,N,M+1,N+1,k);
    updateMin<<<ceil((float)(M*N)/(k*1024)),1024>>>(matrix,matrix1,M,N,M+1,N+1,k);
    cudaDeviceSynchronize();
    cudaMemcpy(h1matrix, matrix1, (M+1) * (N+1) * sizeof(int), cudaMemcpyDeviceToHost);
    for (int ii = 0; ii < M+1; ++ii)
    {
        for (int jj = 0; jj < N+1; ++jj)
        {
            printf("%d ",h1matrix[ii*(N+1)+jj]);
        }
        printf("\n");
    }
    // FIX: release device and host allocations before exit.
    cudaFree(matrix);
    cudaFree(matrix1);
    free(hmatrix);
    free(h1matrix);
    return 0;
}
|
18,867 | /*
* ECE 5720 Parallel Computing final project
* Substring matching with CUDA
* Shicong Li sl3295
* Siyu Liu sl3282
* Cornell University
*
* Compile : /usr/local/cuda-10.1/bin/nvcc -arch=compute_52 -o KMP_cuda KMP_cuda.cu
* Run : ./KMP_cuda
*/
#include "cuda_profiler_api.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <math.h>
#define n_s 1e9
#define n_p 4
#define M 10000
#define N 8
#define BILLION 1E9L
// KMP substring search on the device. The text dev_s (n_s chars, padded with
// n_p-1 sentinel chars) is split across M*N threads; each thread scans
// [start, end) where 'end' overlaps the next chunk by n_p-1 characters so
// matches straddling a chunk boundary are still found. Every match start
// position is recorded as one bit in dev_res_map.
// Note: n_s and n_p are double literals (1e9, 4), so the chunk-boundary math
// below happens in floating point before truncation to int.
// NOTE(review): the bitmap update uses a plain |= — two threads whose chunks
// share a 32-bit word could race; confirm the chunk size is a multiple of 32
// or switch to atomicOr.
__global__ void match(char *dev_s, char *dev_p, int *dev_lps, uint *dev_res_map)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    // calculate the start point and end point
    int start = i * n_s / (M * N);
    int end = (i + 1) * n_s / (M * N) + n_p - 1;
    // local variables for KMP matching: id_p tracks the pattern position,
    // id_s the text position
    int id_p = 0;
    int id_s = start;
    while(id_s < end) {
        if(dev_s[id_s] == dev_p[id_p]) {
            id_s++;
            id_p++;
        }
        if(id_p == n_p) {
            // Full match: set the bit at its start index, then fall back via
            // the failure (lps) table so overlapping matches are also found.
            int idx = id_s - id_p;
            dev_res_map[idx / 32] |= 1 << (idx % 32);
            id_p = dev_lps[id_p - 1];
        }
        else if(id_s < end && dev_s[id_s] != dev_p[id_p]) {
            if(id_p != 0) id_p = dev_lps[id_p - 1];
            else id_s++;
        }
    }
}
// Build the KMP failure table for pattern p of length n:
// lps[i] = length of the longest proper prefix of p[0..i] that is also a
// suffix of it ("longest prefix-suffix").
void computeLPS(char* p, int* lps, int n) {
    lps[0] = 0;
    int len = 0;   // length of the currently matched prefix
    int i = 1;
    while (i < n) {
        if (p[i] == p[len]) {
            // Character extends the current prefix match.
            lps[i++] = ++len;
        } else if (len > 0) {
            // Mismatch: fall back to the next-shorter candidate prefix.
            len = lps[len - 1];
        } else {
            // No prefix matches at all here.
            lps[i++] = 0;
        }
    }
}
// Host driver: reads the text and pattern from ../data_5.txt, builds the LPS
// table, pads the text tail, runs the match kernel on an M-block x N-thread
// launch, and reports elapsed GPU time (allocations + copies + kernel).
// NOTE(review): fopen's result is not checked — a missing data file crashes
// inside fgets.
int main() {
    char *s, *p, *dev_s, *dev_p;
    int *lps, *dev_lps;
    uint *res_map, *dev_res_map;
    // n_s and n_p are double literals, so these sizes are computed in
    // floating point before conversion.
    s = (char *) malloc((n_s + n_p - 1) * sizeof(char));
    p = (char *) malloc(n_p * sizeof(char));
    lps = (int *) malloc(n_p * sizeof(int));
    res_map = (uint *) calloc((n_s/32), sizeof(uint));
    FILE * fptr = fopen( "../data_5.txt" , "r");
    fgets(s, n_s + 1, fptr);
    fgets(p, n_p + 1, fptr);
    fclose(fptr);
    // Pad the overlap region past the text end with a character that differs
    // from p[1] (presumably chosen so padding cannot complete a match —
    // note it only guarantees inequality with p[1]).
    for(int i = n_s; i < n_s + n_p - 1; i++) s[i] = p[1] + 1;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    cudaMalloc( (void**)&dev_s, (n_s + n_p - 1)*sizeof(char));
    cudaMalloc( (void**)&dev_p, n_p*sizeof(char));
    cudaMalloc( (void**)&dev_lps, n_p*sizeof(int));
    cudaMalloc( (void**)&dev_res_map, (n_s/32)*sizeof(uint));
    computeLPS(p, lps, n_p);
    cudaMemcpy(dev_s, s, (n_s + n_p - 1)*sizeof(char), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_p, p, n_p*sizeof(char), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_lps, lps, n_p*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_res_map, res_map, (n_s/32)*sizeof(uint), cudaMemcpyHostToDevice);
    match<<<M, N>>>(dev_s, dev_p, dev_lps, dev_res_map);
    // The blocking copy back waits for the kernel before 'stop' is recorded.
    cudaMemcpy(res_map, dev_res_map, (n_s/32)*sizeof(uint), cudaMemcpyDeviceToHost);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("Total time is %lf\n", milliseconds);
    cudaFree(dev_s); cudaFree(dev_p); cudaFree(dev_lps); cudaFree(dev_res_map);
    free(s); free(p); free(lps); free(res_map);
}
18,868 | #include <stdio.h>
#include <cuda.h>
#include <time.h>
// Elementwise c = a + b over the first N floats; one thread per element,
// with a guard for the ragged final block.
__global__ void add_vectors(float *a, float *b, float *c, int N) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= N)
        return;
    c[i] = a[i] + b[i];
}
// Demo: add two random N-float vectors on the GPU and print the triples.
int main(void) {
    const int N = 10;
    const size_t bytes = N * sizeof(float);
    // Host buffers.
    float *a_h = (float *)malloc(bytes);
    float *b_h = (float *)malloc(bytes);
    float *c_h = (float *)malloc(bytes);
    srand(time(NULL));
    // Device buffers.
    float *a_d, *b_d, *c_d;
    cudaMalloc((void **)&a_d, bytes);
    cudaMalloc((void **)&b_d, bytes);
    cudaMalloc((void **)&c_d, bytes);
    // Random inputs in [0, 1].
    for (int i = 0; i < N; i++) {
        a_h[i] = rand() / (float)RAND_MAX;
        b_h[i] = rand() / (float)RAND_MAX;
    }
    cudaMemcpy(a_d, a_h, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b_h, bytes, cudaMemcpyHostToDevice);
    // Ceil-divide N by the block size so the grid covers every element.
    const int block_size = 4;
    const int n_blocks = (N + block_size - 1) / block_size;
    add_vectors<<<n_blocks, block_size>>> (a_d, b_d, c_d, N);
    // The blocking copy waits for the kernel before c_h is read.
    cudaMemcpy(c_h, c_d, bytes, cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; i++) {
        printf("%d\t%f\t%f\t= %f\n", i, a_h[i], b_h[i], c_h[i]);
    }
    free(a_h);
    free(b_h);
    free(c_h);
    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);
}
18,869 | #include <stdio.h>
// Prints a greeting from host code.
void CPUFunction() {
    puts("Hello world from the CPU.");
}
// Kernel: each launched thread prints a greeting from the device.
// (Device-side printf output is flushed at the next synchronization point.)
__global__ void GPUFunction() {
    printf("Hello world from the GPU.\n");
}
// Hello-world driver: one host call, then a 1-block/1-thread kernel launch.
int main() {
    // function to run on the cpu
    CPUFunction();
    // function to run on the gpu; <<<1, 1>>> launches 1 block of 1 thread
    GPUFunction<<<1, 1>>>();
    // kernel execution is asynchronous so sync on its completion
    // (this also flushes the device-side printf buffer)
    cudaDeviceSynchronize();
}
|
18,870 | /*
============================================================================
Filename : algorithm.c
Author : Arthur Vernet, Simon Maulini
SCIPER : 245828, 248115
============================================================================
*/
#include <iostream>
#include <iomanip>
#include <sys/time.h>
#include <cuda_runtime.h>
using namespace std;
// CPU Baseline
// CPU baseline: 'iterations' sweeps of a 3x3 box average (Jacobi style) over
// a length x length grid, re-injecting the 2x2 center "heat core" at 1000
// after every sweep. Border cells (i or j equal to 0 or length-1) are never
// written.
// NOTE(review): input/output are swapped locally each iteration, so for even
// iteration counts the final state ends up in the caller's 'input' buffer
// rather than 'output' — confirm callers account for this.
void array_process(double *input, double *output, int length, int iterations)
{
    double *temp;
    for(int n=0; n<(int) iterations; n++)
    {
        for(int i=1; i<length-1; i++)
        {
            for(int j=1; j<length-1; j++)
            {
                // 9-point average of the 3x3 neighborhood around (i, j).
                output[(i)*(length)+(j)] = (input[(i-1)*(length)+(j-1)] +
                                            input[(i-1)*(length)+(j)] +
                                            input[(i-1)*(length)+(j+1)] +
                                            input[(i)*(length)+(j-1)] +
                                            input[(i)*(length)+(j)] +
                                            input[(i)*(length)+(j+1)] +
                                            input[(i+1)*(length)+(j-1)] +
                                            input[(i+1)*(length)+(j)] +
                                            input[(i+1)*(length)+(j+1)] ) / 9;
            }
        }
        // Re-inject the constant 2x2 heat source at the grid center.
        output[(length/2-1)*length+(length/2-1)] = 1000;
        output[(length/2)*length+(length/2-1)] = 1000;
        output[(length/2-1)*length+(length/2)] = 1000;
        output[(length/2)*length+(length/2)] = 1000;
        // Ping-pong the buffers for the next iteration.
        temp = input;
        input = output;
        output = temp;
    }
}
// Pass 1 of the separable 3x3 sum: output[i] = input[i-1]+input[i]+input[i+1]
// (horizontal triple sum). Skips out-of-range threads, the first/last
// columns, and the 2x2 heat core (note: && binds tighter than ||, so only
// the final two parenthesized groups pair up as the core test).
__global__ void kernel_row(double *input, double *output, int length) {
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    int y = blockDim.y * blockIdx.y + threadIdx.y;
    int i = y * length + x;
    //check if the coordinates are out of bounds or corresponding to the heat core
    if(x >= length || y >= length || x == 0 || x == length - 1 || (y == length / 2 || y == length / 2 - 1)
       && (x == length / 2 - 1 || x == length / 2))
        return;
    output[i] = input[i];
    output[i] += input[i - 1];
    output[i] += input[i + 1];
}
// Pass 2 of the separable 3x3 sum: vertical triple sum of the row-pass
// output, divided by 9 to complete the 9-cell average. Skips out-of-range
// threads, the first/last rows, and the 2x2 heat core (same &&/|| precedence
// note as kernel_row).
__global__ void kernel_column(double *input, double *output, int length) {
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    int y = blockDim.y * blockIdx.y + threadIdx.y;
    int i = y * length + x;
    //check if the coordinates are out of bounds or corresponding to the heat core
    if(x >= length || y >= length || y == 0 || y == length - 1 || (y == length / 2 || y == length / 2 - 1)
       && (x == length / 2 - 1 || x == length / 2))
        return;
    output[i] = input[i];
    output[i] += input[i - length];
    output[i] += input[i + length];
    output[i] /= 9; //divide by 9 as this kernel is called the last
}
// GPU Optimized function
// GPU version of array_process: 'iterations' sweeps of the separable 3x3
// average over a length x length grid, timed in three phases (H2D copy,
// compute, D2H copy) using CUDA events.
void GPU_array_process(double *input, double *output, int length, int iterations)
{
    //Cuda events for calculating elapsed time
    cudaEvent_t cpy_H2D_start, cpy_H2D_end, comp_start, comp_end, cpy_D2H_start, cpy_D2H_end;
    cudaEventCreate(&cpy_H2D_start);
    cudaEventCreate(&cpy_H2D_end);
    cudaEventCreate(&cpy_D2H_start);
    cudaEventCreate(&cpy_D2H_end);
    cudaEventCreate(&comp_start);
    cudaEventCreate(&comp_end);
    /* Preprocessing */
    cudaSetDevice(0);
    size_t size = length*length*sizeof(double);
    double* input_data;
    double* output_data;
    dim3 threadPerBlocks(32, 32);
    // FIX: the grid was hard-coded to 4x4 blocks, covering only 128x128
    // cells regardless of 'length'; size it from the problem instead.
    dim3 blocks((length + threadPerBlocks.x - 1) / threadPerBlocks.x,
                (length + threadPerBlocks.y - 1) / threadPerBlocks.y);
    // allocate arrays on device
    if (cudaMalloc((void **) &input_data, size) != cudaSuccess)
        cout << "error in cudaMalloc" << endl;
    if (cudaMalloc((void **) &output_data, size) != cudaSuccess)
        cout << "error in cudaMalloc" << endl;
    cudaEventRecord(cpy_H2D_start);
    /* Copying array from host to device */
    if (cudaMemcpy(input_data, input, size, cudaMemcpyHostToDevice) != cudaSuccess)
        cout << "error in cudaMemcpy" << endl;
    // FIX: seed the scratch buffer too — the kernels skip borders and the
    // heat core, so those cells of output_data would otherwise be read
    // uninitialized by the column pass.
    if (cudaMemcpy(output_data, input, size, cudaMemcpyHostToDevice) != cudaSuccess)
        cout << "error in cudaMemcpy" << endl;
    cudaEventRecord(cpy_H2D_end);
    cudaEventSynchronize(cpy_H2D_end);
    cudaEventRecord(comp_start);
    /* GPU calculation: row pass input->output, column pass output->input,
       so the up-to-date state lives in input_data after every iteration.
       Same-stream kernels serialize, so no per-iteration sync is needed. */
    for(int i = 0; i < iterations; ++i) {
        kernel_row <<< blocks, threadPerBlocks >>> (input_data, output_data, length);
        kernel_column <<< blocks, threadPerBlocks >>> (output_data, input_data, length);
    }
    // FIX: cudaThreadSynchronize() is deprecated; one cudaDeviceSynchronize()
    // after the loop suffices and keeps comp_end accurate.
    cudaDeviceSynchronize();
    cudaEventRecord(comp_end);
    cudaEventSynchronize(comp_end);
    cudaEventRecord(cpy_D2H_start);
    /* Copying array from device to host */
    // FIX: after the ping-pong above the final state is in input_data, not
    // output_data — the old code copied the intermediate row sums back.
    if(cudaMemcpy(output, input_data, size, cudaMemcpyDeviceToHost) != cudaSuccess)
        cout << "Cuda Memcpy DeviceToHost Error: cannot copy output\n";
    cudaEventRecord(cpy_D2H_end);
    cudaEventSynchronize(cpy_D2H_end);
    /* Postprocessing */
    cudaFree(input_data);
    cudaFree(output_data);
    float time;
    cudaEventElapsedTime(&time, cpy_H2D_start, cpy_H2D_end);
    cout<<"Host to Device MemCpy takes "<<setprecision(4)<<time/1000<<"s"<<endl;
    cudaEventElapsedTime(&time, comp_start, comp_end);
    cout<<"Computation takes "<<setprecision(4)<<time/1000<<"s"<<endl;
    cudaEventElapsedTime(&time, cpy_D2H_start, cpy_D2H_end);
    cout<<"Device to Host MemCpy takes "<<setprecision(4)<<time/1000<<"s"<<endl;
}
|
18,871 | #include <stdio.h>
#include <cuda.h>
#include <assert.h>
#define N 2//64
// Device helper: trivial pass-through of the pointer argument.
__device__ int* bar(int* p) {
    return p;
}
// Kernel: each thread zeroes element threadIdx.x of the array reached
// through bar(p).
__global__ void foo(int* p) {
    int* q = bar(p);
    q[threadIdx.x] = 0;
    //printf(" %d; ", q[threadIdx.x]);
}
|
18,872 | #include <stdio.h>
// Print the compute capability (major.minor) of every visible CUDA device,
// space-separated. Returns -1 if the device query fails or no device exists.
int main()
{
    int deviceCount = 0;
    if (cudaGetDeviceCount(&deviceCount) != cudaSuccess) { return -1; }
    if (deviceCount == 0) { return -1; }
    for (int dev = 0; dev < deviceCount; ++dev)
    {
        cudaDeviceProp props;
        // Skip devices whose properties cannot be read.
        if (cudaGetDeviceProperties(&props, dev) != cudaSuccess) { continue; }
        printf("%d.%d ", props.major, props.minor);
    }
    return 0;
}
|
18,873 | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <iostream>
// Fill a host vector with 1000 down to 1, print the first ten values, sort
// ascending with thrust, then print the first ten again.
int main(){
    thrust::host_vector<int> H(1000);
    for(int i = 0; i < 1000; ++i)
        H[i] = 1000 - i;
    for(int i = 0; i < 10; ++i)
        std::cout << H[i] << "\n";
    // Host-side sort (a device_vector round-trip would also work).
    thrust::sort(H.begin(), H.end());
    for(int i = 0; i < 10; ++i)
        std::cout << H[i] << "\n";
    return 0;
}
18,874 | #include "includes.h"
//=============================================================================
// FILE: mytoy.cu
// AUTHORS: Raul Segura & Manuel Ujaldon (copyright 2014)
// Look for the string "MU" whenever Manuel suggests you to introduce changes
// Feel free to change some other parts of the code too (at your own risk)
//=============================================================================
//=============================================================================
// CUDA functions.
//=============================================================================
//Error handler for CUDA functions.
// One thread per nonzero in the column range [firstInd, nextColInd): applies
// the update v *= v + v*v ('numOperations' times) to dvalues[vi].
// "numOperations" is the 2nd input parameter to our executable.
__global__ void kernelAdd(float *dvalues, int numOperations, int firstInd, int nextColInd)
{
    int vi = firstInd + blockIdx.x * blockDim.x + threadIdx.x;
    if (vi < nextColInd) {
        for (int j=0; j<numOperations; ++j) {
            // The operation performed on each nonzero of our sparse matrix:
            dvalues[vi] *=dvalues[vi]+dvalues[vi]*dvalues[vi]; // POINT 3: Choices you may try here:
        }                                                      // *= (for multiply), /= (for division),
    }                                                          // or you may investigate some other :-)
}
18,875 | #include "kernel.cuh"
#define N 5
// Squares each input element; one thread per element, indexed by threadIdx.x
// (intended for a single-block launch).
__global__ void gpuSquareKernel(float* d_in, float* d_out)
{
    const int i = threadIdx.x;
    const float v = d_in[i];
    d_out[i] = v * v;
}
// Host wrapper: copies N floats to the device, squares them with one block
// of N threads, and copies the results back into h_out.
void gpuSquare(float* h_in, float* h_out)
{
    const size_t bytes = N * sizeof(float);
    float *dev_in;
    float *dev_out;
    cudaMalloc((void**)&dev_in, bytes);
    cudaMalloc((void**)&dev_out, bytes);
    cudaMemcpy(dev_in, h_in, bytes, cudaMemcpyHostToDevice);
    gpuSquareKernel << <1, N >> > (dev_in, dev_out);
    // Blocking copy implicitly waits for the kernel to finish.
    cudaMemcpy(h_out, dev_out, bytes, cudaMemcpyDeviceToHost);
    cudaFree(dev_in);
    cudaFree(dev_out);
}
|
18,876 | // based on https://gist.github.com/dpiponi/1502434
#include <stdio.h>
#define N 256 // 0x1d710 // 65536 // 4096 //1024
#define h2d(h,d,n) cudaMemcpy(d,h,sizeof(int)*n, cudaMemcpyHostToDevice)
#define d2h(d,h,n) cudaMemcpy(h,d,sizeof(int)*n, cudaMemcpyDeviceToHost)
#define I(n) for(int i=0;i<n;++i)
__global__
// Kernel: b[i] = 2*a[i]; one block per element (blockIdx.x indexes the data).
void add(int *a, int *b) {
    const int i = blockIdx.x;
    if (i < N) {
        b[i] = a[i] * 2;
    }
}
// Driver: fills ha with 0..N-1, doubles it on the GPU via add<<<N,1>>>,
// prints the results in hex (16 per row), then reports the device count.
// The h2d/d2h/I macros defined above wrap cudaMemcpy and index loops.
int main() {
    // arrays on the cpu: h:'host'
    int ha[N], hb[N];
    // arrays on the gpu d:'device'
    int *da, *db;
    cudaMalloc((void **)&da, N*sizeof(int));
    cudaMalloc((void **)&db, N*sizeof(int));
    // ha[i] = i for all i in [0, N)
    I(N) ha[i] = i;
    h2d(ha, da, N);
    add<<<N, 1>>>(da, db);
    // Blocking device-to-host copy waits for the kernel to finish.
    d2h(db, hb, N);
    for(int i=0; i<N;){ printf("%6x ", hb[i]); if(!(++i&15))printf("\n"); }
    printf("\n");
    cudaFree(da);
    cudaFree(db);
    printf("hello from CUDA!\n");
    int dc; cudaGetDeviceCount(&dc);
    printf("Device count: %d\n", dc);
    return 0;
}
|
18,877 | #include <stdio.h>
#include <string.h>
const int N = 8;
const int BLOCKSIZE = 8;
const int GRIDSIZE = 1;
// ---------------------------------------------- KERNELS ---------------------------------------------------------------
// Blelloch work-efficient prefix sum over N elements in a single block.
// NOTE(review): despite the name, this computes an EXCLUSIVE scan
// (out[0] == 0) — the "clear last element" + down-sweep phases show it.
// Launch: one block, dynamic shared memory for 2*blockDim.x ints.
// Each thread owns elements 2*tid and 2*tid+1.
__global__ void gpu_inclusive_scan (int *in, int *out)
{
    extern __shared__ int cache[];
    int tid = threadIdx.x;
    int offset = 1;
    // Load the input into shared memory.
    // BUG FIX: main launches BLOCKSIZE == N threads, so threads with
    // 2*tid >= N used to read in[] (and later write out[]) out of bounds.
    // Those lanes never participate in the tree phases (tid < d caps at N/2),
    // so simply skip their loads and stores.
    if (2*tid < N)
        cache[2*tid] = in[2*tid];
    if (2*tid+1 < N)
        cache[2*tid+1] = in[2*tid+1];
    __syncthreads();
    // Up-sweep: build partial sums in place up the tree.
    for (int d = N >> 1; d > 0; d >>= 1)
    {
        __syncthreads();
        if (tid < d)
        {
            int ai = offset*(2*tid+1)-1;
            int bi = offset*(2*tid+2)-1;
            cache[bi] += cache[ai];
        }
        offset *= 2;
    }
    // Clear the last element (identity for the exclusive scan).
    if (tid == 0)
    {
        cache[N-1] = 0;
    }
    // Down-sweep: traverse back down, swapping and accumulating.
    for (int d = 1; d < N; d *= 2)
    {
        offset >>= 1;
        __syncthreads();
        if (tid < d)
        {
            int ai = offset*(2*tid+1)-1;
            int bi = offset*(2*tid+2)-1;
            int aux = cache[ai];
            cache[ai] = cache[bi];
            cache[bi] += aux;
        }
    }
    __syncthreads();
    // Write results to output (guarded like the loads).
    if (2*tid < N)
        out[2*tid] = cache[2*tid];
    if (2*tid+1 < N)
        out[2*tid+1] = cache[2*tid+1];
}
// ---------------------------------------------------------------------------------------------------------------------------
// -------------------------------------------------- CPU Functions ----------------------------------------------------------
// Print the N-element vector on a single line, followed by a blank line.
void print (int *v)
{
    for (int idx = 0; idx < N; idx++)
    {
        printf("%d ", v[idx]);
    }
    printf("\n\n");
}
// Fill the N-element vector with the test sequence 1, 2, ..., N.
void generate (int *v)
{
    for (int idx = 0; idx < N; idx++)
    {
        v[idx] = idx + 1;
    }
}
// Print how to invoke the program (it takes no arguments).
void Usage (char pName[])
{
    printf("============================================\n");
    printf("Usage:> %s \n", pName);
    printf("============================================\n");
}
// ---------------------------------------------------------------------------------------------------------------------------
// ------------------------------------------------ MAIN FUNCTION ------------------------------------------------------------
int main (int argc, char *argv[])
{
    // The program accepts no command-line arguments.
    if (argc != 1)
    {
        Usage(argv[0]);
        exit(1);
    }
    // Host and device buffers for the input vector and its scan.
    size_t inBytes = N * sizeof(int);
    size_t outBytes = N * sizeof(int);
    int *h_in = (int*)malloc(inBytes);
    int *h_out = (int*)malloc(outBytes);
    int *d_in, *d_out;
    generate(h_in);
    print(h_in);
    cudaMalloc(&d_in, inBytes);
    cudaMalloc(&d_out, outBytes);
    cudaMemcpy(d_in, h_in, inBytes, cudaMemcpyHostToDevice);
    // One block of BLOCKSIZE threads; two shared ints per thread.
    dim3 gridSize(1, 1);
    dim3 blockSize(BLOCKSIZE, 1);
    size_t sharedMem = 2 * BLOCKSIZE * sizeof(int);
    gpu_inclusive_scan<<<gridSize, blockSize, sharedMem>>>(d_in, d_out);
    cudaMemcpy(h_out, d_out, outBytes, cudaMemcpyDeviceToHost);
    // Show the scanned vector.
    print(h_out);
    free(h_in);
    free(h_out);
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}
18,878 | //nvcc -ptx electron_transport.cu -ccbin "F:Visual Studio\VC\Tools\MSVC\14.12.25827\bin\Hostx64\x64"
#include "curand_kernel.h"
// Monte-Carlo electron-transport step for one particle. The particle index n
// is derived from the 2-D grid/block layout; threads with n >= parNum exit.
// During time step dt the electron may undergo one collision with the
// background N2 gas — elastic, ionizing, or exciting — chosen by sampling
// against the per-process collision probabilities. Velocities vx/vy/vz are
// updated in place; an ionization additionally emits a secondary electron
// (secondary_*), records the cell index (particleSamplingOut), and draws a
// thermal velocity for the produced ion (ion1_*).
// NOTE(review): cell indices (index_r/index_z/index_phi) are used without
// bounds checks — confirm the caller guarantees the particle is inside the
// mesh covered by density/temperature/dphi/sum_cell0.
__device__ void EM1(double *x,
double *y,
double *z,
double *vx,
double *vy,
double *vz,
double *sig_elastic,
double *sig_ionization,
double *sig_excitation,
double *energy,
double *xInterp,
double *yInterp,
double *V_interp,
double *density,
double *temperature,
double *dphi,
double *kExc,
double *Aexc,
double *omegaExc,
double *vExc,
double *gammaExc,
bool *secondaryParticle,
double *secondary_vx,
double *secondary_vy,
double *secondary_vz,
double *particleSamplingOut,
double *ion1_vx,
double *ion1_vy,
double *ion1_vz,
int *sum_cell0,
const int lenExc,
const int cellnumxy,
const double dr,
const double dz,
const double dt,
const int parNum) {
    // Flatten the 2-D grid / 2-D block coordinates into one particle index.
    int globalBlockIndex = blockIdx.x + blockIdx.y * gridDim.x;
    int localThreadIdx = threadIdx.x + blockDim.x * threadIdx.y;
    int threadsPerBlock = blockDim.x*blockDim.y;
    int n = localThreadIdx + globalBlockIndex*threadsPerBlock;
    if ( n >= parNum ){
        return;
    }
    // Physical constants (SI units unless noted).
    double e,c,m,pi,E_e,m_ion,kb;
    e = 1.6022e-19;
    c = 3e+08;
    m = 9.109e-31;
    m_ion = 4.65e-26; // mass of one N2
    kb = 1.38064853e-23;
    E_e = m*c*c / e;  // electron rest energy in eV
    pi = 3.1415926;
    double p_elastic,p_ionization,p_excitation,p_total,a,b,p1,p2,rand,a0,b0,vInterp,E_p,v_p,ratio;
    double r,phi,v_e,v,dx,vx0,vy0,vz0,theta_ES,phi_ES,DCS_A,PDF,intPDFmax,intPDF1,intPDF2,gamma,T;
    int i,k,index_r,index_z,index_phi,index;
    double I,T_0,T_a,T_b,t_b,T_s,t_s,t,T_m,Rn,k_1,k_2,k0,v_s;
    double E_1,E_2,E_s,phi_p,phi_s,sin_theta_p,theta_p,sin_theta_s,theta_s;
    I = 15.6; //Ionization threshold, unit:eV
    double q0,aExc,bExc,cExc,E_dep;
    // Path length travelled this step and the (cylindrical) cell the particle
    // currently occupies.
    v_e = sqrt( vx[n]*vx[n] + vy[n]*vy[n] + vz[n]*vz[n] );
    dx = v_e*dt;
    r = sqrt(x[n]*x[n]+y[n]*y[n]);
    phi = atan(y[n]/x[n]);
    index_z = floor(z[n]/dz);
    index_r = floor(r/dr);
    index_phi = floor(phi/dphi[index_r]);
    index = index_z*cellnumxy + sum_cell0[index_r] + index_phi;
    // Collision probabilities over this step: path length x local gas density
    // x per-particle cross sections.
    p_elastic = dx * density[index] * sig_elastic[n];
    p_ionization = dx * density[index] * sig_ionization[n];
    p_excitation = dx * density[index] * sig_excitation[n];
    p_total = p_elastic + p_ionization + p_excitation;
    // Cumulative branch thresholds: [0,a) elastic, [a,b) ionization, rest excitation.
    a = p_elastic/p_total;
    b = a + p_ionization/p_total;
    // Per-thread RNG. NOTE(review): seeding with clock() each call gives
    // non-reproducible streams and is relatively expensive — consider a
    // persistent per-thread curandState initialized once.
    curandState state;
    curand_init((unsigned long long)clock(),0,n, & state);
    p1=curand_uniform_double(&state);   // does a collision happen at all?
    p2=curand_uniform_double(&state);   // which collision type?
    if(p1< p_total){
        if(p2 < a){
            //elastic
            // Locate the bracketing energy grid points for interpolation.
            i = 0;
            while( energy[n] > xInterp[i+1] ){
                i++;
            }
            a0 = (xInterp[i+1]-energy[n])/(xInterp[i+1]-xInterp[i]);
            b0 = (energy[n]-xInterp[i])/(xInterp[i+1]-xInterp[i]);
            // Total cross section at this energy (linear interpolation).
            double TCS_A = (a0*yInterp[i]+b0*yInterp[i+1])*2;
            // Normalization: integrate the angular PDF over 0..180 degrees.
            intPDFmax = 0;
            for( k = 0; k<=180; k++ ){
                vInterp = a0*V_interp[i*181+k] + b0*V_interp[(i+1)*181+k];
                DCS_A = exp(vInterp);
                DCS_A = 2*pi*sin(k*pi/180)*DCS_A*2;
                PDF = 1/TCS_A * DCS_A;
                intPDFmax = intPDFmax + PDF* pi/180;
            }
            // Sample the scattering angle by inverting the cumulative PDF.
            rand = curand_uniform_double(&state) * intPDFmax;
            intPDF1 = 0;
            k = 0;
            // NOTE(review): if rand exceeds the accumulated integral this loop
            // can exit with k == 181, and the interpolation below then reads
            // V_interp one entry past the 181-point row — verify the tables
            // guarantee earlier termination.
            while( rand > intPDF1 && k<181 ){
                vInterp = a0*V_interp[i*181+k] + b0*V_interp[(i+1)*181+k];
                DCS_A = exp(vInterp);
                DCS_A = 2*pi*sin(k*pi/180)*DCS_A*2;
                PDF = 1/TCS_A * DCS_A;
                intPDF1 = intPDF1 + PDF* pi/180;
                k++;
            }
            vInterp = a0*V_interp[i*181+k] + b0*V_interp[(i+1)*181+k];
            DCS_A = exp(vInterp);
            DCS_A = 2*pi*sin(k*pi/180)*DCS_A*2;
            PDF = 1/TCS_A * DCS_A;
            intPDF2 = intPDF1 + PDF* pi/180;
            // Linear interpolation between the bracketing 1-degree bins.
            a0 = (intPDF2 - rand)/(intPDF2 - intPDF1);
            b0 = (rand - intPDF1)/(intPDF2 - intPDF1);
            theta_ES = ( a0*(k-1)+b0*k )*pi/180;
            phi_ES = 2* pi* curand_uniform_double(&state);
            // Rotate the velocity by (theta_ES, phi_ES) about its own axis;
            // the fallback branch handles near-axial velocities.
            v = sqrt( vx[n]*vx[n] + vy[n]*vy[n] + vz[n]*vz[n] );
            if( v*v - vz[n]*vz[n] > 0.0001){
                vx0 = vx[n]*cos(theta_ES) + sin(theta_ES)/sqrt(v*v-vz[n]*vz[n])*( vx[n]*vz[n]*cos(phi_ES) - v*vy[n]* sin(phi_ES) );
                vy0 = vy[n]*cos(theta_ES) + sin(theta_ES)/sqrt(v*v-vz[n]*vz[n])*( vy[n]*vz[n]*cos(phi_ES) + v*vx[n]* sin(phi_ES) );
                vz0 = vz[n]*cos(theta_ES) - sqrt( v*v - vz[n]*vz[n] )*sin(theta_ES)*cos(phi_ES);
            }
            else{
                vx0 = v*sin(theta_ES)*cos(phi_ES);
                vy0 = v*sin(theta_ES)*sin(phi_ES);
                vz0 = v*cos(theta_ES);
            }
            vx[n] = vx0;
            vy[n] = vy0;
            vz[n] = vz0;
        }
        else if(p2<b && energy[n]>I/1000){
            //ionization
            // Sample the energy split between primary and secondary electron
            // using a parameterized differential ionization cross section.
            T = energy[n] * 1000; //unit: eV
            T_a = 1000;
            T_b = 2*I;
            t_b = I;
            T_s = 4.17;
            t_s = 13.8;
            T_0 = T_s - T_a / (T+T_b);
            t = t_s*T/(T+t_b);
            T_m = (T-I)/2;
            Rn = curand_uniform_double(&state);
            k_1 = atan((T_m-T_0)/t);
            k_2 = atan(T_0/t);
            k0 = T_0 + t * tan( Rn*(k_1+k_2) - k_2 );
            E_1 = k0;
            E_2 = T - E_1 - I;
            // The faster electron is taken as the primary.
            if(E_2>E_1){
                E_p = E_2;
                E_s = E_1;
            }
            else{
                E_p = E_1;
                E_s = E_2;
            }
            E_p = E_p * e;
            E_s = E_s * e;
            // Azimuthal angles are back-to-back; polar angles follow from
            // energy/momentum conservation.
            phi_p = 2*pi* curand_uniform_double(&state);
            phi_s = phi_p - pi;
            sin_theta_p = sqrt( (k0/T) / ((1-k0/T)*T/(2*E_e)+1) );
            theta_p = asin(sin_theta_p);
            sin_theta_s = sqrt( (1-k0/T)/(1+k0/(2*E_e)) );
            theta_s = asin(sin_theta_s);
            // Rescale the primary's speed to its post-collision (relativistic) value.
            gamma = 1 + E_p/(m*c*c);
            v_p = c*sqrt(1 - 1/(gamma*gamma));
            ratio = v_p/v_e;
            vx[n] = ratio*vx[n];
            vy[n] = ratio*vy[n];
            vz[n] = ratio*vz[n];
            // Rotate the primary by (theta_p, phi_p), as in the elastic branch.
            v = sqrt( vx[n]*vx[n] + vy[n]*vy[n] + vz[n]*vz[n] );
            if( v*v - vz[n]*vz[n] > 0.0001){
                vx0 = vx[n]*cos(theta_p) + sin(theta_p)/sqrt(v*v-vz[n]*vz[n])*( vx[n]*vz[n]*cos(phi_p) - v*vy[n]* sin(phi_p) );
                vy0 = vy[n]*cos(theta_p) + sin(theta_p)/sqrt(v*v-vz[n]*vz[n])*( vy[n]*vz[n]*cos(phi_p) + v*vx[n]* sin(phi_p) );
                vz0 = vz[n]*cos(theta_p) - sqrt( v*v - vz[n]*vz[n] )*sin(theta_p)*cos(phi_p);
            }
            else{
                vx0 = v*sin(theta_p)*cos(phi_p);
                vy0 = v*sin(theta_p)*sin(phi_p);
                vz0 = v*cos(theta_p);
            }
            vx[n] = vx0;
            vy[n] = vy0;
            vz[n] = vz0;
            //secondary e
            // Emit the secondary electron and a thermal background ion.
            gamma = 1 + E_s/(m*c*c);
            v_s = c*sqrt(1 - 1/(gamma*gamma));
            secondaryParticle[n] = 1;
            secondary_vx[n] = v_s*sin(theta_s)*cos(phi_s);
            secondary_vy[n] = v_s*sin(theta_s)*sin(phi_s);
            secondary_vz[n] = v_s*cos(theta_s);
            particleSamplingOut[n] = index;
            // Maxwellian velocity components at the local gas temperature.
            ion1_vx[n] = curand_normal(&state)/sqrt(m_ion/(kb*temperature[index]));
            ion1_vy[n] = curand_normal(&state)/sqrt(m_ion/(kb*temperature[index]));
            ion1_vz[n] = curand_normal(&state)/sqrt(m_ion/(kb*temperature[index]));
        }
        else{
            //excitation
            // Mean deposited energy over the lenExc excitation channels,
            // weighted by their parameterized cross sections.
            q0 = 6.514e-14;
            T = energy[n] * 1000; //unit: eV
            k_1 = 0;
            k_2 = 0;
            for(i=0; i<lenExc; i++){
                aExc = q0 * Aexc[i]/(kExc[i]*kExc[i]);
                // NOTE(review): powf is single precision while everything else
                // here is double — confirm the accuracy loss is intentional.
                bExc = powf( kExc[i]/T, omegaExc[i] );
                cExc = powf( powf( 1-kExc[i]/T, gammaExc[i] ), vExc[i]);
                k_1 = k_1 + aExc*bExc*cExc*kExc[i];
                k_2 = k_2 + aExc*bExc*cExc;
            }
            E_dep = (k_1/k_2); //unit: eV
            E_p = T - E_dep; //unit: eV
            // Slow the electron to its post-excitation speed; direction unchanged.
            gamma = 1 + E_p*e/(m*c*c);
            v_p = c*sqrt(1 - 1/(gamma*gamma));
            ratio = v_p/v_e;
            vx[n] = ratio*vx[n];
            vy[n] = ratio*vy[n];
            vz[n] = ratio*vz[n];
        }
    }
}
// Kernel entry point: thin wrapper that forwards every argument to the
// __device__ routine EM1 (one thread per particle; EM1 derives the particle
// index from the grid/block coordinates and bounds-checks against parNum).
// NOTE(review): the name is misleading — this simulates electron transport,
// not a Mandelbrot set; presumably kept to match an existing MATLAB/mexcuda
// harness that loads the PTX by this name. Confirm before renaming.
__global__ void processMandelbrotElement(
double *x,
double *y,
double *z,
double *vx,
double *vy,
double *vz,
double *sig_elastic,
double *sig_ionization,
double *sig_excitation,
double *energy,
double *xInterp,
double *yInterp,
double *V_interp,
double *density,
double *temperature,
double *dphi,
double *kExc,
double *Aexc,
double *omegaExc,
double *vExc,
double *gammaExc,
bool *secondaryParticle,
double *secondary_vx,
double *secondary_vy,
double *secondary_vz,
double *particleSamplingOut,
double *ion1_vx,
double *ion1_vy,
double *ion1_vz,
int *sum_cell0,
const int lenExc,
const int cellnumxy,
const double dr,
const double dz,
const double dt,
const int parNum) {
    EM1(x,y,z,vx,vy,vz,sig_elastic,sig_ionization,sig_excitation,energy,
    xInterp,yInterp,V_interp,density,temperature,dphi,kExc,Aexc,omegaExc,vExc,
    gammaExc,secondaryParticle,secondary_vx,secondary_vy,secondary_vz,particleSamplingOut,ion1_vx,ion1_vy,ion1_vz,
    sum_cell0,lenExc,cellnumxy,dr,dz,dt,parNum);
}
|
18,879 | #include<cmath>
#include<cstdio>
//#define BLOCKSIZE 1
// Kernel: C = A * B where A is M x K, B is K x N, C is M x N.
// One thread computes one output element C[I][J].
// BUG FIXES vs. the original:
//  - the guard used `I < M || J < N`, which let out-of-range threads run;
//  - C[I*N+J] was written OUTSIDE the guard, an out-of-bounds write for any
//    launch larger than the matrix;
//  - debug printfs read C[1] from a 1-int device allocation (OOB read) and
//    serialized every thread, so they were removed.
__global__
void dotproduct(int* A,int*B,int*C,int M,int N,int K)
{
    // Row (I) and column (J) of the output element owned by this thread.
    int I = blockIdx.x*blockDim.x+threadIdx.x;
    int J = blockIdx.y*blockDim.y+threadIdx.y;
    if (I < M && J < N) {
        int temp = 0;
        for (int k = 0; k < K; k++) {
            temp += A[I*K+k] * B[k*N+J];
        }
        C[I*N+J] = temp;
    }
}
int main(){
    // Host data: inputs A, B; C receives the result.
    int A[2]={1,2};
    int B[2]={1,1};
    int C[2]={0,0};
    int* d_A;int* d_B;int* d_C;
    int M=2;
    int N=2;
    int K=2;
    // Device allocations.
    // NOTE(review): the kernel indexes B as a K x N matrix and C as M x N,
    // so any launch wider than the single <<<1,1>>> thread used here would
    // need K*N and M*N ints respectively — confirm the intended sizes.
    cudaMalloc(&d_A, M *sizeof(int));
    cudaMalloc(&d_B, M *sizeof(int));
    cudaMalloc(&d_C, sizeof(int));
    // Copy A and B (and the zeroed C) from host to device.
    cudaMemcpy(d_A, &A[0], M *sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, &B[0], M *sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_C, &C[0], sizeof(int), cudaMemcpyHostToDevice);
    dotproduct<<<1,1>>>(d_A,d_B,d_C,M,N,K );
    // Copy the result back to the host (blocking, so it also syncs the kernel).
    cudaMemcpy(&C[0], d_C, sizeof(int), cudaMemcpyDeviceToHost);
    // BUG FIX: the original called cudaFree on the host stack arrays A/B/C,
    // which is undefined behavior; release the device allocations instead.
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
|
18,880 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <cassert>
using namespace std;
// Tree reduction (min) carried out IN PLACE in global memory: d_in is
// clobbered. Each block writes its minimum to d_out[blockIdx.x].
__global__ void global_reduce_kernel(int* d_out, int* d_in, int size)
{
    // Global and block-local thread indices.
    int myId = threadIdx.x + blockDim.x * blockIdx.x;
    int tid = threadIdx.x;
    // Halve the active span each round.
    for (unsigned int cap = blockDim.x / 2; cap > 0; cap >>= 1)
    {
        if (tid < cap)
        {
            // Combine only pairs where both elements lie inside the data.
            if (myId < size && myId + cap < size)
            {
                d_in[myId] = min(d_in[myId], d_in[myId + cap]);
            }
        }
        // Barrier kept outside any data-dependent branch: all threads reach it.
        __syncthreads();
    }
    // Thread 0 publishes the block's minimum.
    // BUG FIX: a block that starts entirely past the end of the data used to
    // read d_in[myId] out of bounds and publish garbage into the next pass;
    // publish INT_MAX (the identity for min) instead.
    if (tid == 0)
    {
        d_out[blockIdx.x] = (myId < size) ? d_in[myId] : 0x7fffffff;
    }
}
// Two-pass min-reduction of d_in (length size) into *d_out.
// Pass 1 writes per-block minima into d_intermediate (clobbering d_in);
// pass 2 reduces those minima in a single block.
void reduce(int* d_out, int* d_intermediate, int* d_in, int size)
{
    const int maxThreadsPerBlock = 512;
    int threads = maxThreadsPerBlock;
    // BUG FIX: the original used size/512 + 1, which launched a fully-empty
    // trailing block whenever size was an exact multiple of 512 and fed its
    // garbage output into pass 2. Use a proper ceiling division.
    int blocks = (size + maxThreadsPerBlock - 1) / maxThreadsPerBlock;
    if (blocks == 0) blocks = 1;   // keep size == 0 launchable, as before
    global_reduce_kernel<<<blocks, threads >>>(d_intermediate, d_in, size);
    // Pass 2: one block, one thread per pass-1 block. The tree reduction
    // needs a power-of-two thread count, so round up.
    threads = blocks;
    blocks = 1;
    int mult = 1;
    while (mult < threads) mult *= 2;
    global_reduce_kernel<<<blocks, mult >>>(d_out, d_intermediate, threads);
}
// Kernel: write each element's last decimal digit (value % 10) to d_out.
// Grid-stride loop, so any launch geometry covers all `size` elements.
__global__ void global_parity(int* d_out, int* d_in, int size) {
    int start = blockIdx.x * blockDim.x + threadIdx.x;
    int step = blockDim.x * gridDim.x;
    for (int idx = start; idx < size; idx += step) {
        d_out[idx] = d_in[idx] % 10;
    }
}
// Launch global_parity over `size` elements and wait for completion.
void parity(int* d_out, int* d_in, int size) {
    const int maxThreadsPerBlock = 512;
    int threads = maxThreadsPerBlock;
    // FIX: use ceiling division (was size/512 + 1, which always launched one
    // spare block — harmless with the grid-stride kernel, but wasteful).
    int blocks = (size + maxThreadsPerBlock - 1) / maxThreadsPerBlock;
    if (blocks == 0) blocks = 1;   // keep size == 0 launchable
    global_parity <<<blocks, threads>>> (d_out, d_in, size);
    // Block until the digits are computed before the caller reads them back.
    cudaDeviceSynchronize();
}
int main() {
    // Read comma-separated integers from inp.txt.
    vector<int> arr;
    string line;
    ifstream myfile("inp.txt");
    if (myfile.is_open())
    {
        while (getline(myfile, line, ','))
        {
            arr.push_back(stoi(line, nullptr));
        }
        myfile.close();
    }
    else cout << "Unable to open file";
    // FIX: with no data, &arr[0] below is undefined behavior — bail out early.
    if (arr.empty()) return 1;
    // Timing events for the reduction.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Device buffers: input, scalar result, per-block intermediates.
    int *d_arr, *d_out, *d_intermediate;
    cudaMalloc((void**)&d_arr, arr.size() * sizeof(int));
    cudaMalloc((void**)&d_out, sizeof(int));
    cudaMalloc((void**)&d_intermediate, arr.size() * sizeof(int));
    // Treat the pointer to the vector's first element as an array pointer.
    cudaMemcpy(d_arr, &arr[0], arr.size() * sizeof(int), cudaMemcpyHostToDevice);
    // Run and time the min-reduction.
    cudaEventRecord(start, 0);
    reduce(d_out, d_intermediate, d_arr, arr.size());
    cudaEventRecord(stop, 0);
    // Wait for the reduction (and the stop event) to complete.
    cudaDeviceSynchronize();
    // Fetch the minimum.
    int ans;
    cudaMemcpy(&ans, d_out, sizeof(int), cudaMemcpyDeviceToHost);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    // Write the minimum to q1a.txt.
    ofstream outfile("q1a.txt");
    if (outfile.is_open())
    {
        outfile << ans;
        outfile.close();
    }
    // Free the reduction buffers.
    cudaFree(d_arr);
    cudaFree(d_intermediate);
    cudaFree(d_out);
    //////////////////////////////////***PARITY CODE***///////////////////////////////////////
    // Device buffers for the digit extraction.
    int* d_arrb, * d_outb;
    cudaMalloc((void**)&d_arrb, arr.size() * sizeof(int));
    cudaMalloc((void**)&d_outb, arr.size() * sizeof(int));
    cudaMemcpy(d_arrb, &arr[0], arr.size() * sizeof(int), cudaMemcpyHostToDevice);
    // Timing events for parity.
    cudaEvent_t b1, b2;
    cudaEventCreate(&b1);
    cudaEventCreate(&b2);
    cudaEventRecord(b1, 0);
    parity(d_outb, d_arrb, arr.size());
    cudaEventRecord(b2, 0);
    // BUG FIX: the elapsed time was read before b2 was guaranteed to have
    // occurred; synchronize on the stop event first.
    cudaEventSynchronize(b2);
    float b_time;
    cudaEventElapsedTime(&b_time, b1, b2);
    // Copy the per-element digits back to the host.
    int* ans_arr = (int*)malloc(sizeof(int) * arr.size());
    cudaMemcpy(ans_arr, d_outb, sizeof(int) * arr.size(), cudaMemcpyDeviceToHost);
    // Write the digits, comma-separated, to q1b.txt.
    ofstream outfile2("q1b.txt");
    if (outfile2.is_open())
    {
        // avoid a trailing comma
        outfile2 << ans_arr[0];
        for (int i = 1; i < arr.size(); i++) {
            outfile2 << "," << ans_arr[i];
        }
        outfile2.close();
    }
    cudaFree(d_arrb);
    cudaFree(d_outb);
    free(ans_arr);
    return 0;
}
|
18,881 | #include <stdio.h>
// Fixed-size output slot for one converted number: up to 10 decimal digits
// of a 32-bit unsigned int plus the terminating NUL.
struct StringData {
    char str[11];
};
// Module-level state shared by cdInit (which allocates) and cdItoa (which
// uses): device-side input/output buffers and the element count.
unsigned int *devDataInput;
StringData *devStringDataOutput;
unsigned int dataCount;
// Abort with a diagnostic if a CUDA call returned a non-success status.
// Invoked through the checkCudaErrors macro, which supplies the call text
// and source location.
template< typename T >
void check(T result, char const *const func, const char *const file, int const line)
{
    if (!result)
        return;
    fprintf(stderr, "CUDA error at %s:%d code=%d(%s) \"%s\" \n",
            file, line, static_cast<unsigned int>(result), cudaGetErrorString(result), func);
    // Make sure we call CUDA Device Reset before exiting
    cudaDeviceReset();
    exit(EXIT_FAILURE);
}
#define checkCudaErrors(val) check ( (val), #val, __FILE__, __LINE__ )
// Allocate the host and device buffers for dataCountArg conversions.
// When allocPinnedMemory is set, the host buffers are page-locked (the input
// write-combined for fast H2D transfer); otherwise plain malloc is used.
// Returns false on failures reported via cudaError_t return codes; the
// pinned-allocation path aborts through checkCudaErrors instead.
bool cdInit(unsigned int dataCountArg, void **hostInputMemory, void **hostOutputMemory, bool allocPinnedMemory)
{
    dataCount = dataCountArg;
    cudaError_t status = cudaSetDevice(0);
    if (status != cudaSuccess)
    {
        fprintf(stderr, "cudaSetDevice error: %d\r\n", (int)status);
        return false;
    }
    const size_t inBytes = dataCount * sizeof(unsigned int);
    const size_t outBytes = dataCount * sizeof(StringData);
    if (allocPinnedMemory)
    {
        // Write-combined pinned input: fast to transfer, slow for host reads
        // (the host only writes it).
        checkCudaErrors(cudaHostAlloc((void **)hostInputMemory, inBytes, cudaHostAllocWriteCombined));
        checkCudaErrors(cudaHostAlloc((void **)hostOutputMemory, outBytes, 0));
    }
    else
    {
        *hostInputMemory = (void *)malloc(inBytes);
        *hostOutputMemory = (void *)malloc(outBytes);
    }
    status = cudaMalloc((void**)&devDataInput, inBytes);
    if (status != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc error: %d\r\n", (int)status);
        return false;
    }
    status = cudaMalloc((void**)&devStringDataOutput, outBytes);
    if (status != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc error: %d\r\n", (int)status);
        return false;
    }
    return true;
}
// Convert x to its decimal ASCII representation in output, NUL-terminated.
// output must have room for 11 chars (10 digits + NUL); x == 0 yields "0".
__device__ void uintToStringDevice(unsigned int x, char *output)
{
    char digits[10];
    int count = 0;
    // Peel off digits least-significant first (do-while handles x == 0).
    do {
        digits[count] = '0' + x % 10;
        ++count;
        x /= 10;
    } while (x != 0);
    // Emit them most-significant first.
    int pos = 0;
    while (count > 0) {
        --count;
        output[pos] = digits[count];
        ++pos;
    }
    output[pos] = 0;
}
// Kernel: convert one unsigned int per thread to its decimal string.
// NOTE(review): there is no bounds guard — the launch must cover exactly the
// number of input elements (cdItoa computes the geometry); any excess thread
// would read and write past the buffers.
__global__ void cdItoaDevice(unsigned int *dataInput, StringData *stringDataOutput)
{
    int threadIndex = blockIdx.x * blockDim.x + threadIdx.x;
    uintToStringDevice(dataInput[threadIndex], stringDataOutput[threadIndex].str);
}
// Convert dataCount unsigned ints (set up by cdInit) to decimal strings on
// the GPU. Copies the input to the device, launches cdItoaDevice, and copies
// the strings back. Returns false on any CUDA error or unsupported size.
bool cdItoa(unsigned int *dataInput, StringData *stringDataOutput)
{
    cudaError_t cudaStatus=cudaSuccess;
    cudaStatus = cudaMemcpy(devDataInput, dataInput, dataCount * sizeof(unsigned int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMemcpy error: %d\r\n", (int)cudaStatus);
        return false;
    }
    int numberOfBlocks = 1024;
    int threadsPerBlock = dataCount / numberOfBlocks;
    // BUG FIX: cdItoaDevice has no bounds guard, so the launch must cover
    // dataCount exactly. The old code silently launched 0 threads per block
    // for dataCount < 1024, skipped the tail for non-multiples, and exceeded
    // the 1024-threads-per-block hardware limit for dataCount > 1024*1024.
    // Validate explicitly instead of corrupting memory or failing silently.
    if (threadsPerBlock == 0 || threadsPerBlock > 1024 || dataCount % (unsigned int)numberOfBlocks != 0)
    {
        fprintf(stderr, "cdItoa: unsupported dataCount %u (must be a nonzero multiple of %d, at most %d)\r\n",
                dataCount, numberOfBlocks, numberOfBlocks * 1024);
        return false;
    }
    cdItoaDevice<<<numberOfBlocks, threadsPerBlock>>>(devDataInput, devStringDataOutput);
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cuda error: %s\r\n", cudaGetErrorString(cudaStatus));
        return false;
    }
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaDeviceSynchronize error: %d %s\r\n", (int)cudaStatus, cudaGetErrorString(cudaStatus));
        return false;
    }
    cudaStatus = cudaMemcpy(stringDataOutput, devStringDataOutput, dataCount * sizeof(StringData), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMemcpy error: %d\r\n", (int)cudaStatus);
        return false;
    }
    return true;
}
|
18,882 | /*
* Author:
* Yixin Li, Email: liyixin@mit.edu
* convert the image from RGB to LAB
*/
// Kernel: convert one RGB pixel per thread to CIE LAB, in place.
// img holds nPts interleaved (R,G,B) triples in 0..255 and is overwritten
// with (L,a,b). Pipeline: sRGB -> linear RGB -> XYZ (D65) -> LAB.
__global__ void rgb_to_lab( double * img, const int nPts) {
    // getting the index of the pixel
    const int t = threadIdx.x + blockIdx.x * blockDim.x;
    if (t>=nPts) return;
    double sR = img[3*t];
    double sG = img[3*t+1];
    double sB = img[3*t+2];
    // NaN guard (x != x is true only for NaN): leave such pixels untouched.
    if (sR!=sR || sG!=sG || sB!=sB) return;
    //RGB (D65 illuninant assumption) to XYZ conversion
    double R = sR/255.0;
    double G = sG/255.0;
    double B = sB/255.0;
    // Undo the sRGB gamma: linear below the 0.04045 knee, power curve above.
    double r, g, b;
    if(R <= 0.04045) r = R/12.92;
    else r = pow((R+0.055)/1.055,2.4);
    if(G <= 0.04045) g = G/12.92;
    else g = pow((G+0.055)/1.055,2.4);
    if(B <= 0.04045) b = B/12.92;
    else b = pow((B+0.055)/1.055,2.4);
    // Linear RGB -> XYZ (sRGB matrix, D65 white).
    double X = r*0.4124564 + g*0.3575761 + b*0.1804375;
    double Y = r*0.2126729 + g*0.7151522 + b*0.0721750;
    double Z = r*0.0193339 + g*0.1191920 + b*0.9503041;
    //convert from XYZ to LAB
    double epsilon = 0.008856; //actual CIE standard
    double kappa = 903.3; //actual CIE standard
    double Xr = 0.950456; //reference white
    double Yr = 1.0; //reference white
    double Zr = 1.088754; //reference white
    double xr = X/Xr;
    double yr = Y/Yr;
    double zr = Z/Zr;
    // Piecewise f() of the CIE LAB definition: cube root above epsilon,
    // linear approximation below.
    double fx, fy, fz;
    if(xr > epsilon) fx = pow(xr, 1.0/3.0);
    else fx = (kappa*xr + 16.0)/116.0;
    if(yr > epsilon) fy = pow(yr, 1.0/3.0);
    else fy = (kappa*yr + 16.0)/116.0;
    if(zr > epsilon) fz = pow(zr, 1.0/3.0);
    else fz = (kappa*zr + 16.0)/116.0;
    double lval = 116.0*fy-16.0;
    double aval = 500.0*(fx-fy);
    double bval = 200.0*(fy-fz);
    // Overwrite the pixel with its LAB coordinates.
    img[3*t] = lval;
    img[3*t+1] = aval;
    img[3*t+2] = bval;
}
18,883 | #include <cuda.h>
#include <stdio.h>
// Kernel: store each element's own global index into it, as a float.
__global__ void gTest(float* a) {
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    a[idx] = (float) idx;
}
// Kernel: elementwise vector addition, c = a + b.
__global__ void gSGEVV(float* a, float* b, float* c) {
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    c[idx] = a[idx] + b[idx];
}
// Kernel: fill the array with the constant value x.
__global__ void gInitArray(float* a, float x) {
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    a[idx] = x;
}
int main(int argc, char *argv[]) {
    // Launch geometry from the command line: blocks, then threads per block.
    int num_of_blocks = (argc > 1) ? atoi(argv[1]) : 32;
    int threads_per_block = (argc > 2) ? atoi(argv[2]) : 512;
    int N = num_of_blocks * threads_per_block;
    int size_array = N;
    // Device array a, zero-initialized host mirror d.
    float *a;
    cudaMalloc((void**) &a, size_array * sizeof(float));
    float *d = (float*) calloc(size_array, sizeof(float));
    // Fill the device array with ones and bring it back to the host.
    gInitArray<<<dim3(num_of_blocks), dim3(threads_per_block)>>>(a, 1.0);
    cudaDeviceSynchronize();
    cudaMemcpy(d, a, size_array * sizeof(float), cudaMemcpyDeviceToHost);
    printf("Blocks: %d\nThreads: %d\n", num_of_blocks, threads_per_block);
    printf("Size array: %d\n", size_array);
    free(d);
    cudaFree(a);
    return 0;
}
|
18,884 | #include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
// Abort with a message if the most recent CUDA call left an error behind.
void check_error (const char* message) {
    cudaError_t status = cudaGetLastError ();
    if (status == cudaSuccess)
        return;
    printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (status));
    exit(-1);
}
__global__ void __launch_bounds__ (128,2) sw4_a (double * uacc_0, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * __restrict__ strx, double * __restrict__ stry, double * __restrict__ strz, int N) {
//Determing the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
// Assumptions
int a1 = 1;
double h = 3.7;
double cof = 1e0 / ( h * h);
double (*u_0)[304][304] = (double (*)[304][304])u_in_0;
double (*u_1)[304][304] = (double (*)[304][304])u_in_1;
double (*u_2)[304][304] = (double (*)[304][304])u_in_2;
double (*mu)[304][304] = (double (*)[304][304])mu_in;
double (*la)[304][304] = (double (*)[304][304])la_in;
/* Total 687 flops */
if (i>=2 & j>=2 & i<=N-3 & j<=N-3) {
#pragma unroll 3
for (int k=2; k<=N-3; k+=2) {
double a_mux1;
double _t_0_;
double a_mux2;
double _t_1_;
double a_mux3;
double _t_2_;
double a_mux4;
double _t_3_;
double a_muy1;
double _t_4_;
double a_muy2;
double _t_5_;
double a_muy3;
double _t_6_;
double a_muy4;
double _t_7_;
double a_muz1;
double _t_8_;
double a_muz2;
double _t_9_;
double a_muz3;
double _t_10_;
double a_muz4;
double _t_11_;
double _t_14_;
double _t_16_;
double _t_15_;
double _t_13_;
double _t_17_;
double _t_19_;
double _t_18_;
double _t_20_;
double _t_22_;
double _t_21_;
double _t_23_;
double _t_25_;
double _t_24_;
double _t_12_;
double _t_27_;
double _t_26_;
double _t_28_;
double _t_29_;
double _t_30_;
double _t_32_;
double _t_31_;
double _t_33_;
double _t_34_;
double _t_35_;
double r1;
double _t_39_;
double _t_37_;
double _t_40_;
double _t_41_;
double _t_38_;
double _t_43_;
double _t_44_;
double _t_42_;
double _t_46_;
double _t_47_;
double _t_45_;
double _t_48_;
double _t_49_;
double _t_36_;
double _t_52_;
double _t_50_;
double _t_53_;
double _t_54_;
double _t_51_;
double _t_56_;
double _t_57_;
double _t_55_;
double _t_59_;
double _t_60_;
double _t_58_;
double _t_61_;
double _t_62_;
double _t_65_;
double _t_63_;
double _t_66_;
double _t_67_;
double _t_64_;
double _t_69_;
double _t_70_;
double _t_68_;
double _t_72_;
double _t_73_;
double _t_71_;
double _t_74_;
double _t_75_;
double _t_78_;
double _t_76_;
double _t_79_;
double _t_80_;
double _t_77_;
double _t_82_;
double _t_83_;
double _t_81_;
double _t_85_;
double _t_86_;
double _t_84_;
double _t_87_;
double _t_88_;
double uacc_0kc0jc0ic0 = uacc_0[k*N*N+j*N+i];
double b_mux1;
double _t_89_;
double b_mux2;
double _t_90_;
double b_mux3;
double _t_91_;
double b_mux4;
double _t_92_;
double b_muy1;
double _t_93_;
double b_muy2;
double _t_94_;
double b_muy3;
double _t_95_;
double b_muy4;
double _t_96_;
double b_muz1;
double _t_97_;
double b_muz2;
double _t_98_;
double b_muz3;
double _t_99_;
double b_muz4;
double _t_100_;
double _t_103_;
double _t_105_;
double _t_104_;
double _t_102_;
double _t_106_;
double _t_108_;
double _t_107_;
double _t_109_;
double _t_111_;
double _t_110_;
double _t_112_;
double _t_114_;
double _t_113_;
double _t_101_;
double _t_116_;
double _t_115_;
double _t_117_;
double _t_118_;
double _t_119_;
double _t_121_;
double _t_120_;
double _t_122_;
double _t_123_;
double _t_124_;
double r2;
double _t_128_;
double _t_126_;
double _t_129_;
double _t_130_;
double _t_127_;
double _t_132_;
double _t_133_;
double _t_131_;
double _t_135_;
double _t_136_;
double _t_134_;
double _t_137_;
double _t_138_;
double _t_125_;
double _t_141_;
double _t_139_;
double _t_142_;
double _t_143_;
double _t_140_;
double _t_145_;
double _t_146_;
double _t_144_;
double _t_148_;
double _t_149_;
double _t_147_;
double _t_150_;
double _t_151_;
double _t_154_;
double _t_152_;
double _t_155_;
double _t_156_;
double _t_153_;
double _t_158_;
double _t_159_;
double _t_157_;
double _t_161_;
double _t_162_;
double _t_160_;
double _t_163_;
double _t_164_;
double _t_167_;
double _t_165_;
double _t_168_;
double _t_169_;
double _t_166_;
double _t_171_;
double _t_172_;
double _t_170_;
double _t_174_;
double _t_175_;
double _t_173_;
double _t_176_;
double _t_177_;
double uacc_0kp1jc0ic0 = uacc_0[(k+1)*N*N+j*N+i];
a_mux1 = mu[k][j][i-1] * strx[i-1];
_t_0_ = mu[k][j][i] * strx[i];
_t_0_ += mu[k][j][i-2] * strx[i-2];
a_mux1 -= 3.0 / 4.0 * _t_0_;
a_mux2 = mu[k][j][i-2] * strx[i-2];
a_mux2 += mu[k][j][i+1] * strx[i+1];
_t_1_ = mu[k][j][i] * strx[i];
_t_1_ += mu[k][j][i-1] * strx[i-1];
a_mux2 += 3.0 * _t_1_;
a_mux3 = mu[k][j][i-1] * strx[i-1];
a_mux3 += mu[k][j][i+2] * strx[i+2];
_t_2_ = mu[k][j][i+1] * strx[i+1];
_t_2_ += mu[k][j][i] * strx[i];
a_mux3 += 3.0 * _t_2_;
a_mux4 = mu[k][j][i+1] * strx[i+1];
_t_3_ = mu[k][j][i] * strx[i];
_t_3_ += mu[k][j][i+2] * strx[i+2];
a_mux4 -= 3.0 / 4.0 * _t_3_;
a_muy1 = mu[k][j-1][i] * stry[j-1];
_t_4_ = mu[k][j][i] * stry[j];
_t_4_ += mu[k][j-2][i] * stry[j-2];
a_muy1 -= 3.0 / 4.0 * _t_4_;
a_muy2 = mu[k][j-2][i] * stry[j-2];
a_muy2 += mu[k][j+1][i] * stry[j+1];
_t_5_ = mu[k][j][i] * stry[j];
_t_5_ += mu[k][j-1][i] * stry[j-1];
a_muy2 += 3.0 * _t_5_;
a_muy3 = mu[k][j-1][i] * stry[j-1];
a_muy3 += mu[k][j+2][i] * stry[j+2];
_t_6_ = mu[k][j+1][i] * stry[j+1];
_t_6_ += mu[k][j][i] * stry[j];
a_muy3 += 3.0 * _t_6_;
a_muy4 = mu[k][j+1][i] * stry[j+1];
_t_7_ = mu[k][j][i] * stry[j];
_t_7_ += mu[k][j+2][i] * stry[j+2];
a_muy4 -= 3.0 / 4.0 * _t_7_;
a_muz1 = mu[k-1][j][i] * strz[k-1];
_t_8_ = mu[k][j][i] * strz[k];
_t_8_ += mu[k-2][j][i] * strz[k-2];
a_muz1 -= 3.0 / 4.0 * _t_8_;
a_muz2 = mu[k-2][j][i] * strz[k-2];
a_muz2 += mu[k+1][j][i] * strz[k+1];
_t_9_ = mu[k][j][i] * strz[k];
_t_9_ += mu[k-1][j][i] * strz[k-1];
a_muz2 += 3.0 * _t_9_;
a_muz3 = mu[k-1][j][i] * strz[k-1];
a_muz3 += mu[k+2][j][i] * strz[k+2];
_t_10_ = mu[k+1][j][i] * strz[k+1];
_t_10_ += mu[k][j][i] * strz[k];
a_muz3 += 3.0 * _t_10_;
a_muz4 = mu[k+1][j][i] * strz[k+1];
_t_11_ = mu[k][j][i] * strz[k];
_t_11_ += mu[k+2][j][i] * strz[k+2];
a_muz4 -= 3.0 / 4.0 * _t_11_;
_t_14_ = 2.0 * a_mux1;
_t_14_ += la[k][j][i-1] * strx[i-1];
_t_16_ = la[k][j][i] * strx[i];
_t_16_ += la[k][j][i-2] * strx[i-2];
_t_14_ -= 3.0 / 4.0 * _t_16_;
_t_15_ = u_0[k][j][i-2];
_t_15_ -= u_0[k][j][i];
_t_13_ = _t_14_ * _t_15_;
_t_17_ = 2.0 * a_mux2;
_t_17_ += la[k][j][i-2] * strx[i-2];
_t_17_ += la[k][j][i+1] * strx[i+1];
_t_19_ = la[k][j][i] * strx[i];
_t_19_ += la[k][j][i-1] * strx[i-1];
_t_17_ += 3.0 * _t_19_;
_t_18_ = u_0[k][j][i-1];
_t_18_ -= u_0[k][j][i];
_t_13_ += _t_17_ * _t_18_;
_t_20_ = 2.0 * a_mux3;
_t_20_ += la[k][j][i-1] * strx[i-1];
_t_20_ += la[k][j][i+2] * strx[i+2];
_t_22_ = la[k][j][i+1] * strx[i+1];
_t_22_ += la[k][j][i] * strx[i];
_t_20_ += 3.0 * _t_22_;
_t_21_ = u_0[k][j][i+1];
_t_21_ -= u_0[k][j][i];
_t_13_ += _t_20_ * _t_21_;
_t_23_ = 2.0 * a_mux4;
_t_23_ += la[k][j][i+1] * strx[i+1];
_t_25_ = la[k][j][i] * strx[i];
_t_25_ += la[k][j][i+2] * strx[i+2];
_t_23_ -= 3.0 / 4.0 * _t_25_;
_t_24_ = u_0[k][j][i+2];
_t_24_ -= u_0[k][j][i];
_t_13_ += _t_23_ * _t_24_;
_t_12_ = strx[i] * _t_13_;
_t_27_ = u_0[k][j-2][i];
_t_27_ -= u_0[k][j][i];
_t_26_ = a_muy1 * _t_27_;
_t_28_ = u_0[k][j-1][i];
_t_28_ -= u_0[k][j][i];
_t_26_ += a_muy2 * _t_28_;
_t_29_ = u_0[k][j+1][i];
_t_29_ -= u_0[k][j][i];
_t_26_ += a_muy3 * _t_29_;
_t_30_ = u_0[k][j+2][i];
_t_30_ -= u_0[k][j][i];
_t_26_ += a_muy4 * _t_30_;
_t_12_ += stry[j] * _t_26_;
_t_32_ = u_0[k-2][j][i];
_t_32_ -= u_0[k][j][i];
_t_31_ = a_muz1 * _t_32_;
_t_33_ = u_0[k-1][j][i];
_t_33_ -= u_0[k][j][i];
_t_31_ += a_muz2 * _t_33_;
_t_34_ = u_0[k+1][j][i];
_t_34_ -= u_0[k][j][i];
_t_31_ += a_muz3 * _t_34_;
_t_35_ = u_0[k+2][j][i];
_t_35_ -= u_0[k][j][i];
_t_31_ += a_muz4 * _t_35_;
_t_12_ += strz[k] * _t_31_;
r1 = 1.0 / 6.0 * _t_12_;
_t_39_ = strx[i] * stry[j];
_t_37_ = _t_39_ * 1.0 / 144.0;
_t_40_ = u_1[k][j-2][i-2];
_t_40_ -= u_1[k][j+2][i-2];
_t_41_ = -u_1[k][j-1][i-2];
_t_41_ += u_1[k][j+1][i-2];
_t_40_ += 8.0 * _t_41_;
_t_38_ = la[k][j][i-2] * _t_40_;
_t_43_ = u_1[k][j-2][i-1];
_t_43_ -= u_1[k][j+2][i-1];
_t_44_ = -u_1[k][j-1][i-1];
_t_44_ += u_1[k][j+1][i-1];
_t_43_ += 8.0 * _t_44_;
_t_42_ = la[k][j][i-1] * _t_43_;
_t_38_ -= 8.0 * _t_42_;
_t_46_ = u_1[k][j-2][i+1];
_t_46_ -= u_1[k][j+2][i+1];
_t_47_ = -u_1[k][j-1][i+1];
_t_47_ += u_1[k][j+1][i+1];
_t_46_ += 8.0 * _t_47_;
_t_45_ = la[k][j][i+1] * _t_46_;
_t_38_ += 8.0 * _t_45_;
_t_48_ = u_1[k][j-2][i+2];
_t_48_ -= u_1[k][j+2][i+2];
_t_49_ = -u_1[k][j-1][i+2];
_t_49_ += u_1[k][j+1][i+2];
_t_48_ += 8.0 * _t_49_;
_t_38_ -= la[k][j][i+2] * _t_48_;
_t_36_ = _t_37_ * _t_38_;
_t_52_ = strx[i] * strz[k];
_t_50_ = _t_52_ * 1.0 / 144.0;
_t_53_ = u_2[k-2][j][i-2];
_t_53_ -= u_2[k+2][j][i-2];
_t_54_ = -u_2[k-1][j][i-2];
_t_54_ += u_2[k+1][j][i-2];
_t_53_ += 8.0 * _t_54_;
_t_51_ = la[k][j][i-2] * _t_53_;
_t_56_ = u_2[k-2][j][i-1];
_t_56_ -= u_2[k+2][j][i-1];
_t_57_ = -u_2[k-1][j][i-1];
_t_57_ += u_2[k+1][j][i-1];
_t_56_ += 8.0 * _t_57_;
_t_55_ = la[k][j][i-1] * _t_56_;
_t_51_ -= 8.0 * _t_55_;
_t_59_ = u_2[k-2][j][i+1];
_t_59_ -= u_2[k+2][j][i+1];
_t_60_ = -u_2[k-1][j][i+1];
_t_60_ += u_2[k+1][j][i+1];
_t_59_ += 8.0 * _t_60_;
_t_58_ = la[k][j][i+1] * _t_59_;
_t_51_ += 8.0 * _t_58_;
_t_61_ = u_2[k-2][j][i+2];
_t_61_ -= u_2[k+2][j][i+2];
_t_62_ = -u_2[k-1][j][i+2];
_t_62_ += u_2[k+1][j][i+2];
_t_61_ += 8.0 * _t_62_;
_t_51_ -= la[k][j][i+2] * _t_61_;
_t_36_ += _t_50_ * _t_51_;
_t_65_ = strx[i] * stry[j];
_t_63_ = _t_65_ * 1.0 / 144.0;
_t_66_ = u_1[k][j-2][i-2];
_t_66_ -= u_1[k][j-2][i+2];
_t_67_ = -u_1[k][j-2][i-1];
_t_67_ += u_1[k][j-2][i+1];
_t_66_ += 8.0 * _t_67_;
_t_64_ = mu[k][j-2][i] * _t_66_;
_t_69_ = u_1[k][j-1][i-2];
_t_69_ -= u_1[k][j-1][i+2];
_t_70_ = -u_1[k][j-1][i-1];
_t_70_ += u_1[k][j-1][i+1];
_t_69_ += 8.0 * _t_70_;
_t_68_ = mu[k][j-1][i] * _t_69_;
_t_64_ -= 8.0 * _t_68_;
_t_72_ = u_1[k][j+1][i-2];
_t_72_ -= u_1[k][j+1][i+2];
_t_73_ = -u_1[k][j+1][i-1];
_t_73_ += u_1[k][j+1][i+1];
_t_72_ += 8.0 * _t_73_;
_t_71_ = mu[k][j+1][i] * _t_72_;
_t_64_ += 8.0 * _t_71_;
_t_74_ = u_1[k][j+2][i-2];
_t_74_ -= u_1[k][j+2][i+2];
_t_75_ = -u_1[k][j+2][i-1];
_t_75_ += u_1[k][j+2][i+1];
_t_74_ += 8.0 * _t_75_;
_t_64_ -= mu[k][j+2][i] * _t_74_;
_t_36_ += _t_63_ * _t_64_;
_t_78_ = strx[i] * strz[k];
_t_76_ = _t_78_ * 1.0 / 144.0;
_t_79_ = u_2[k-2][j][i-2];
_t_79_ -= u_2[k-2][j][i+2];
_t_80_ = -u_2[k-2][j][i-1];
_t_80_ += u_2[k-2][j][i+1];
_t_79_ += 8.0 * _t_80_;
_t_77_ = mu[k-2][j][i] * _t_79_;
_t_82_ = u_2[k-1][j][i-2];
_t_82_ -= u_2[k-1][j][i+2];
_t_83_ = -u_2[k-1][j][i-1];
_t_83_ += u_2[k-1][j][i+1];
_t_82_ += 8.0 * _t_83_;
_t_81_ = mu[k-1][j][i] * _t_82_;
_t_77_ -= 8.0 * _t_81_;
_t_85_ = u_2[k+1][j][i-2];
_t_85_ -= u_2[k+1][j][i+2];
_t_86_ = -u_2[k+1][j][i-1];
_t_86_ += u_2[k+1][j][i+1];
_t_85_ += 8.0 * _t_86_;
_t_84_ = mu[k+1][j][i] * _t_85_;
_t_77_ += 8.0 * _t_84_;
_t_87_ = u_2[k+2][j][i-2];
_t_87_ -= u_2[k+2][j][i+2];
_t_88_ = -u_2[k+2][j][i-1];
_t_88_ += u_2[k+2][j][i+1];
_t_87_ += 8.0 * _t_88_;
_t_77_ -= mu[k+2][j][i] * _t_87_;
_t_36_ += _t_76_ * _t_77_;
r1 += _t_36_;
uacc_0kc0jc0ic0 = a1 * uacc_0kc0jc0ic0;
uacc_0kc0jc0ic0 += cof * r1;
uacc_0[k*N*N+j*N+i] = uacc_0kc0jc0ic0;
b_mux1 = mu[k+1][j][i-1] * strx[i-1];
_t_89_ = mu[k+1][j][i] * strx[i];
_t_89_ += mu[k+1][j][i-2] * strx[i-2];
b_mux1 -= 3.0 / 4.0 * _t_89_;
b_mux2 = mu[k+1][j][i-2] * strx[i-2];
b_mux2 += mu[k+1][j][i+1] * strx[i+1];
_t_90_ = mu[k+1][j][i] * strx[i];
_t_90_ += mu[k+1][j][i-1] * strx[i-1];
b_mux2 += 3.0 * _t_90_;
b_mux3 = mu[k+1][j][i-1] * strx[i-1];
b_mux3 += mu[k+1][j][i+2] * strx[i+2];
_t_91_ = mu[k+1][j][i+1] * strx[i+1];
_t_91_ += mu[k+1][j][i] * strx[i];
b_mux3 += 3.0 * _t_91_;
b_mux4 = mu[k+1][j][i+1] * strx[i+1];
_t_92_ = mu[k+1][j][i] * strx[i];
_t_92_ += mu[k+1][j][i+2] * strx[i+2];
b_mux4 -= 3.0 / 4.0 * _t_92_;
b_muy1 = mu[k+1][j-1][i] * stry[j-1];
_t_93_ = mu[k+1][j][i] * stry[j];
_t_93_ += mu[k+1][j-2][i] * stry[j-2];
b_muy1 -= 3.0 / 4.0 * _t_93_;
b_muy2 = mu[k+1][j-2][i] * stry[j-2];
b_muy2 += mu[k+1][j+1][i] * stry[j+1];
_t_94_ = mu[k+1][j][i] * stry[j];
_t_94_ += mu[k+1][j-1][i] * stry[j-1];
b_muy2 += 3.0 * _t_94_;
b_muy3 = mu[k+1][j-1][i] * stry[j-1];
b_muy3 += mu[k+1][j+2][i] * stry[j+2];
_t_95_ = mu[k+1][j+1][i] * stry[j+1];
_t_95_ += mu[k+1][j][i] * stry[j];
b_muy3 += 3.0 * _t_95_;
b_muy4 = mu[k+1][j+1][i] * stry[j+1];
_t_96_ = mu[k+1][j][i] * stry[j];
_t_96_ += mu[k+1][j+2][i] * stry[j+2];
b_muy4 -= 3.0 / 4.0 * _t_96_;
b_muz1 = mu[k][j][i] * strz[k];
_t_97_ = mu[k+1][j][i] * strz[k+1];
_t_97_ += mu[k-1][j][i] * strz[k-1];
b_muz1 -= 3.0 / 4.0 * _t_97_;
b_muz2 = mu[k-1][j][i] * strz[k-1];
b_muz2 += mu[k+2][j][i] * strz[k+2];
_t_98_ = mu[k+1][j][i] * strz[k+1];
_t_98_ += mu[k][j][i] * strz[k];
b_muz2 += 3.0 * _t_98_;
b_muz3 = mu[k][j][i] * strz[k];
b_muz3 += mu[k+3][j][i] * strz[k+3];
_t_99_ = mu[k+2][j][i] * strz[k+2];
_t_99_ += mu[k+1][j][i] * strz[k+1];
b_muz3 += 3.0 * _t_99_;
b_muz4 = mu[k+2][j][i] * strz[k+2];
_t_100_ = mu[k+1][j][i] * strz[k+1];
_t_100_ += mu[k+3][j][i] * strz[k+3];
b_muz4 -= 3.0 / 4.0 * _t_100_;
_t_103_ = 2.0 * b_mux1;
_t_103_ += la[k+1][j][i-1] * strx[i-1];
_t_105_ = la[k+1][j][i] * strx[i];
_t_105_ += la[k+1][j][i-2] * strx[i-2];
_t_103_ -= 3.0 / 4.0 * _t_105_;
_t_104_ = u_0[k+1][j][i-2];
_t_104_ -= u_0[k+1][j][i];
_t_102_ = _t_103_ * _t_104_;
_t_106_ = 2.0 * b_mux2;
_t_106_ += la[k+1][j][i-2] * strx[i-2];
_t_106_ += la[k+1][j][i+1] * strx[i+1];
_t_108_ = la[k+1][j][i] * strx[i];
_t_108_ += la[k+1][j][i-1] * strx[i-1];
_t_106_ += 3.0 * _t_108_;
_t_107_ = u_0[k+1][j][i-1];
_t_107_ -= u_0[k+1][j][i];
_t_102_ += _t_106_ * _t_107_;
_t_109_ = 2.0 * b_mux3;
_t_109_ += la[k+1][j][i-1] * strx[i-1];
_t_109_ += la[k+1][j][i+2] * strx[i+2];
_t_111_ = la[k+1][j][i+1] * strx[i+1];
_t_111_ += la[k+1][j][i] * strx[i];
_t_109_ += 3.0 * _t_111_;
_t_110_ = u_0[k+1][j][i+1];
_t_110_ -= u_0[k+1][j][i];
_t_102_ += _t_109_ * _t_110_;
_t_112_ = 2.0 * b_mux4;
_t_112_ += la[k+1][j][i+1] * strx[i+1];
_t_114_ = la[k+1][j][i] * strx[i];
_t_114_ += la[k+1][j][i+2] * strx[i+2];
_t_112_ -= 3.0 / 4.0 * _t_114_;
_t_113_ = u_0[k+1][j][i+2];
_t_113_ -= u_0[k+1][j][i];
_t_102_ += _t_112_ * _t_113_;
_t_101_ = strx[i] * _t_102_;
_t_116_ = u_0[k+1][j-2][i];
_t_116_ -= u_0[k+1][j][i];
_t_115_ = b_muy1 * _t_116_;
_t_117_ = u_0[k+1][j-1][i];
_t_117_ -= u_0[k+1][j][i];
_t_115_ += b_muy2 * _t_117_;
_t_118_ = u_0[k+1][j+1][i];
_t_118_ -= u_0[k+1][j][i];
_t_115_ += b_muy3 * _t_118_;
_t_119_ = u_0[k+1][j+2][i];
_t_119_ -= u_0[k+1][j][i];
_t_115_ += b_muy4 * _t_119_;
_t_101_ += stry[j] * _t_115_;
_t_121_ = u_0[k-1][j][i];
_t_121_ -= u_0[k+1][j][i];
_t_120_ = b_muz1 * _t_121_;
_t_122_ = u_0[k][j][i];
_t_122_ -= u_0[k+1][j][i];
_t_120_ += b_muz2 * _t_122_;
_t_123_ = u_0[k+2][j][i];
_t_123_ -= u_0[k+1][j][i];
_t_120_ += b_muz3 * _t_123_;
_t_124_ = u_0[k+3][j][i];
_t_124_ -= u_0[k+1][j][i];
_t_120_ += b_muz4 * _t_124_;
_t_101_ += strz[k+1] * _t_120_;
r2 = 1.0 / 6.0 * _t_101_;
_t_128_ = strx[i] * stry[j];
_t_126_ = _t_128_ * 1.0 / 144.0;
_t_129_ = u_1[k+1][j-2][i-2];
_t_129_ -= u_1[k+1][j+2][i-2];
_t_130_ = -u_1[k+1][j-1][i-2];
_t_130_ += u_1[k+1][j+1][i-2];
_t_129_ += 8.0 * _t_130_;
_t_127_ = la[k+1][j][i-2] * _t_129_;
_t_132_ = u_1[k+1][j-2][i-1];
_t_132_ -= u_1[k+1][j+2][i-1];
_t_133_ = -u_1[k+1][j-1][i-1];
_t_133_ += u_1[k+1][j+1][i-1];
_t_132_ += 8.0 * _t_133_;
_t_131_ = la[k+1][j][i-1] * _t_132_;
_t_127_ -= 8.0 * _t_131_;
_t_135_ = u_1[k+1][j-2][i+1];
_t_135_ -= u_1[k+1][j+2][i+1];
_t_136_ = -u_1[k+1][j-1][i+1];
_t_136_ += u_1[k+1][j+1][i+1];
_t_135_ += 8.0 * _t_136_;
_t_134_ = la[k+1][j][i+1] * _t_135_;
_t_127_ += 8.0 * _t_134_;
_t_137_ = u_1[k+1][j-2][i+2];
_t_137_ -= u_1[k+1][j+2][i+2];
_t_138_ = -u_1[k+1][j-1][i+2];
_t_138_ += u_1[k+1][j+1][i+2];
_t_137_ += 8.0 * _t_138_;
_t_127_ -= la[k+1][j][i+2] * _t_137_;
_t_125_ = _t_126_ * _t_127_;
_t_141_ = strx[i] * strz[k+1];
_t_139_ = _t_141_ * 1.0 / 144.0;
_t_142_ = u_2[k-1][j][i-2];
_t_142_ -= u_2[k+3][j][i-2];
_t_143_ = -u_2[k][j][i-2];
_t_143_ += u_2[k+2][j][i-2];
_t_142_ += 8.0 * _t_143_;
_t_140_ = la[k+1][j][i-2] * _t_142_;
_t_145_ = u_2[k-1][j][i-1];
_t_145_ -= u_2[k+3][j][i-1];
_t_146_ = -u_2[k][j][i-1];
_t_146_ += u_2[k+2][j][i-1];
_t_145_ += 8.0 * _t_146_;
_t_144_ = la[k+1][j][i-1] * _t_145_;
_t_140_ -= 8.0 * _t_144_;
_t_148_ = u_2[k-1][j][i+1];
_t_148_ -= u_2[k+3][j][i+1];
_t_149_ = -u_2[k][j][i+1];
_t_149_ += u_2[k+2][j][i+1];
_t_148_ += 8.0 * _t_149_;
_t_147_ = la[k+1][j][i+1] * _t_148_;
_t_140_ += 8.0 * _t_147_;
_t_150_ = u_2[k-1][j][i+2];
_t_150_ -= u_2[k+3][j][i+2];
_t_151_ = -u_2[k][j][i+2];
_t_151_ += u_2[k+2][j][i+2];
_t_150_ += 8.0 * _t_151_;
_t_140_ -= la[k+1][j][i+2] * _t_150_;
_t_125_ += _t_139_ * _t_140_;
_t_154_ = strx[i] * stry[j];
_t_152_ = _t_154_ * 1.0 / 144.0;
_t_155_ = u_1[k+1][j-2][i-2];
_t_155_ -= u_1[k+1][j-2][i+2];
_t_156_ = -u_1[k+1][j-2][i-1];
_t_156_ += u_1[k+1][j-2][i+1];
_t_155_ += 8.0 * _t_156_;
_t_153_ = mu[k+1][j-2][i] * _t_155_;
_t_158_ = u_1[k+1][j-1][i-2];
_t_158_ -= u_1[k+1][j-1][i+2];
_t_159_ = -u_1[k+1][j-1][i-1];
_t_159_ += u_1[k+1][j-1][i+1];
_t_158_ += 8.0 * _t_159_;
_t_157_ = mu[k+1][j-1][i] * _t_158_;
_t_153_ -= 8.0 * _t_157_;
_t_161_ = u_1[k+1][j+1][i-2];
_t_161_ -= u_1[k+1][j+1][i+2];
_t_162_ = -u_1[k+1][j+1][i-1];
_t_162_ += u_1[k+1][j+1][i+1];
_t_161_ += 8.0 * _t_162_;
_t_160_ = mu[k+1][j+1][i] * _t_161_;
_t_153_ += 8.0 * _t_160_;
_t_163_ = u_1[k+1][j+2][i-2];
_t_163_ -= u_1[k+1][j+2][i+2];
_t_164_ = -u_1[k+1][j+2][i-1];
_t_164_ += u_1[k+1][j+2][i+1];
_t_163_ += 8.0 * _t_164_;
_t_153_ -= mu[k+1][j+2][i] * _t_163_;
_t_125_ += _t_152_ * _t_153_;
_t_167_ = strx[i] * strz[k+1];
_t_165_ = _t_167_ * 1.0 / 144.0;
_t_168_ = u_2[k-1][j][i-2];
_t_168_ -= u_2[k-1][j][i+2];
_t_169_ = -u_2[k-1][j][i-1];
_t_169_ += u_2[k-1][j][i+1];
_t_168_ += 8.0 * _t_169_;
_t_166_ = mu[k-1][j][i] * _t_168_;
_t_171_ = u_2[k][j][i-2];
_t_171_ -= u_2[k][j][i+2];
_t_172_ = -u_2[k][j][i-1];
_t_172_ += u_2[k][j][i+1];
_t_171_ += 8.0 * _t_172_;
_t_170_ = mu[k][j][i] * _t_171_;
_t_166_ -= 8.0 * _t_170_;
_t_174_ = u_2[k+2][j][i-2];
_t_174_ -= u_2[k+2][j][i+2];
_t_175_ = -u_2[k+2][j][i-1];
_t_175_ += u_2[k+2][j][i+1];
_t_174_ += 8.0 * _t_175_;
_t_173_ = mu[k+2][j][i] * _t_174_;
_t_166_ += 8.0 * _t_173_;
_t_176_ = u_2[k+3][j][i-2];
_t_176_ -= u_2[k+3][j][i+2];
_t_177_ = -u_2[k+3][j][i-1];
_t_177_ += u_2[k+3][j][i+1];
_t_176_ += 8.0 * _t_177_;
_t_166_ -= mu[k+3][j][i] * _t_176_;
_t_125_ += _t_165_ * _t_166_;
r2 += _t_125_;
uacc_0kp1jc0ic0 = a1 * uacc_0kp1jc0ic0;
uacc_0kp1jc0ic0 += cof * r2;
uacc_0[(k+1)*N*N+j*N+i] = uacc_0kp1jc0ic0;
}
}
}
/*
 * sw4_b: variable-coefficient 4th-order stencil update for the y-component
 * of the acceleration field (uacc_1).
 *
 * Launch layout: 2-D grid over the (i, j) plane — one thread per (i, j)
 * interior point; each thread walks the k dimension itself. The loop below
 * advances k by 2 and updates two z-planes (k and k+1) per trip, so the
 * full uacc_1 volume is covered by the k-loop, not by the launch grid.
 * __launch_bounds__(128, 2) caps the block at 128 threads and asks for at
 * least 2 resident blocks per SM (limits register allocation).
 *
 * Inputs u_in_0/1/2 (displacement components), mu_in, la_in (material
 * coefficients) are reinterpreted as fixed-shape [304][304] 3-D arrays,
 * while uacc_1 is indexed with the runtime stride N. NOTE(review): these
 * two indexing schemes only agree when N == 304 — confirm callers always
 * pass N = 304, otherwise the fixed-pitch casts read the wrong elements.
 *
 * strx/stry/strz are 1-D grid-stretching factors indexed by i/j/k.
 * The stencil reads a +/-2 halo in every direction (plus k+3 for the
 * second plane), so only points with 2 <= i,j <= N-3 are updated.
 */
__global__ void __launch_bounds__ (128,2) sw4_b (double * uacc_1, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * __restrict__ strx, double * __restrict__ stry, double * __restrict__ strz, int N) {
//Determining the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
// Assumptions
int a1 = 1;
double h = 3.7;                  // grid spacing; cof = 1/h^2 scales the stencil sum
double cof = 1e0 / ( h * h);
// Reinterpret flat input buffers as [k][j][i] volumes with a fixed 304 pitch
// (see the N == 304 note in the header comment).
double (*u_0)[304][304] = (double (*)[304][304])u_in_0;
double (*u_1)[304][304] = (double (*)[304][304])u_in_1;
double (*u_2)[304][304] = (double (*)[304][304])u_in_2;
double (*mu)[304][304] = (double (*)[304][304])mu_in;
double (*la)[304][304] = (double (*)[304][304])la_in;
/* Total 687 flops */
// Interior guard: bitwise & instead of && is intentional here — the
// operands are 0/1 ints, the result is identical, and it avoids branches.
if (i>=2 & j>=2 & i<=N-3 & j<=N-3) {
#pragma unroll 2
// Two z-planes (k and k+1) are computed per iteration; the a_* / _t_0_..88
// temporaries serve plane k, the b_* / _t_89_..177 temporaries plane k+1.
for (int k=2; k<=N-3; k+=2) {
// ---- scratch declarations for plane k ----
double a_mux1;
double _t_0_;
double a_mux2;
double _t_1_;
double a_mux3;
double _t_2_;
double a_mux4;
double _t_3_;
double a_muy1;
double _t_4_;
double a_muy2;
double _t_5_;
double a_muy3;
double _t_6_;
double a_muy4;
double _t_7_;
double a_muz1;
double _t_8_;
double a_muz2;
double _t_9_;
double a_muz3;
double _t_10_;
double a_muz4;
double _t_11_;
double _t_14_;
double _t_13_;
double _t_15_;
double _t_16_;
double _t_17_;
double _t_12_;
double _t_19_;
double _t_21_;
double _t_20_;
double _t_18_;
double _t_22_;
double _t_24_;
double _t_23_;
double _t_25_;
double _t_27_;
double _t_26_;
double _t_28_;
double _t_30_;
double _t_29_;
double _t_32_;
double _t_31_;
double _t_33_;
double _t_34_;
double _t_35_;
double r2;
double _t_39_;
double _t_37_;
double _t_40_;
double _t_41_;
double _t_38_;
double _t_43_;
double _t_44_;
double _t_42_;
double _t_46_;
double _t_47_;
double _t_45_;
double _t_48_;
double _t_49_;
double _t_36_;
double _t_52_;
double _t_50_;
double _t_53_;
double _t_54_;
double _t_51_;
double _t_56_;
double _t_57_;
double _t_55_;
double _t_59_;
double _t_60_;
double _t_58_;
double _t_61_;
double _t_62_;
double _t_65_;
double _t_63_;
double _t_66_;
double _t_67_;
double _t_64_;
double _t_69_;
double _t_70_;
double _t_68_;
double _t_72_;
double _t_73_;
double _t_71_;
double _t_74_;
double _t_75_;
double _t_78_;
double _t_76_;
double _t_79_;
double _t_80_;
double _t_77_;
double _t_82_;
double _t_83_;
double _t_81_;
double _t_85_;
double _t_86_;
double _t_84_;
double _t_87_;
double _t_88_;
// Load the current output value for plane k (scaled by a1 before the add).
double uacc_1kc0jc0ic0 = uacc_1[k*N*N+j*N+i];
// ---- scratch declarations for plane k+1 ----
double b_mux1;
double _t_89_;
double b_mux2;
double _t_90_;
double b_mux3;
double _t_91_;
double b_mux4;
double _t_92_;
double b_muy1;
double _t_93_;
double b_muy2;
double _t_94_;
double b_muy3;
double _t_95_;
double b_muy4;
double _t_96_;
double b_muz1;
double _t_97_;
double b_muz2;
double _t_98_;
double b_muz3;
double _t_99_;
double b_muz4;
double _t_100_;
double _t_103_;
double _t_102_;
double _t_104_;
double _t_105_;
double _t_106_;
double _t_101_;
double _t_108_;
double _t_110_;
double _t_109_;
double _t_107_;
double _t_111_;
double _t_113_;
double _t_112_;
double _t_114_;
double _t_116_;
double _t_115_;
double _t_117_;
double _t_119_;
double _t_118_;
double _t_121_;
double _t_120_;
double _t_122_;
double _t_123_;
double _t_124_;
double r3;
double _t_128_;
double _t_126_;
double _t_129_;
double _t_130_;
double _t_127_;
double _t_132_;
double _t_133_;
double _t_131_;
double _t_135_;
double _t_136_;
double _t_134_;
double _t_137_;
double _t_138_;
double _t_125_;
double _t_141_;
double _t_139_;
double _t_142_;
double _t_143_;
double _t_140_;
double _t_145_;
double _t_146_;
double _t_144_;
double _t_148_;
double _t_149_;
double _t_147_;
double _t_150_;
double _t_151_;
double _t_154_;
double _t_152_;
double _t_155_;
double _t_156_;
double _t_153_;
double _t_158_;
double _t_159_;
double _t_157_;
double _t_161_;
double _t_162_;
double _t_160_;
double _t_163_;
double _t_164_;
double _t_167_;
double _t_165_;
double _t_168_;
double _t_169_;
double _t_166_;
double _t_171_;
double _t_172_;
double _t_170_;
double _t_174_;
double _t_175_;
double _t_173_;
double _t_176_;
double _t_177_;
// Load the current output value for plane k+1.
double uacc_1kp1jc0ic0 = uacc_1[(k+1)*N*N+j*N+i];
// ---- plane k: averaged material coefficients ----
// a_mux1..4 / a_muy1..4 / a_muz1..4 are the four stretched, averaged
// mu weights of the 4th-order variable-coefficient second-derivative
// stencil along x, y and z respectively (pattern: neighbor value
// combined with -3/4 or +3 times adjacent pairs).
a_mux1 = mu[k][j][i-1] * strx[i-1];
_t_0_ = mu[k][j][i] * strx[i];
_t_0_ += mu[k][j][i-2] * strx[i-2];
a_mux1 -= 3.0 / 4.0 * _t_0_;
a_mux2 = mu[k][j][i-2] * strx[i-2];
a_mux2 += mu[k][j][i+1] * strx[i+1];
_t_1_ = mu[k][j][i] * strx[i];
_t_1_ += mu[k][j][i-1] * strx[i-1];
a_mux2 += 3.0 * _t_1_;
a_mux3 = mu[k][j][i-1] * strx[i-1];
a_mux3 += mu[k][j][i+2] * strx[i+2];
_t_2_ = mu[k][j][i+1] * strx[i+1];
_t_2_ += mu[k][j][i] * strx[i];
a_mux3 += 3.0 * _t_2_;
a_mux4 = mu[k][j][i+1] * strx[i+1];
_t_3_ = mu[k][j][i] * strx[i];
_t_3_ += mu[k][j][i+2] * strx[i+2];
a_mux4 -= 3.0 / 4.0 * _t_3_;
a_muy1 = mu[k][j-1][i] * stry[j-1];
_t_4_ = mu[k][j][i] * stry[j];
_t_4_ += mu[k][j-2][i] * stry[j-2];
a_muy1 -= 3.0 / 4.0 * _t_4_;
a_muy2 = mu[k][j-2][i] * stry[j-2];
a_muy2 += mu[k][j+1][i] * stry[j+1];
_t_5_ = mu[k][j][i] * stry[j];
_t_5_ += mu[k][j-1][i] * stry[j-1];
a_muy2 += 3.0 * _t_5_;
a_muy3 = mu[k][j-1][i] * stry[j-1];
a_muy3 += mu[k][j+2][i] * stry[j+2];
_t_6_ = mu[k][j+1][i] * stry[j+1];
_t_6_ += mu[k][j][i] * stry[j];
a_muy3 += 3.0 * _t_6_;
a_muy4 = mu[k][j+1][i] * stry[j+1];
_t_7_ = mu[k][j][i] * stry[j];
_t_7_ += mu[k][j+2][i] * stry[j+2];
a_muy4 -= 3.0 / 4.0 * _t_7_;
a_muz1 = mu[k-1][j][i] * strz[k-1];
_t_8_ = mu[k][j][i] * strz[k];
_t_8_ += mu[k-2][j][i] * strz[k-2];
a_muz1 -= 3.0 / 4.0 * _t_8_;
a_muz2 = mu[k-2][j][i] * strz[k-2];
a_muz2 += mu[k+1][j][i] * strz[k+1];
_t_9_ = mu[k][j][i] * strz[k];
_t_9_ += mu[k-1][j][i] * strz[k-1];
a_muz2 += 3.0 * _t_9_;
a_muz3 = mu[k-1][j][i] * strz[k-1];
a_muz3 += mu[k+2][j][i] * strz[k+2];
_t_10_ = mu[k+1][j][i] * strz[k+1];
_t_10_ += mu[k][j][i] * strz[k];
a_muz3 += 3.0 * _t_10_;
a_muz4 = mu[k+1][j][i] * strz[k+1];
_t_11_ = mu[k][j][i] * strz[k];
_t_11_ += mu[k+2][j][i] * strz[k+2];
a_muz4 -= 3.0 / 4.0 * _t_11_;
// ---- plane k: second-derivative terms accumulated into r2 ----
// x-direction uses the mu-only weights (this is the y-component update):
_t_14_ = u_1[k][j][i-2];
_t_14_ -= u_1[k][j][i];
_t_13_ = a_mux1 * _t_14_;
_t_15_ = u_1[k][j][i-1];
_t_15_ -= u_1[k][j][i];
_t_13_ += a_mux2 * _t_15_;
_t_16_ = u_1[k][j][i+1];
_t_16_ -= u_1[k][j][i];
_t_13_ += a_mux3 * _t_16_;
_t_17_ = u_1[k][j][i+2];
_t_17_ -= u_1[k][j][i];
_t_13_ += a_mux4 * _t_17_;
_t_12_ = strx[i] * _t_13_;
// y-direction uses (2*mu + la) weights, built from a_muy* plus la terms:
_t_19_ = 2.0 * a_muy1;
_t_19_ += la[k][j-1][i] * stry[j-1];
_t_21_ = la[k][j][i] * stry[j];
_t_21_ += la[k][j-2][i] * stry[j-2];
_t_19_ -= 3.0 / 4.0 * _t_21_;
_t_20_ = u_1[k][j-2][i];
_t_20_ -= u_1[k][j][i];
_t_18_ = _t_19_ * _t_20_;
_t_22_ = 2.0 * a_muy2;
_t_22_ += la[k][j-2][i] * stry[j-2];
_t_22_ += la[k][j+1][i] * stry[j+1];
_t_24_ = la[k][j][i] * stry[j];
_t_24_ += la[k][j-1][i] * stry[j-1];
_t_22_ += 3.0 * _t_24_;
_t_23_ = u_1[k][j-1][i];
_t_23_ -= u_1[k][j][i];
_t_18_ += _t_22_ * _t_23_;
_t_25_ = 2.0 * a_muy3;
_t_25_ += la[k][j-1][i] * stry[j-1];
_t_25_ += la[k][j+2][i] * stry[j+2];
_t_27_ = la[k][j+1][i] * stry[j+1];
_t_27_ += la[k][j][i] * stry[j];
_t_25_ += 3.0 * _t_27_;
_t_26_ = u_1[k][j+1][i];
_t_26_ -= u_1[k][j][i];
_t_18_ += _t_25_ * _t_26_;
_t_28_ = 2.0 * a_muy4;
_t_28_ += la[k][j+1][i] * stry[j+1];
_t_30_ = la[k][j][i] * stry[j];
_t_30_ += la[k][j+2][i] * stry[j+2];
_t_28_ -= 3.0 / 4.0 * _t_30_;
_t_29_ = u_1[k][j+2][i];
_t_29_ -= u_1[k][j][i];
_t_18_ += _t_28_ * _t_29_;
_t_12_ += stry[j] * _t_18_;
// z-direction again uses mu-only weights:
_t_32_ = u_1[k-2][j][i];
_t_32_ -= u_1[k][j][i];
_t_31_ = a_muz1 * _t_32_;
_t_33_ = u_1[k-1][j][i];
_t_33_ -= u_1[k][j][i];
_t_31_ += a_muz2 * _t_33_;
_t_34_ = u_1[k+1][j][i];
_t_34_ -= u_1[k][j][i];
_t_31_ += a_muz3 * _t_34_;
_t_35_ = u_1[k+2][j][i];
_t_35_ -= u_1[k][j][i];
_t_31_ += a_muz4 * _t_35_;
_t_12_ += strz[k] * _t_31_;
r2 = 1.0 / 6.0 * _t_12_;
// ---- plane k: mixed (cross) derivative terms ----
// Each group is a 4th-order cross difference (coefficients 1, -8, 8, -1
// over +/-2 offsets, with an inner 8x weighting) scaled by 1/144 and the
// product of the two stretch factors involved.
// d/dx( mu * d u_0/dy ):
_t_39_ = strx[i] * stry[j];
_t_37_ = _t_39_ * 1.0 / 144.0;
_t_40_ = u_0[k][j-2][i-2];
_t_40_ -= u_0[k][j+2][i-2];
_t_41_ = -u_0[k][j-1][i-2];
_t_41_ += u_0[k][j+1][i-2];
_t_40_ += 8.0 * _t_41_;
_t_38_ = mu[k][j][i-2] * _t_40_;
_t_43_ = u_0[k][j-2][i-1];
_t_43_ -= u_0[k][j+2][i-1];
_t_44_ = -u_0[k][j-1][i-1];
_t_44_ += u_0[k][j+1][i-1];
_t_43_ += 8.0 * _t_44_;
_t_42_ = mu[k][j][i-1] * _t_43_;
_t_38_ -= 8.0 * _t_42_;
_t_46_ = u_0[k][j-2][i+1];
_t_46_ -= u_0[k][j+2][i+1];
_t_47_ = -u_0[k][j-1][i+1];
_t_47_ += u_0[k][j+1][i+1];
_t_46_ += 8.0 * _t_47_;
_t_45_ = mu[k][j][i+1] * _t_46_;
_t_38_ += 8.0 * _t_45_;
_t_48_ = u_0[k][j-2][i+2];
_t_48_ -= u_0[k][j+2][i+2];
_t_49_ = -u_0[k][j-1][i+2];
_t_49_ += u_0[k][j+1][i+2];
_t_48_ += 8.0 * _t_49_;
_t_38_ -= mu[k][j][i+2] * _t_48_;
_t_36_ = _t_37_ * _t_38_;
// d/dy( la * d u_0/dx ):
_t_52_ = strx[i] * stry[j];
_t_50_ = _t_52_ * 1.0 / 144.0;
_t_53_ = u_0[k][j-2][i-2];
_t_53_ -= u_0[k][j-2][i+2];
_t_54_ = -u_0[k][j-2][i-1];
_t_54_ += u_0[k][j-2][i+1];
_t_53_ += 8.0 * _t_54_;
_t_51_ = la[k][j-2][i] * _t_53_;
_t_56_ = u_0[k][j-1][i-2];
_t_56_ -= u_0[k][j-1][i+2];
_t_57_ = -u_0[k][j-1][i-1];
_t_57_ += u_0[k][j-1][i+1];
_t_56_ += 8.0 * _t_57_;
_t_55_ = la[k][j-1][i] * _t_56_;
_t_51_ -= 8.0 * _t_55_;
_t_59_ = u_0[k][j+1][i-2];
_t_59_ -= u_0[k][j+1][i+2];
_t_60_ = -u_0[k][j+1][i-1];
_t_60_ += u_0[k][j+1][i+1];
_t_59_ += 8.0 * _t_60_;
_t_58_ = la[k][j+1][i] * _t_59_;
_t_51_ += 8.0 * _t_58_;
_t_61_ = u_0[k][j+2][i-2];
_t_61_ -= u_0[k][j+2][i+2];
_t_62_ = -u_0[k][j+2][i-1];
_t_62_ += u_0[k][j+2][i+1];
_t_61_ += 8.0 * _t_62_;
_t_51_ -= la[k][j+2][i] * _t_61_;
_t_36_ += _t_50_ * _t_51_;
// d/dy( la * d u_2/dz ):
_t_65_ = stry[j] * strz[k];
_t_63_ = _t_65_ * 1.0 / 144.0;
_t_66_ = u_2[k-2][j-2][i];
_t_66_ -= u_2[k+2][j-2][i];
_t_67_ = -u_2[k-1][j-2][i];
_t_67_ += u_2[k+1][j-2][i];
_t_66_ += 8.0 * _t_67_;
_t_64_ = la[k][j-2][i] * _t_66_;
_t_69_ = u_2[k-2][j-1][i];
_t_69_ -= u_2[k+2][j-1][i];
_t_70_ = -u_2[k-1][j-1][i];
_t_70_ += u_2[k+1][j-1][i];
_t_69_ += 8.0 * _t_70_;
_t_68_ = la[k][j-1][i] * _t_69_;
_t_64_ -= 8.0 * _t_68_;
_t_72_ = u_2[k-2][j+1][i];
_t_72_ -= u_2[k+2][j+1][i];
_t_73_ = -u_2[k-1][j+1][i];
_t_73_ += u_2[k+1][j+1][i];
_t_72_ += 8.0 * _t_73_;
_t_71_ = la[k][j+1][i] * _t_72_;
_t_64_ += 8.0 * _t_71_;
_t_74_ = u_2[k-2][j+2][i];
_t_74_ -= u_2[k+2][j+2][i];
_t_75_ = -u_2[k-1][j+2][i];
_t_75_ += u_2[k+1][j+2][i];
_t_74_ += 8.0 * _t_75_;
_t_64_ -= la[k][j+2][i] * _t_74_;
_t_36_ += _t_63_ * _t_64_;
// d/dz( mu * d u_2/dy ):
_t_78_ = stry[j] * strz[k];
_t_76_ = _t_78_ * 1.0 / 144.0;
_t_79_ = u_2[k-2][j-2][i];
_t_79_ -= u_2[k-2][j+2][i];
_t_80_ = -u_2[k-2][j-1][i];
_t_80_ += u_2[k-2][j+1][i];
_t_79_ += 8.0 * _t_80_;
_t_77_ = mu[k-2][j][i] * _t_79_;
_t_82_ = u_2[k-1][j-2][i];
_t_82_ -= u_2[k-1][j+2][i];
_t_83_ = -u_2[k-1][j-1][i];
_t_83_ += u_2[k-1][j+1][i];
_t_82_ += 8.0 * _t_83_;
_t_81_ = mu[k-1][j][i] * _t_82_;
_t_77_ -= 8.0 * _t_81_;
_t_85_ = u_2[k+1][j-2][i];
_t_85_ -= u_2[k+1][j+2][i];
_t_86_ = -u_2[k+1][j-1][i];
_t_86_ += u_2[k+1][j+1][i];
_t_85_ += 8.0 * _t_86_;
_t_84_ = mu[k+1][j][i] * _t_85_;
_t_77_ += 8.0 * _t_84_;
_t_87_ = u_2[k+2][j-2][i];
_t_87_ -= u_2[k+2][j+2][i];
_t_88_ = -u_2[k+2][j-1][i];
_t_88_ += u_2[k+2][j+1][i];
_t_87_ += 8.0 * _t_88_;
_t_77_ -= mu[k+2][j][i] * _t_87_;
_t_36_ += _t_76_ * _t_77_;
r2 += _t_36_;
// ---- plane k: combine with the existing value and store ----
uacc_1kc0jc0ic0 = a1 * uacc_1kc0jc0ic0;
uacc_1kc0jc0ic0 += cof * r2;
uacc_1[k*N*N+j*N+i] = uacc_1kc0jc0ic0;
// ---- plane k+1: averaged material coefficients ----
// Same structure as the a_* weights above but centered on plane k+1;
// b_muz* use the one-sided window k-1..k+3 appropriate for that plane.
b_mux1 = mu[k+1][j][i-1] * strx[i-1];
_t_89_ = mu[k+1][j][i] * strx[i];
_t_89_ += mu[k+1][j][i-2] * strx[i-2];
b_mux1 -= 3.0 / 4.0 * _t_89_;
b_mux2 = mu[k+1][j][i-2] * strx[i-2];
b_mux2 += mu[k+1][j][i+1] * strx[i+1];
_t_90_ = mu[k+1][j][i] * strx[i];
_t_90_ += mu[k+1][j][i-1] * strx[i-1];
b_mux2 += 3.0 * _t_90_;
b_mux3 = mu[k+1][j][i-1] * strx[i-1];
b_mux3 += mu[k+1][j][i+2] * strx[i+2];
_t_91_ = mu[k+1][j][i+1] * strx[i+1];
_t_91_ += mu[k+1][j][i] * strx[i];
b_mux3 += 3.0 * _t_91_;
b_mux4 = mu[k+1][j][i+1] * strx[i+1];
_t_92_ = mu[k+1][j][i] * strx[i];
_t_92_ += mu[k+1][j][i+2] * strx[i+2];
b_mux4 -= 3.0 / 4.0 * _t_92_;
b_muy1 = mu[k+1][j-1][i] * stry[j-1];
_t_93_ = mu[k+1][j][i] * stry[j];
_t_93_ += mu[k+1][j-2][i] * stry[j-2];
b_muy1 -= 3.0 / 4.0 * _t_93_;
b_muy2 = mu[k+1][j-2][i] * stry[j-2];
b_muy2 += mu[k+1][j+1][i] * stry[j+1];
_t_94_ = mu[k+1][j][i] * stry[j];
_t_94_ += mu[k+1][j-1][i] * stry[j-1];
b_muy2 += 3.0 * _t_94_;
b_muy3 = mu[k+1][j-1][i] * stry[j-1];
b_muy3 += mu[k+1][j+2][i] * stry[j+2];
_t_95_ = mu[k+1][j+1][i] * stry[j+1];
_t_95_ += mu[k+1][j][i] * stry[j];
b_muy3 += 3.0 * _t_95_;
b_muy4 = mu[k+1][j+1][i] * stry[j+1];
_t_96_ = mu[k+1][j][i] * stry[j];
_t_96_ += mu[k+1][j+2][i] * stry[j+2];
b_muy4 -= 3.0 / 4.0 * _t_96_;
b_muz1 = mu[k][j][i] * strz[k];
_t_97_ = mu[k+1][j][i] * strz[k+1];
_t_97_ += mu[k-1][j][i] * strz[k-1];
b_muz1 -= 3.0 / 4.0 * _t_97_;
b_muz2 = mu[k-1][j][i] * strz[k-1];
b_muz2 += mu[k+2][j][i] * strz[k+2];
_t_98_ = mu[k+1][j][i] * strz[k+1];
_t_98_ += mu[k][j][i] * strz[k];
b_muz2 += 3.0 * _t_98_;
b_muz3 = mu[k][j][i] * strz[k];
b_muz3 += mu[k+3][j][i] * strz[k+3];
_t_99_ = mu[k+2][j][i] * strz[k+2];
_t_99_ += mu[k+1][j][i] * strz[k+1];
b_muz3 += 3.0 * _t_99_;
b_muz4 = mu[k+2][j][i] * strz[k+2];
_t_100_ = mu[k+1][j][i] * strz[k+1];
_t_100_ += mu[k+3][j][i] * strz[k+3];
b_muz4 -= 3.0 / 4.0 * _t_100_;
// ---- plane k+1: second-derivative terms accumulated into r3 ----
_t_103_ = u_1[k+1][j][i-2];
_t_103_ -= u_1[k+1][j][i];
_t_102_ = b_mux1 * _t_103_;
_t_104_ = u_1[k+1][j][i-1];
_t_104_ -= u_1[k+1][j][i];
_t_102_ += b_mux2 * _t_104_;
_t_105_ = u_1[k+1][j][i+1];
_t_105_ -= u_1[k+1][j][i];
_t_102_ += b_mux3 * _t_105_;
_t_106_ = u_1[k+1][j][i+2];
_t_106_ -= u_1[k+1][j][i];
_t_102_ += b_mux4 * _t_106_;
_t_101_ = strx[i] * _t_102_;
_t_108_ = 2.0 * b_muy1;
_t_108_ += la[k+1][j-1][i] * stry[j-1];
_t_110_ = la[k+1][j][i] * stry[j];
_t_110_ += la[k+1][j-2][i] * stry[j-2];
_t_108_ -= 3.0 / 4.0 * _t_110_;
_t_109_ = u_1[k+1][j-2][i];
_t_109_ -= u_1[k+1][j][i];
_t_107_ = _t_108_ * _t_109_;
_t_111_ = 2.0 * b_muy2;
_t_111_ += la[k+1][j-2][i] * stry[j-2];
_t_111_ += la[k+1][j+1][i] * stry[j+1];
_t_113_ = la[k+1][j][i] * stry[j];
_t_113_ += la[k+1][j-1][i] * stry[j-1];
_t_111_ += 3.0 * _t_113_;
_t_112_ = u_1[k+1][j-1][i];
_t_112_ -= u_1[k+1][j][i];
_t_107_ += _t_111_ * _t_112_;
_t_114_ = 2.0 * b_muy3;
_t_114_ += la[k+1][j-1][i] * stry[j-1];
_t_114_ += la[k+1][j+2][i] * stry[j+2];
_t_116_ = la[k+1][j+1][i] * stry[j+1];
_t_116_ += la[k+1][j][i] * stry[j];
_t_114_ += 3.0 * _t_116_;
_t_115_ = u_1[k+1][j+1][i];
_t_115_ -= u_1[k+1][j][i];
_t_107_ += _t_114_ * _t_115_;
_t_117_ = 2.0 * b_muy4;
_t_117_ += la[k+1][j+1][i] * stry[j+1];
_t_119_ = la[k+1][j][i] * stry[j];
_t_119_ += la[k+1][j+2][i] * stry[j+2];
_t_117_ -= 3.0 / 4.0 * _t_119_;
_t_118_ = u_1[k+1][j+2][i];
_t_118_ -= u_1[k+1][j][i];
_t_107_ += _t_117_ * _t_118_;
_t_101_ += stry[j] * _t_107_;
// z second derivative for plane k+1, one-sided window k-1..k+3:
_t_121_ = u_1[k-1][j][i];
_t_121_ -= u_1[k+1][j][i];
_t_120_ = b_muz1 * _t_121_;
_t_122_ = u_1[k][j][i];
_t_122_ -= u_1[k+1][j][i];
_t_120_ += b_muz2 * _t_122_;
_t_123_ = u_1[k+2][j][i];
_t_123_ -= u_1[k+1][j][i];
_t_120_ += b_muz3 * _t_123_;
_t_124_ = u_1[k+3][j][i];
_t_124_ -= u_1[k+1][j][i];
_t_120_ += b_muz4 * _t_124_;
_t_101_ += strz[k+1] * _t_120_;
r3 = 1.0 / 6.0 * _t_101_;
// ---- plane k+1: mixed (cross) derivative terms ----
// d/dx( mu * d u_0/dy ) on plane k+1:
_t_128_ = strx[i] * stry[j];
_t_126_ = _t_128_ * 1.0 / 144.0;
_t_129_ = u_0[k+1][j-2][i-2];
_t_129_ -= u_0[k+1][j+2][i-2];
_t_130_ = -u_0[k+1][j-1][i-2];
_t_130_ += u_0[k+1][j+1][i-2];
_t_129_ += 8.0 * _t_130_;
_t_127_ = mu[k+1][j][i-2] * _t_129_;
_t_132_ = u_0[k+1][j-2][i-1];
_t_132_ -= u_0[k+1][j+2][i-1];
_t_133_ = -u_0[k+1][j-1][i-1];
_t_133_ += u_0[k+1][j+1][i-1];
_t_132_ += 8.0 * _t_133_;
_t_131_ = mu[k+1][j][i-1] * _t_132_;
_t_127_ -= 8.0 * _t_131_;
_t_135_ = u_0[k+1][j-2][i+1];
_t_135_ -= u_0[k+1][j+2][i+1];
_t_136_ = -u_0[k+1][j-1][i+1];
_t_136_ += u_0[k+1][j+1][i+1];
_t_135_ += 8.0 * _t_136_;
_t_134_ = mu[k+1][j][i+1] * _t_135_;
_t_127_ += 8.0 * _t_134_;
_t_137_ = u_0[k+1][j-2][i+2];
_t_137_ -= u_0[k+1][j+2][i+2];
_t_138_ = -u_0[k+1][j-1][i+2];
_t_138_ += u_0[k+1][j+1][i+2];
_t_137_ += 8.0 * _t_138_;
_t_127_ -= mu[k+1][j][i+2] * _t_137_;
_t_125_ = _t_126_ * _t_127_;
// d/dy( la * d u_0/dx ) on plane k+1:
_t_141_ = strx[i] * stry[j];
_t_139_ = _t_141_ * 1.0 / 144.0;
_t_142_ = u_0[k+1][j-2][i-2];
_t_142_ -= u_0[k+1][j-2][i+2];
_t_143_ = -u_0[k+1][j-2][i-1];
_t_143_ += u_0[k+1][j-2][i+1];
_t_142_ += 8.0 * _t_143_;
_t_140_ = la[k+1][j-2][i] * _t_142_;
_t_145_ = u_0[k+1][j-1][i-2];
_t_145_ -= u_0[k+1][j-1][i+2];
_t_146_ = -u_0[k+1][j-1][i-1];
_t_146_ += u_0[k+1][j-1][i+1];
_t_145_ += 8.0 * _t_146_;
_t_144_ = la[k+1][j-1][i] * _t_145_;
_t_140_ -= 8.0 * _t_144_;
_t_148_ = u_0[k+1][j+1][i-2];
_t_148_ -= u_0[k+1][j+1][i+2];
_t_149_ = -u_0[k+1][j+1][i-1];
_t_149_ += u_0[k+1][j+1][i+1];
_t_148_ += 8.0 * _t_149_;
_t_147_ = la[k+1][j+1][i] * _t_148_;
_t_140_ += 8.0 * _t_147_;
_t_150_ = u_0[k+1][j+2][i-2];
_t_150_ -= u_0[k+1][j+2][i+2];
_t_151_ = -u_0[k+1][j+2][i-1];
_t_151_ += u_0[k+1][j+2][i+1];
_t_150_ += 8.0 * _t_151_;
_t_140_ -= la[k+1][j+2][i] * _t_150_;
_t_125_ += _t_139_ * _t_140_;
// d/dy( la * d u_2/dz ) on plane k+1 (z window k-1..k+3):
_t_154_ = stry[j] * strz[k+1];
_t_152_ = _t_154_ * 1.0 / 144.0;
_t_155_ = u_2[k-1][j-2][i];
_t_155_ -= u_2[k+3][j-2][i];
_t_156_ = -u_2[k][j-2][i];
_t_156_ += u_2[k+2][j-2][i];
_t_155_ += 8.0 * _t_156_;
_t_153_ = la[k+1][j-2][i] * _t_155_;
_t_158_ = u_2[k-1][j-1][i];
_t_158_ -= u_2[k+3][j-1][i];
_t_159_ = -u_2[k][j-1][i];
_t_159_ += u_2[k+2][j-1][i];
_t_158_ += 8.0 * _t_159_;
_t_157_ = la[k+1][j-1][i] * _t_158_;
_t_153_ -= 8.0 * _t_157_;
_t_161_ = u_2[k-1][j+1][i];
_t_161_ -= u_2[k+3][j+1][i];
_t_162_ = -u_2[k][j+1][i];
_t_162_ += u_2[k+2][j+1][i];
_t_161_ += 8.0 * _t_162_;
_t_160_ = la[k+1][j+1][i] * _t_161_;
_t_153_ += 8.0 * _t_160_;
_t_163_ = u_2[k-1][j+2][i];
_t_163_ -= u_2[k+3][j+2][i];
_t_164_ = -u_2[k][j+2][i];
_t_164_ += u_2[k+2][j+2][i];
_t_163_ += 8.0 * _t_164_;
_t_153_ -= la[k+1][j+2][i] * _t_163_;
_t_125_ += _t_152_ * _t_153_;
// d/dz( mu * d u_2/dy ) on plane k+1 (z window k-1..k+3):
_t_167_ = stry[j] * strz[k+1];
_t_165_ = _t_167_ * 1.0 / 144.0;
_t_168_ = u_2[k-1][j-2][i];
_t_168_ -= u_2[k-1][j+2][i];
_t_169_ = -u_2[k-1][j-1][i];
_t_169_ += u_2[k-1][j+1][i];
_t_168_ += 8.0 * _t_169_;
_t_166_ = mu[k-1][j][i] * _t_168_;
_t_171_ = u_2[k][j-2][i];
_t_171_ -= u_2[k][j+2][i];
_t_172_ = -u_2[k][j-1][i];
_t_172_ += u_2[k][j+1][i];
_t_171_ += 8.0 * _t_172_;
_t_170_ = mu[k][j][i] * _t_171_;
_t_166_ -= 8.0 * _t_170_;
_t_174_ = u_2[k+2][j-2][i];
_t_174_ -= u_2[k+2][j+2][i];
_t_175_ = -u_2[k+2][j-1][i];
_t_175_ += u_2[k+2][j+1][i];
_t_174_ += 8.0 * _t_175_;
_t_173_ = mu[k+2][j][i] * _t_174_;
_t_166_ += 8.0 * _t_173_;
_t_176_ = u_2[k+3][j-2][i];
_t_176_ -= u_2[k+3][j+2][i];
_t_177_ = -u_2[k+3][j-1][i];
_t_177_ += u_2[k+3][j+1][i];
_t_176_ += 8.0 * _t_177_;
_t_166_ -= mu[k+3][j][i] * _t_176_;
_t_125_ += _t_165_ * _t_166_;
r3 += _t_125_;
// ---- plane k+1: combine with the existing value and store ----
uacc_1kp1jc0ic0 = a1 * uacc_1kp1jc0ic0;
uacc_1kp1jc0ic0 += cof * r3;
uacc_1[(k+1)*N*N+j*N+i] = uacc_1kp1jc0ic0;
}
}
}
// sw4_c: fourth-order SW4 elastic-wave stencil for the third component.
// Updates uacc_2 = a1*uacc_2 + cof*r3 over the interior [2, N-3]^3 of an
// N^3 grid; all 3-D arrays are flattened as [k*N*N + j*N + i].
// Launch: 2-D grid over (i, j); each thread marches the whole k range.
// __launch_bounds__(128,2): cap 128 threads/block, request >=2 resident
// blocks per SM (limits register allocation).
__global__ void __launch_bounds__ (128,2) sw4_c (double * uacc_2, double * __restrict__ u_0, double * __restrict__ u_1, double * __restrict__ u_2, double * __restrict__ mu, double * __restrict__ la, double * __restrict__ strx, double * __restrict__ stry, double * __restrict__ strz, int N) {
// Determine this thread's (i, j) grid point from the block indices.
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
// Assumptions
int a1 = 1;
double h = 3.7;
// cof = 1/h^2: grid-spacing scaling of the stencil.
double cof = 1e0 / ( h * h);
/* Total 687 flops */
// Interior guard with a 2-cell halo.  Bitwise & is used on 0/1
// comparison results, which is equivalent to && here.
if (i>=2 & j>=2 & i<=N-3 & j<=N-3) {
#pragma unroll 10
for (int k=2; k<=N-3; k++) {
/* 28 * 3 = 84 flops */
// 4th-order variable-coefficient combinations of mu along each axis,
// scaled by the grid-stretching arrays strx/stry/strz.
double mux1 = mu[k*N*N+j*N+i-1] * strx[i-1] - 3e0 / 4 * (mu[k*N*N+j*N+i] * strx[i] + mu[k*N*N+j*N+i-2] * strx[i-2]);
double mux2 = mu[k*N*N+j*N+i-2] * strx[i-2] + mu[k*N*N+j*N+i+1] * strx[i+1] + 3 * (mu[k*N*N+j*N+i] * strx[i] + mu[k*N*N+j*N+i-1] * strx[i-1]);
double mux3 = mu[k*N*N+j*N+i-1] * strx[i-1] + mu[k*N*N+j*N+i+2] * strx[i+2] + 3 * (mu[k*N*N+j*N+i+1] * strx[i+1] + mu[k*N*N+j*N+i] * strx[i]);
double mux4 = mu[k*N*N+j*N+i+1] * strx[i+1] - 3e0 / 4 * (mu[k*N*N+j*N+i] * strx[i] + mu[k*N*N+j*N+i+2] * strx[i+2]);
double muy1 = mu[k*N*N+(j-1)*N+i] * stry[j-1] - 3e0 / 4 * (mu[k*N*N+j*N+i] * stry[j] + mu[k*N*N+(j-2)*N+i] * stry[j-2]);
double muy2 = mu[k*N*N+(j-2)*N+i] * stry[j-2] + mu[k*N*N+(j+1)*N+i] * stry[j+1] + 3 * (mu[k*N*N+j*N+i] * stry[j] + mu[k*N*N+(j-1)*N+i] * stry[j-1]);
double muy3 = mu[k*N*N+(j-1)*N+i] * stry[j-1] + mu[k*N*N+(j+2)*N+i] * stry[j+2] + 3 * (mu[k*N*N+(j+1)*N+i] * stry[j+1] + mu[k*N*N+j*N+i] * stry[j]);
double muy4 = mu[k*N*N+(j+1)*N+i] * stry[j+1] - 3e0 / 4 * (mu[k*N*N+j*N+i] * stry[j] + mu[k*N*N+(j+2)*N+i] * stry[j+2]);
double muz1 = mu[(k-1)*N*N+j*N+i] * strz[k-1] - 3e0 / 4 * (mu[k*N*N+j*N+i] * strz[k] + mu[(k-2)*N*N+j*N+i] * strz[k-2]);
double muz2 = mu[(k-2)*N*N+j*N+i] * strz[k-2] + mu[(k+1)*N*N+j*N+i] * strz[k+1] + 3 * (mu[k*N*N+j*N+i] * strz[k] + mu[(k-1)*N*N+j*N+i] * strz[k-1]);
double muz3 = mu[(k-1)*N*N+j*N+i] * strz[k-1] + mu[(k+2)*N*N+j*N+i] * strz[k+2] + 3 * (mu[(k+1)*N*N+j*N+i] * strz[k+1] + mu[k*N*N+j*N+i] * strz[k]);
double muz4 = mu[(k+1)*N*N+j*N+i] * strz[k+1] - 3e0 / 4 * (mu[k*N*N+j*N+i] * strz[k] + mu[(k+2)*N*N+j*N+i] * strz[k+2]);
// Second-derivative-like terms in x, y, z of u_2 (the z-direction also
// folds in the la coefficient).
double r3 = 1e0 / 6 * (strx[i] * (mux1 * (u_2[k*N*N+j*N+i-2] - u_2[k*N*N+j*N+i]) + mux2 * (u_2[k*N*N+j*N+i-1] - u_2[k*N*N+j*N+i]) + mux3 * (u_2[k*N*N+j*N+i+1] - u_2[k*N*N+j*N+i]) + mux4 * (u_2[k*N*N+j*N+i+2] - u_2[k*N*N+j*N+i])) + stry[j] * (muy1 * (u_2[k*N*N+(j-2)*N+i] - u_2[k*N*N+j*N+i]) + muy2 * (u_2[k*N*N+(j-1)*N+i] - u_2[k*N*N+j*N+i]) + muy3 * (u_2[k*N*N+(j+1)*N+i] - u_2[k*N*N+j*N+i]) + muy4 * (u_2[k*N*N+(j+2)*N+i] - u_2[k*N*N+j*N+i])) + strz[k] * ((2 * muz1 + la[(k-1)*N*N+j*N+i] * strz[k-1] - 3e0 / 4 * (la[k*N*N+j*N+i] * strz[k] + la[(k-2)*N*N+j*N+i] * strz[k-2])) * (u_2[(k-2)*N*N+j*N+i] - u_2[k*N*N+j*N+i]) + (2 * muz2 + la[(k-2)*N*N+j*N+i] * strz[k-2] + la[(k+1)*N*N+j*N+i] * strz[k+1] + 3 * (la[k*N*N+j*N+i] * strz[k] + la[(k-1)*N*N+j*N+i] * strz[k-1])) * (u_2[(k-1)*N*N+j*N+i] - u_2[k*N*N+j*N+i]) + (2 * muz3 + la[(k-1)*N*N+j*N+i] * strz[k-1] + la[(k+2)*N*N+j*N+i] * strz[k+2] + 3 * (la[(k+1)*N*N+j*N+i] * strz[k+1] + la[k*N*N+j*N+i] * strz[k])) * (u_2[(k+1)*N*N+j*N+i] - u_2[k*N*N+j*N+i]) + (2 * muz4 + la[(k+1)*N*N+j*N+i] * strz[k+1] - 3e0 / 4 * (la[k*N*N+j*N+i] * strz[k] + la[(k+2)*N*N+j*N+i] * strz[k+2])) * (u_2[(k+2)*N*N+j*N+i] - u_2[k*N*N+j*N+i])));
// Mixed xz derivative of u_0: d/dx(mu d/dz) ...
r3 += strx[i] * strz[k] * (1e0 / 144) * (mu[k*N*N+j*N+i-2] * (u_0[(k-2)*N*N+j*N+i-2] - u_0[(k+2)*N*N+j*N+i-2] + 8 * (-u_0[(k-1)*N*N+j*N+i-2] + u_0[(k+1)*N*N+j*N+i-2])) - 8 * (mu[k*N*N+j*N+i-1] * (u_0[(k-2)*N*N+j*N+i-1] - u_0[(k+2)*N*N+j*N+i-1] + 8 * (-u_0[(k-1)*N*N+j*N+i-1] + u_0[(k+1)*N*N+j*N+i-1]))) + 8 * (mu[k*N*N+j*N+i+1] * (u_0[(k-2)*N*N+j*N+i+1] - u_0[(k+2)*N*N+j*N+i+1] + 8 * (-u_0[(k-1)*N*N+j*N+i+1] + u_0[(k+1)*N*N+j*N+i+1]))) - (mu[k*N*N+j*N+i+2] * (u_0[(k-2)*N*N+j*N+i+2] - u_0[(k+2)*N*N+j*N+i+2] + 8 * (-u_0[(k-1)*N*N+j*N+i+2] + u_0[(k+1)*N*N+j*N+i+2]))));
// ... and d/dz(la d/dx) of u_0.
r3 += strx[i] * strz[k] * (1e0 / 144) * (la[(k-2)*N*N+j*N+i] * (u_0[(k-2)*N*N+j*N+i-2] - u_0[(k-2)*N*N+j*N+i+2] + 8 * (-u_0[(k-2)*N*N+j*N+i-1] + u_0[(k-2)*N*N+j*N+i+1])) - 8 * (la[(k-1)*N*N+j*N+i] * (u_0[(k-1)*N*N+j*N+i-2] - u_0[(k-1)*N*N+j*N+i+2] + 8 * (-u_0[(k-1)*N*N+j*N+i-1] + u_0[(k-1)*N*N+j*N+i+1]))) + 8 * (la[(k+1)*N*N+j*N+i] * (u_0[(k+1)*N*N+j*N+i-2] - u_0[(k+1)*N*N+j*N+i+2] + 8 * (-u_0[(k+1)*N*N+j*N+i-1] + u_0[(k+1)*N*N+j*N+i+1]))) - (la[(k+2)*N*N+j*N+i] * (u_0[(k+2)*N*N+j*N+i-2] - u_0[(k+2)*N*N+j*N+i+2] + 8 * (-u_0[(k+2)*N*N+j*N+i-1] + u_0[(k+2)*N*N+j*N+i+1]))));
// Mixed yz derivative of u_1: d/dy(mu d/dz) ...
r3 += stry[j] * strz[k] * (1e0 / 144) * (mu[k*N*N+(j-2)*N+i] * (u_1[(k-2)*N*N+(j-2)*N+i] - u_1[(k+2)*N*N+(j-2)*N+i] + 8 * (-u_1[(k-1)*N*N+(j-2)*N+i] + u_1[(k+1)*N*N+(j-2)*N+i])) - 8 * (mu[k*N*N+(j-1)*N+i] * (u_1[(k-2)*N*N+(j-1)*N+i] - u_1[(k+2)*N*N+(j-1)*N+i] + 8 * (-u_1[(k-1)*N*N+(j-1)*N+i] + u_1[(k+1)*N*N+(j-1)*N+i]))) + 8 * (mu[k*N*N+(j+1)*N+i] * (u_1[(k-2)*N*N+(j+1)*N+i] - u_1[(k+2)*N*N+(j+1)*N+i] + 8 * (-u_1[(k-1)*N*N+(j+1)*N+i] + u_1[(k+1)*N*N+(j+1)*N+i]))) - (mu[k*N*N+(j+2)*N+i] * (u_1[(k-2)*N*N+(j+2)*N+i] - u_1[(k+2)*N*N+(j+2)*N+i] + 8 * (-u_1[(k-1)*N*N+(j+2)*N+i] + u_1[(k+1)*N*N+(j+2)*N+i]))));
// ... and d/dz(la d/dy) of u_1.
r3 += stry[j] * strz[k] * (1e0 / 144) * (la[(k-2)*N*N+j*N+i] * (u_1[(k-2)*N*N+(j-2)*N+i] - u_1[(k-2)*N*N+(j+2)*N+i] + 8 * (-u_1[(k-2)*N*N+(j-1)*N+i] + u_1[(k-2)*N*N+(j+1)*N+i])) - 8 * (la[(k-1)*N*N+j*N+i] * (u_1[(k-1)*N*N+(j-2)*N+i] - u_1[(k-1)*N*N+(j+2)*N+i] + 8 * (-u_1[(k-1)*N*N+(j-1)*N+i] + u_1[(k-1)*N*N+(j+1)*N+i]))) + 8 * (la[(k+1)*N*N+j*N+i] * (u_1[(k+1)*N*N+(j-2)*N+i] - u_1[(k+1)*N*N+(j+2)*N+i] + 8 * (-u_1[(k+1)*N*N+(j-1)*N+i] + u_1[(k+1)*N*N+(j+1)*N+i]))) - (la[(k+2)*N*N+j*N+i] * (u_1[(k+2)*N*N+(j-2)*N+i] - u_1[(k+2)*N*N+(j+2)*N+i] + 8 * (-u_1[(k+2)*N*N+(j-1)*N+i] + u_1[(k+2)*N*N+(j+1)*N+i]))));
/* 3 * 3 = 9 flops */
// Blend previous acceleration (a1 == 1) with the new stencil value.
uacc_2[k*N*N+j*N+i] = a1 * uacc_2[k*N*N+j*N+i] + cof * r3;
}
}
}
// Host driver: copies all inputs to the device, launches the three sw4
// stencil kernels (one per output component) with identical launch
// configurations, and copies the three acceleration fields back.
// NOTE(review): `ceil(a, b)` here is a project 2-arg ceiling-divide macro
// and `check_error` a project helper (presumably it inspects
// cudaGetLastError) -- confirm both against the project headers.
extern "C" void host_code (double *h_uacc_0, double *h_uacc_1, double *h_uacc_2, double *h_u_0, double *h_u_1, double *h_u_2, double *h_mu, double *h_la, double *h_strx, double *h_stry, double *h_strz, int N) {
  double *uacc_0;
  cudaMalloc (&uacc_0, sizeof(double)*N*N*N);
  check_error ("Failed to allocate device memory for uacc_0\n");
  cudaMemcpy (uacc_0, h_uacc_0, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
  double *uacc_1;
  cudaMalloc (&uacc_1, sizeof(double)*N*N*N);
  check_error ("Failed to allocate device memory for uacc_1\n");
  cudaMemcpy (uacc_1, h_uacc_1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
  double *uacc_2;
  cudaMalloc (&uacc_2, sizeof(double)*N*N*N);
  check_error ("Failed to allocate device memory for uacc_2\n");
  cudaMemcpy (uacc_2, h_uacc_2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
  double *u_0;
  cudaMalloc (&u_0, sizeof(double)*N*N*N);
  check_error ("Failed to allocate device memory for u_0\n");
  cudaMemcpy (u_0, h_u_0, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
  double *u_1;
  cudaMalloc (&u_1, sizeof(double)*N*N*N);
  check_error ("Failed to allocate device memory for u_1\n");
  cudaMemcpy (u_1, h_u_1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
  double *u_2;
  cudaMalloc (&u_2, sizeof(double)*N*N*N);
  check_error ("Failed to allocate device memory for u_2\n");
  cudaMemcpy (u_2, h_u_2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
  double *mu;
  cudaMalloc (&mu, sizeof(double)*N*N*N);
  check_error ("Failed to allocate device memory for mu\n");
  cudaMemcpy (mu, h_mu, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
  double *la;
  cudaMalloc (&la, sizeof(double)*N*N*N);
  check_error ("Failed to allocate device memory for la\n");
  cudaMemcpy (la, h_la, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
  double *strx;
  cudaMalloc (&strx, sizeof(double)*N);
  check_error ("Failed to allocate device memory for strx\n");
  cudaMemcpy (strx, h_strx, sizeof(double)*N, cudaMemcpyHostToDevice);
  double *stry;
  cudaMalloc (&stry, sizeof(double)*N);
  check_error ("Failed to allocate device memory for stry\n");
  cudaMemcpy (stry, h_stry, sizeof(double)*N, cudaMemcpyHostToDevice);
  double *strz;
  cudaMalloc (&strz, sizeof(double)*N);
  check_error ("Failed to allocate device memory for strz\n");
  cudaMemcpy (strz, h_strz, sizeof(double)*N, cudaMemcpyHostToDevice);
  // All three kernels use the same 16x8 block over the (i, j) plane;
  // the original built three identical configurations.
  dim3 blockconfig (16, 8, 1);
  dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y), 1);
  sw4_a <<<gridconfig, blockconfig>>> (uacc_0, u_0, u_1, u_2, mu, la, strx, stry, strz, N);
  // BUG FIX: kernel launches were previously unchecked, so a bad launch
  // configuration would go unnoticed until the D2H copies.
  check_error ("sw4_a kernel launch failed\n");
  sw4_b <<<gridconfig, blockconfig>>> (uacc_1, u_0, u_1, u_2, mu, la, strx, stry, strz, N);
  check_error ("sw4_b kernel launch failed\n");
  sw4_c <<<gridconfig, blockconfig>>> (uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N);
  check_error ("sw4_c kernel launch failed\n");
  // Blocking copies also synchronize with the kernels above.
  cudaMemcpy (h_uacc_0, uacc_0, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost);
  cudaMemcpy (h_uacc_1, uacc_1, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost);
  cudaMemcpy (h_uacc_2, uacc_2, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost);
  cudaFree (uacc_0);
  cudaFree (uacc_1);
  cudaFree (uacc_2);
  cudaFree (u_0);
  cudaFree (u_1);
  cudaFree (u_2);
  cudaFree (mu);
  cudaFree (la);
  cudaFree (strx);
  cudaFree (stry);
  cudaFree (strz);
}
|
18,885 | #include <algorithm>
#include <fstream>
#include <iostream>
#include <sstream>
#include <vector>
// Error check-----
// Wrap CUDA runtime calls so failures report their site before aborting.
#define gpuErrchk(ans) \
  { gpuAssert((ans), __FILE__, __LINE__); }
// Prints a diagnostic (error string, file, line) on any non-success CUDA
// status; terminates the process with that status unless abort is false.
inline void gpuAssert(cudaError_t code, const char *file, int line,
                      bool abort = true) {
  if (code == cudaSuccess)
    return;
  fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file,
          line);
  if (abort)
    exit(code);
}
// Error check-----
// This is a very good idea to wrap your calls with that function.. Otherwise
// you will not b Moreover, you may also want to look at how to use
// cuda-memcheck and cuda-gdb for debuggin
// Linear membership scan: true iff item occurs in array[start..end).
__device__ bool device_contains(int *array, int start, int end, int item) {
  int idx = start;
  while (idx < end) {
    if (array[idx] == item)
      return true;
    ++idx;
  }
  return false;
}
// Recursive depth-first search over the CSR graph (xadj/adj) enumerating
// simple paths of length max_k from `start`; a path counts as a cycle when
// its last vertex is adjacent to `start`.
// my_path holds the partial path (one slot per depth level; the caller
// must supply at least max_k ints -- `prep` passes a 5-int slice).
// counter[start] is only touched by the thread that owns `start`, so the
// increment needs no atomics.  `nov` is unused here; kept for signature
// parity with the callers.
__device__ void deviceDFS(int *xadj, int *adj, int *nov, int k, int max_k,
                          int vertex, int *counter, int start, int *my_path) {
  // Record the current vertex at depth (max_k - k - 1).
  my_path[max_k - k - 1] = vertex;
  // for(int i=0; i<max_k-k;i++)
  // printf("path element %d is %d\n",i,my_path[i]);
  if (k == 0) {
    // Path complete: closes a cycle iff start is a neighbour of vertex.
    if (device_contains(adj, xadj[vertex], xadj[vertex + 1], start))
      counter[start]++;
    return;
  }
  // printf("my marked is at%p\n",(void *) marked);
  // Recurse into each neighbour not already on the path (simple paths).
  for (int j = xadj[vertex]; j < xadj[vertex + 1]; j++) {
    if (!device_contains(my_path, 0, max_k - k, adj[j])) {
      deviceDFS(xadj, adj, nov, k - 1, max_k, adj[j], counter, start, my_path);
    }
  }
}
// One DFS root per thread: the global thread id is the start vertex.
// NOTE(review): each thread gets a 5-int slice of `paths` as its DFS path
// buffer, so this assumes k <= 5 -- confirm against callers (wrapper
// allocates paths as (*nov) * 5 ints).
__global__ void prep(int *xadj, int *adj, int *nov, int k, int max_k, int *ct,
                     int *paths) {
  int id = blockIdx.x * blockDim.x + threadIdx.x;
  if (id < *nov) {
    deviceDFS(xadj, adj, nov, k - 1, max_k, id, ct, id, &paths[id * 5]);
  }
}
// Zero-fill the per-vertex counter array ct[0 .. *nov).
__global__ void setct(int *nov, int *ct) {
  const int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid >= *nov)
    return;
  ct[tid] = 0;
}
// Host driver: copies the CSR graph (xadj/adj, *nov vertices, nnz edges)
// to device 0, zeroes the per-vertex counters, runs the cycle-counting
// DFS kernel for cycle length k, then prints each vertex's count and the
// kernel's elapsed time.
void wrapper(int *xadj, int *adj, int *nov, int nnz, int k) {
  cudaSetDevice(0);
  int *d_xadj;
  int *d_adj;
  int *d_nov;
  int *d_ct;
  int *d_paths;
  int *ct = new int[*nov];
  cudaMalloc((void **)&d_xadj, (*nov + 1) * sizeof(int));
  cudaMalloc((void **)&d_adj, nnz * sizeof(int));
  cudaMalloc((void **)&d_nov, sizeof(int));
  cudaMalloc((void **)&d_ct, (*nov) * sizeof(int));
  // 5 ints of DFS path scratch per vertex/thread (assumes k <= 5).
  cudaMalloc((void **)&d_paths, (*nov) * 5 * sizeof(int));
  cudaMemcpy(d_xadj, xadj, (*nov + 1) * sizeof(int), cudaMemcpyHostToDevice);
  cudaMemcpy(d_adj, adj, (nnz) * sizeof(int), cudaMemcpyHostToDevice);
  cudaMemcpy(d_nov, nov, sizeof(int), cudaMemcpyHostToDevice);
  cudaDeviceProp prop;
  cudaGetDeviceProperties(&prop, 0);
  unsigned int threads = prop.maxThreadsPerBlock;
  std::cout << "Device Properties" << std::endl;
  std::cout << "The threads: " << threads << std::endl;
  gpuErrchk(cudaDeviceSynchronize());
#ifdef DEBUG
  std::cout << "malloc copy done" << std::endl;
#endif
  setct<<<(*nov + threads - 1) / threads, threads>>>(d_nov, d_ct);
  gpuErrchk(cudaDeviceSynchronize());
  // Time only the DFS kernel with CUDA events.
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventRecord(start, 0);
  prep<<<(*nov + threads - 1) / threads, threads>>>(d_xadj, d_adj, d_nov, k, k,
                                                    d_ct, d_paths);
  gpuErrchk(cudaDeviceSynchronize());
  cudaEventCreate(&stop);
  cudaEventRecord(stop, 0);
  cudaEventSynchronize(stop);
  cudaMemcpy(ct, d_ct, (*nov) * sizeof(int), cudaMemcpyDeviceToHost);
  for (int i = 0; i < *nov; i++)
    printf("%d %d\n", i, ct[i]);
  float elapsedTime;
  cudaEventElapsedTime(&elapsedTime, start, stop);
  printf("GPU scale took: %f s\n", elapsedTime / 1000);
  // BUG FIX: d_paths and the host buffer ct were leaked, and the CUDA
  // events were never destroyed.
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  cudaFree(d_xadj);
  cudaFree(d_adj);
  cudaFree(d_nov);
  cudaFree(d_ct);
  cudaFree(d_paths);
  delete[] ct;
}
/*Read the given file and return CSR*/
// Reads an edge list ("i j" per line), builds an undirected CSR graph
// (adjacency sorted and deduplicated, self-loops dropped) and hands it to
// wrapper() for GPU cycle counting.  Always returns 0.
void *read_edges(char *bin_name, int k) {
  std::cout << "fname: " << bin_name << std::endl;
  // count the newlines
  unsigned int number_of_lines = 0;
  FILE *infile = fopen(bin_name, "r");
  // BUG FIX: a missing/unreadable file previously crashed in getc(NULL).
  if (infile == NULL) {
    std::cout << "could not open file: " << bin_name << std::endl;
    return 0;
  }
  int ch;
  while (EOF != (ch = getc(infile)))
    if ('\n' == ch)
      ++number_of_lines;
  ++number_of_lines;
#ifdef DEBUG
  std::cout << number_of_lines << " lines" << std::endl;
#endif
  fclose(infile);
  // read the first line, set it to no vertices.
  std::ifstream bp(bin_name);
  int *no_vertices = new int;
  std::string line;
  int i, j, max = 0;
  // Pass 1: find the largest vertex id to size the graph.
  for (unsigned int iter = 0; iter < number_of_lines; iter++) {
    std::getline(bp, line);
    std::istringstream myss(line);
    if (!(myss >> i >> j)) {
      break;
    }
    if (i > max)
      max = i;
    if (j > max)
      max = j;
  }
  bp.clear();
  bp.seekg(0);
  *no_vertices = max + 1;
  int no_edges = (number_of_lines)*2; // bidirectional
  /*TODO unique and no loop decreases this, we should resize adj accordingly.
   * Not the end of the world, we will never reach those indices.*/
  // if file ended with \n you'd keep it as is.
  std::vector<int> *A = new std::vector<int>[*no_vertices];
  // Pass 2: collect adjacency in both directions, skipping self-loops.
  for (unsigned int iter = 0; iter < number_of_lines; iter++) {
    std::getline(bp, line);
    std::istringstream myss(line);
    if (!(myss >> i >> j)) {
      break;
    }
#ifdef DEBUG
    std::cout << i << " " << j << std::endl;
#endif
    if (i != j) {
      // ignore diagonal edges
      A[i].push_back(j);
      A[j].push_back(i);
    }
  }
  // Sort each adjacency list and drop duplicate edges (the input may list
  // the same edge twice, possibly in both orders).
  for (int v = 0; v < *no_vertices; v++) {
    std::sort(A[v].begin(), A[v].end());
    std::vector<int>::iterator it = std::unique(A[v].begin(), A[v].end());
    A[v].resize(std::distance(A[v].begin(), it));
  }
  // Flatten into CSR: xadj[v]..xadj[v+1] indexes v's neighbours in adj.
  int sum = 0;
  int *xadj = new int[*no_vertices + 1]; // last one marks the end of the adj.
  int *adj = new int[no_edges]; // there are m+1 lines (m '\n's), 2m edges.
  xadj[0] = 0;
  for (int v = 0; v < *no_vertices; v++) {
    for (size_t e = 0; e < A[v].size(); e++) {
      adj[sum + e] = A[v][e];
    }
    sum += (int)A[v].size();
    xadj[v + 1] = sum;
  }
  std::cout << "Done reading." << std::endl;
  wrapper(xadj, adj, no_vertices, no_edges, k);
  // BUG FIX: A, xadj, adj and no_vertices were leaked.
  delete[] A;
  delete[] xadj;
  delete[] adj;
  delete no_vertices;
  return 0;
}
// Entry point: argv[1] = edge-list file, argv[2] = cycle length k.
int main(int argc, char *argv[]) {
  /*first arg is filename, second is k*/
  // BUG FIX: argv[1]/argv[2] were dereferenced without checking argc,
  // crashing when arguments were missing.
  if (argc < 3) {
    std::cerr << "usage: " << argv[0] << " <edge-list-file> <k>" << std::endl;
    return 1;
  }
  // omp_set_num_threads(8);
  read_edges(argv[1], atoi(argv[2]));
  return 0;
}
18,886 | #include "includes.h"
// For each of the nwl walkers, copy the value stored at offset (dim+3) of
// its record in lst into prr.  Records in lst are (dim+1+1+1+1) floats
// long; one thread per walker.
__global__ void setPriorAtLast ( const int dim, const int nwl, const float *lst, float *prr ) {
  const int walker = threadIdx.x + blockDim.x * blockIdx.x;
  if ( walker >= nwl )
    return;
  const int recordLen = dim + 1 + 1 + 1 + 1;
  prr[walker] = lst[walker * recordLen + (dim + 3)];
}
18,887 | #include<stdlib.h>
#include<stdio.h>
#include<time.h>
#define n 1024
// Dense n x n integer matrix product c = a * b; one thread per output
// element (thread x -> row, thread y -> column).  The launch must cover
// the full n x n grid -- there is no bounds guard.
__global__ void mul_mat(int *a, int *b, int *c) {
  int myx, myy, i;
  myx = blockIdx.x * blockDim.x + threadIdx.x;
  myy = blockIdx.y * blockDim.y + threadIdx.y;
  // BUG FIX: the accumulator was declared without an initializer, so the
  // dot product started from an indeterminate value (undefined behavior).
  int local = 0;
  for (i = 0; i < n; i++)
    local += a[myx+n*i] * b[n*i+myy];
  c[myx*n+myy] = local;
}
// Multiplies two constant n x n matrices (A = all 1s, B = all 2s) on the
// GPU and reports one sample element plus the kernel time (ms, via CUDA
// events).
int main() {
  int *h_a = (int*)malloc(sizeof(int)*n*n);
  int *h_b = (int*)malloc(sizeof(int)*n*n);
  int *h_c = (int*)malloc(sizeof(int)*n*n);
  for (int idx = 0; idx < n*n; idx++) {
    h_a[idx] = 1;
    h_b[idx] = 2;
    h_c[idx] = 0;
  }
  int *d_a, *d_b, *d_c;
  cudaMalloc((void**)&d_a, sizeof(int)*n*n);
  cudaMalloc((void**)&d_b, sizeof(int)*n*n);
  cudaMalloc((void**)&d_c, sizeof(int)*n*n);
  cudaMemcpy(d_a, h_a, sizeof(int)*n*n, cudaMemcpyHostToDevice);
  cudaMemcpy(d_b, h_b, sizeof(int)*n*n, cudaMemcpyHostToDevice);
  // 64x64 blocks of 16x16 threads covers the full 1024x1024 output.
  dim3 dimGrid(64,64);
  dim3 dimBlock(16,16);
  cudaEvent_t start, stop;
  float time;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaEventRecord(start,0);
  mul_mat<<<dimGrid, dimBlock>>> (d_a, d_b, d_c);
  cudaEventRecord(stop,0);
  cudaEventSynchronize(stop);
  cudaEventElapsedTime(&time, start, stop);
  cudaMemcpy(h_c, d_c, sizeof(int)*n*n, cudaMemcpyDeviceToHost);
  printf("C[451][451] = %d\n",h_c[451*1024 + 451]);
  printf("Time - %f\n", time);
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  free(h_a);
  free(h_b);
  free(h_c);
  cudaFree(d_a);
  cudaFree(d_b);
  cudaFree(d_c);
  return 0;
}
|
18,888 | /** size of A = 640
size of B = 600
gridDim = 60
blockDim = 64
k= 10000
x = 10
**/
// Synthetic compute workload.  Each block owns a 10-float slice of B and
// each thread a 10-float slice of A (x is assumed to be 10, matching the
// header comment).  The kernel accumulates the 10x10 outer product of the
// two slices for 10000 iterations, drifting every a/b element by +10 per
// iteration, then writes the thread's 10x10 tile of C.
// Register-blocked rewrite of the fully hand-unrolled original; the
// per-element floating-point operation order is unchanged.
__global__ void MultiplyVectors(const float* A, const float* B, float* C, int x, int k)
{
    const int TILE = 10;
    const int ITERS = 10000;
    int B_start_index = (blockIdx.x*gridDim.y + blockIdx.y)*x;
    int A_start_index = (threadIdx.x*blockDim.y + threadIdx.y)*x;
    int C_width = x*gridDim.x*gridDim.y;
    float a[TILE], b[TILE], c[TILE][TILE];
    // Load both slices and clear the accumulator tile.
    #pragma unroll
    for (int i = 0; i < TILE; ++i) {
        a[i] = A[A_start_index + i];
        b[i] = B[B_start_index + i];
        #pragma unroll
        for (int j = 0; j < TILE; ++j) {
            c[i][j] = 0;
        }
    }
    for (int t = 0; t < ITERS; ++t) {
        // Rank-1 update of the tile, then drift the operands.
        #pragma unroll
        for (int i = 0; i < TILE; ++i) {
            #pragma unroll
            for (int j = 0; j < TILE; ++j) {
                c[i][j] += a[i]*b[j];
            }
        }
        #pragma unroll
        for (int i = 0; i < TILE; ++i) {
            a[i] += 10;
            b[i] += 10;
        }
    }
    // Write the tile back: row = A slice element, column = B slice element.
    #pragma unroll
    for (int i = 0; i < TILE; ++i) {
        #pragma unroll
        for (int j = 0; j < TILE; ++j) {
            C[(A_start_index + i)*C_width + B_start_index + j] = c[i][j];
        }
    }
}
|
18,889 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
int M=0, N=0, M_final, N_final;
int J=0, K=0;
long pos;
double A[10000][10000], H[10][10];
/*--------------------------------
Reading from the input matrix file
=> A of size M_final x N_final
=> H of size J x K
---------------------------------*/
// Scans the file twice: first counting rows/columns of matrix A (rows are
// space-separated numbers; a blank, space-free line terminates A), reading
// A, then re-counting to size the trailing kernel H and reading it.
// Results land in the globals A, H, M, N, M_final, N_final, J and K.
void file_read(FILE *file){
  int i, j;
  if (file == NULL) {
    printf("Error: file is not provided.");
    return;
  }
  // BUG FIX: c and c1 were declared char; fgetc() returns int, and
  // comparing a char against EOF is unreliable (never true where char is
  // unsigned, and can false-match a 0xFF data byte).
  int c = 0, c1 = 0;
  // BUG FIX: space was read (space1 = space) before ever being assigned
  // on the first loop iteration; start both counters at zero.
  int space = 0, space1 = 0, flag = 0;
  // Pass 1: count rows of A (M) and spaces per row (space1 -> columns).
  for(i = 0; i < 1000000; i++){
    space1 = space;
    space = 0;
    M++;
    for(j = 0; j < 100000; j++){
      c=fgetc(file);
      //printf("%d\t", c);
      if(c == 32){
        space++;
      }
      else if (c == '\n'){
        break;
      }
    }
    // A space-free line marks the end of matrix A.
    if(space==0) break;
  }
  N = space1 + 1;
  M = M-1;
  printf("size of array A is %d x %d\n",M,N);
  rewind(file);
  M_final = M;
  N_final = N;
  for(i=0;i<M;i++){
    for(j=0;j<N;j++){
      fscanf(file,"%lf",&A[i][j]);
    }
  }
  // Remember where H's data starts so we can seek back after re-counting.
  pos = ftell(file);
  rewind(file);
  M=0;
  // Pass 2: re-scan the whole file; M is reset after each blank line so it
  // ends up counting the lines of the final section (H).
  for(i = 0; i < 1000000; i++){
    if(space==0) M=0;
    space1 = space;
    space = 0;
    M++;
    for(j = 0; j < 100000; j++){
      c1=c;
      c=fgetc(file);
      //printf("%d\t", c);
      if(c == 32){
        space++;
      }
      else if(c == '\n'){
        break;
      }
      else if(c == EOF){
        flag = 1;
        break;
      }
    }
    //printf("line = %d , space = %d\n",M, space);
    if(flag == 1){
      break;
    }
  }
  K = space1+1;
  // If the file ended right after a newline, the last M increment counted
  // an empty line.
  if(c1=='\n'){
    J = M-1;
  }
  else{
    J = M;
  }
  printf("size of array H is %d x %d\n",J,K);
  fseek(file, pos, SEEK_SET);
  for(i=0;i<J;i++){
    for(j=0;j<K;j++){
      fscanf(file,"%lf",&H[i][j]);
    }
  }
  rewind(file);
}
// Full 2-D convolution: C = A (*) H, where A (M_final x N_final) and H
// (J x K) are packed back-to-back in d_A, and C is (M_final+J-1) x
// (N_final+K-1).  One thread per output element.
// Assumes blockDim == (32,32) and J*K <= 100 -- confirm against the
// launch site (main launches 32x32 blocks; file_read caps H at 10x10).
__global__ void convolution(double *d_A, double *d_C, int size_c, int J, int K, int M_final, int N_final)
{
int tidx = blockIdx.x*blockDim.x+threadIdx.x;
int tidy = blockIdx.y*blockDim.y+threadIdx.y;
int j,k;
double sum;
__shared__ double shared_H[100];
// Stage the filter H into shared memory.  Each (tidy%32, tidx%32) pair
// occurs exactly once per 32x32 block, so every slot has one writer.
if(tidy%32<J && tidx%32<K){
shared_H[(tidy%32)*K+(tidx%32)] = d_A[(M_final*N_final)+(tidy%32)*K+(tidx%32)];
}
// Barrier is outside the conditional, so all threads reach it.
__syncthreads();
if (tidy<(M_final+J-1) && tidx<(N_final+K-1)){
sum=0;
// Accumulate A[tidy-j][tidx-k] * H[j][k], skipping taps that fall
// outside A (zero padding -> "full" convolution).
for(j=0;j<J;j++){
for(k=0;k<K;k++){
if(!((tidy-j)<0 || (tidx-k)<0 || (tidy-j)>=M_final || (tidx-k)>=N_final)){
sum += d_A[((tidy-j)*N_final)+(tidx-k)]*shared_H[j*K+k];
}
}
}
d_C[tidy*(N_final+K-1)+tidx]=sum;
}
}
// Entry point: reads matrix A and kernel H from the file named in
// argv[1], computes the full 2-D convolution C = A (*) H on the GPU and
// prints every element of C.
int main(int argc, char** argv){
  char *inputfile;
  double *d_A = NULL, *d_C = NULL;
  cudaError_t err = cudaSuccess;
  // BUG FIX: argv[1] was dereferenced and fopen's result used without any
  // validation (NULL fp would also have hit fclose(NULL), which is UB).
  if (argc < 2)
  {
    fprintf(stderr, "Usage: %s <input-file>\n", argv[0]);
    exit(EXIT_FAILURE);
  }
  inputfile = argv[1];
  int m;
  FILE *fp = fopen(inputfile, "r");
  if (fp == NULL)
  {
    fprintf(stderr, "Failed to open input file %s\n", inputfile);
    exit(EXIT_FAILURE);
  }
  file_read(fp);
  int size_c = (M_final+J-1)*(N_final+K-1);
  // d_A holds A followed immediately by H, flattened row-major.
  size_t size_A = ((M_final*N_final)+(J*K)) * sizeof(double);
  size_t size_C = (M_final+J-1) * (N_final+K-1) * sizeof(double);
  double *h_A, *h_C;
  h_A = (double*) malloc (((M_final*N_final)+(J*K))*sizeof(double));
  h_C = (double*) malloc ((M_final+J-1)*(N_final+K-1)*sizeof(double));
  printf("\nh_A: %dx%d\n",M_final,N_final);
  for(int i=0;i<M_final;i++){
    for(int j=0;j<N_final;j++){
      h_A[i*N_final+j]=A[i][j];
    }
  }
  printf("\nh_H: %dx%d\n",J,K);
  for(int i=0;i<J;i++){
    for(int j=0;j<K;j++){
      h_A[(M_final*N_final)+i*K+j]=H[i][j];
    }
  }
  printf("Allocating memory on the CUDA device\n");
  err = cudaMalloc((void **)&d_A, size_A);
  if (err != cudaSuccess)
  {
    fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
  }
  err = cudaMalloc((void **)&d_C, size_C);
  if (err != cudaSuccess)
  {
    fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
  }
  printf("Size of C = %dx%d\n",M_final+J-1,N_final+K-1);
  printf("Copy input data from the host memory to the CUDA device\n");
  err = cudaMemcpy(d_A,h_A,size_A,cudaMemcpyHostToDevice);
  if (err != cudaSuccess)
  {
    fprintf(stderr, "Failed to Copy device vector A (error code %s)!\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
  }
  // 32x32 blocks; grid rounded up to cover the full output.
  dim3 Grid_Size(((N_final+K-2)/32)+1,((M_final+J-2)/32)+1,1);
  dim3 Block_Size(32,32,1);
  printf("No. of Blocks Launched = %dx%d\n",((N_final+K-2)/32)+1,((M_final+J-2)/32)+1);
  convolution<<<Grid_Size,Block_Size>>>(d_A,d_C,size_c,J,K,M_final,N_final);
  err = cudaGetLastError();
  if (err != cudaSuccess)
  {
    fprintf(stderr, "Failed to launch vectorAdd kernel (error code: %s)!\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
  }
  err = cudaDeviceSynchronize();
  if (err != cudaSuccess)
  {
    fprintf(stderr, "Failed to synchronize the device (error code: %s)!\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
  }
  err = cudaMemcpy(h_C,d_C,size_C,cudaMemcpyDeviceToHost);
  if (err != cudaSuccess)
  {
    fprintf(stderr, "Failed to Copy the result back (error code: %s)!\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
  }
  printf("Copied!!\n");
  for(m=0;m<size_c;m++){
    printf("C[%d] = %.3lf\n",m,h_C[m]);
  }
  cudaFree(d_A);
  cudaFree(d_C);
  free(h_A);
  free(h_C);
  cudaDeviceReset();
  printf("-----------------Done and Dusted----------------\n");
  fclose(fp);
  return 0;
}
18,890 | #include "includes.h"
// In-place elementwise square of a[0..N); one thread per element.
__global__ void square_array(float *a, int N)
{
  const int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid >= N)
    return;
  const float v = a[tid];
  a[tid] = v * v;
}
18,891 | ////3.5ܹ֧֡˺ú˺
//#include <stdio.h>
//
//
//
//__global__ void childKernel(int i)
//{
// int tid = blockIdx.x*blockDim.x+threadIdx.x;
// printf("parent:%d,child:%d\n",i,tid);
// for(int j=i;j<i+10;j++)
// {
// printf(",%d",j);
// }
// printf("\n");
//}
//
//__global__ void kernel()
//{
//
// int tid = blockIdx.x*blockDim.x+threadIdx.x;
// childKernel<<<1,2>>>(tid);
//}
//
//int main()
//{
//
// kernel<<<1,1>>>();
// cudaDeviceSynchronize();
//
//return 0;
//}
|
18,892 | #include <stdio.h>
#define BLOCK_SIZE 512
__global__ void spmv_csr_kernel(unsigned int dim, unsigned int *csrRowPtr,
unsigned int *csrColIdx, float *csrData, float *inVector,
float *outVector) {
// Sparse matrix-vector product in CSR form: one thread per matrix row.
// Each thread accumulates the dot product of its row with inVector and
// ADDS it into outVector (note "+=": the caller's output is accumulated).
int r = blockDim.x * blockIdx.x + threadIdx.x;
if (r >= dim)
return;
float acc = 0;
// csrRowPtr[r] .. csrRowPtr[r+1]-1 are this row's nonzero entries
for (int e = csrRowPtr[r]; e < csrRowPtr[r + 1]; e++)
acc += csrData[e] * inVector[csrColIdx[e]];
outVector[r] += acc;
}
__global__ void spmv_jds_kernel(unsigned int dim, unsigned int *jdsRowPerm,
unsigned int *jdsRowNNZ, unsigned int *jdsColStartIdx,
unsigned int *jdsColIdx, float *jdsData, float* inVector,
float *outVector) {
// Sparse matrix-vector product in JDS (jagged diagonal storage) form.
// Presumably rows are sorted by nonzero count and jdsRowPerm maps the
// sorted row index back to the original row — TODO confirm against the
// host-side format builder, which is not visible in this file.
int row = blockDim.x*blockIdx.x+threadIdx.x; // one thread per (permuted) row
if (row < dim){
float temp = 0.0;
// walk this row's jj-th jagged column; jdsRowNNZ[row] is its nonzero count
for (int jj = 0; jj < jdsRowNNZ[row]; jj++){
// jagged columns are stored contiguously, so consecutive threads
// (consecutive `row` values) read consecutive addresses — coalesced
unsigned int idx = row + jdsColStartIdx[jj];
temp += jdsData[idx]*inVector[jdsColIdx[idx]];
}
// scatter to the row's original position; "=" overwrites, unlike the
// CSR kernel above which accumulates with "+="
outVector[jdsRowPerm[row]] = temp;
}
}
void spmv_csr(unsigned int dim, unsigned int *csrRowPtr, unsigned int *csrColIdx,
float *csrData, float *inVector, float *outVector) {
// Host launcher: one thread per row, grid rounded up to cover all rows.
unsigned int numBlocks = (dim + BLOCK_SIZE - 1) / BLOCK_SIZE;
spmv_csr_kernel<<<numBlocks, BLOCK_SIZE>>>(dim, csrRowPtr, csrColIdx,
csrData, inVector, outVector);
}
void spmv_jds(unsigned int dim, unsigned int *jdsRowPerm, unsigned int *jdsRowNNZ,
unsigned int *jdsColStartIdx, unsigned int *jdsColIdx, float *jdsData,
float* inVector, float *outVector) {
// Host launcher: one thread per (permuted) row, grid rounded up.
unsigned int numBlocks = (dim + BLOCK_SIZE - 1) / BLOCK_SIZE;
spmv_jds_kernel<<<numBlocks, BLOCK_SIZE>>>(dim, jdsRowPerm, jdsRowNNZ,
jdsColStartIdx, jdsColIdx, jdsData, inVector, outVector);
}
|
18,893 | #include <cuda.h>
#include <stdio.h>
#include <time.h>
#define SIZE 10
__global__ void max(int *a , int *c) // kernel: finds the MINIMUM of a[] via atomicMin (the name is historical)
{
int i = threadIdx.x; // one thread per array element; launched as a single block
// BUG FIX: previously EVERY thread wrote *c = a[0], racing with other
// threads' atomicMin updates — a late re-initialization could overwrite
// an already-computed minimum. Seed the result from one thread, then
// barrier before anyone updates it.
if (i == 0)
*c = a[0];
__syncthreads(); // safe: the launch uses exactly one block (<<<1,SIZE>>>)
atomicMin(c, a[i]);
}
int main()
{
int i;
srand(time(NULL)); // seeds the host RNG (only used if random input is re-enabled below)
int a[SIZE]={2,41,21,74,86,45,92,35,49,50};
int c;
int *dev_a, *dev_c; // device copies of the input array and the single result
cudaMalloc((void **) &dev_a, SIZE*sizeof(int));
cudaMalloc((void **) &dev_c, sizeof(int)); // holds ONE int; was over-allocated to SIZE ints
for( i = 0 ; i < SIZE ; i++)
{
a[i] = i; // rand()% 1000 + 1; // deterministic input 0..SIZE-1
}
c = a[0]; // seed the result so the copy below sends a defined value (was uninitialized)
cudaMemcpy(dev_c, &c, sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(dev_a, a, SIZE*sizeof(int),cudaMemcpyHostToDevice); // copy the array from CPU to GPU
max<<<1,SIZE>>>(dev_a,dev_c); // <<<number of blocks, number of threads>>>
cudaMemcpy(&c, dev_c, sizeof(int),cudaMemcpyDeviceToHost); // blocking copy also synchronizes with the kernel
printf("\nmin = %d ",c); // the kernel computes a minimum (atomicMin); the old label said "max"
cudaFree(dev_a); // release device memory
cudaFree(dev_c);
return 0;
}
|
18,894 | #include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
#define THREAD_PER_BLOCK 16 // on fixe le nombre de colonnes à 16
#define COLUMNS 16
//fct gpu
// Matrix-vector product: each thread computes one element of c = A * b,
// where A is (rows x COLUMNS) and b has COLUMNS entries. The vector b is
// staged in shared memory once per block.
__global__ void multiplication_matrix_GPU(int *a, int *b, int*c)
{
int idx = blockIdx.x * THREAD_PER_BLOCK + threadIdx.x; // global row index
__shared__ int bs[COLUMNS]; // per-block copy of vector b
bs[threadIdx.x] = b[threadIdx.x]; // each of the 16 threads loads one element
// BUG FIX: the barrier must come BETWEEN writing bs[] and reading it.
// The original had a useless __syncthreads() at the very end instead,
// so threads could read slots other threads had not written yet.
__syncthreads();
int sum = 0;
#pragma unroll
for (int j = 0; j < COLUMNS; ++j)
sum += a[idx * COLUMNS + j] * bs[j];
c[idx] = sum;
}
int main(int agrc, char * argv[])
{
// argv[1] = number of rows of matrix A; B is a COLUMNS-entry vector.
unsigned int rows = atoi(argv[1]), i, j;
int * a_h = (int *) malloc(rows * COLUMNS * sizeof(int)), * b_h = (int *) malloc(COLUMNS * sizeof(int)), * c1_h = (int *) malloc(rows * sizeof(int)), * c2_h = (int *) malloc(rows * sizeof(int));
int *a_d, *b_d, *c_d;
// device allocations
cudaSetDevice (0);
cudaMalloc ((void**) &a_d , rows * COLUMNS * sizeof(int));
cudaMalloc ((void**) &b_d , COLUMNS * sizeof(int));
cudaMalloc ((void**) &c_d , rows * sizeof(int));
unsigned long long ref1, ref2;
unsigned long long diffH = 0, diffD = 0;
struct timeval tim;
// BUG FIX: fill A and B BEFORE copying them to the device. The original
// issued the H2D copies first, so the GPU computed on uninitialized data.
for(i=0;i<COLUMNS*rows;++i){
if(i<COLUMNS){
b_h[i] = i+1;
}
a_h[i] = rand()%(COLUMNS*rows);
}
cudaMemcpy (a_d , a_h , rows * COLUMNS *sizeof(int), cudaMemcpyHostToDevice );
cudaMemcpy (b_d , b_h , COLUMNS * sizeof(int), cudaMemcpyHostToDevice );
// CPU reference multiplication (timed)
gettimeofday(&tim, NULL);
ref1 = tim.tv_sec * 1000000L + tim.tv_usec;
for(i = 0; i<rows; ++i){
c1_h[i] = 0;
for(j = 0; j<COLUMNS;++j)
c1_h[i] += (a_h[i*COLUMNS+j])*(b_h[j]);
}
gettimeofday(&tim, NULL);
ref2 = tim.tv_sec * 1000000L + tim.tv_usec;
diffH+=ref2-ref1;
// GPU multiplication (timed). Kernel launches are asynchronous, so we
// must synchronize before reading the clock — the original measured only
// the launch overhead, not the computation.
gettimeofday(&tim, NULL);
ref1 = tim.tv_sec * 1000000L + tim.tv_usec;
int blocks = rows/THREAD_PER_BLOCK; // assumes rows is a multiple of THREAD_PER_BLOCK
multiplication_matrix_GPU<<<blocks,THREAD_PER_BLOCK>>>(a_d, b_d, c_d);
cudaDeviceSynchronize();
gettimeofday(&tim, NULL);
ref2 = tim.tv_sec * 1000000L + tim.tv_usec;
diffD+=ref2-ref1;
cudaMemcpy(c2_h , c_d , rows * sizeof(int), cudaMemcpyDeviceToHost);
// compare the first results and report mismatches
int ok = 1;
for(i=0;i<10;++i)
if(c1_h[i]!=c2_h[i]){
//ok = 0; // left disabled as in the original: mismatches are printed but do not suppress the timing line
printf("Différence : %d != %d\n", c1_h[i], c2_h[i]);
}
if(ok)
printf("Temps de calcul, CPU [%llu usec] GPU [%llu usec] \n", diffH, diffD);
cudaFree(a_d);
cudaFree(b_d);
cudaFree(c_d);
free(a_h);
free(b_h);
free(c1_h);
free(c2_h);
}
|
18,895 |
#include <cuda.h>
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#define N 1920;
#define M 1080;
#define CHANNELS 3;
void colorTogrey(int *, int *,int,int,int);
// we have 3 channels corresponding to RGB
// The input image is encoded as unsigned characters [0, 255]
__global__
void colorToGreyscaleConvertion(int *Pin_d, int *Pout_d,
int width, int height) {
// One thread per output pixel of the width x height image.
// Pin_d holds 3 interleaved channel values (R,G,B) per pixel.
int Col = threadIdx.x + blockIdx.x * blockDim.x;
int Row = threadIdx.y + blockIdx.y * blockDim.y;
if (Row >= height || Col >= (width))
return;
int greyOffset = Row*width + Col; // 1D index in the grayscale output
int rgbOffset = greyOffset*3; // first channel of this pixel in the RGB input
unsigned int r = Pin_d[rgbOffset ];
unsigned int g = Pin_d[rgbOffset + 1];
unsigned int b = Pin_d[rgbOffset + 2];
// Luminance-style weighted sum, computed in float then truncated to int.
Pout_d[greyOffset] = 0.21f*r + 0.72f*g + 0.07f*b;
}
int main()
{
int n=N;int m=M; int c=CHANNELS;
int *Pin_h = (int*) malloc( sizeof(int)*n*m*c);
int ind=0;
unsigned int tmp;
FILE *fp;
fp=fopen("test_image_RGB.txt","r");
if (fp == NULL) { // BUG FIX: the original dereferenced a NULL stream if the file was missing
fprintf(stderr, "cannot open test_image_RGB.txt\n");
free(Pin_h);
return 1;
}
// BUG FIX: read until the buffer is full or fscanf stops matching.
// The original `while(!feof(fp))` stored one stale value after the last
// successful read and never bounds-checked `ind` against the buffer.
// %u matches tmp's unsigned int type (the original used %d).
while (ind < n*m*c && fscanf(fp,"%u",&tmp) == 1){
Pin_h[ind]=tmp;
ind=ind+1;
}
fclose(fp);
int *Pout_h = (int*) malloc( sizeof(int)*n*m);
colorTogrey ( Pin_h, Pout_h, n, m, c); // run the conversion on the GPU
// dump the grayscale result, one image row per output line
FILE *fp3;
fp3=fopen("testImage_Results_RGB.txt","w");
for (int i=0; i < m; i++){
for (int j=0; j < n; j++){
fprintf(fp3,"%4d ",Pout_h[i*n+j]);}
fprintf(fp3,"\n");
}
fclose(fp3);
// free the memory we allocated on the CPU
free( Pin_h);
free( Pout_h );
return 0;
}
// Host wrapper: copies the RGB image to the device, launches the
// grayscale kernel over a 16x16 thread grid, and copies the result back.
// n = width, m = height, c = number of channels in the input.
void colorTogrey(int *Pin_h, int *Pout_h, int n, int m, int c)
{
int size_in = (n *m* c*sizeof(int)); int size_out = (n*m*sizeof(int));
int *Pin_d; int *Pout_d;
// Transfer Pin_h to device memory
cudaMalloc((void **) &Pin_d, size_in);
cudaMemcpy(Pin_d, Pin_h, size_in, cudaMemcpyHostToDevice);
// Allocate device memory for Pout_d
cudaMalloc((void **) &Pout_d, size_out);
// BUG FIX: ceil(m/16) truncated because m/16 is INTEGER division before
// ceil() ever sees it — for m = 1080 that gave 67 blocks (1072 rows) and
// silently skipped the bottom 8 rows. Use integer ceiling division.
dim3 dimGrid((n + 15) / 16, (m + 15) / 16, 1);
dim3 dimBlock(16,16,1);
colorToGreyscaleConvertion<<<dimGrid,dimBlock>>>(Pin_d, Pout_d, n, m);
// Transfer Pout_d from device to host (blocking copy also synchronizes)
cudaMemcpy(Pout_h, Pout_d, size_out, cudaMemcpyDeviceToHost);
// Free device memory
cudaFree(Pin_d); cudaFree(Pout_d);
}
|
18,896 | #include "includes.h"
__constant__ float *c_Kernel;
// Row-wise 1D convolution with the filter taps in c_Kernel, using a shared
// memory tile with halo regions. Each thread block processes
// ROWS_RESULT_STEPS output tiles per row, plus Halo_steps halo tiles on
// each side. ROWS_RESULT_STEPS / ROWS_BLOCKDIM_X / ROWS_BLOCKDIM_Y come
// from an external header — presumably includes.h; confirm their values
// match the launch configuration, which is not visible in this file.
// Requires dynamic shared memory of at least
// ROWS_BLOCKDIM_Y * (ROWS_RESULT_STEPS + 2*Halo_steps) * ROWS_BLOCKDIM_X floats.
__global__ void convolutionRowsKernel_v1( float *d_Dst, float *d_Src, int imageW, int filter_Rad, int Halo_steps )
{
extern __shared__ float s_Data[];
//Offset to the left halo edge
const int baseX = (blockIdx.x * ROWS_RESULT_STEPS - Halo_steps) * ROWS_BLOCKDIM_X + threadIdx.x;
const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y;
// shift both pointers so tile offsets below are relative to this thread
d_Src += baseY * imageW + baseX;
d_Dst += baseY * imageW + baseX;
//Load main data (superseded: the "right halo" loop below also covers the main tiles)
/*#pragma unroll
for (int i = Halo_steps; i < Halo_steps + ROWS_RESULT_STEPS; i++)
{
s_Data[(threadIdx.y*(ROWS_RESULT_STEPS + 2 * Halo_steps)*ROWS_BLOCKDIM_X) + threadIdx.x + i * ROWS_BLOCKDIM_X] = d_Src[i * ROWS_BLOCKDIM_X];
}*/
//Load left halo; out-of-range (negative) columns are zero-filled
#pragma unroll
for (int i = 0; i < Halo_steps; i++)
{
s_Data[(threadIdx.y*(ROWS_RESULT_STEPS + 2 * Halo_steps)*ROWS_BLOCKDIM_X) + threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX + i * ROWS_BLOCKDIM_X >= 0) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
}
//Load right halo and main data; columns past imageW are zero-filled
#pragma unroll
for (int i = Halo_steps; i < Halo_steps + ROWS_RESULT_STEPS + Halo_steps; i++)
{
s_Data[(threadIdx.y*(ROWS_RESULT_STEPS + 2 * Halo_steps)*ROWS_BLOCKDIM_X) + threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX + i * ROWS_BLOCKDIM_X < imageW) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
}
//Compute and store results (barrier: all tile loads must be visible first)
__syncthreads();
#pragma unroll
for (int i = Halo_steps; i < Halo_steps + ROWS_RESULT_STEPS; i++)
{
float sum = 0;
if (baseX + i * ROWS_BLOCKDIM_X < imageW)
{
// dot product of the filter with the neighborhood in shared memory;
// c_Kernel is indexed reversed (filter_Rad - j), i.e. true convolution
#pragma unroll
for (int j = -filter_Rad; j <= filter_Rad; j++)
{
sum += c_Kernel[filter_Rad - j] * s_Data[(threadIdx.y*(ROWS_RESULT_STEPS + 2 * Halo_steps)*ROWS_BLOCKDIM_X) + threadIdx.x + i * ROWS_BLOCKDIM_X + j];
}
d_Dst[i * ROWS_BLOCKDIM_X] = sum;
}
}
}
18,897 | __global__
void neg_kernel(int n, const float *x, float *z)
{
// Element-wise negation: z[i] = -x[i] for every i in [0, n).
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < n)
z[idx] = -x[idx];
}
void neg(int n, const float *x, float *z) {
// Launch one thread per element: 256 threads per block, grid rounded up.
const int threads = 256;
neg_kernel<<<(n + threads - 1) / threads, threads>>>(n, x, z);
}
|
18,898 | #include <cuda.h>
#include <iostream>
using namespace std;
/* 2D thread block version of addOne kernel
*/
__global__ void addOne(double *data) {
// Increment one element per thread. The block uses a 2D thread layout,
// so flatten (ty, tx) into a local index, then offset by this block's
// contiguous slice of the 1D array.
int threadsPerBlock = blockDim.x * blockDim.y;
int local = threadIdx.y * blockDim.x + threadIdx.x;
int i = blockIdx.x * threadsPerBlock + local;
data[i]++;
}
int main() {
// Host setup: n doubles initialized to 0..n-1.
const int n = 2048;
double *host_buf = (double*) malloc(n * sizeof(double));
for (int i = 0; i < n; i++)
host_buf[i] = (double)i;
// Device copy of the buffer.
double *dev_buf;
cudaMalloc((void**) &dev_buf, n * sizeof(double));
cudaMemcpy(dev_buf, host_buf, n * sizeof(double), cudaMemcpyHostToDevice);
// 32 blocks of 16x16 threads cover all 2048 = 32 * 256 elements.
dim3 nBlocks(32,1);
dim3 nThreads(16,16,1);
addOne <<< nBlocks, nThreads >>> (dev_buf);
// Blocking copy back also synchronizes with the kernel.
cudaMemcpy(host_buf, dev_buf, n * sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(dev_buf);
cout << "data[n-1] = " << host_buf[n-1] << endl;
free(host_buf);
}
|
18,899 | #include "kronmult.cuh"
#include <device_launch_parameters.h>
#include <type_traits>
/*
* computes number^power for integers
* does not care about performances
* does not use std::pow as it does an implicit float conversion
* that could lead to rounding errors for large numbers
*/
/*
 * computes number^power for integers
 * does not use std::pow as it does an implicit float conversion
 * that could lead to rounding errors for large numbers
 * BUG FIX: the recursive version never terminated for power < 0
 * (power-1 only moves further from the base case); this iterative
 * form returns 1 for power <= 0, matching the power == 0 base case.
 */
__host__ int pow_int(int const number, int const power)
{
    int result = 1;
    for (int p = 0; p < power; p++)
        result *= number;
    return result;
}
/*
* converts row and col indices into a single index for a matrix stored in col-major
* `stride` is usually the number of rows of the matrix
*/
/*
 * flattens (row, col) into a single index for a col-major matrix;
 * `stride` is usually the number of rows of the matrix
 */
__device__ __forceinline__ constexpr int colmajor(int const row, int const col, int const stride)
{
    return col * stride + row;
}
/*
* computes output = input^T
*
* `input` is a `matrix_size` by `matrix_size` square matrix of stride `input_stride`
* `output` is a `matrix_size` by `matrix_size` square matrix of stride `matrix_size`
*
* WARNING: the matrices are assumed to be stored in col-major order
*/
// Block-cooperative transpose: output = input^T.
// `input` is matrix_size x matrix_size with stride `input_stride`;
// `output` is densely packed with stride matrix_size.
// Threads stride over the matrix_size^2 cells; callers must
// __syncthreads() before reading `output` (cuda_kronmult does).
template<typename T>
__device__ void transpose(T const input[], T output[], int const matrix_size, int const input_stride)
{
for (int i = threadIdx.x; i < matrix_size * matrix_size; i += blockDim.x)
{
// decompose the flat col-major index into (row r, column c)
int const c = i / matrix_size;
int const r = i - c * matrix_size;
// output[r,c] = input[c,r]; note the two different strides
output[colmajor(r, c, matrix_size)] = input[colmajor(c, r, input_stride)];
}
}
/*
* Computes Y = X^T * M^T
*
* X is a `size_M` by `nb_col_X` matrix of stride `size_M`
* M_transposed is a `size_M` by `size_M` matrix of stride `size_M` that contains a precomputed M^T
* Y is a `nb_col_X` by `size_M` matrix of stride `nb_col_X`
*
* WARNING: the matrices are assumed to be stored in col-major order
*/
template<typename T>
__device__ void
multiply_transpose(T const X[], int const nb_col_X, T const M_transposed[], int const size_M, T Y[])
{
// strided loop: thread threadIdx.x handles the flat output indices
// i with i ≡ threadIdx.x (mod blockDim.x)
for (int i = threadIdx.x; i < nb_col_X * size_M; i += blockDim.x)
{
// extracts the column of X and row of M for this output cell
int const colX = i / size_M;
int const rowM = i - colX * size_M;
// dot product of X's column with M^T's column (= M's row)
T dotprod = 0.;
for (int k = 0; k < size_M; k++)
{
dotprod += X[colmajor(k, colX, size_M)] * M_transposed[colmajor(k, rowM, size_M)];
}
// original author's note: this sync is only for float performance,
// not correctness of the arithmetic.
// NOTE(review): this __syncthreads() sits inside a loop whose trip
// count can differ between threads when nb_col_X*size_M is not a
// multiple of blockDim.x — that would make it a divergent barrier;
// confirm callers guarantee a uniform trip count.
if constexpr(std::is_same<float, T>::value) __syncthreads();
Y[colmajor(colX, rowM, nb_col_X)] = dotprod;
}
}
/*
* Computes output += kron(matrix_list) * input while insuring that the addition to output is thread-safe
*
* `matrix_list` is an array containing pointers to `matrix_number` square matrices of size `matrix_size` by
* `matrix_size` and stride `matrix_stride` `input` is a `size_input` (`matrix_size`^`matrix_number`) elements
* vector `output` is a `size_input` elements vector, to which the output of the multiplication will be added
* `workspace` is a `size_input` elements vector, to be used as workspace
* `transpose_workspace` is a vector of size `matrix_size`*`matrix_size` to store transposed matrices
* temporarily
*
* WARNINGS:
* - `input`, `workspace` and `transpose_workspace` will be used as temporary workspaces and thus modified
* - the matrices are assumed to be stored in col-major order
* - the sizes are assumed to be correct
*/
// Computes output += kron(matrix_list) * input for one batch element.
// Consumes the matrices last-to-first; each step multiplies by one
// matrix (via its transpose, for coalescing) and implicitly reshapes by
// swapping the `input`/`workspace` pointers. All threads of the block
// cooperate; `input`, `workspace` and `transpose_workspace` are clobbered.
template<typename T>
__device__ void cuda_kronmult(int const matrix_count, int const matrix_size, T const *const matrix_list[],
int const matrix_stride, T input[], int const size_input, T output[],
T workspace[], T transpose_workspace[])
{
// how many columns `input` must have for the multiplications to be legal
int const nb_col_input = size_input / matrix_size;
// iterates on the matrices from last to first
for (int i = matrix_count - 1; i >= 0; i--)
{
// transpose the matrix to get better memory coalescing
T const *const matrix = matrix_list[i];
transpose(matrix, transpose_workspace, matrix_size, matrix_stride);
__syncthreads(); // transpose_workspace must be complete before use
// performs the multiplication to consume the matrix
multiply_transpose<T>(input, nb_col_input, transpose_workspace, matrix_size, workspace);
__syncthreads(); // workspace must be complete before the swap below
// swap `input` and `workspace` so `input` again names the current data;
// the flattened sizes match but the logical shapes differ — this is the
// reshape the algorithm needs. After the loop, `input` points at
// whichever buffer holds the final product (original input buffer or
// workspace, depending on the parity of matrix_count).
T *temp = input;
input = workspace;
workspace = temp;
}
// adds the result to output thread-safely (several batch elements may
// target the same output vector); strided over threads as above
for (int i = threadIdx.x; i < size_input; i += blockDim.x)
{
atomicAdd(&output[i], input[i]);
}
}
/*
* each block gets a single batch element to process
*
* computes the current batch element
* finds the corresponding inputs
* and calls kronmult on them
*/
// One block per batch element: looks up the element's matrices and
// vectors, allocates a shared transpose scratch buffer, and runs
// cuda_kronmult on it.
template<typename T>
__global__ void cuda_kronmult_batchelement(int const matrix_count, int const matrix_size,
T const *const matrix_list_batched[], int const matrix_stride,
T *input_batched[], int const size_input, T *output_batched[],
T *workspace_batched[], int const nb_batch)
{
// each block corresponds to a single batch element
int const batchId = blockIdx.x;
// gets the inputs for this batch element
T const *const *matrix_list = &matrix_list_batched[batchId * matrix_count];
T *input = input_batched[batchId];
T *output = output_batched[batchId];
T *workspace = workspace_batched[batchId];
// one thread allocates the transpose scratch; the POINTER lives in
// shared memory so all threads see it, but the buffer itself comes from
// the device heap (device-side `new`).
// NOTE(review): device `new` returns nullptr if the device heap is
// exhausted — this is not checked; confirm the heap size is sufficient
// for matrix_size^2 * sizeof(T) per resident block.
__shared__ T *transpose_workspace;
if (threadIdx.x == 0) transpose_workspace = new T[matrix_size * matrix_size];
__syncthreads(); // all threads must see the allocated pointer
// does the kronmult computation for this batch element
cuda_kronmult<T>(matrix_count, matrix_size, matrix_list, matrix_stride,
input, size_input, output,
workspace, transpose_workspace);
// frees the scratch buffer once every thread is done with it
__syncthreads();
if (threadIdx.x == 0) delete[] transpose_workspace;
}
/*
* calls the cuda kernel with the proper number of blocks and threads
* we expect the inputs to already be on the GPU
*/
/*
 * Host launcher for the batched kronmult kernel.
 * All pointer arguments are expected to already reside on the GPU.
 * Blocks until the kernel completes and returns its status.
 */
template<typename T>
__host__ cudaError cuda_kronmult_batched(int const matrix_count, int const matrix_size,
                                         T const *const matrix_list_batched[], int const matrix_stride,
                                         T *input_batched[], T *output_batched[], T *workspace_batched[],
                                         int const nb_batch)
{
    // each input vector holds matrix_size^matrix_count elements
    int const size_input = pow_int(matrix_size, matrix_count);
    // pick the device's maximum legal block size, capped by the number of
    // elements a block actually has to process
    int device;
    cudaGetDevice(&device);
    int block_size;
    cudaDeviceGetAttribute(&block_size, cudaDevAttrMaxThreadsPerBlock, device);
    if (size_input < block_size) block_size = size_input;
    // one block per batch element; threads stride over the input vector
    cuda_kronmult_batchelement<<<nb_batch, block_size>>>(matrix_count, matrix_size, matrix_list_batched,
                                                         matrix_stride, input_batched, size_input,
                                                         output_batched, workspace_batched, nb_batch);
    // block until the kernel finishes and surface any execution error
    return cudaDeviceSynchronize();
}
/*
* double specialization of kronmult_batched
*/
// double specialization: forwards directly to the CUDA implementation
template<>
__host__ cudaError kronmult_batched<double>(int const matrix_count, int const matrix_size,
                                            double const *const matrix_list_batched[],
                                            int const matrix_stride, double *input_batched[],
                                            double *output_batched[], double *workspace_batched[],
                                            int const nb_batch)
{
    return cuda_kronmult_batched(matrix_count, matrix_size, matrix_list_batched,
                                 matrix_stride, input_batched, output_batched,
                                 workspace_batched, nb_batch);
}
/*
* float specialization of kronmult_batched
*/
// float specialization: forwards directly to the CUDA implementation
template<>
__host__ cudaError kronmult_batched<float>(int const matrix_count, int const matrix_size,
                                           float const *const matrix_list_batched[], int const matrix_stride,
                                           float *input_batched[], float *output_batched[],
                                           float *workspace_batched[], int const nb_batch)
{
    return cuda_kronmult_batched(matrix_count, matrix_size, matrix_list_batched,
                                 matrix_stride, input_batched, output_batched,
                                 workspace_batched, nb_batch);
}
|
18,900 | #include <stdio.h>
// Adds the two scalars on the device: *result = *num1 + *num2.
// Intended for a <<<1,1>>> launch; every launched thread writes result.
__global__ void kernel(int *num1, int *num2, int *result) {
    *result = *num1 + *num2;
}
int main(void) {
    // host copies
    int num1, num2, result;
    // device copies
    int *p_num1, *p_num2, *p_result;
    // allocate space on device
    cudaMalloc(&p_num1, sizeof(int));
    cudaMalloc(&p_num2, sizeof(int));
    cudaMalloc(&p_result, sizeof(int));
    // pick numbers to add
    num1 = 4;
    num2 = 5;
    // copy to device
    cudaMemcpy(p_num1, &num1, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(p_num2, &num2, sizeof(int), cudaMemcpyHostToDevice);
    // launch a single thread to do the addition
    kernel<<<1,1>>>(p_num1, p_num2, p_result);
    // blocking copy back also synchronizes with the kernel
    cudaMemcpy(&result, p_result, sizeof(int), cudaMemcpyDeviceToHost);
    printf("%d\n", result);
    // release device memory: the original leaked all three allocations
    cudaFree(p_num1);
    cudaFree(p_num2);
    cudaFree(p_result);
    return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.