serial_no (int64, 1 to 24.2k) | cuda_source (string, lengths 11 to 9.01M)
|---|---|
1
|
#include "includes.h"
__global__ void vecProductKernel(float *d_z, const float *d_x, const float *d_y, unsigned int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N) {
d_z[idx] = d_x[idx] * d_y[idx];
}
}
|
2
|
#include "includes.h"
__global__ void STREAM_Triad_double(double *a, double *b, double *c, double scalar, size_t len)
{
size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
while (idx < len) {
c[idx] = a[idx]+scalar*b[idx];
idx += blockDim.x * gridDim.x;
}
}
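// Hypothetical host-side usage (added for illustration; the array length, the
// block size, and the assumption that includes.h pulls in the CUDA runtime
// are mine, not the original author's):
static void launch_triad_example()
{
size_t len = 1 << 20;
double *a = 0, *b = 0, *c = 0;
cudaMalloc(&a, len * sizeof(double));
cudaMalloc(&b, len * sizeof(double));
cudaMalloc(&c, len * sizeof(double));
int block = 256;
// Any grid size works: the grid-stride loop above covers the remainder.
int grid = (int)((len + block - 1) / block);
STREAM_Triad_double<<<grid, block>>>(a, b, c, 2.0, len);
cudaDeviceSynchronize();
cudaFree(a); cudaFree(b); cudaFree(c);
}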
|
3
|
#include <iostream>
#include "sys/time.h"
using namespace std;
double timeInSeconds (timeval& starttime, timeval& stoptime) {
return 1e-6*(1e6*(stoptime.tv_sec - starttime.tv_sec) + (stoptime.tv_usec - starttime.tv_usec));
}
__device__ double* dev_vector1 = 0;
__device__ double* dev_vector2 = 0;
__device__ double* dev_results = 0;
__global__ void device_vector_mult () {
// IMPLEMENT ME 6: Multiply the threadIdx.x element of dev_vector1 by the
// corresponding element of dev_vector2, and store in dev_results.
}
int main (int argc, char** argv) {
int sizeOfVector = 100;
if (argc > 1) sizeOfVector = atoi(argv[1]);
// Declare and fill host-side arrays of doubles.
double* vector1 = new double[sizeOfVector];
double* vector2 = new double[sizeOfVector];
double* results = new double[sizeOfVector];
srand(42);
for (int i = 0; i < sizeOfVector; ++i) {
vector1[i] = rand() % 100;
vector2[i] = rand() % 100;
results[i] = 0;
}
timeval startTime;
timeval interTime;
timeval stopsTime;
gettimeofday(&startTime, NULL);
// Use the CPU for this part.
// IMPLEMENT ME 1: Multiply each element of vector1 by the corresponding
// element in vector2 and store in results.
for (int i = 0; i < sizeOfVector; ++i) {
results[i] = vector1[i] * vector2[i];
}
gettimeofday(&interTime, NULL);
double total = 0;
// IMPLEMENT ME 2: Sum the results array and store the sum in total.
for (int i = 0; i < sizeOfVector; ++i) {
total += results[i];
}
gettimeofday(&stopsTime, NULL);
cout << "Dot product is : " << total << endl;
// IMPLEMENT ME 3: Time the above operations together and separately
// using 'gettimeofday'.
cout << "Time for multiplication (seconds): " << timeInSeconds(startTime, interTime) << endl;
cout << "Time for addition (seconds): " << timeInSeconds(interTime, stopsTime) << endl;
cout << "Overall time (seconds): " << timeInSeconds(startTime, stopsTime) << endl;
// Now on to the GPU!
// IMPLEMENT ME 4: Use cudaMalloc to allocate space for the three device vectors.
// IMPLEMENT ME 5: Use cudaMemcpy to initialise dev_vector1 and dev_vector2 to have
// the same content as the host-side arrays.
// IMPLEMENT ME 6: Put in the function body for device_vector_mult, above.
// IMPLEMENT ME 7: Launch a kernel that runs device_vector_mult.
// IMPLEMENT ME 8: Use cudaMemcpy to copy back dev_results into results.
// IMPLEMENT ME 9: Calculate the dot product by summing over results, same
// as above.
// IMPLEMENT ME 10: Take the time for the kernel launch and the addition,
// and print out the results (including the dot product) as you did for the CPU.
// IMPLEMENT ME 11: Write a reduction kernel that sums over dev_results, and launch it.
// Time this operation and compare with the code that first moves the transformed data
// to the host, then sums over it.
return 0;
}
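// A possible solution sketch for steps 4-10 (added for illustration; not the
// exercise's official solution). It assumes sizeOfVector <= 1024 so a single
// block suffices; step 11 (the reduction kernel) is left open.
//
//   __global__ void device_vector_mult () {
//     int i = threadIdx.x;
//     dev_results[i] = dev_vector1[i] * dev_vector2[i];
//   }
//
//   // In main, after the CPU part:
//   double *d_v1, *d_v2, *d_res;                                   // step 4
//   cudaMalloc(&d_v1, sizeOfVector * sizeof(double));
//   cudaMalloc(&d_v2, sizeOfVector * sizeof(double));
//   cudaMalloc(&d_res, sizeOfVector * sizeof(double));
//   cudaMemcpyToSymbol(dev_vector1, &d_v1, sizeof(double*));
//   cudaMemcpyToSymbol(dev_vector2, &d_v2, sizeof(double*));
//   cudaMemcpyToSymbol(dev_results, &d_res, sizeof(double*));
//   cudaMemcpy(d_v1, vector1, sizeOfVector * sizeof(double),       // step 5
//              cudaMemcpyHostToDevice);
//   cudaMemcpy(d_v2, vector2, sizeOfVector * sizeof(double),
//              cudaMemcpyHostToDevice);
//   device_vector_mult<<<1, sizeOfVector>>>();                     // step 7
//   cudaMemcpy(results, d_res, sizeOfVector * sizeof(double),      // step 8
//              cudaMemcpyDeviceToHost);
//   // Steps 9-10: sum `results` and time the kernel and copies exactly as
//   // for the CPU path above.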
|
4
|
/******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
#define TILE_SIZE 10
__global__ void mysgemm(int m, int n, int k, const float *A, const float *B, float* C) {
/********************************************************************
*
* Compute C = A x B
* where A is a (m x k) matrix
* where B is a (k x n) matrix
* where C is a (m x n) matrix
*
* Use shared memory for tiling
*
********************************************************************/
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int right_boundary = k*TILE_SIZE*by + k;
float Sum = 0;
for (int a=k*TILE_SIZE*by, b=bx*TILE_SIZE; a<right_boundary; a+=TILE_SIZE,b+=(TILE_SIZE*n))
{
__shared__ float Acache[TILE_SIZE][TILE_SIZE];
__shared__ float Bcache[TILE_SIZE][TILE_SIZE];
Acache[ty][tx] = A[a + k * ty + tx];
Bcache[ty][tx] = B[b + n * ty + tx];
__syncthreads();
for (int i=0; i<TILE_SIZE; i++) {
Sum += Acache[ty][i] * Bcache[i][tx];
}
__syncthreads();
}
// Write this thread's accumulated element of C.
int c = n * TILE_SIZE * by + TILE_SIZE * bx;
C[c + n * ty + tx] = Sum;
}
void basicSgemm(char transa, char transb, int m, int n, int k, float alpha, const float *A, int lda, const float *B, int ldb, float beta, float *C, int ldc)
{
if ((transa != 'N') && (transa != 'n')) {
printf("unsupported value of 'transa'\n");
return;
}
if ((transb != 'N') && (transb != 'n')) {
printf("unsupported value of 'transb'\n");
return;
}
if ((alpha - 1.0f > 1e-10) || (alpha - 1.0f < -1e-10)) {
printf("unsupported value of alpha\n");
return;
}
if ((beta - 0.0f > 1e-10) || (beta - 0.0f < -1e-10)) {
printf("unsupported value of beta\n");
return;
}
// Initialize thread block and kernel grid dimensions ---------------------
const unsigned int BLOCK_SIZE = TILE_SIZE;
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
// Assumes m and n are multiples of TILE_SIZE; otherwise the grid misses the ragged edges.
dim3 dimGrid(n / dimBlock.x, m / dimBlock.y);
// Invoke CUDA kernel -----------------------------------------------------
mysgemm<<<dimGrid, dimBlock>>>(m, n, k, A, B, C);
}
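// Hypothetical usage sketch (not part of the original source): A, B and C
// must already be device pointers, and m, n, k multiples of TILE_SIZE.
//
//   float *dA, *dB, *dC;
//   cudaMalloc(&dA, m * k * sizeof(float));
//   cudaMalloc(&dB, k * n * sizeof(float));
//   cudaMalloc(&dC, m * n * sizeof(float));
//   // ... copy A and B to the device ...
//   basicSgemm('N', 'N', m, n, k, 1.0f, dA, m, dB, k, 0.0f, dC, m);
//   cudaDeviceSynchronize();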
|
5
|
///*
// * LinearSysSolver.cpp
// *
// * Created on: Jul 8, 2013
// * Author: adm85
// */
//
//#include <vector>
//#include <iostream>
//#include <time.h>
//#include "LinearSysSolver.h"
//#include "cublas_v2.h"
//#include "cula.h"
//
//
//LinearSysSolver::LinearSysSolver()
//{
// // TODO Auto-generated constructor stub
//
//}
//
//LinearSysSolver::~LinearSysSolver()
//{
// // TODO Auto-generated destructor stub
//}
//
///**
// * Solves A*x=B for x. The result is stored in the vector pointed to by B.
// */
//void LinearSysSolver::solveSystem(cuComplex* A, int M_A, int N_A, cuComplex* B, int M_B, int N_B) {
// //Get the LU Factorization
// cuComplex* LUMat = new cuComplex[M_A*N_A];
// int ipivLength = N_A;
// int* ipiv = new int[ipivLength];
// getLUDecomposition(A, M_A, N_A, LUMat, ipiv, ipivLength);
//
// //Calculate P*b
// swapPivotRows(B, M_B, N_B, ipiv, ipivLength);
//
// //Solve the system. The result will be stored in B
// cublasSolveLinearSystem(LUMat, M_A, N_A, B, M_B, N_B);
//
// // DEBUG CODE -------
// //cuComplex* test = multiplyMatrices(xTxInv, N, N, xTx, N, N);
// cuComplex* test = multiplyMatrices(A, M_A, N_A, B, M_B, N_B);
// cout << endl << "X * XInv" << endl;
// columnMajorPrintArray(test, M_A, N_B);
// delete [] test;
// // END DEBUG CODE ---
//
// delete [] LUMat;
// delete [] ipiv;
//}
//
//
///**
// * Uses the CULA library to get the LU decomposition of the matrix.
// */
//void LinearSysSolver::getLUDecomposition(cuComplex* x, int M, int N, cuComplex* LUMat, int* ipiv, int ipivLength) {
//
// culaDeviceFloatComplex* devxTx;
// culaDeviceInt* devIPIV;
//
// cudaMalloc(&devxTx, M*N*sizeof(culaDeviceFloatComplex));
// cudaMalloc(&devIPIV, ipivLength*sizeof(culaDeviceInt));
// cudaMemcpy(devxTx, x, M*N*sizeof(culaDeviceFloatComplex), cudaMemcpyHostToDevice);
//
// culaStatus culaStat;
// culaInitialize();
//
// culaStat = culaDeviceCgetrf(M, N, devxTx, M, devIPIV);
// if(culaStat != culaNoError) {
// cout << "Cula Cgetrf failure" << endl;
// }
//
// culaShutdown();
//
// //LUMat = new cuComplex[M*N];
// cudaMemcpy(LUMat, devxTx, M*N*sizeof(culaDeviceFloatComplex), cudaMemcpyDeviceToHost);
// cudaMemcpy(ipiv, devIPIV, ipivLength*sizeof(culaDeviceInt), cudaMemcpyDeviceToHost);
//
//// getL(L, LUMat, M, N);
////
// cout << "LUMat Inside:" << endl;
// columnMajorPrintArray(LUMat, M, N);
////
//// getU(U, LUMat, M, N);
//// cout << endl << "U" << endl;
//// columnMajorPrintArray(U, M, N);
//
// cudaFree(devxTx);
// cudaFree(devIPIV);
//}
//
///**
// * Using the information from the CULA-generated IPIV array,
// * this function swaps rows as appropriate.
// */
//void LinearSysSolver::swapPivotRows(cuComplex* x, int M, int N, int* ipiv, int ipivLength) {
// //Temporary row vector
// cuComplex rowVec[N];
//
// //We use index 1 based ordering because this is what CULA returns
// for(int i=1; i <= ipivLength; i++) {
// //Check to see if the row swaps. This happens when element x of the ipiv
// //array is not equal to x. When element x differs, it means that row x
// //and the row specified by element x swap places.
// if(ipiv[i-1] != i) {
// int startIndex = i-1;
// //Copy the current row into the temporary row vector
// for(int j = 0; j < N; j++) {
// rowVec[j].x = x[startIndex+j*M].x;
// rowVec[j].y = x[startIndex+j*M].y;
// }
//
// //Copy the specified row into the current row
// int specRowStart = ipiv[i-1]-1;
// for(int j=0; j < N; j++) {
// x[startIndex+j*M].x = x[specRowStart+j*M].x;
// x[startIndex+j*M].y = x[specRowStart+j*M].y;
// }
//
// //Copy the temp row into the specified row
// for(int j=0; j < N; j++) {
// x[specRowStart+j*M].x = rowVec[j].x;
// x[specRowStart+j*M].y = rowVec[j].y;
// }
// }
// }
//
//}
//
//void LinearSysSolver::cublasSolveLinearSystem(cuComplex* A, int M, int N, cuComplex* B, int M_B, int N_B) {
// cuComplex* xInv = new cuComplex[M*N_B];
//
// //Now put L, U, and the I matrix on the GPU
// cublasStatus_t stat;
// cublasHandle_t handle;
//
// cuComplex* devA;
// cuComplex* devB;
// cudaMalloc(&devA, M*N*sizeof(cuComplex));
// cudaMalloc(&devB, M_B*N_B*sizeof(cuComplex));
//
// stat = cublasCreate(&handle);
// if(stat != CUBLAS_STATUS_SUCCESS) {
// cout << "Error in solver" << endl;
// }
// stat = cublasSetMatrix(M, N, sizeof(cuComplex), A, M, devA, M);
// if(stat != CUBLAS_STATUS_SUCCESS) {
// cout << "Error in solver" << endl;
// }
// stat = cublasSetMatrix(M_B, N_B, sizeof(cuComplex), B, M_B, devB, M_B);
// if(stat != CUBLAS_STATUS_SUCCESS) {
// cout << "Error in solver" << endl;
// }
//
// //Set up Alpha
// cuComplex alpha;
// alpha.x = 1;
// alpha.y = 0;
//
// //First solve L*y = P*b
// stat = cublasCtrsm(handle, CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, CUBLAS_DIAG_UNIT, M, N, &alpha, devA, M, devB, M_B);
// if(stat != CUBLAS_STATUS_SUCCESS) {
// cout << "Error solving for y" << endl;
// }
//
// //Then solve U*x = y
// stat = cublasCtrsm(handle, CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_UPPER, CUBLAS_OP_N, CUBLAS_DIAG_NON_UNIT, M, N, &alpha, devA, M, devB, M_B);
// if(stat != CUBLAS_STATUS_SUCCESS) {
// cout << "Error solving for x" << endl;
// }
//
// //Get results, and store them in matrix B
// cudaMemcpy(B, devB, M*N_B*sizeof(cuComplex), cudaMemcpyDeviceToHost);
//
// //Free resources
// cublasDestroy(handle);
// cudaFree(devA);
// cudaFree(devB);
//}
//
///**
// * Multiplies two matrices together and returns a newly allocated result matrix.
// */
//cuComplex* LinearSysSolver::multiplyMatrices(cuComplex* A, int M_A, int N_A, cuComplex* B, int M_B, int N_B) {
// cudaError_t cudaStat;
// cublasStatus_t stat;
// cublasHandle_t handle;
//
// cuComplex* devA;
// cuComplex* devB;
// cuComplex* devC;
// cuComplex* alpha = new cuComplex;
// cuComplex* beta = new cuComplex;
// cuComplex* hostC = new cuComplex[M_A*N_B];
// alpha->x = 1;
// alpha->y = 0;
// beta->x = 0;
// beta->y = 0;
//
// cudaStat = cudaMalloc(&devA, M_A*N_A*sizeof(cuComplex));
// cudaStat = cudaMalloc(&devB, M_B*N_B*sizeof(cuComplex));
// cudaStat = cudaMalloc(&devC, M_A*N_B*sizeof(cuComplex));
// if(cudaStat != cudaSuccess) {
// cout << "Horrible failure!" << endl;
// }
//
// stat = cublasCreate(&handle);
//
// stat = cublasSetMatrix(M_A, N_A, sizeof(cuComplex), A, M_A, devA, M_A);
// if (stat != CUBLAS_STATUS_SUCCESS) {
// cout << "Data download A failed" << endl;
// }
// stat = cublasSetMatrix(M_B, N_B, sizeof(cuComplex), B, M_B, devB, M_B);
// if (stat != CUBLAS_STATUS_SUCCESS) {
// cout << "Data download B failed" << endl;
// }
//
// //Perform the multiply.
// stat = cublasCgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, M_A, N_B, N_A, alpha, devA, M_A, devB, M_B, beta, devC, M_A);
//
// stat = cublasGetMatrix(M_A, N_B, sizeof(cuComplex), devC, M_A, hostC, M_A);
// if (stat != CUBLAS_STATUS_SUCCESS) {
// cout << "Failed to get devC to hostC" << endl;
// cout << stat << endl;
// }
//
// cudaFree(devA);
// cudaFree(devB);
// cudaFree(devC);
// cublasDestroy(handle);
//
// delete alpha;
// delete beta;
// return hostC;
//
//}
//
///**
// * Prints out an array that is stored in column-major order in memory.
// */
//void LinearSysSolver::columnMajorPrintArray(cuComplex* x, int M, int N) {
// int realIndex;
// cout << "------------------------------------------------------" << endl;
// cout << " Printing Column Order Matrix " << endl;
// cout << "------------------------------------------------------" << endl;
// for(int i=0; i < M; i++) {
// cout << "Row: " << (i+1) << " ";
// for(int j=0; j < N; j++) {
// realIndex = (M*j)+i;
// cout << x[realIndex].x;
// if(x[realIndex].y >= 0) {
// cout << "+";
// }
// cout << x[realIndex].y << "i ";
// }
// cout << endl;
// }
//}
|
6
|
// Each thread calculates fitness for one individual
// Result: vector of fitness
extern "C"
__global__ void fitness_kernel(int populationCnt, int *population,
int pointsCnt, float *pointsX, float *pointsY, float *result)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < populationCnt)
{
int shift = 5*i;
float fitness = 0.0f;
for (int p = 0; p < pointsCnt; p++)
{
float fApprox = population[shift + 4];
for (int k = 3; k >= 0; k--)
{
fApprox = fApprox * (*pointsX) + population[shift + k];
}
fApprox /= 10.0f;
++pointsX;
fitness += pow(fApprox - *(pointsY++), 2);
}
result[i] = fitness / pointsCnt;
}
}
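// Hypothetical launch sketch (not part of the original source): one thread per
// individual, grid rounded up; the d_* buffer names are assumptions.
//
//   int block = 256;
//   int grid = (populationCnt + block - 1) / block;
//   fitness_kernel<<<grid, block>>>(populationCnt, d_population,
//                                   pointsCnt, d_pointsX, d_pointsY, d_fitness);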
|
7
|
#include "cuda_runtime.h"
#include <cstdio>
#include "time.h"
constexpr int segment_size = 1024;
constexpr int threads = 512;
__device__ char *pool;
void __global__ alloc(int **pointers) {
auto index = blockIdx.x * blockDim.x + threadIdx.x;
// Bump-allocate segment_size bytes from the shared pool with an atomic
// pointer advance (the per-thread malloc alternative is kept commented out).
// pointers[index] = (int *)malloc(segment_size);
pointers[index] = (int *)atomicAdd((unsigned long long *)&pool, segment_size);
}
void __global__ fill(int **pointers) {
auto index = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = 0; i < segment_size / sizeof(int); i++) {
pointers[index][i] = i;
}
}
void __global__ free(int **pointers) {
auto index = blockIdx.x * blockDim.x + threadIdx.x;
// free(pointers[index]);
}
int main() {
int **pointers;
cudaMalloc(&pointers, threads * sizeof(int *));
int bd = 32;
for (int i = 0; i < 10; i++) {
// A fresh pool is allocated on every iteration and never freed, so this
// leaks across iterations.
char *pool_;
cudaMallocManaged(&pool_, segment_size * threads);
cudaMemcpyToSymbol(pool, &pool_, sizeof(void *));
alloc<<<threads / bd, bd>>>(pointers);
fill<<<threads / bd, bd>>>(pointers);
free<<<threads / bd, bd>>>(pointers);
}
cudaDeviceSynchronize();
}
|
8
|
#include <algorithm>
#include <iostream>
#include <vector>
std::vector<double> add(std::vector<double> inarr1, std::vector<double> inarr2);
void test_integration()
{
constexpr size_t arr_size = 2 << 24;
std::cout << "Initializing test arrays...\n";
std::vector<double> arr1(arr_size);
std::vector<double> arr2(arr_size);
for (size_t i = 0; i < arr_size; i++)
{
arr1[i] = static_cast<double>(i);
arr2[i] = static_cast<double>(arr_size - i);
}
std::cout << "Calling the kernel wrapper...\n";
auto result = add(std::move(arr1), std::move(arr2));
std::cout << "Verifying results...\n";
if (std::all_of(result.begin(), result.end(),
[arr_size](double x) { return x == arr_size; }))
{
std::cout << "All results were valid.\n";
}
else
{
std::cout << "At least one result is invalid.\n";
}
}
int main()
{
std::cout << "Test CUDA integration\n";
test_integration();
std::cout << "Finished testing\n";
return 0;
}
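// The kernel wrapper `add` is declared above but defined elsewhere (in a .cu
// file). A minimal sketch of what it might look like, as an assumption only:
//
//   __global__ void add_kernel(const double* a, const double* b, double* c, size_t n) {
//       size_t i = blockIdx.x * (size_t)blockDim.x + threadIdx.x;
//       if (i < n) c[i] = a[i] + b[i];
//   }
//
//   std::vector<double> add(std::vector<double> a, std::vector<double> b) {
//       size_t n = a.size();
//       double *da, *db, *dc;
//       cudaMalloc(&da, n * sizeof(double));
//       cudaMalloc(&db, n * sizeof(double));
//       cudaMalloc(&dc, n * sizeof(double));
//       cudaMemcpy(da, a.data(), n * sizeof(double), cudaMemcpyHostToDevice);
//       cudaMemcpy(db, b.data(), n * sizeof(double), cudaMemcpyHostToDevice);
//       int block = 256;
//       add_kernel<<<(unsigned)((n + block - 1) / block), block>>>(da, db, dc, n);
//       std::vector<double> out(n);
//       cudaMemcpy(out.data(), dc, n * sizeof(double), cudaMemcpyDeviceToHost);
//       cudaFree(da); cudaFree(db); cudaFree(dc);
//       return out;
//   }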
|
9
|
#include "Output_Layer_GPU_Kernels.cuh"
__constant__ float anchors_416[10] = { 1.08, 1.19, 3.42, 4.41, 6.63, 11.38, 9.42, 5.11, 16.62, 10.52 };
__device__ float Sigmoid(float x)
{
float expValue = exp((double)-x);
float result = 1 / (1 + expValue);
return result;
}
__global__ void XY_BoundingBox_Coordinates_Transform_Kernel(float* input, int inputHeight, int inputWidth)
{
int threadIndex = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
int tensorXYSize = inputHeight * inputWidth;
int tensorSize = boundingBoxesPerGridCell * tensorXYSize;
if (threadIndex < tensorSize)
{
int threadDepthIndex = threadIndex % boundingBoxesPerGridCell;
//int threadDepthIndexY = (threadIndex % XYCoordinatesCount) + 1;
int threadXYIndex = threadIndex % tensorXYSize;
int cy = threadXYIndex / inputWidth;
int cx = threadXYIndex % inputWidth;
//tensor[threadDepthIndex * tensorXYSize + threadXYIndex] = threadDepthIndex;
input[threadDepthIndex * 4 * tensorXYSize + threadXYIndex] = (cx + Sigmoid(input[threadDepthIndex * 4 * tensorXYSize + threadXYIndex])) * downsampleFactor;
input[(threadDepthIndex * 4 + 1) * tensorXYSize + threadXYIndex] = (cy + Sigmoid(input[(threadDepthIndex * 4 + 1) * tensorXYSize + threadXYIndex])) * downsampleFactor;
//input[threadDepthIndex * 4 * tensorXYSize + threadXYIndex] = 1;
//input[(threadDepthIndex * 4 + 1) * tensorXYSize + threadXYIndex] = 1;
}
}
__global__ void WH_BoundingBox_Transform_Kernel(float* input, int inputHeight, int inputWidth)
{
int threadIndex = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
int tensorXYSize = inputHeight * inputWidth;
int tensorSize = boundingBoxesPerGridCell * tensorXYSize;
if (threadIndex < tensorSize)
{
int threadDepthIndex = threadIndex % boundingBoxesPerGridCell;
//int threadDepthIndexY = (threadIndex % XYCoordinatesCount) + 1;
int threadXYIndex = threadIndex % tensorXYSize;
//tensor[threadDepthIndex * tensorXYSize + threadXYIndex] = threadDepthIndex;
input[(threadDepthIndex * 4 + 2) * tensorXYSize + threadXYIndex] = exp(input[(threadDepthIndex * 4 + 2) * tensorXYSize + threadXYIndex]) *
anchors_416[2 * threadDepthIndex] * downsampleFactor;
input[(threadDepthIndex * 4 + 3) * tensorXYSize + threadXYIndex] = exp(input[(threadDepthIndex * 4 + 3) * tensorXYSize + threadXYIndex]) *
anchors_416[2 * threadDepthIndex + 1] * downsampleFactor;
//input[(threadDepthIndex * 4 + 2) * tensorXYSize + threadXYIndex] = anchors_416[2 * threadDepthIndex] = 1;
//input[(threadDepthIndex * 4 + 3) * tensorXYSize + threadXYIndex] = anchors_416[2 * threadDepthIndex + 1] = 1;
input[(20 + threadDepthIndex) * tensorXYSize + threadXYIndex] = Sigmoid(input[(20 + threadDepthIndex) * tensorXYSize + threadXYIndex]);
//input[(20 + threadDepthIndex) * tensorXYSize + threadXYIndex] = 2;
}
}
__global__ void Softmax_Kernel(float* input, int classesCount, int inputHeight, int inputWidth)
{
int threadIndex = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
int tensorXYSize = inputHeight * inputWidth;
int tensorSize = boundingBoxesPerGridCell * tensorXYSize;
if (threadIndex < tensorSize)
{
int threadDepthIndex = threadIndex % boundingBoxesPerGridCell;
int threadXYIndex = threadIndex % tensorXYSize;
float maxClassProbability = FLOAT_MIN;
for (size_t i = 0; i < classesCount; i++)
{
float classProbability = input[(25 + threadDepthIndex * classesCount + i) * tensorXYSize + threadXYIndex];
if (classProbability > maxClassProbability)
{
maxClassProbability = classProbability;
}
}
float classProbabilitiesSum = 0;
for (size_t i = 0; i < classesCount; i++)
{
float exponent = exp(input[(25 + threadDepthIndex * classesCount + i) * tensorXYSize + threadXYIndex] - maxClassProbability);
classProbabilitiesSum += exponent;
input[(25 + threadDepthIndex * classesCount + i) * tensorXYSize + threadXYIndex] = exponent;
}
for (size_t i = 0; i < classesCount; i++)
{
input[(25 + threadDepthIndex * classesCount + i) * tensorXYSize + threadXYIndex] /= classProbabilitiesSum;
//input[(25 + threadDepthIndex * classesCount + i) * tensorXYSize + threadXYIndex] = i;
//input[(25 + threadDepthIndex * classesCount + i) * tensorXYSize + threadXYIndex] = 3;
}
}
}
void WH_BoundingBox_Transform(float* input, int inputHeight, int inputWidth)
{
int tensorSize = boundingBoxesPerGridCell * inputHeight * inputWidth;
int gridXDim = ceil(tensorSize / 512.0);
WH_BoundingBox_Transform_Kernel << <gridXDim, 512 >> > (input, inputHeight, inputWidth);
}
void XY_BoundingBox_Coordinates_Transform(float* input, int inputHeight, int inputWidth)
{
int tensorSize = boundingBoxesPerGridCell * inputHeight * inputWidth;
int gridXDim = ceil(tensorSize / 512.0);
XY_BoundingBox_Coordinates_Transform_Kernel << <gridXDim, 512 >> > (input, inputHeight, inputWidth);
}
void Softmax_GPU(float* input, int classesCount, int inputHeight, int inputWidth)
{
int tensorSize = boundingBoxesPerGridCell * inputHeight * inputWidth;
int gridXDim = ceil(tensorSize / 512.0);
Softmax_Kernel << <gridXDim, 512 >> > (input, classesCount, inputHeight, inputWidth);
}
|
10
|
#include <stdio.h>
#include <cuda_runtime.h>
#include <assert.h>
int main(int argc, char **argv){
float *a_h, *b_h; // Host data
float *a_d, *b_d; // Device data
int N = 14, nBytes, i;
printf("Start allocating\n");
nBytes = N * sizeof(float);
printf("Allocating in Host\n");
a_h = (float*) malloc(nBytes);
b_h = (float*) malloc(nBytes);
printf("Allocating in Device\n");
cudaMalloc((void**)&a_d, nBytes);
cudaMalloc((void**)&b_d, nBytes);
printf("End allocating\n");
for(i=0; i<N; i++)
a_h[i] = 100.0 + i;
printf("Start memcpy\n");
cudaMemcpy(a_d, a_h, nBytes, cudaMemcpyHostToDevice);
cudaMemcpy(b_d, a_d, nBytes, cudaMemcpyDeviceToDevice);
cudaMemcpy(b_h, b_d, nBytes, cudaMemcpyDeviceToHost);
printf("End memcpy\n");
for(i=0; i<N; i++)
assert(a_h[i] == b_h[i]);
free(a_h);
free(b_h);
cudaFree(a_d);
cudaFree(b_d);
return 0;
}
|
11
|
#include <cuda.h>
#define KERNEL_SIZE 3
#define BLOCK_SIZE 512
typedef signed int pixel_channel;
typedef unsigned long resolution;
__constant__ pixel_channel kernel_cuda[KERNEL_SIZE * KERNEL_SIZE];
pixel_channel kernel_host[KERNEL_SIZE * KERNEL_SIZE] = { -1, -1, -1,
-1, 9, -1,
-1, -1, -1 };
__global__ void Pixel_Shared_Convolution(pixel_channel *channel_cuda, pixel_channel *rezult_cuda, resolution width, resolution lineQuantity)
{
__shared__ pixel_channel sharedMemory [3][BLOCK_SIZE + 2];
for(long line = 1; line < lineQuantity; line++)
{
long temp = blockIdx.x * BLOCK_SIZE + threadIdx.x;
sharedMemory [0][threadIdx.x + 1] = channel_cuda[temp + width * (line - 1)];
sharedMemory [1][threadIdx.x + 1] = channel_cuda[temp + width * line];
sharedMemory [2][threadIdx.x + 1] = channel_cuda[temp + width * (line + 1)];
if(threadIdx.x == 0)
{
if(blockIdx.x != 0)
temp--;
sharedMemory [0][0] = channel_cuda[temp + width * (line-1)];
sharedMemory [1][0] = channel_cuda[temp + width * line];
sharedMemory [2][0] = channel_cuda[temp + width * (line+1)];
}
if(threadIdx.x == (BLOCK_SIZE - 1))
{
temp++;
sharedMemory [0][BLOCK_SIZE + 1] = channel_cuda[temp + width * (line - 1)];
sharedMemory [1][BLOCK_SIZE + 1] = channel_cuda[temp + width * line];
sharedMemory [2][BLOCK_SIZE + 1] = channel_cuda[temp + width * (line + 1)];
}
__syncthreads();
long Sum = 0;
for (int i = 0; i < KERNEL_SIZE; i++)
for (int j = 0; j < KERNEL_SIZE; j++)
Sum += sharedMemory[j][threadIdx.x + i] * kernel_cuda[i * 3 + j];
if (Sum < 0)
Sum = 0;
if (Sum > 255)
Sum = 255;
__syncthreads();
if((blockIdx.x * BLOCK_SIZE + threadIdx.x) > width)
continue;
rezult_cuda[blockIdx.x * BLOCK_SIZE + threadIdx.x + width * line] = Sum;
}
__syncthreads();
return;
}
extern "C" __host__ pixel_channel** asyncConvolution(pixel_channel **image, resolution width, resolution height)
{
pixel_channel **channel_cuda;
channel_cuda = (pixel_channel**)malloc(3*sizeof(pixel_channel*));
pixel_channel **rezult_cuda;
rezult_cuda = (pixel_channel**)malloc(3*sizeof(pixel_channel*));
resolution size = width * height;
cudaHostRegister(image[0], (size + BLOCK_SIZE) * sizeof(pixel_channel), cudaHostRegisterMapped);
cudaHostRegister(image[1], (size + BLOCK_SIZE) * sizeof(pixel_channel), cudaHostRegisterMapped);
cudaHostRegister(image[2], (size + BLOCK_SIZE) * sizeof(pixel_channel), cudaHostRegisterMapped);
cudaMalloc((void **)& rezult_cuda[0], (size + BLOCK_SIZE) * sizeof(pixel_channel));
cudaMalloc((void **)& rezult_cuda[1], (size + BLOCK_SIZE) * sizeof(pixel_channel));
cudaMalloc((void **)& rezult_cuda[2], (size + BLOCK_SIZE) * sizeof(pixel_channel));
cudaMalloc((void **)& channel_cuda[0], (size + BLOCK_SIZE) * sizeof(pixel_channel));
cudaMalloc((void **)& channel_cuda[1], (size + BLOCK_SIZE) * sizeof(pixel_channel));
cudaMalloc((void **)& channel_cuda[2], (size + BLOCK_SIZE) * sizeof(pixel_channel));
cudaMemcpyToSymbol(kernel_cuda, kernel_host, 9 * sizeof(pixel_channel), 0, cudaMemcpyHostToDevice);
resolution block_count = 0;
if(((width - 2)%BLOCK_SIZE) == 0)
block_count = (width - 2)/BLOCK_SIZE;
else
block_count = (width - 2)/BLOCK_SIZE + 1;
dim3 gridSize = dim3(block_count, 1, 1);
dim3 blockSize = dim3(BLOCK_SIZE, 1, 1);
cudaStream_t stream[3];
for(int i = 0; i < 3; i++)
{
cudaStreamCreate(&stream[i]);
cudaMemcpyAsync(channel_cuda[i], image[i], size*sizeof(pixel_channel), cudaMemcpyHostToDevice, stream[i]);
Pixel_Shared_Convolution<<<gridSize, blockSize, 0, stream[i]>>>(channel_cuda[i], rezult_cuda[i], width, height);
cudaMemcpyAsync(image[i], rezult_cuda[i], size*sizeof(pixel_channel), cudaMemcpyDeviceToHost,stream[i]);
cudaStreamDestroy(stream[i]);
}
for(int i=0;i<3;i++)
{
cudaFree(rezult_cuda[i]);
cudaFree(channel_cuda[i]);
}
cudaDeviceReset();
return image;
}
|
12
|
#include <stdio.h>
__global__ void add(int a, int b, int *c) {
*c = a + b;
}
int main( void ) {
int c;
int *dev_c;
//Device Memory allocations
cudaError_t err = cudaMalloc((void**)&dev_c, sizeof(int));
if(err != cudaSuccess) {
printf("The error is %s\n", cudaGetErrorString(err));
}
add<<<1,1>>>(2, 7, dev_c);
if(cudaPeekAtLastError() != cudaSuccess) {
printf("The error is %s\n", cudaGetErrorString(cudaGetLastError()));
}
cudaError_t err2 = cudaMemcpy( &c, dev_c, sizeof(c), cudaMemcpyDeviceToHost);
if(err2 != cudaSuccess) {
printf("The error is %s\n", cudaGetErrorString(err2));
}
printf("2 + 7 = %d\n", c);
cudaFree(dev_c);
return 0;
}
|
13
|
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <memory>
/* Query the CUDA-capable GPU devices present on this system. */
int main(void) {
int device_count = 0;
cudaGetDeviceCount(&device_count);
// Get the number of CUDA-capable GPU devices.
if (device_count ==0)
{
printf("There are no available device(s) that support CUDA\n");
}
else
{
printf("Detected %d CUDA Capable device(s)\n", device_count);
}
// Print information about the current device.
/*
The cudaDeviceProp structure identifies the device and the versions in use.
Its name field holds the device name as a string. cudaDriverGetVersion and
cudaRuntimeGetVersion report the CUDA driver and runtime versions in use.
multiProcessorCount gives the number of multiprocessors on the device, and
clockRate gives the GPU clock frequency in kHz.
*/
int device;
cudaDeviceProp device_Property;
cudaGetDevice(&device);
cudaGetDeviceProperties(&device_Property, device);
printf("\nDevice %d:\"%s\"\n", device, device_Property.name);
int driver_Version;
int runtime_Version;
cudaDriverGetVersion(&driver_Version);
cudaRuntimeGetVersion(&runtime_Version);
printf("CUDA Driver Version / Runtime Version %d.%d / %d.%d\n", driver_Version / 1000, (driver_Version % 100) / 10, runtime_Version / 1000, (runtime_Version % 100) / 10);
printf("Total amount of global memory:%.0f Mbytes (%1lu bytes)\n", (float)device_Property.totalGlobalMem / 1048576.0f, (unsigned long long)device_Property.totalGlobalMem);
printf("(%2d) Multiprocessors", device_Property.multiProcessorCount);
printf("GPU Max Clock rate:%.0f MHz (%0.2f GHz)\n", device_Property.clockRate * 1e-3f, device_Property.clockRate * 1e-6f);
/*
Threads and blocks are addressed in up to three dimensions (dim3), so it is
useful to know the limits per dimension as well as the totals.
maxThreadsPerMultiProcessor and maxThreadsPerBlock give the maximum number of
threads per multiprocessor and per block; maxThreadsDim gives the per-block
limit in each dimension; and maxGridSize is a three-element array with the
maximum grid size in the x, y and z dimensions.
*/
printf("Maximum number of threads per multiprocessor:%d\n", device_Property.maxThreadsPerMultiProcessor);
printf("Maximum number of threads per block:%d\n", device_Property.maxThreadsPerBlock);
printf("Max dimension size of a thread block (x,y,z):(%d,%d,%d)\n", device_Property.maxThreadsDim[0],
device_Property.maxThreadsDim[1],
device_Property.maxThreadsDim[2]);
printf("Max dimension size of a grid size (x,y,z):(%d,%d,%d)\n", device_Property.maxGridSize[0],
device_Property.maxGridSize[1],
device_Property.maxGridSize[2]);
}
|
14
|
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <time.h>
#define AND 0
#define OR 1
#define NAND 2
#define NOR 3
#define XOR 4
#define XNOR 5
__global__ void computeLogicGates(char* d_input, char* d_output, int size) {
// calculate the index of the thread
int index = threadIdx.x + blockIdx.x * blockDim.x;
int input_index = index * 3;
// if the index is inside the range of the array
if (input_index < size) {
int output;
switch (d_input[input_index+2] - '0') {
case AND:
if (d_input[input_index] == '1' && d_input[input_index+1] == '1') output = 1;
else output = 0;
break;
case OR:
if (d_input[input_index] == '0' && d_input[input_index+1] == '0') output = 0;
else output = 1;
break;
case NAND:
if (d_input[input_index] == '1' && d_input[input_index+1] == '1') output = 0;
else output = 1;
break;
case NOR:
if (d_input[input_index] == '0' && d_input[input_index+1] == '0') output = 1;
else output = 0;
break;
case XOR:
if (d_input[input_index] == d_input[input_index+1]) output = 0;
else output = 1;
break;
case XNOR:
if (d_input[input_index] == d_input[input_index+1]) output = 1;
else output = 0;
break;
default:
// Unknown gate code: emit 0 rather than leaving `output` uninitialized.
output = 0;
break;
}
d_output[index] = output + '0';
}
}
int main(int argc, char* argv[]) {
// check if necessary arguments are provided
if (argc == 1) {
return printf("No arguments are provided! Please provide the input file path, input file length and the output file path!");
}
else if (argc == 2) {
return printf("Input file length and output file path are not provided!");
}
else if (argc == 3) {
return printf("Output file path is not provided!");
}
char* input_file = argv[1];
int input_size = atoi(argv[2]);
char* output_file = argv[3];
// read the input file
FILE* input_fptr;
input_fptr = fopen(input_file, "r");
if (!input_fptr) return printf("Error opening the input file!");
// read the file line by line and populate input_data array
char line[10];
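// Each input line is assumed to look like "a b g" (e.g. "1 0 2"), where a and
// b are the two gate inputs ('0' or '1') and g is one of the gate codes
// defined above (here 2 = NAND); characters are read from positions 0, 2 and 4.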
// allocate CUDA variables
char* d_input;
char* d_output;
int input_array_size = input_size * 3 * sizeof(char);
int output_array_size = input_size * sizeof(char);
cudaMallocManaged(&d_input, input_array_size);
cudaMallocManaged(&d_output, output_array_size);
for (int i = 0; i < input_size; i++) {
fgets(line, 9, input_fptr);
d_input[i*3] = line[0];
d_input[i*3+1] = line[2];
d_input[i*3+2] = line[4];
}
// close file pointer
fclose(input_fptr);
clock_t start = clock();
// call device kernel
computeLogicGates<<<input_size, 1>>>(d_input, d_output, input_array_size);
// synchronize threads
cudaDeviceSynchronize();
clock_t end = clock();
// write the results into the output file
FILE* output_fptr;
output_fptr = fopen(output_file, "w");
if(!output_fptr) return printf("Error opening output file!");
for (int i = 0; i < input_size; i++) {
char data[3];
sprintf(data, "%c\n", d_output[i]);
fputs(data, output_fptr);
}
// close file pointer
fclose(output_fptr);
// free up device memory
cudaFree(d_input);
cudaFree(d_output);
// calculate execution time
double runtime = (double) (end-start) / CLOCKS_PER_SEC;
printf("Execution time: %f ms\n", runtime * 1000);
return 0;
}
|
15
|
#include "Matrix.cuh"
#include <cstring>
#include <fstream>
#include <ctime>
#include <device_functions.h>
#ifdef __CUDACC__
#define cuda_SYNCTHREADS() __syncthreads()
#else
#define cuda_SYNCTHREADS()
#endif
#define Zero ZeroCPU
#define PRINT_LOG false
//#define TARGET_RESIDUE ((double)1.0e-9);
const double TARGET_RESIDUE = 1.0e-6;
Matrix::Matrix(int cols, int rows) : cols(cols), rows(rows)
{
if (PRINT_LOG) printf("Matrix constructor\n");
cudaMallocManaged(&mat, cols * rows * sizeof(double));
}
unsigned Matrix::getRows() const
{
return rows;
}
unsigned Matrix::getCols() const
{
return cols;
}
Matrix::Matrix(int cols, int rows, double* mat) : cols(cols), rows(rows), mat(mat)
{
if (PRINT_LOG) printf("Matrix constructor\n");
//cudaMallocManaged(&mat, cols * rows * sizeof(double));
}
Matrix::Matrix(const Matrix& a)
{
if (PRINT_LOG) printf("Matrix copy constructor\n");
rows = a.rows;
cols = a.cols;
cudaMallocManaged(&mat, cols * rows * sizeof(double));
std::memcpy(mat, a.mat, cols * rows * sizeof(double));
}
void Matrix::operator=(const Matrix& a)
{
if (PRINT_LOG) printf("Matrix assignment operator\n");
rows = a.rows;
cols = a.cols;
cudaFree(mat);
cudaMallocManaged(&mat, cols * rows * sizeof(double));
std::memcpy(mat, a.mat, cols * rows * sizeof(double));
}
Matrix Matrix::Stub()
{
return Matrix(1, 1);
}
Matrix Matrix::ZeroCPU(int cols, int rows)
{
double* mat;
cudaMallocManaged(&mat, cols * rows * sizeof(double));
cudaDeviceSynchronize();
for (long i = 0; i < cols * rows; i++)
{
mat[i] = 0.0f;
}
return Matrix(cols, rows, mat);
}
Matrix Matrix::OneCPU(int cols, int rows)
{
double* mat;
cudaMallocManaged(&mat, cols * rows * sizeof(double));
for (long i = 0; i < cols * rows; i++)
{
mat[i] = 1.0f;
}
return Matrix(cols, rows, mat);
}
__global__ void ZeroGPUKernel(const int n, double* A)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
{
A[i] = 0.0f;
}
}
Matrix Matrix::ZeroGPU(int cols, int rows)
{
double* mat;
cudaMallocManaged(&mat, cols * rows * sizeof(double));
int blockCount = (cols * rows + BLOCK_SIZE - 1) / BLOCK_SIZE;
ZeroGPUKernel <<<blockCount, BLOCK_SIZE >>>(cols * rows, mat);
cudaDeviceSynchronize();
return Matrix(cols, rows, mat);
}
Matrix Matrix::IdentityCPU(int cols, int rows)
{
if (cols != rows) throw "Identity matrix must be square";
auto ret = Zero(cols, rows);
for (int i = 0; i < cols; ++i)
{
ret.mat[i * cols + i] = 1.0f;
}
return ret;
}
Matrix Matrix::FromFile(std::string path)
{
std::fstream reader;
int cols, rows;
reader.open(path, std::ios::in);
reader.seekp(0);
reader >> cols;
reader >> rows;
double* mat;
cudaMallocManaged(&mat, cols * rows * sizeof(double));
for (int i = 0; i < cols * rows; ++i)
{
reader >> mat[i];
}
reader.close();
return Matrix(cols, rows, mat);
}
Matrix Matrix::Jacobi(const Matrix& A, const Matrix& b)
{
auto LU = A;
auto invD = (LU.separateDiagonal());
auto x = ZeroCPU(1, A.getRows());
invD.inverseDiagonalInPlaceCPU();
auto M = -invD * LU;
auto temp = invD * b;
double res = 1;
int counter = 0;
do
{
x = (M * x + temp);
//if (counter++ == 9)
//{
// counter = 0;
res = (A * x - b).vectorEuclideanNorm();
// printf("res: %f\n", res);
//}
counter++;
}
while (res > TARGET_RESIDUE);
printf("res: %d \n", counter);
return x;
}
Matrix Matrix::JacobiOptimal(const Matrix& A, const Matrix& b)
{
// ~25% of the run time (80000us) was probably memory copies from device to host and back
//auto LU = A;
//->
auto LU = Matrix(A.cols, A.rows);
copyGPU(LU, A);
// 32x performance gain
//auto invD = (LU.separateDiagonal());
//invD.inverseDiagonalInPlaceCPU();
auto invD = Matrix(A.cols, A.rows);
separateDiagonalAndInverseGPU(invD, LU);
auto x = ZeroGPU(1, A.getRows());
//auto temp1 = invD * b;
auto temp1 = Matrix(1, A.rows);
refMul(temp1, invD, b);
//auto M = -invD * LU;
//auto M = Matrix(A.cols, A.rows);
auto M = Matrix(A.cols, A.rows);
additiveInverseInPlaceGPU(invD);
refMulDiag(M, invD, LU);
double res = 100;
int counter = 9;
auto memmul = Matrix(1, A.rows);
auto _Amulx = Matrix(1, A.rows);
auto resVector = Matrix(1, A.rows);
do
{
refMul(memmul, M, x);
refAdd(x, memmul, temp1);
//x = (M * x + temp);
if (counter++ == 9)
{
counter = 0;
refMul(_Amulx, A, x);
refSub(resVector, _Amulx, b);
res = resVector.vectorEuclideanNorm();
//printf("res: %f\n", res);
}
}
while (res > TARGET_RESIDUE);
return x;
}
Matrix Matrix::ForwardSubstitution(const Matrix& A, const Matrix& b)
{
if (!(A.cols == A.rows && A.rows == b.rows)) throw "Incorrect dimensions";
auto x = Matrix(1, A.getRows());
for (int i = 0; i < x.rows; ++i)
{
double sum = 0;
for (int j = 0; j < i; ++j)
{
sum += A.mat[i * A.cols + j] * x.mat[j];
}
x.mat[i] = (b.mat[i] - sum) / A.mat[i * A.cols + i];
}
return x;
}
Matrix Matrix::BackwardSubstitution(const Matrix& A, const Matrix& b)
{
if (!(A.cols == A.rows && A.rows == b.rows)) throw "Incorrect dimensions";
auto x = Matrix(1, A.getRows());
x.mat[0] = b.mat[0] / A.mat[0]; // redundant: overwritten by the final loop iteration
for (int i = x.rows - 1; i >= 0; --i)
{
double sum = 0;
for (int j = i + 1; j < A.cols; ++j)
{
sum += A.mat[i * A.cols + j] * x.mat[j];
}
x.mat[i] = (b.mat[i] - sum) / A.mat[i * A.cols + i];
}
return x;
}
Matrix Matrix::GaussSeidel(const Matrix& A, const Matrix& b)
{
auto DL = -(A.lowerCPU() + A.diagonalCPU());
auto U = A.upperCPU();
auto x = ZeroCPU(1, A.getRows());
auto temp = Matrix::ForwardSubstitution(DL, b);
double res = 1;
int counter = 0;
do
{
//x = -(Matrix::ForwardSubstitution(DL, U * x)) + temp;
x = (Matrix::ForwardSubstitution(DL, U * x)) + temp;
//if (counter++ == 9)
//{
counter++;
res = (A * (-x) - b).vectorEuclideanNorm();
//}
//printf("res: %f \n", res);
//(x).print();
}
while (res > TARGET_RESIDUE);
printf("res: %d \n", counter);
return -x;
}
Matrix Matrix::GaussSeidelOptimal(const Matrix& A, const Matrix& b)
{
//auto DL = (A.lowerCPU() + A.diagonalCPU());
//auto U = A.upperCPU();
auto DL = Matrix(A.cols, A.rows);
auto U = Matrix(A.cols, A.rows);
copyGPU(DL, A);
separateUpperGPU(U, DL);
//auto DL = (A.lowerCPU() + A.diagonalCPU());
//auto U = A.upperCPU();
auto x = ZeroCPU(1, A.getRows());
auto temp = Matrix::ForwardSubstitution(DL, b);
auto memmul = Matrix(1, A.rows);
auto memforwardsub = Matrix(1, A.rows);
auto memmulres = Matrix(1, A.rows);
auto resVector = Matrix(1, A.rows);
double res;
int counter = 9;
do
{
//x = -(Matrix::ForwardSubstitution(DL, U * x)) + temp;
refMul(memmul, U, x);
forwardSubstitutionGPU(memforwardsub, DL, memmul);
//memforwardsub = Matrix::ForwardSubstitution(DL, memmul);
//double xd = maxError(memforwardsub, memforwardsub2);
additiveInverseInPlaceGPU(memforwardsub);
refAdd(x, memforwardsub, temp);
//x = memforwardsub + temp;
if (counter++ == 9)
{
counter = 0;
refMul(memmulres, A, x);
refSub(resVector, memmulres, b);
res = resVector.vectorEuclideanNorm();
}
//printf("res: %f \n", res);
//(x).print();
}
while (res > TARGET_RESIDUE);
return x;
}
Matrix Matrix::LUMehtod(const Matrix& A, const Matrix& b)
{
Matrix L = Matrix::Stub();
Matrix U = Matrix::Stub();
Matrix::doolitle(L, U, A);
auto y = Matrix::ForwardSubstitution(L, b);
return Matrix::BackwardSubstitution(U, y);
}
Matrix Matrix::LUMehtodOptimal(const Matrix& A, const Matrix& b)
{
Matrix L = Matrix::Stub();
Matrix U = Matrix::Stub();
Matrix::doolitle(L, U, A);
auto y = Matrix::ForwardSubstitution(L, b);
return Matrix::BackwardSubstitution(U, y);
}
void Matrix::doolitle(Matrix& L, Matrix& U, const Matrix& A)
{
if (A.cols != A.rows) throw "Matrix is not square";
L = OneCPU(A.cols, A.rows).diagonalCPU();
U = ZeroCPU(A.cols, A.rows);
for (int j = 0; j < A.cols; ++j)
{
for (int i = 0; i <= j; ++i)
{
double sum = 0;
for (int k = 0; k < i; ++k)
{
sum += L.mat[i * L.cols + k] * U.mat[k * U.cols + j];
}
U.mat[i * U.cols + j] = A.mat[i * U.cols + j] - sum;
}
for (int i = j + 1; i < A.cols; ++i)
{
double sum = 0;
for (int k = 0; k < j; ++k)
{
sum += L.mat[i * L.cols + k] * U.mat[k * U.cols + j];
}
L.mat[i * U.cols + j] = 1 / U.mat[j * U.cols + j] * (A.mat[i * U.cols + j] - sum);
}
}
}
__global__ void doolitleKernel(const int n, double* A, double* B)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int j = index; j < n; j += stride)
{
A[j] = B[j];
}
}
void Matrix::doolitleGPU(Matrix& L, Matrix& U, const Matrix& A)
{
int blockCount = (A.rows * A.cols + BLOCK_SIZE - 1) / BLOCK_SIZE;
//doolitleKernel <<< blockCount, BLOCK_SIZE >>> (A.rows * A.cols, A.mat);
cudaDeviceSynchronize();
}
void Matrix::createTest(Matrix& A, Matrix& b, Matrix& x, int size)
{
srand(time(NULL));
const int constrange = 100;
const auto r = [](int range)-> double { return (double)(rand() % 20000) / 100 - 100; };
x = Matrix(1, size);
A = Matrix(size, size);
b = Matrix(1, size);
for (int i = 0; i < size; ++i)
{
x.mat[i] = r(100);
}
for (int i = 0; i < size; ++i)
{
double sum = 0;
for (int j = 0; j < size; ++j)
{
if (i != j)
{
A.mat[i * size + j] = r(100);
sum += fabs(A.mat[i * size + j]);
}
double randomized = r(100);
if (randomized > 0)
{
A.mat[i * size + i] = sum + r(10);
}
else
{
A.mat[i * size + i] = -sum + r(10);
}
}
}
for (int i = 0; i < size; ++i)
{
double sum = 0;
for (int j = 0; j < size; ++j)
{
sum += A.mat[i * size + j] * x.mat[j];
}
b.mat[i] = sum;
}
}
void Matrix::createTask(Matrix& A, Matrix& b, const int size)
{
//const int size = 994;
const int a1 = 5 + 7;
const int a2 = -1;
const int a3 = a2;
const int inSin(1 + 1);
A = Matrix::ZeroCPU(size, size);
b = Matrix(1, size);
for (int i = 0; i < size; ++i)
{
A.mat[size * i + i] = a1;
if (size * i + i - 1 >= 0)
A.mat[size * i + i - 1] = a2;
if (size * i + i - 2 >= 0)
A.mat[size * i + i - 2] = a3;
if (size * i + i + 1 < size * size)
A.mat[size * i + i + 1] = a2;
if (size * i + i + 2 < size * size)
A.mat[size * i + i + 2] = a3;
}
for (int i = 0; i < size; ++i)
{
b.mat[i] = sin(i * inSin);
}
}
void Matrix::createTaskC(Matrix& A, Matrix& b)
{
const int size = 994;
const int a1 = 3;
const int a2 = -1;
const int a3 = a2;
const int inSin(1 + 1);
A = Matrix::ZeroCPU(size, size);
b = Matrix(1, size);
for (int i = 0; i < size; ++i)
{
A.mat[size * i + i] = a1;
if (size * i + i - 1 >= 0)
A.mat[size * i + i - 1] = a2;
if (size * i + i - 2 >= 0)
A.mat[size * i + i - 2] = a3;
if (size * i + i + 1 < size * size)
A.mat[size * i + i + 1] = a2;
if (size * i + i + 2 < size * size)
A.mat[size * i + i + 2] = a3;
}
for (int i = 0; i < size; ++i)
{
b.mat[i] = sin(i * inSin);
}
}
double Matrix::maxError(Matrix& x, Matrix& r)
{
if (x.rows * x.cols != r.rows * r.cols) throw "Matrices are not the same size";
double max = 0;
for (int i = 0; i < x.rows * x.cols; ++i)
{
if (fabs(x.mat[i] - r.mat[i]) > max)
max = fabs(x.mat[i] - r.mat[i]);
}
return max;
}
__global__ void copyKernel(const int n, double* A, double* B)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int j = index; j < n; j += stride)
{
A[j] = B[j];
}
}
void Matrix::copyGPU(Matrix& a, const Matrix& b)
{
int blockCount = (a.cols * a.rows + BLOCK_SIZE - 1) / BLOCK_SIZE;
copyKernel <<< blockCount, BLOCK_SIZE >>>(a.cols * a.rows, a.mat, b.mat);
cudaDeviceSynchronize();
}
__global__ void separateDiagonalKernel(const int n, double* d, double* A)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int j = index; j < n; j += stride)
{
d[j * n + j] = 1 / A[j * n + j];
A[j * n + j] = 0;
}
}
void Matrix::separateDiagonalAndInverseGPU(Matrix& d, Matrix& A)
{
int blockCount = (A.cols + BLOCK_SIZE - 1) / BLOCK_SIZE;
separateDiagonalKernel <<< blockCount, BLOCK_SIZE >>>(A.cols, d.mat, A.mat);
cudaDeviceSynchronize();
}
__global__ void separateUpperKernel(const int n, const int cols, double* U, double* A)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int j = index; j < n; j += stride)
{
int row = j / cols;
int col = j % cols;
if (col > row)
{
U[j] = A[j];
A[j] = 0;
}
}
}
void Matrix::separateUpperGPU(Matrix& U, Matrix& A)
{
int blockCount = (A.cols + BLOCK_SIZE - 1) / BLOCK_SIZE;
separateUpperKernel <<< blockCount, BLOCK_SIZE >>>(A.cols * A.rows, A.cols, U.mat, A.mat);
cudaDeviceSynchronize();
}
__global__ void additiveInverseInPlaceKernel(const int n, double* A)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int j = index; j < n; j += stride)
{
A[j] = -A[j];
}
}
void Matrix::additiveInverseInPlaceGPU(Matrix& A)
{
int blockCount = (A.rows * A.cols + BLOCK_SIZE - 1) / BLOCK_SIZE;
additiveInverseInPlaceKernel <<< blockCount, BLOCK_SIZE >>>(A.rows * A.cols, A.mat);
cudaDeviceSynchronize();
}
__global__ void forwardSubstitutionKernel(const int n, double* A, double* b, double* x)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int j = index; j < n; j += stride)
{
double sum = 0;
for (int i = 0; i < n; i++)
{
if (i == j)
{
x[j] = (b[j] - sum) / A[j * n + j];
}
cuda_SYNCTHREADS();
if (i < j)
{
sum += A[j * n + i] * x[i];
}
}
}
}
void Matrix::forwardSubstitutionGPU(Matrix& result, const Matrix& A, const Matrix& b)
{
int blockCount = 1;
// Round the block size up to the next power of two; assumes A.cols <= 1024 so one block suffices.
int blockSize = pow(2, ceil(log2f(A.cols)));
forwardSubstitutionKernel <<< blockCount, blockSize >>>(A.cols, A.mat, b.mat, result.mat);
cudaDeviceSynchronize();
}
void Matrix::backwardSubstitutionGPU(Matrix& result, const Matrix& A, const Matrix& b)
{
}
void Matrix::toFile(std::string path)
{
std::fstream writer;
writer.open(path, std::ios::out);
writer.seekg(0);
writer << cols << ' ' << rows << '\n';
for (int i = 0; i < rows; ++i)
{
for (int j = 0; j < cols; ++j)
{
writer << mat[i * cols + j] << ' ';
}
writer << "\n";
}
writer.close();
}
Matrix Matrix::separateDiagonal()
{
if (cols != rows) throw "Matrix is not square";
auto ret = Zero(cols, rows);
for (int i = 0; i < cols; ++i)
{
ret.mat[i * cols + i] = mat[i * cols + i];
mat[i * cols + i] = 0.0f;
}
return ret;
}
Matrix Matrix::diagonalCPU() const
{
if (cols != rows) throw "Matrix is not square";
auto ret = Zero(cols, rows);
for (int i = 0; i < cols; ++i)
{
ret.mat[i * cols + i] = mat[i * cols + i];
}
return ret;
}
Matrix Matrix::lowerCPU() const
{
if (cols != rows) throw "Matrix is not square";
auto ret = Zero(cols, rows);
for (int j = 0; j < cols; ++j)
{
for (int i = 0; i < j; ++i)
{
ret.mat[j * cols + i] = mat[j * cols + i];
}
}
return ret;
}
Matrix Matrix::upperCPU() const
{
if (cols != rows) throw "Matrix is not square";
auto ret = Zero(cols, rows);
for (int j = 0; j < cols; ++j)
{
for (int i = j + 1; i < cols; ++i)
{
ret.mat[j * cols + i] = mat[j * cols + i];
}
}
return ret;
}
void Matrix::inverseDiagonalInPlaceCPU()
{
if (cols != rows) throw "Matrix is not square";
for (int i = 0; i < cols; ++i)
{
if (mat[i * cols + i] == 0) throw "0 on diagonal";
mat[i * cols + i] = 1 / mat[i * cols + i];
}
}
void Matrix::transposeVectorInPlace()
{
unsigned int tmp = cols;
cols = rows;
rows = tmp;
}
double Matrix::vectorEuclideanNorm()
{
if (cols != 1 && rows != 1) throw "Matrix is not a vector";
double sum = 0;
for (int i = 0; i < cols * rows; ++i)
{
sum += mat[i] * mat[i];
}
return sqrt(sum);
}
Matrix Matrix::lu()
{
throw "Not implemented";
}
void Matrix::print() const
{
for (int i = 0; i < rows; ++i)
{
for (int j = 0; j < cols; ++j)
{
printf("%f ", mat[i * cols + j]);
}
printf("\n");
}
printf("\n");
}
Matrix::~Matrix()
{
if (PRINT_LOG) printf("Matrix destructor\n");
cudaFree(mat);
//free(mat);
}
__global__ void mulKernel(const int commonDim, const int cols, const int n, double* A, double* B, double* C)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int j = index; j < n; j += stride)
{
int row = j / cols;
int col = j % cols;
C[j] = 0;
for (int i = 0; i < commonDim; i++)
{
C[j] += A[row * commonDim + i] * B[i * cols + col];
}
}
}
void Matrix::refMul(Matrix& result, const Matrix& a, const Matrix& b)
{
int blockCount = (a.rows * b.cols + BLOCK_SIZE - 1) / BLOCK_SIZE;
mulKernel <<< blockCount, BLOCK_SIZE >>>(a.cols, b.cols, b.cols * a.rows, a.mat, b.mat, result.mat);
cudaDeviceSynchronize();
}
__global__ void mulDiagKernel(const int commonDim, const int cols, const int n, double* A, double* B, double* C)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int j = index; j < n; j += stride)
{
int row = j / cols;
int col = j % cols;
C[j] = A[row * commonDim + row] * B[row * commonDim + col];
}
}
void Matrix::refMulDiag(Matrix& result, const Matrix& a, const Matrix& b)
{
int blockCount = (a.rows * b.cols + BLOCK_SIZE - 1) / BLOCK_SIZE;
mulDiagKernel << < blockCount, BLOCK_SIZE >> >(a.cols, b.cols, b.cols * a.rows, a.mat, b.mat, result.mat);
cudaDeviceSynchronize();
}
Matrix operator*(const Matrix& a, const Matrix& b)
{
if (a.cols != b.rows) throw "wrong dimensions for multiplication";
double* mat;
cudaMallocManaged(&mat, b.cols * a.rows * sizeof(double));
int blockCount = (a.rows * b.cols + BLOCK_SIZE - 1) / BLOCK_SIZE;
if (PRINT_LOG) printf("Matrix multiplication on %d blocks x %d threads\n", blockCount, BLOCK_SIZE);
mulKernel <<< blockCount, BLOCK_SIZE >>>(a.cols, b.cols, b.cols * a.rows, a.mat, b.mat, mat);
cudaDeviceSynchronize();
return Matrix(b.cols, a.rows, mat);
}
__global__ void addKernel(const int n, double* A, double* B, double* C)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int j = index; j < n; j += stride)
{
C[j] = A[j] + B[j];
}
}
void Matrix::refAdd(Matrix& result, const Matrix& a, const Matrix& b)
{
int blockCount = (a.cols * a.rows + BLOCK_SIZE - 1) / BLOCK_SIZE;
addKernel <<< blockCount, BLOCK_SIZE >>>(a.cols * a.rows, a.mat, b.mat, result.mat);
cudaDeviceSynchronize();
}
Matrix operator+(const Matrix& a, const Matrix& b)
{
if (a.cols != b.cols || a.rows != b.rows) throw "dimensions must equal for addition";
double* mat;
cudaMallocManaged(&mat, a.cols * a.rows * sizeof(double));
int blockCount = (a.cols * a.rows + BLOCK_SIZE - 1) / BLOCK_SIZE;
if (PRINT_LOG) printf("Matrix addition on %d blocks x %d threads\n", blockCount, BLOCK_SIZE);
addKernel <<< blockCount, BLOCK_SIZE >>>(a.cols * a.rows, a.mat, b.mat, mat);
cudaDeviceSynchronize();
return Matrix(a.cols, a.rows, mat);
}
__global__ void subKernel(const int n, double* A, double* B, double* C)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int j = index; j < n; j += stride)
{
C[j] = A[j] - B[j];
}
}
void Matrix::refSub(Matrix& result, const Matrix& a, const Matrix& b)
{
int blockCount = (a.cols * a.rows + BLOCK_SIZE - 1) / BLOCK_SIZE;
subKernel <<< blockCount, BLOCK_SIZE >> >(a.cols * a.rows, a.mat, b.mat, result.mat);
cudaDeviceSynchronize();
}
Matrix operator-(const Matrix& a, const Matrix& b)
{
if (a.cols != b.cols || a.rows != b.rows) throw "dimensions must equal for subtraction";
double* mat;
cudaMallocManaged(&mat, a.cols * a.rows * sizeof(double));
int blockCount = (a.cols * a.rows + BLOCK_SIZE - 1) / BLOCK_SIZE;
if (PRINT_LOG) printf("Matrix subtraction on %d blocks x %d threads\n", blockCount, BLOCK_SIZE);
subKernel <<< blockCount, BLOCK_SIZE >>>(a.cols * a.rows, a.mat, b.mat, mat);
cudaDeviceSynchronize();
return Matrix(a.cols, a.rows, mat);
}
__global__ void additiveInverseKernel(const int n, double* A, double* B)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int j = index; j < n; j += stride)
{
A[j] = -B[j];
}
}
Matrix operator-(const Matrix& a)
{
double* mat;
cudaMallocManaged(&mat, a.cols * a.rows * sizeof(double));
int blockCount = (a.cols * a.rows + BLOCK_SIZE - 1) / BLOCK_SIZE;
additiveInverseKernel <<<blockCount, BLOCK_SIZE >>>(a.cols * a.rows, mat, a.mat);
cudaDeviceSynchronize();
return Matrix(a.cols, a.rows, mat);
}
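// Hypothetical end-to-end usage sketch (not part of the original source;
// assumes the declarations in Matrix.cuh match the definitions above):
//
//   Matrix A = Matrix::Stub(), b = Matrix::Stub();
//   Matrix::createTask(A, b, 994);           // banded test system
//   Matrix x = Matrix::JacobiOptimal(A, b);  // iterate until TARGET_RESIDUE
//   Matrix r = A * x - b;
//   printf("residual: %e\n", r.vectorEuclideanNorm());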
|
16
|
#include "includes.h"
__global__ void multiply_by_itself_training_util_kernel( const float4 * __restrict input_buf, float4 * __restrict output_buf, int elem_count)
{
int elem_id = blockDim.x * blockIdx.x + threadIdx.x;
if (elem_id < elem_count)
{
float4 val = input_buf[elem_id];
val.x *= val.x;
val.y *= val.y;
val.z *= val.z;
val.w *= val.w;
output_buf[elem_id] = val;
}
}
|
17
|
#include <algorithm>
#include <iostream>
#include <vector>
typedef unsigned long long data_t;
static inline void check(cudaError_t err, const char* context) {
if (err != cudaSuccess) {
std::cerr << "CUDA error: " << context << ": "
<< cudaGetErrorString(err) << std::endl;
std::exit(EXIT_FAILURE);
}
}
#define CHECK(x) check(x, #x)
template <class T>
void cuda_memcpy(T* target, const T* source, std::size_t num, cudaMemcpyKind direction) {
CHECK(cudaMemcpy(target, source, num * sizeof(T), direction));
}
static inline int divup(int a, int b) {
return (a + b - 1)/b;
}
// Extract the bit of each number selected by bit_shift.
// Example: number: 10001, bit_shift: 1, One: 1
// means: check whether the second-lowest bit is 1 or not.
__global__ void getMask(data_t *d_in, unsigned int *d_out, const int len, const unsigned int n, data_t bit_shift, unsigned int One) {
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
data_t bit = 0;
data_t one=1;
data_t shift=one<<bit_shift;
unsigned int start=index*len;
if (start>=n) return;
unsigned int end=start+len;
for(unsigned int i=start;i<end && i<n; i++ ){
bit=d_in[i]&shift;
bit = (bit > 0) ? 1 : 0;
d_out[i] = (One ? bit : 1 - bit);
}
}
__global__ void getIndex(unsigned int *d_index, unsigned int *d_sum, unsigned int* d_mask, const int len, const unsigned int n,
unsigned int total_pre) {
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
unsigned int start=index*len;
if (start>=n) return;
unsigned int end=start+len;
for (unsigned int i=start; i<end && i<n; i++){
d_index[i]=d_mask[i]?d_sum[i]:i-d_sum[i]+total_pre;
if(d_index[i]>=n){
printf(" d_sum[i] : %d, total_pre : %d, d_mask[i] : %d \n", d_sum[i], total_pre, d_mask[i]);
}
// if(d_mask[i]==1){
// d_index[i]=total_pre+d_sum[i];
// }
}
}
__global__ void scatter(data_t *d_in, unsigned int *d_index, data_t *d_out, const int len, const unsigned int n) {
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
unsigned int start=index*len;
if (start>=n) return;
unsigned int end=start+len;
for(unsigned int i=start;i<end && i<n; i++ ){
d_out[d_index[i]]=d_in[i];
}
}
// The idea for the exclusive prefix sum is similar to my PPC course: https://www.youtube.com/watch?v=HVhCtl96gUs
// I use y, z, s to label the steps; the array is split into small chunks of [len] numbers each.
// Thread level y: each thread sums its own chunk sequentially; threads work independently on [len] numbers.
// Thread level z: the threads within one block are accumulated sequentially, one block at a time.
// Thread level s: each thread adds the result of its predecessor thread to its own chunk.
// Block level y: this produces the prefix sum at block granularity.
// Block level z: a single block with a single thread accumulates across blocks sequentially.
// Block level s: each thread adds in the result of the preceding block.
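// Worked example (added for clarity; n = 8, len = 2, 2 threads per block, 2 blocks):
// mask = [1,0,1,1,0,1,0,1]
// after the thread- and block-level passes the output is [0,1,1,2,3,3,4,4],
// i.e. the exclusive prefix sum of mask.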
__global__ void prefixsum(unsigned int* mask, unsigned int* output,const int len, const unsigned int n ){
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
int step=len;
int start=index*len+1;//exclusive
if (start>n) return; //exclusive, could equal to n
int end=start+step;
output[start]=mask[start-1];
for(unsigned int i=start+1;i<end&&i<n;i++){
output[i]+=output[i-1]+mask[i-1];//exclusive, therefore mask[i-1]
}
}
__global__ void serialsum_accrossthread(unsigned int* sum,const int len, const unsigned int n){
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
int step=len;
int offset=2*step;
unsigned int start=step*blockDim.x*index+offset;
unsigned int end=step*blockDim.x*(index+1)+1;
for(unsigned int i=start;i<end && i<n; i+=step){
sum[i]+=sum[i-step];
}
}
__global__ void mergethread(unsigned int* sum,const int len, const unsigned int n){
if (threadIdx.x==0) return;
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
int step=len;
unsigned int start=index*step+1;//exclusive
unsigned int end=start+step-1; // -1 is important, this position has been added in serial sum
unsigned int base=sum[start-1];
for(unsigned int i=start; i<end && i<n; i++){
sum[i]+=base;
}
}
// void serialsum_accrossblock(unsigned int* sum,const int len, const unsigned int n, const int block_size){
// int step=len*block_size;//each block has step number
// int start=2*step;
// for(unsigned int i=start; i<n; i+=step){
// sum[i]+=sum[i-step];
// }
// }
__global__ void serialsum_accrossblock(unsigned int* sum,const int len, const unsigned int n, const int block_size){
//only one block and one thread
int step=len*block_size;//each block has step number
int start=2*step;
for(unsigned int i=start; i<n; i+=step){
sum[i]+=sum[i-step];
}
}
// __global__ void mergeblock(unsigned int* sum,const int len, const unsigned int n){
// unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
// if (index==0) return; //the first block is not needed to merge
// int step=len*blockDim.x;
// int start=index*step+1; //exclusive
// int end=start+step-1;// -1 is important, this position has been added in serial sum
// int base=sum[start-1];//last element at last block
// for(int i=start; i<end && i<n; i++){
// sum[i]+=base;
// }
// }
__global__ void mergeblock(unsigned int* sum,const int len, const unsigned int n){
if (blockIdx.x==0) return;//the first block is not needed to merge
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
int step=len;
unsigned int base_index=blockIdx.x*step*blockDim.x;
unsigned int base=sum[base_index];
int start=index*step; //only the first thread in block should excluded the first element
int end=start+step;
start=(start==base_index)?start+1:start;
// int base=sum[start-1];//last element at last block
for(int i=start; i<end && i<n; i++){
sum[i]+=base;
}
}
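// For debugging, a minimal host-side reference of the same exclusive prefix sum
// (a hypothetical helper, not part of the original pipeline) can be compared
// against d_sum after the merge kernels:
static void exclusive_prefix_sum_ref(const unsigned int* mask, unsigned int* sum, unsigned int n){
    unsigned int running = 0;
    for(unsigned int i = 0; i < n; i++){
        sum[i] = running;   // exclusive: sum[i] counts mask[0..i-1]
        running += mask[i];
    }
}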
void psort(int n, data_t *data) {
if(n<=0) return;
// FIXME: Implement a more efficient parallel sorting algorithm for the GPU.
  const int block_size=256;//256 threads per block
  const int len=2000; //each thread computes the prefix sum of 2000 elements
data_t *d_temp;
data_t *d_in=NULL;
CHECK(cudaMalloc((void**)&d_in,n*sizeof(data_t)));
data_t *d_out_long=NULL;
CHECK(cudaMalloc((void**)&d_out_long,n*sizeof(data_t)));
unsigned int *d_out=NULL;
CHECK(cudaMalloc((void**)&d_out,n*sizeof(unsigned int)));
unsigned int *d_sum=NULL;
CHECK(cudaMalloc((void**)&d_sum,n*sizeof(unsigned int)));
unsigned int *d_index=NULL;
CHECK(cudaMalloc((void**)&d_index,n*sizeof(unsigned int)));
cuda_memcpy(d_in,data,n,cudaMemcpyHostToDevice);
data_t bits=sizeof(data_t)*8;
unsigned int total_zeros, mask_last;
  //each iteration of this loop is one LSD radix pass over a single bit
for(data_t i=0; i<bits; i++){
CHECK(cudaMemset(d_sum,0,n*sizeof(unsigned int)));
getMask<<<divup(n,block_size*len),block_size>>>(d_in, d_out, len, n, i, 0);
CHECK(cudaGetLastError());
    //exclusive prefix sum of the mask
prefixsum<<<divup(n,block_size*len),block_size>>>(d_out,d_sum,len,n);
CHECK(cudaGetLastError());
serialsum_accrossthread<<<divup(n,block_size*len*block_size),block_size>>>(d_sum,len,n);
CHECK(cudaGetLastError());
mergethread<<<divup(n,block_size*len),block_size>>>(d_sum,len,n);
CHECK(cudaGetLastError());
serialsum_accrossblock<<<1,1>>>(d_sum, len, n, block_size);
CHECK(cudaGetLastError());
mergeblock<<<divup(n,block_size*len),block_size>>>(d_sum,len,n);
CHECK(cudaGetLastError());
CHECK(cudaMemcpy(&total_zeros, d_sum+n-1, sizeof(unsigned int), cudaMemcpyDeviceToHost));
CHECK(cudaMemcpy(&mask_last, d_out+n-1, sizeof(unsigned int), cudaMemcpyDeviceToHost));
total_zeros+=(mask_last==1)?1:0;
getIndex<<<divup(n,block_size*len),block_size>>>(d_index, d_sum, d_out, len, n, total_zeros);
// std::cout<<"index "<<std::endl;
// CHECK(cudaMemcpy(sum, d_index, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
// for(int j=0;j<n;j++){
// std::cout<<sum[j]<<" ";
// }
// std::cout<<std::endl;
CHECK(cudaGetLastError());
scatter<<<divup(n,block_size*len),block_size>>>(d_in, d_index, d_out_long, len, n);
CHECK(cudaGetLastError());
//must swap pointers
d_temp = d_in;
d_in = d_out_long;
d_out_long = d_temp;
}
cuda_memcpy(data, d_in, n, cudaMemcpyDeviceToHost);
CHECK(cudaFree(d_in));
CHECK(cudaFree(d_out_long));
CHECK(cudaFree(d_out));
CHECK(cudaFree(d_sum));
CHECK(cudaFree(d_index));
}
|
18
|
#include <iostream>
using namespace std;
#define CUDA_CALL(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPU assert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__global__ void square(float *d_out, float *d_in){
int idx = threadIdx.x;
float f = d_in[idx];
d_out[idx] = f*f;
}
int main(){
const int ARRAY_SIZE = 64;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
float h_in[ARRAY_SIZE];
for(int i=0; i < ARRAY_SIZE; i++){
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
float *d_in;
float *d_out;
CUDA_CALL(cudaMalloc((void**) &d_in, ARRAY_BYTES));
CUDA_CALL(cudaMalloc((void**) &d_out, ARRAY_BYTES));
CUDA_CALL(cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice));
square<<<1, ARRAY_SIZE>>>(d_out, d_in);
CUDA_CALL(cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost));
for(int i=0; i< ARRAY_SIZE; i++){
cout << h_out[i];
if(i%4!=3) cout << "\t";
else cout << endl;
}
	CUDA_CALL(cudaFree(d_in));
	CUDA_CALL(cudaFree(d_out));
	return 0;
}
|
19
|
extern "C"
__global__ void cuAdd(int n, float *a, float *b, float *result)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n)
{
result[i] = a[i] + b[i];
}
}
extern "C"
__global__ void cuMult(int n, float *a, float *b, float *result)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n)
{
result[i] = a[i] * b[i];
}
}
extern "C"
__global__ void cuDiv(int n, float *a, float *b, float *result)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n)
{
result[i] = a[i] / b[i];
}
}
extern "C"
__global__ void cuExp(int n, float *a, float *result)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n)
{
result[i] = expf(a[i]);
}
}
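// A minimal host-side sketch of driving one of these kernels (assumption: the
// kernels are linked into the same binary; the extern "C" mangling suggests the
// original is instead loaded as PTX through a driver-API binding such as JCuda):
extern "C" void launchCuAdd(int n, float *d_a, float *d_b, float *d_result)
{
    int block = 256;
    int grid = (n + block - 1) / block; // one thread per element, rounded up
    cuAdd<<<grid, block>>>(n, d_a, d_b, d_result);
}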
|
20
|
#include<bits/stdc++.h>
using namespace std;
__global__ void vec_add(int N, int *A, int *B, int *C){
int i = threadIdx.x + blockIdx.x * blockDim.x;
// assert( i<N );
if(i < N) C[i] = A[i] + B[i];
}
int main(int argc, char *argv[]){
srand(0);
int N = 10000, block_size = 256;
if(argc>1) N = stoi(argv[1]);
if(argc>2) block_size = stoi(argv[2]);
int n_block = (N+block_size-1)/block_size;
int *A = new int [N], *B = new int [N], *C = new int [N];
for(int i=0;i<N;++i) A[i] = rand()%50;
for(int i=0;i<N;++i) B[i] = rand()%50;
clock_t start_time, mid_time1, mid_time2, end_time;
// Record the starting time
start_time = clock();
int *dA, *dB, *dC;
cudaMalloc((void **)&dA, N*sizeof(int));
cudaMalloc((void **)&dB, N*sizeof(int));
cudaMalloc((void **)&dC, N*sizeof(int));
    // Copy data to device
cudaMemcpy(dA, A, N*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dB, B, N*sizeof(int), cudaMemcpyHostToDevice);
mid_time1 = clock();
    // Running code on GPUs
    vec_add<<<n_block, block_size>>>(N, dA, dB, dC);
    cudaDeviceSynchronize(); // the launch is asynchronous; wait so the kernel time is actually measured
    mid_time2 = clock();
cudaMemcpy(C, dC, N*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dA);
cudaFree(dB);
cudaFree(dC);
// Record the ending time
end_time = clock();
double dt = double(end_time - start_time)/CLOCKS_PER_SEC;
double dt_trans = double(mid_time1 + end_time - start_time - mid_time2)/CLOCKS_PER_SEC;
cout<<"Data Transfer Time Usage: "<<dt_trans<<"s"<<endl;
cout<<"Total Time Usage: "<<dt<<"s\nResults:\n";
int stride = N/10;
for(int i=0;i<N;i+=stride) cout<<C[i]<<' ';
cout<<endl;
delete [] A;
delete [] B;
delete [] C;
return 0;
}
|
21
|
/* Multiply two matrices on the GPU */
#include<iostream>
#include<stdlib.h>
#include<sys/time.h>
#include<math.h>
#include"cuda_runtime.h"
using namespace std;
#define cols 1024
#define rows 1024
__global__ void multiply(float**Ad,float**Bd,float**Cd)
{
int x = blockDim.x*blockIdx.x+threadIdx.x;
int y = blockDim.y*blockIdx.y+threadIdx.y;
if(x<rows && y<cols)
{
for(int i=0;i<cols;i++)
{
Cd[y][x]+=Ad[y][i]*Bd[i][x];
}
}
}
int main()
{
struct timeval start, end;
int n=cols*rows;
float **A,**B,**C,**Ad,**Bd,**Cd;
float *a,*b,*c,*ad,*bd,*cd;
A=new float* [cols];
B=new float* [cols];
C=new float* [cols];
a=new float [n];
b=new float [n];
c=new float [n];
cudaMalloc((void**)&Ad,sizeof(float*)*cols);
cudaMalloc((void**)&Bd,sizeof(float*)*cols);
cudaMalloc((void**)&Cd,sizeof(float*)*cols);
cudaMalloc((void**)&ad,sizeof(float)*n);
cudaMalloc((void**)&bd,sizeof(float)*n);
    cudaMalloc((void**)&cd,sizeof(float)*n);
    cudaMemset(cd,0,sizeof(float)*n);//the kernel accumulates into Cd, so it must start from zero
    for(int i=0;i<n;i++)
    {
        a[i]=2;
        b[i]=2;
    }
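    //A, B and C are host arrays whose entries are *device* row pointers into
    //ad/bd/cd, so copying them to Ad/Bd/Cd below gives the kernel valid float** indexing.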
for(int i=0;i<cols;i++)
{
A[i]=ad+i*rows;
B[i]=bd+i*rows;
C[i]=cd+i*rows;
}
    gettimeofday( &start, NULL);//start timing when data begins copying to the GPU
cudaMemcpy(Ad,A,sizeof(float*)*cols,cudaMemcpyHostToDevice);
cudaMemcpy(Bd,B,sizeof(float*)*cols,cudaMemcpyHostToDevice);
cudaMemcpy(Cd,C,sizeof(float*)*cols,cudaMemcpyHostToDevice);
cudaMemcpy(ad,a,sizeof(float)*n,cudaMemcpyHostToDevice);
cudaMemcpy(bd,b,sizeof(float)*n,cudaMemcpyHostToDevice);
dim3 dimBlock(16,16);
dim3 dimGrid(cols/16+1,rows/16+1);
multiply<<<dimGrid,dimBlock>>>(Ad,Bd,Cd);
cudaMemcpy(c,cd,sizeof(float)*n,cudaMemcpyDeviceToHost);
    gettimeofday( &end, NULL );//stop timing once the results are back from the GPU
float target=4096;
float error=0.0;
for(int i=0;i<n;i++)
{
error+=abs(c[i]-target);
}
cout<<"error is "<<error<<endl;
int timeuse = 1000000 * ( end.tv_sec - start.tv_sec ) + end.tv_usec - start.tv_usec;
cout << "total time is " << timeuse/1000 << "ms" <<endl;
delete [] a;
delete [] b;
delete [] c;
delete [] A;
delete [] B;
delete [] C;
cudaFree(Ad);
cudaFree(Bd);
cudaFree(Cd);
cudaFree(ad);
cudaFree(bd);
cudaFree(cd);
return 0;
}
|
22
|
#include <stdio.h>
__global__ void firstParallel()
{
printf("This is running in parallel.\n");
}
int main()
{
firstParallel<<<5, 5>>>();
cudaDeviceSynchronize();
}
|
23
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void conv2(float *A, float *kernel,int inputSize, int depth, int kernelSize , int stride, int pad, float *B, int outputSize) {
    // compute one output element: output(i,j) is one full convolution sum
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
if( !(i < outputSize) || !(j < outputSize) ) return;
int Ai = i*stride;
int Aj = j*stride;
    // clamp the kernel window so the zero padding is skipped
int startk = (pad-Ai) < 0? 0 : pad-Ai;
int endk = kernelSize < (inputSize + pad - Ai) ? kernelSize : (inputSize + pad - Ai);
int startl = (pad-Aj) < 0? 0 : pad-Aj;
int endl = kernelSize < (inputSize + pad - Aj) ? kernelSize : (inputSize + pad - Aj);
float sum = 0;
for(int d = 0; d < depth; d++) {
for( int k = startk ; k < endk; k++) {
for( int l = startl; l < endl; l++) {
sum += A[d*inputSize*inputSize + (Ai+k-pad)*inputSize + Aj+l-pad]*kernel[d*kernelSize*kernelSize + k*kernelSize+l];
}
}
B[d*outputSize*outputSize + i*outputSize + j] = sum;
}
B[i*outputSize + j] = sum;
}
int main(int argc, char * argv[] ) {
// input: inputSize*inputSize*depth
// kernel: kernelSize*kernelSize*depth
// output: outputSize*outputSize
int inputSize = 7;
int depth = 3;
int kernelSize = 3;
int kernelNum = 3;
int stride[3] = {1 , 2 , 3 };
int pad[3] = {0,0,0};
int outputSize[3];
    // compute the padding (pad) and the output size (outputSize) required for each stride
for(int i = 0; i < kernelNum; i++) {
if((inputSize - kernelSize)%stride[i] != 0) {
pad[i] = (stride[i] - ((inputSize - kernelSize)%stride[i])) / 2;
}
outputSize[i] = (inputSize - kernelSize + 2*pad[i] ) / stride[i] + 1;
}
    // ============================= resource allocation and initialization =========================
    // ==== host allocation and initialization
// input:A kernel:kernel output:B
float *A, *kernel[3], *B[3];
A = (float *)malloc(sizeof(float)*inputSize*inputSize*depth);
for(int i = 0; i < 3; i++) {
kernel[i] = (float *)malloc(sizeof(float)*kernelSize*kernelSize*depth);
B[i] = (float *)malloc(sizeof(float)*outputSize[i]*outputSize[i]*depth);
}
    // initialize input A
for(int d = 0; d < depth; d++) {
for(int i=0; i<inputSize*inputSize; i++) {
A[d*inputSize*inputSize + i] = i;
}
}
    // initialize the kernels
for(int i = 0; i < 3; i++){
for(int j = 0; j < kernelSize*kernelSize*depth; j++) {
kernel[i][j] = 1;
}
}
    // ==== device allocation and initialization
float *d_A, *d_kernel[3], *d_B[3];
cudaMalloc((void**)&d_A,sizeof(float)*inputSize*inputSize*depth);
for(int i = 0; i < 3; i++) {
cudaMalloc((void**)&d_kernel[i], sizeof(float)*kernelSize*kernelSize*depth);
cudaMalloc((void**)&d_B[i],sizeof(float)*outputSize[i]*outputSize[i]);
}
cudaMemcpy(d_A,A,sizeof(float)*inputSize*inputSize*depth,cudaMemcpyHostToDevice);
for(int i = 0; i < 3; i++) {
cudaMemcpy(d_kernel[i],kernel[i],sizeof(float)*kernelSize*kernelSize*depth,cudaMemcpyHostToDevice);
}
    // ============================= kernel launches =========================
struct timeval start, end;
gettimeofday( &start, NULL );
for( int i = 0; i < 3; i++ ) {
int blockx = (int) (log2(outputSize[i])+ 1);
int blocky = blockx;
dim3 Block(blockx,blocky);
dim3 Grid((inputSize+Block.x-1) / Block.x,(inputSize+Block.y-1) / Block.y );
conv2 <<< Grid, Block >>> (d_A,d_kernel[i],inputSize,depth,kernelSize,stride[i],pad[i],d_B[i],outputSize[i]);
}
    // copy the results back
for( int i = 0; i < 3; i++ ) {
cudaMemcpy(B[i],d_B[i],sizeof(float)*outputSize[i]*outputSize[i],cudaMemcpyDeviceToHost);
}
gettimeofday( &end, NULL );
int timeuse = 1000000 * ( end.tv_sec - start.tv_sec ) + end.tv_usec - start.tv_usec;
//printf("Block(%d,%d) Grid(%d,%d).\n", Block.x, Block.y, Grid.x, Grid.y);
printf("total time is %f ms\n", timeuse/(float)1000);
    // write the results to files
FILE *b[3];
b[0] = fopen("matrixB11.m", "wb");
b[1] = fopen("matrixB12.m", "wb");
b[2] = fopen("matrixB13.m", "wb");
for(int k = 0; k < 3; k++ ) {
fprintf(b[k], "B = [ \n");
for (int i = 0; i < outputSize[k]; i++)
{
for (int j = 0; j < outputSize[k]; j++)
fprintf(b[k], "%f ", B[k][i * outputSize[k] + j]);
fprintf(b[k], "\n");
}
fprintf(b[k], "];");
}
    // ============================= free resources =========================
free(A);
cudaFree(d_A);
for(int i = 0; i < 3; i++) {
free(kernel[i]);
free(B[i]);
cudaFree(d_B[i]);
cudaFree(d_kernel[i]);
fclose(b[i]);
}
return 0;
}
|
24
|
#include "includes.h"
__global__ void __stratifycounts(double *strata, int n, double *a, unsigned int *bi) {
__shared__ unsigned int ic[SNDVALS][SNDGRPS];
__shared__ double ss[SNDVALS];
int istart = (int)(((long long)blockIdx.x) * n / gridDim.x);
int iend = (int)(((long long)(blockIdx.x+1)) * n / gridDim.x);
int bibase = SNDVALS * (blockIdx.x + istart / SBIGBLK);
int tid = threadIdx.x + threadIdx.y * blockDim.x;
if (threadIdx.y == 0) {
ss[threadIdx.x] = strata[threadIdx.x];
}
for (int i = istart; i < iend; i += SBIGBLK) {
__syncthreads();
if (threadIdx.y < SNDGRPS) {
ic[threadIdx.x][threadIdx.y] = 0;
}
__syncthreads();
for (int k = i + tid; k < min(iend, i + tid + SBIGBLK); k += SNTHREADS) {
double v = a[k];
int j = 0;
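      // The eight unrolled steps below walk an implicit binary search tree over
      // the thresholds in ss[] (this assumes SNDVALS == 256, i.e. eight levels);
      // the rebase after them maps j from [SNDVALS-1, 2*SNDVALS-2] down to a bin in [0, SNDVALS-1].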
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = (v > ss[j]) ? 2*j+2 : 2*j+1;
j = j - SNDVALS + 1;
atomicInc(&ic[j][threadIdx.y], 65536*32767);
}
__syncthreads();
if (threadIdx.y == 0) {
bi[bibase + threadIdx.x] = ic[threadIdx.x][0] + ic[threadIdx.x][1] + ic[threadIdx.x][2] + ic[threadIdx.x][3];
}
bibase += SNDVALS;
}
}
|
25
|
//#include <hayai/hayai.hpp>
//
//#include "btree.cuh"
//
//#include "concurrent-xfasttrie-fixture.cu"
//
//using BTREE = gpu::BTree<key_type, mapped_type>;
//using BTreeInsertionFixture = XTrieInsertionFixture<BTREE, Structure::BTREE>;
//using BTreeGetThreadFixture = XTrieGetThreadFixture<BTREE, Structure::BTREE>;
//using BTreeGetWarpFixture = XTrieGetWarpFixture<BTREE, Structure::BTREE>;
//using BTreePredecessorFixture = XTriePredecessorFixture<BTREE, Structure::BTREE, true>;
//using BTreeSuccessorFixture = XTrieSuccessorFixture<BTREE, Structure::BTREE, true>;
//
//BENCHMARK_F(BTreeInsertionFixture, InsertionBtree, NUMBER_OF_RUNS, NUMBER_OF_ITERATIONS)
//{
// insert();
//}
///*
//BENCHMARK_F(BTreeGetThreadFixture, GetThreadBtree, NUMBER_OF_RUNS, NUMBER_OF_ITERATIONS)
//{
// get_thread();
//}
//
//BENCHMARK_F(BTreeGetWarpFixture, GetWarpBtree, NUMBER_OF_RUNS, NUMBER_OF_ITERATIONS)
//{
// get_warp();
//}
//
//BENCHMARK_F(BTreePredecessorFixture, PredecessorBtree, NUMBER_OF_RUNS, NUMBER_OF_ITERATIONS)
//{
// predecessor();
//}*/
///*
//BENCHMARK_F(BTreeSuccessorFixture, SuccessorBtree, NUMBER_OF_RUNS, NUMBER_OF_ITERATIONS)
//{
// successor();
//}*/
|
26
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#define MAX 65535
#define imin(a,b) (a<b?a:b)
const int arr_size =8;
const int threadsPerBlock = 256;
const int blocksPerGrid = imin(32,(arr_size +threadsPerBlock -1)/threadsPerBlock);
__global__ void kernel(float*arrA , float* arrB, float* arrC)
{
__shared__ float cache[threadsPerBlock];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int cacheIndex = threadIdx.x;
float temp = 0;
	while (tid < arr_size)
	{
		temp += arrA[tid] * arrB[tid];
		tid += blockDim.x * gridDim.x; // grid-stride step; the original added blockIdx.x * blockDim.x, which is 0 for block 0 and loops forever
	}
//set cache values
cache[cacheIndex] = temp;
__syncthreads();
	//tree reduction in shared memory (assumes threadsPerBlock is a power of two)
int i = blockDim.x / 2;
while (i != 0)
{
if (cacheIndex < i)
{
cache[cacheIndex] += cache[cacheIndex + i];
}
__syncthreads();
i /= 2;
}
if (cacheIndex == 0)
{
arrC[blockIdx.x] = cache[0];
}
}
int main(int argc, char **argv)
{
const int arr_bytes = arr_size * sizeof(float);
float arr_a[MAX];
float arr_b[MAX];
float partial_c[MAX];
float* dev_a;
float* dev_b;
float* partialdev_c;
int i;
float j = 1.0;
for (i = 0; i < arr_size; i++)
{
arr_a[i] = j;
arr_b[i] = j * j;
}
cudaMalloc((void**)&dev_a, arr_bytes);
cudaMalloc((void**)&dev_b, arr_bytes);
cudaMalloc((void**)&partialdev_c, blocksPerGrid * sizeof(float));
cudaMemcpy(dev_a, arr_a, arr_bytes, cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, arr_b, arr_bytes, cudaMemcpyHostToDevice);
kernel <<<blocksPerGrid,threadsPerBlock >>>(dev_a,dev_b,partialdev_c);
cudaMemcpy(partial_c, partialdev_c, blocksPerGrid*sizeof(float), cudaMemcpyDeviceToHost);
//calculate final dot product on cpu side
float c = 0;
for (i = 0; i < blocksPerGrid; i++)
{
c += partial_c[i];
}
printf("The value of Dot product is : %f\n", c);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(partialdev_c);
}
|
27
|
#include <stdio.h>
#include <stdlib.h>
#define N 5
#define BR() printf("\n")
#define BRS(str) printf("%s\n",str)
//Note: despite the name, this behaves as a LIFO stack (push/pop at 'top'), not a FIFO queue.
typedef struct {
  int top;
  int* data;
  int stack_size;
}FIFO;
void exec();
void initialize_array(int*);
void print_array(int*);
int main(int argc, char const *argv[]) {
exec();
return 0;
}
// __device__ int i,j,k;
__device__ int push(int new_data,FIFO* stack_t){
  if(stack_t->top >= stack_t->stack_size){ //'>' allowed one out-of-bounds write when the stack was exactly full
return -1;
}
stack_t->data[stack_t->top] = new_data;
stack_t->top++;
return 1;
}
__device__ int pop(FIFO* stack_t){
if(stack_t->top == 0){
return -1;
}
stack_t->top--;
return 1;
}
__device__ int initialize_stack(FIFO* stack_t,int stack_size){
stack_t->top = 0;
stack_t->stack_size = stack_size;
stack_t->data = (int*) malloc(stack_size*sizeof(int));
if(stack_t->data == NULL){
return -1;
}
return 1;
}
__device__ int top(FIFO* stack_t){
if(stack_t->top == 0){
return -1;
}
return stack_t->data[stack_t->top-1];
}
__device__ int isEmpty(FIFO* stack_t){
if(stack_t->top == 0)
return 1;
else
return 0;
}
__device__ void swap(int *x, int *y)
{
int tmp;
tmp = *x;
*x = *y;
*y = tmp;
}
__device__ void print_d_array(int *array){
int i;
BRS(__func__);
printf("blockIdx.x %d , threadIdx.x %d\n", blockIdx.x, threadIdx.x);
for (i = 0; i < N; i++) {
printf("%d ",array[i]);
}//for
BR();
}
__global__ void kernel_test_stack(int *d_array){
int status;
int i, x = 3, y = 6;
FIFO stack1;
print_d_array(d_array);
  //check the swap
printf("x: %d y: %d\n", x, y);
swap(&x,&y);
printf("x: %d y: %d\n", x, y);
  //check the stack operations
if ((status = initialize_stack(&stack1, N)) == -1) {
printf("initialize_stack error LINE:%d \n", __LINE__);
}
printf("blockIdx.x %d , threadIdx.x %d stack address %p x %p y%p \n", blockIdx.x, threadIdx.x, &stack1, &x, &y);
if(isEmpty(&stack1)){
BRS("Empty");
}//if
else{
BRS("NOT Empty");
}//else
for(i = 1 ; i < N ; i++){
push(i, &stack1);
printf("push: %d\n",i);
if(isEmpty(&stack1)){
BRS("Empty");
// printf("top: %d \n",top(&stack1));
}//if
else{
BRS("NOT Empty");
// printf("top: %d \n",top(&stack1));
}//else
}//for
for(i = 1 ; i < N ; i++){
pop(&stack1);
BRS("pop");
if(isEmpty(&stack1)){
BRS("Empty");
printf("top: %d \n",top(&stack1));
}//if
else{
BRS("NOT Empty");
printf("top: %d \n",top(&stack1));
}//else
  }//for
  free(stack1.data);//device-side malloc must be freed on the device
}//Kernel
void exec(){
int array[N];
int *d_array;
int iDev = 0;
dim3 grid, block;
cudaDeviceProp iProp;
cudaSetDevice(iDev);
cudaGetDeviceProperties(&iProp, iDev);
printf("Device %d: %s\n", iDev, iProp.name);
initialize_array(array);
print_array(array);
cudaMalloc((int**)&d_array, sizeof(array));
cudaMemcpy(d_array, array, sizeof(array), cudaMemcpyHostToDevice);
grid.x = 1;
block.x = 2;
kernel_test_stack<<<grid, block>>>(d_array);
cudaMemcpy(array, d_array, sizeof(array), cudaMemcpyDeviceToHost);
print_array(array);
cudaFree(d_array);
cudaDeviceReset();
}
void initialize_array(int* array){
int i;
for (i = 0; i < N; i++) {
array[i] = rand() % N * 2;
}//for
}//function
void print_array(int* array){
int i;
BRS(__func__);
for (i = 0; i < N; i++) {
printf("%d ",array[i]);
}//for
BR();
}//function
|
28
|
// nvcc -arch sm_21 -o test -run --keep --ptxas-options="-v" test.cu
#include <cuda.h>
#include <stdlib.h>
#include <stdio.h>
__global__ void transpose (int* Input, int* Output) {
}
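// The kernel above is an empty stub. A minimal naive sketch of what it might
// contain, assuming row-major storage and hypothetical explicit dimension
// parameters (the original signature carries no sizes):
__global__ void transpose_naive(const int* Input, int* Output, int width, int height)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x; // column in the input
    int y = blockIdx.y * blockDim.y + threadIdx.y; // row in the input
    if (x < width && y < height)
        Output[x * height + y] = Input[y * width + x];
}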
|
29
|
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
//Note that any functions that want to be called from the kernel must be preceeded with __device__
//Function we are integrating
__device__ float myFunction(float x){
	return powf(x,4.0f);//single-precision pow for float arguments
}
//Trapezoidal rule calculation
__device__ float trapezoidal(float a, float b){
return (b-a)*((myFunction(a)+myFunction(b))/2);
}
//Composite trap rule calculation
__device__ float composite_trapezoidal(float a, float b, int n){
float h=(b-a)/(n);
float total=0;
int i;
for (i=0;i<n;i++){
total=total+trapezoidal(a+i*h,a+(i+1)*h);
}
return total;
}
//This section runs on the GPUs
__global__ void kernel(float* arr, float A, float B, int P, int N){
//Who am I?
int id = blockIdx.x * blockDim.x + threadIdx.x;
//calculate number of intervals, where they start, and where they end, and what interval this processor will use
float intervalWidth = (B-A)/(P);
float intervalStart = A+(intervalWidth)*(id);
float intervalEnd = intervalStart+intervalWidth;
//calculate the partial sum of this interval
arr[id] = composite_trapezoidal(intervalStart,intervalEnd,N);
}
int main(int argc, char** argv){
//Process input from command line
	if (argc<4){ //three arguments are read below (a, b, N)
printf("Please enter a,b,N\n");
return 1;
}
float A=atof(argv[1]);
float B=atof(argv[2]);
int N=atoi(argv[3]);
printf("Integrating x^4 from %.3f to %.3f with %d points\n", A, B, N);
//How many threads will we use and how much data is in each thread?
int elements = 512;
int bytes = elements * sizeof(float);
//Create pointers to host and device arrays
float *hostArray = 0;
float *deviceArray = 0;
//Create the array on the host and on the GPU
hostArray = (float*) malloc(bytes);
cudaMalloc((void**)&deviceArray, bytes);
int blockSize = 128;
int gridSize = elements / blockSize;
//Instruct each GPU core to run its kernel section
kernel<<<gridSize,blockSize>>>(deviceArray, A, B, elements, N);
//Gather all the partial sums
cudaMemcpy(hostArray, deviceArray, bytes, cudaMemcpyDeviceToHost);
//Reduce the partial sums to a single integral
float sum = 0;
for(int i=0; i < elements; ++i){
sum += hostArray[i];
}
//Print result
printf("Integrating x^4 from %.3f to %.3f with %d points is: %.3f\n", A, B, N, sum);
//Deallocate the two arrays
free(hostArray);
cudaFree(deviceArray);
//Exit from the calling program
return 0;
}
|
30
|
#include "cuda_runtime.h"
#include "stdio.h"
#include "stdlib.h"
#include "string.h"
#include "time.h"
#define A_w 50
#define A_h 50
#define B_w 32
#define B_h 32
typedef struct{
int width;
int height;
float * elements;
}Matrix;
// #define
void rightKronecker1(Matrix A, Matrix B, Matrix C){
for(int c_row=0; c_row<C.height; c_row++){
for(int c_col=0; c_col<C.width; c_col++){
C.elements[c_col + c_row*C.width] =
A.elements[c_col/B.width + c_row/B.height * A.width]
* B.elements[c_col%B.width + c_row%B.height*B.width];
}
}
}
void rightKronecker2(Matrix A, Matrix B, Matrix C){
for(int a_row=0; a_row<A.height; a_row++){
for(int a_col=0; a_col<A.width; a_col++){
for(int b_row=0; b_row<B.height; b_row++){
for(int b_col=0; b_col<B.width; b_col++){
C.elements[(b_col+a_col*B.width)+(b_row+a_row*B.height)*A.width*B.width]
= A.elements[a_col+a_row*A.width] * B.elements[b_col+b_row*B.width];
}
}
}
}
}
void generatorNum(float* array, int num)
{
// srand((unsigned)time(NULL));
for(int i=0;i<num;i++)
{
array[i]=rand()%5;
}
}
void printUsage(void)
{
printf("\n");
printf("The program aims to calculate the product of matrix A and B\n");
printf("-h matrix A row num\n");
printf("-w matrix A col num\n");
printf("-H matrix B row num\n");
printf("-W matrix B col num\n");
}
int main(int argc,char** argv){
// int A_w,B_w,A_h,B_h;
// if(argc==1)
// {
// printf("Error: no enough parameters.Please input the col and row number of Matrix A and B,respectively\n");
// exit(0);
// }
// else if(argc==2)
// {
// if(strcmp("--help",argv[1])==0)
// {
// printUsage();
// exit(0);
// }
// }
// for(int id=1;id<argc;id+=2)
// {
// if(strcmp("-h",argv[id])==0)
// A_h=atoi(argv[id+1]);
// else if(strcmp("-w",argv[id])==0)
// A_w=atoi(argv[id+1]);
// else if(strcmp("-W",argv[id])==0)
// B_w=atoi(argv[id+1]);
// else if(strcmp("-H",argv[id])==0)
// B_h=atoi(argv[id+1]);
// }
// Matrix A,d_A,B,d_B,C,d_C;
Matrix A, B, C1, C2;
A.width=A_w;A.height=A_h;
B.width=B_w;B.height=B_h;
C1.width=A_w*B_w;C1.height=A_h*B_h;
C2.width=A_w*B_w;C2.height=A_h*B_h;
A.elements=(float *)malloc(A.width*A.height*sizeof(float));
B.elements=(float *)malloc(B.width*B.height*sizeof(float));
C1.elements=(float *)malloc(C1.width*C1.height*sizeof(float));
C2.elements=(float *)malloc(C2.width*C2.height*sizeof(float));
generatorNum(A.elements,A.width*A.height);
generatorNum(B.elements,B.width*B.height);
memset(C1.elements,0,C1.width*sizeof(float)*C1.height);
memset(C2.elements,0,C2.width*sizeof(float)*C2.height);
// printf("A.elements:\n");
// for(int i=0;i<A.height;i++){
// for(int j=0;j<A.width;j++){
// printf("%d ", int(A.elements[j+i*A.width]));
// }
// printf("\n");
// }
// printf("B.elements:\n");
// for(int i=0;i<B.height;i++){
// for(int j=0;j<B.width;j++){
// printf("%d ", int(B.elements[j+i*B.width]));
// }
// printf("\n");
// }
srand(time(0));
clock_t start,finish1, finish2;
start=clock();
rightKronecker1(A, B, C1);
finish1=clock();
rightKronecker2(A, B, C2);
finish2=clock();
// printf("C1.elements:\n");
// for(int i=0;i<C1.height;i++){
// for(int j=0;j<C1.width;j++){
// printf("%d ", C1.elements[j+i*C1.width]);
// }
// printf("\n");
// }
// printf("C2.elements:\n");
// for(int i=0;i<C2.height;i++){
// for(int j=0;j<C2.width;j++){
// printf("%d ", C2.elements[j+i*C2.width]);
// }
// printf("\n");
// }
printf("Difference between 2 method:\n");
float diff = 0;
for(int i=0;i<C2.height;i++){
for(int j=0;j<C2.width;j++){
diff = C2.elements[j+i*C2.width] - C1.elements[j+i*C2.width];
}
}
printf("%f\n", diff);
printf("method1 cost time %f ms\n",(finish1-start)*1000.0/CLOCKS_PER_SEC);
printf("method2 cost time %f ms\n",(finish2-finish1)*1000.0/CLOCKS_PER_SEC);
// malloc matrix A B C on GPU
// cudaMalloc(&d_A.elements,sizeof(float)*A.width*A.height);
// cudaMalloc(&d_B.elements,sizeof(float)*B.width*B.height);
// cudaMalloc(&d_C.elements,sizeof(float)*C.width*C.height);
return 0;
}
|
31
|
/* Block size X: 32 */
__global__ void fct_ale_b2(const int maxLevels, const double dt, const double fluxEpsilon, const int * __restrict__ nLevels, const double * __restrict__ area_inv, const double * __restrict__ fct_ttf_max, const double * __restrict__ fct_ttf_min, double * __restrict__ fct_plus, double * __restrict__ fct_minus)
{
int index = 0;
double area_item = 0;
for ( int level = threadIdx.x; level < nLevels[blockIdx.x] - 1; level += 32 )
{
index = (blockIdx.x * maxLevels) + level;
area_item = area_inv[index + blockIdx.x];
fct_plus[index] = fmin(1.0, fct_ttf_max[index] / (fct_plus[index] * dt * area_item + fluxEpsilon));
fct_minus[index] = fmin(1.0, fct_ttf_min[index] / (fct_minus[index] * dt * area_item - fluxEpsilon));
}
}
|
32
|
#include "includes.h"
using namespace std;
__global__ void setValue(float *data, int idx, float value) {
if(threadIdx.x == 0) {
data[idx] = value;
}
}
|
33
|
#include "includes.h"
__device__ float sigmoid(float x) {
return 1.0f / (1 + __expf(-x));
}
__global__ void sigmoidActivationForward(float* Z, float* A, int Z_x_dim, int Z_y_dim) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < Z_x_dim * Z_y_dim) {
A[index] = sigmoid(Z[index]);
}
}
|
34
|
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <sys/time.h>
// #define NUM_PARTICLES 10000
// #define NUM_ITERATIONS 10000
// int TPB = 16;
#define SEED 10
#define EPSILON 1e-5
typedef struct {
float3 position;
float3 velocity;
} Particle;
// Deterministically generates a "random" float, provided a seed and 3 integers.
__host__ __device__ float gen_random(int seed, int a, int b, int c) {
return (float)((seed * a + b) % c) / c;
}
// Given an array of particles and an index, print that particle.
void printParticle(Particle* particles, int index){
printf("%f %f %f %f %f %f\n",
particles[index].position.x, particles[index].position.y, particles[index].position.z,
particles[index].velocity.x, particles[index].velocity.y, particles[index].velocity.z);
}
// Compare two arrays of Particles. If their position coordinates are all within EPSILON of each other,
// return true, else false.
__host__ bool arraysMatch(Particle* arr1, Particle* arr2, int num_particles)
{
for (int i = 0; i < num_particles; i++) {
if (fabs(arr1[i].position.x - arr2[i].position.x) > EPSILON ||
fabs(arr1[i].position.y - arr2[i].position.y) > EPSILON ||
fabs(arr1[i].position.z - arr2[i].position.z) > EPSILON)
return false;
}
return true;
}
// Get the current time
double cpuSecond() {
struct timeval tp;
gettimeofday(&tp,NULL);
return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6);
}
// Replaces the x, y and z values in a float3 to random values between 0 and 1.
void randomizeFloat3(float3* f3) {
f3->x = (float) rand() / RAND_MAX;
f3->y = (float) rand() / RAND_MAX;
f3->z = (float) rand() / RAND_MAX;
}
// Randomizes the position and velocity of all Particles in an array.
void randomizeParticles(Particle* particles, int num_particles) {
srand(0);
for (int i = 0; i < num_particles; i++) {
randomizeFloat3(&particles[i].position);
randomizeFloat3(&particles[i].velocity);
}
}
// Updates a particle's position by its velocity, then updates its velocity
__host__ __device__ void updateParticle(Particle* particle, int id, int iter, int num_particles) {
int dt = 1;
// update position
particle->position.x += dt * particle->velocity.x;
particle->position.y += dt * particle->velocity.y;
particle->position.z += dt * particle->velocity.z;
// update the velocity randomly
particle->velocity.x += gen_random(SEED, id, iter, num_particles);
particle->velocity.y += gen_random(SEED, id, iter, num_particles);
particle->velocity.z += gen_random(SEED, id, iter, num_particles);
}
// CPU function that updates a given particle.
void cpu_updatePositionAndVelocity(Particle* particle, int id, int iter, int num_particles) {
updateParticle(particle, id, iter, num_particles);
}
// Kernel that finds a given Particle's ID then updates it if within range.
__global__ void gpu_updatePositionAndVelocity(Particle* particles, int iter, int num_particles) {
const int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= num_particles) // If out of bounds, ignore the Particle.
return;
else
updateParticle(&particles[id], id, iter, num_particles);
}
// Perform the update step for all Particles in the array on CPU with a for loop.
void cpu_updateParticles(Particle* particles, int iter, int num_particles) {
// srand(time(NULL))
for (int i = 0; i < num_particles; i++) {
cpu_updatePositionAndVelocity(&particles[i], i, iter, num_particles);
}
}
// Perform the update step for all Particles in the array by launching GPU kernels.
void gpu_updateParticles(Particle* particles, int iter, int num_particles, int tpb) {
gpu_updatePositionAndVelocity<<<(num_particles + tpb - 1)/tpb, tpb>>>(particles, iter, num_particles);
}
int main(int argc, char** argv) {
printf("Running the simulations with the following params:\n");
if (argc < 5) {
printf("Usage: ./a NUM_PARTICLES NUM_ITERATIONS TPB INCLUDE_CPU\nExample usage: ./a 10000 10000 32 include_cpu\n");
return -1;
}
// reading the command line arguments, without any kind of error checking
const int num_particles = (int) strtol(argv[1], NULL, 10); // e.g. 10000 - NULL is the endpointer and 10 is the base
const int num_iterations = (int) strtol(argv[2], NULL, 10); // e.g. 10000
const int tpb = (int) strtol(argv[3], NULL, 10); // e.g. 32
const char* include_cpu = argv[4];
printf("======== %s: %d, %s: %d, %s: %d\n\n", "num_particles", num_particles, "num_iterations", num_iterations, "tpb", tpb);
// Declare variables
Particle *c_particles, *g_particles, *g_result;
double iStart, iElaps;
// Initialize array for CPU
c_particles = (Particle*) malloc(num_particles*sizeof(Particle));
randomizeParticles(c_particles, num_particles);
// Initialize array for GPU - particle positions/velocities in device memory are a copy of those in host memory
// g_result = (Particle*) malloc(num_particles*sizeof(Particle)); // Used to store the result of GPU simulation
// cudaMallocHost(&g_result, num_particles*sizeof(Particle));
// cudaMalloc(&g_particles, num_particles*sizeof(Particle));
cudaMallocManaged(&g_particles, num_particles*sizeof(Particle));
iStart = cpuSecond();
memcpy(g_particles, c_particles, num_particles*sizeof(Particle));
double copy_time = cpuSecond() - iStart;
// CPU Version
    if (strcmp(include_cpu, "include_cpu") == 0) { // perform the CPU version if requested by the user
printf("CPU simulation started...\n"); fflush(stdout);
iStart = cpuSecond();
for (int i = 0; i < num_iterations; i++) {
cpu_updateParticles(c_particles, i, num_particles);
}
iElaps = cpuSecond() - iStart;
printf("Done in %f!\n\n", iElaps); fflush(stdout);
}
else
printf("Excluded the CPU experiment...\n\n");
// GPU Version
printf("GPU simulation started...\n"); fflush(stdout);
iStart = cpuSecond();
for (int i = 0; i < num_iterations; i++) {
// cudaMemcpy(g_particles, g_result, num_particles*sizeof(Particle), cudaMemcpyHostToDevice);
gpu_updateParticles(g_particles, i, num_particles, tpb);
cudaDeviceSynchronize();
// cudaMemcpy(g_result, g_particles, num_particles*sizeof(Particle), cudaMemcpyDeviceToHost);
}
iElaps = cpuSecond() - iStart;
printf("Done in %f!\n\n", iElaps + copy_time); fflush(stdout);
    // copying the result back from the GPU memory to the CPU memory
// cudaMemcpy(g_result, g_particles, num_particles*sizeof(Particle), cudaMemcpyDeviceToHost);
    // if the CPU version was performed, compare it with the GPU version
if (strcmp(include_cpu, "include_cpu") == 0)
printf(arraysMatch(g_particles, c_particles, num_particles) ? "Results match!\n" : "Results are wrong!\n");
// printf(arraysMatch(g_result, c_particles, num_particles) ? "Results match!\n" : "Results are wrong!\n");
printf("========================================================== \n\n\n");
// Free arrays
free(c_particles);
cudaFree(g_particles);
}
|
35
|
__device__ void rot_x(float3 *vec, float angle)
{
float tmp;
tmp = vec->y;
vec->y = tmp * cosf(angle) + vec->z * -sinf(angle);
vec->z = tmp * sinf(angle) + vec->z * cosf(angle);
}
__device__ void rot_y(float3 *vec, float angle)
{
float tmp;
tmp = vec->x;
vec->x = tmp * cosf(angle) + vec->z * sinf(angle);
vec->z = tmp * -sinf(angle) + vec->z * cosf(angle);
}
__device__ void rot_z(float3 *vec, float angle)
{
float tmp;
tmp = vec->x;
vec->x = tmp * cosf(angle) + vec->y * -sinf(angle);
vec->y = tmp * sinf(angle) + vec->y * cosf(angle);
}
__device__ void rot_vec(float3 *vec, float3 angle)
{
rot_x(vec, angle.x);
rot_y(vec, angle.y);
rot_z(vec, angle.z);
}
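// A minimal usage sketch (hypothetical kernel, not part of the original):
// rotate an array of points by fixed Euler angles, applying X, then Y, then Z.
__global__ void rotate_points(float3 *points, float3 angle, int n)
{
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < n)
		rot_vec(&points[i], angle);
}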
|
36
|
#include "includes.h"
__global__ void cuSetupSincKernel_kernel(float *r_filter_, const int i_filtercoef_, const float r_soff_, const float r_wgthgt_, const int i_weight_, const float r_soff_inverse_, const float r_beta_, const float r_decfactor_inverse_, const float r_relfiltlen_inverse_)
{
int i = threadIdx.x + blockDim.x*blockIdx.x;
if(i > i_filtercoef_) return;
float r_wa = i - r_soff_;
float r_wgt = (1.0f - r_wgthgt_) + r_wgthgt_*cos(PI*r_wa*r_soff_inverse_);
float r_s = r_wa*r_beta_*r_decfactor_inverse_*PI;
float r_fct;
if(r_s != 0.0f) {
r_fct = sin(r_s)/r_s;
}
else {
r_fct = 1.0f;
}
if(i_weight_ == 1) {
r_filter_[i] = r_fct*r_wgt;
}
else {
r_filter_[i] = r_fct;
}
//printf("kernel %d %f\n", i, r_filter_[i]);
}
|
37
|
#include "includes.h"
using namespace std;
struct compressed_sparse_column {
int* data;
int* row;
int* column;
int* index_column;
int* index_row_start;
int* index_row_end;
};
struct graph {
compressed_sparse_column* dataset;
bool* roots;
bool* leaves;
bool* singletons;
int vertices;
int edges;
};
__global__ void pre_post_order(int* depth, int* zeta, int* zeta_tilde, graph* dataset_graph) {
int* pre = new int[dataset_graph->vertices];
int* post = new int[dataset_graph->vertices];
memset(pre, 0, dataset_graph->vertices * sizeof(int));
memset(post, 0, dataset_graph->vertices * sizeof(int));
bool* incoming_edges = new bool[dataset_graph->edges];
memset(incoming_edges, false, dataset_graph->edges * sizeof(bool));
bool* q = new bool[dataset_graph->vertices];
	memcpy(q, dataset_graph->roots, sizeof(bool) * dataset_graph->vertices);//roots is bool*, so sizeof(int) here overran both buffers
while(true) {
bool* p = new bool[dataset_graph->vertices];
memset(p, false, dataset_graph->vertices * sizeof(bool));
bool global_check = false;
for(int i = 0; i < dataset_graph->vertices; i++) {
if( q[i] ) {
int pre_node = pre[i];
int post_node = post[i];
for(int j = dataset_graph->dataset->index_column[i]; dataset_graph->dataset->column[j] == i; j++) {
int neighbor_vertex = dataset_graph->dataset->row[j];
// zeta[i] = undefined!
pre[neighbor_vertex] = pre_node + zeta_tilde[neighbor_vertex];
post[neighbor_vertex] = post_node + zeta_tilde[neighbor_vertex];
incoming_edges[j] = true;
bool flag = true;
for(int k = 0; k < dataset_graph->edges; k++) {
if( dataset_graph->dataset->row[k] == neighbor_vertex && !incoming_edges[k] ) {
flag = false;
break;
}
}
if( flag ) {
global_check = true;
p[neighbor_vertex] = true;
}
}
pre[i] = pre_node + depth[i];
post[i] = post_node + (zeta[i] - 1);
}
}
		delete[] q; //release the previous frontier before adopting the new one
		q = p;
if( !global_check ) {
break;
}
	}
	delete[] q;
	delete[] pre;
	delete[] post;
	delete[] incoming_edges;
}
|
38
|
#include "includes.h"
__global__ void gpu_transpo_kernel_naive(u_char *Source, u_char *Resultat, unsigned width, unsigned height){
int j = blockIdx.x*blockDim.x + threadIdx.x;
int i = blockIdx.y*blockDim.y + threadIdx.y;
    if ((i < height) && (j < width)) {
        Resultat[j*height + i] = Source[i*width + j];
    }
}
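// For contrast with the naive version above, a tiled sketch (assuming 32x32
// thread blocks) that stages the block in shared memory so both the global
// read and the global write are coalesced; the 33-wide rows avoid
// shared-memory bank conflicts.
__global__ void gpu_transpo_kernel_shared(u_char *Source, u_char *Resultat, unsigned width, unsigned height) {
    __shared__ u_char tile[32][33];
    int j = blockIdx.x * 32 + threadIdx.x;
    int i = blockIdx.y * 32 + threadIdx.y;
    if ((i < height) && (j < width)) {
        tile[threadIdx.y][threadIdx.x] = Source[i*width + j];
    }
    __syncthreads();
    // swap the block indices for the write: element (ti, tj) of the transposed image
    int tj = blockIdx.y * 32 + threadIdx.x;
    int ti = blockIdx.x * 32 + threadIdx.y;
    if ((ti < width) && (tj < height)) {
        Resultat[ti*height + tj] = tile[threadIdx.x][threadIdx.y];
    }
}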
|
39
|
#include <cstdio>
#include <cstdlib>
#include <time.h>
#include "cuda_timer.cuh"
#define SafeTimerCall(err) __safeTimerCall(err, __FILE__, __LINE__)
inline void __safeTimerCall(cudaError err, const char *file, const int line) {
#pragma warning(push)
#pragma warning(disable: 4127) // prevent MSVC warning C4127 on do-while(0)
do {
if (cudaSuccess != err) {
fprintf(stderr, "CudaTimer failed at %s:%i : %s\n", file, line, cudaGetErrorString(err));
exit(-1);
}
} while (0);
#pragma warning(pop)
return;
}
CudaTimer::CudaTimer() {
SafeTimerCall(cudaEventCreate(&_begEvent));
SafeTimerCall(cudaEventCreate(&_endEvent));
return;
}
CudaTimer::~CudaTimer() {
SafeTimerCall(cudaEventDestroy(_begEvent));
SafeTimerCall(cudaEventDestroy(_endEvent));
return;
}
void CudaTimer::start() {
SafeTimerCall(cudaEventRecord(_begEvent, 0));
return;
}
void CudaTimer::stop() {
SafeTimerCall(cudaEventRecord(_endEvent, 0));
return;
}
float CudaTimer::value() {
SafeTimerCall(cudaEventSynchronize(_endEvent));
float timeVal;
SafeTimerCall(cudaEventElapsedTime(&timeVal, _begEvent, _endEvent));
	return timeVal / 1000.0f; // cudaEventElapsedTime reports milliseconds; convert to seconds
}
|
40
|
#include "cuda.h"
typedef long long int64;
__global__ void ReceiveFun(double *out, const double*vx, const double*vy,
const double*sigmaxx, const double*sigmayy, const double*sigmaxy, int64 nt,
const int64 *rcvi, const int64 *rcvj, const int64 *rcvtype, int64 nrcv, int64 NX, int64 NY){
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i>=nrcv) return;
int idx = (rcvi[i]-1)*(NY+2) + rcvj[i]-1;
switch (rcvtype[i])
{
case 0:
for(int k=0;k<nt;k++)
out[nt*i+k] = vx[k*(NX+2)*(NY+2)+idx];
break;
case 1:
for(int k=0;k<nt;k++)
out[nt*i+k] = vy[k*(NX+2)*(NY+2)+idx];
break;
case 2:
for(int k=0;k<nt;k++)
out[nt*i+k] = sigmaxx[k*(NX+2)*(NY+2)+idx];
break;
case 3:
for(int k=0;k<nt;k++)
out[nt*i+k] = sigmayy[k*(NX+2)*(NY+2)+idx];
break;
case 4:
for(int k=0;k<nt;k++)
out[nt*i+k] = sigmaxy[k*(NX+2)*(NY+2)+idx];
break;
default:
break;
}
}
void forwardGPU(double *out, const double*vx, const double*vy,
const double*sigmaxx, const double*sigmayy, const double*sigmaxy, int64 nt,
const int64 *rcvi, const int64 *rcvj, const int64 *rcvtype, int64 nrcv, const int64* nx, const int64* ny){
long long NX, NY;
cudaMemcpy(&NX, nx, sizeof(long long), cudaMemcpyDeviceToHost);
cudaMemcpy(&NY, ny, sizeof(long long), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
ReceiveFun<<<(nrcv+255)/256, 256>>>(out, vx, vy, sigmaxx, sigmayy, sigmaxy, nt,
rcvi, rcvj, rcvtype, nrcv, NX, NY);
}
__global__ void Zero(const long long size, double* out) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i<size) out[i] = 0.0;
}
__global__ void ReceiveGrad(
double*d_vx, double*d_vy,
double*d_sigmaxx, double*d_sigmayy, double*d_sigmaxy, const double *d_out,
int64 nt, const int64 *rcvi, const int64 *rcvj, const int64 *rcvtype, int64 nrcv, int64 NX, int64 NY) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i>=nrcv) return;
int idx = (rcvi[i]-1)*(NY+2) + rcvj[i]-1;
switch (rcvtype[i])
{
case 0:
for(int k=0;k<nt;k++)
d_vx[k*(NX+2)*(NY+2)+idx] += d_out[nt*i+k];
break;
case 1:
for(int k=0;k<nt;k++){
// printf("Top gradients: %f\n", d_out[nt*i+k]);
d_vy[k*(NX+2)*(NY+2)+idx] += d_out[nt*i+k];
}
break;
case 2:
for(int k=0;k<nt;k++)
d_sigmaxx[k*(NX+2)*(NY+2)+idx] += d_out[nt*i+k];
break;
case 3:
for(int k=0;k<nt;k++)
d_sigmayy[k*(NX+2)*(NY+2)+idx] += d_out[nt*i+k];
break;
case 4:
for(int k=0;k<nt;k++)
d_sigmaxy[k*(NX+2)*(NY+2)+idx] += d_out[nt*i+k];
break;
default:
break;
}
}
void backwardGPU(
double*d_vx, double*d_vy,
double*d_sigmaxx, double*d_sigmayy, double*d_sigmaxy, const double *d_out,
int64 nt, const int64 *rcvi, const int64 *rcvj, const int64 *rcvtype, int64 nrcv, const int64* nx, const int64* ny){
long long NX, NY;
cudaMemcpy(&NX, nx, sizeof(long long), cudaMemcpyDeviceToHost);
cudaMemcpy(&NY, ny, sizeof(long long), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
Zero<<<(nt*(NX+2)*(NY+2)+255)/256, 256>>>(nt*(NX+2)*(NY+2), d_vx);
Zero<<<(nt*(NX+2)*(NY+2)+255)/256, 256>>>(nt*(NX+2)*(NY+2), d_vy);
Zero<<<(nt*(NX+2)*(NY+2)+255)/256, 256>>>(nt*(NX+2)*(NY+2), d_sigmaxx);
Zero<<<(nt*(NX+2)*(NY+2)+255)/256, 256>>>(nt*(NX+2)*(NY+2), d_sigmayy);
Zero<<<(nt*(NX+2)*(NY+2)+255)/256, 256>>>(nt*(NX+2)*(NY+2), d_sigmaxy);
ReceiveGrad<<<(nrcv+255)/256, 256>>>(d_vx, d_vy, d_sigmaxx, d_sigmayy, d_sigmaxy,
d_out, nt, rcvi, rcvj, rcvtype, nrcv, NX, NY);
}
|
41
|
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <time.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#define X_SIZE 10240
#define Y_SIZE 16384
#define ARRAY_SIZE (X_SIZE*Y_SIZE)
#define BLOCK_SIZE_X 32
#define BLOCK_SIZE_Y 32
#define TIMESTEPS 1000
const char* input_file_name = "input.dat";
const char* output_file_name = "output.dat";
void prtdat(int nx, int ny, float *current, const char *fnam);
void inidat(int nx, int ny, float *u);
void printDevProp(cudaDeviceProp devProp)
{
printf("Major revision number: %d\n", devProp.major);
printf("Minor revision number: %d\n", devProp.minor);
printf("Name: %s\n", devProp.name);
printf("Total global memory: %u or %uKB or %uMB\n", devProp.totalGlobalMem, devProp.totalGlobalMem/1024, devProp.totalGlobalMem / (1024*1024), devProp.totalGlobalMem / 1024 / 1024 / 1024);
printf("Total shared memory per block: %u\n", devProp.sharedMemPerBlock);
printf("Total registers per block: %d\n", devProp.regsPerBlock);
printf("Warp size: %d\n", devProp.warpSize);
printf("Maximum memory pitch: %u\n", devProp.memPitch);
printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]);
for (int i = 0; i < 3; ++i)
printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]);
printf("Clock rate: %d\n", devProp.clockRate);
printf("Total constant memory: %u\n", devProp.totalConstMem);
printf("Texture alignment: %u\n", devProp.textureAlignment);
printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No"));
printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No"));
return;
}
__global__ void kernelCalculateNewGenerationWithSharedMemory(float* current, float* next, int ny, int nx) {
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
const float cx = 0.1;
const float cy = 0.1;
int me = ix + iy * nx,
east = ix + 1 + iy * nx,
west = ix - 1 + iy * nx,
north = ix + (iy - 1) * nx,
south = ix + (iy + 1) * nx;
// INIT SHARED MEMORY
__shared__ float dev_sharedMem[BLOCK_SIZE_Y][BLOCK_SIZE_X];
dev_sharedMem[threadIdx.y][threadIdx.x] = current[me];
__syncthreads();
/* The point to update doesn't need an element that's "included" in this block */
if ((threadIdx.x > 0) && (threadIdx.x < (BLOCK_SIZE_X - 1)) &&
(threadIdx.y > 0) && (threadIdx.y < (BLOCK_SIZE_Y - 1))
) {
next[me] = cx * (dev_sharedMem[threadIdx.y][threadIdx.x-1] + dev_sharedMem[threadIdx.y][threadIdx.x+1] - 2.0f * dev_sharedMem[threadIdx.y][threadIdx.x]) +
cy * (dev_sharedMem[threadIdx.y - 1][threadIdx.x] + dev_sharedMem[threadIdx.y + 1][threadIdx.x] - 2.0f * dev_sharedMem[threadIdx.y][threadIdx.x]) +
dev_sharedMem[threadIdx.y][threadIdx.x];
}
else if (ix > 0 && ix < X_SIZE - 1 && iy > 0 && iy < Y_SIZE - 1) {
next[me] =
cx * (current[east] + current[west] - 2.0f * current[me]) +
cy * (current[south] + current[north] - 2.0f * current[me]) +
current[me];
}
}
__global__ void kernelCalculateNewGeneration(float* current, float* next, int ny, int nx) {
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
const float cx = 0.1;
const float cy = 0.1;
int me = ix + iy * nx,
east = ix + 1 + iy * nx,
west = ix - 1 + iy * nx,
north = ix + (iy - 1) * nx,
south = ix + (iy + 1) * nx;
if (ix > 0 && ix < X_SIZE-1 && iy > 0 && iy < Y_SIZE-1) {
next[me] =
cx * (current[east] + current[west] - 2.0f * current[me]) +
cy * (current[south] + current[north] - 2.0f * current[me]) +
current[me];
}
}
#define CEILDIV(a,b) (((a)+(b)-1)/(b))
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
int main() {
float *dev_heatmap, *heatmap;
float *dev_current_map, *dev_next_map;
int iz;
float duration = 0;
cudaEvent_t startEvent, endEvent;
gpuErrchk(cudaEventCreate(&startEvent));
gpuErrchk(cudaEventCreate(&endEvent));
heatmap = (float*)malloc(ARRAY_SIZE*sizeof(float));
printf("Grid is %dx%d and block is %dx%d\n", CEILDIV(X_SIZE, BLOCK_SIZE_X), CEILDIV(Y_SIZE, BLOCK_SIZE_Y), BLOCK_SIZE_X, BLOCK_SIZE_Y);
// KERNEL CALL PARAMETRES INIT
dim3 blockDim(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 gridDim(CEILDIV(X_SIZE, BLOCK_SIZE_X), CEILDIV(Y_SIZE, BLOCK_SIZE_Y));
// CPU ARRAY INITIALIZATION
inidat(X_SIZE, Y_SIZE, heatmap);
prtdat(X_SIZE, Y_SIZE, heatmap, input_file_name);
// GPU INIT
gpuErrchk(cudaSetDevice(0));
cudaDeviceProp prop;
gpuErrchk(cudaGetDeviceProperties(&prop, 0));
// Init timer to count the GPU processing time
// GPU processing time = Moving data from host to device + main loop (processing elements) + moving data from device to host
cudaEventRecord(startEvent);
// GPU MEMORY INIT
	gpuErrchk(cudaMalloc(&dev_heatmap, 2 * sizeof(float)*ARRAY_SIZE));
gpuErrchk(cudaMemcpy(dev_heatmap, heatmap, sizeof(float)*ARRAY_SIZE, cudaMemcpyHostToDevice));
memset(heatmap, '\0', sizeof(float)*ARRAY_SIZE);
// PRE LOOP INITIALIZATIONS
iz = 0;
dev_current_map = dev_heatmap;
dev_next_map = dev_heatmap + ARRAY_SIZE;
// MAIN LOOP
for (int t = 0 ; t < TIMESTEPS ; t++) {
dev_current_map = dev_heatmap + ARRAY_SIZE * iz;
dev_next_map = dev_heatmap + ARRAY_SIZE * (1 - iz);
		// KERNEL CALL: the grid configuration comes first in <<<...>>>; the original passed
		// <<<blockDim, gridDim>>>, which requests a 320x512-thread block and fails to launch
		//kernelCalculateNewGeneration<<<gridDim,blockDim>>>(dev_current_map,dev_next_map,Y_SIZE,X_SIZE);
		kernelCalculateNewGenerationWithSharedMemory<<<gridDim,blockDim>>>(dev_current_map, dev_next_map, Y_SIZE, X_SIZE);
iz = 1 - iz;
}
gpuErrchk(cudaMemcpy(heatmap, dev_next_map, sizeof(float)*ARRAY_SIZE, cudaMemcpyDeviceToHost));
gpuErrchk(cudaEventRecord(endEvent));
cudaDeviceSynchronize();
prtdat(X_SIZE, Y_SIZE, heatmap, output_file_name);
gpuErrchk(cudaEventElapsedTime(&duration, startEvent, endEvent));
printf("GPU elapsed time: %f\n", duration);
return 0;
}
void inidat(int nx, int ny, float *u) {
int ix, iy;
for (ix = 0; ix <= nx - 1; ix++)
for (iy = 0; iy <= ny - 1; iy++)
*(u + ix + nx * iy) = (float)(ix * (nx - ix - 1) * iy * (ny - iy - 1));
}
void prtdat(int nx, int ny, float *current, const char *fnam) {
int ix, iy;
FILE *fp;
fp = fopen(fnam, "w");
for (iy = 0; iy < Y_SIZE; iy++) {
for (ix = 0; ix < nx; ix++) {
fprintf(fp, "%6.1f", *(current + ix + nx*iy));
if (ix != nx - 1)
fprintf(fp, " ");
else
fprintf(fp, "\n");
}
}
fclose(fp);
}
|
42
|
#include "includes.h"
__global__ void elementwise_1D_1D_add(float* in1, float* in2, float* out, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
	for (; tid < size; tid += stride)
		out[tid] = in1[tid] + in2[tid]; // the loop condition already guarantees tid < size
}
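// Typical launch for a grid-stride kernel like the one above (a hedged sketch):
// cap the block count so any input size is covered by the stride loop, e.g.
//   elementwise_1D_1D_add<<<min((size + 255) / 256, 1024), 256>>>(in1, in2, out, size);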
|
43
|
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include "cuda.h"
//device function
__global__ void kernelAddVectors(int N, double *a, double *b, double *c) {
int threadid = threadIdx.x; //thread number
int blockid = blockIdx.x; //block number
int Nblock = blockDim.x; //number of threads in a block
int id = threadid + blockid*Nblock;
if (id < N) {
c[id] = a[id] + b[id];
}
}
int main(int argc, char **argv) {
  // get the vector size and thread count from the command line
  if (argc < 3) { printf("usage: %s N threadsPerBlock\n", argv[0]); return 1; }
  int N = atoi(argv[1]);
//seed RNG
double seed = clock();
srand48(seed);
double *h_a, *h_b, *h_c; //host vectors
// allocate storage
h_a = (double *) malloc(N*sizeof(double));
h_b = (double *) malloc(N*sizeof(double));
h_c = (double *) malloc(N*sizeof(double));
//populate a and b
for (int n=0;n<N;n++) {
h_a[n] = drand48();
h_b[n] = drand48();
}
double hostStart = clock();
// c = a + b
for (int n=0;n<N;n++) {
h_c[n] = h_a[n] + h_b[n];
}
double hostEnd = clock();
double hostTime = (hostEnd - hostStart)/(double) CLOCKS_PER_SEC;
size_t inputMem = 2*N*sizeof(double); //number of bytes the operation inputs
size_t outMem = 1*N*sizeof(double); //number of bytes the operation outputs
size_t totalMem = (inputMem+outMem);
printf("The host took %f seconds to add a and b \n", hostTime);
printf("The efective bandwidth of the host was: %f GB/s\n", totalMem/(1E9*hostTime));
//Device arrays
double *d_a, *d_b, *d_c;
//allocate memory on the Device with cudaMalloc
cudaMalloc(&d_a,N*sizeof(double));
cudaMalloc(&d_b,N*sizeof(double));
cudaMalloc(&d_c,N*sizeof(double));
double copyStart = clock();
//copy data from the host to the device
cudaMemcpy(d_a,h_a,N*sizeof(double),cudaMemcpyHostToDevice);
cudaMemcpy(d_b,h_b,N*sizeof(double),cudaMemcpyHostToDevice);
double copyEnd = clock();
double copyTime = (copyEnd-copyStart)/(double)CLOCKS_PER_SEC;
printf("It took %f seconds to copy the data to device. \n",copyTime);
printf("The efective bandwidth of the copy was: %f GB/s\n", inputMem/(1E9*copyTime));
//at this point the data is allocated and populated on the device
int Nthreads = atoi(argv[2]); //get the number of threads per block from command line
int Nblocks = (N+Nthreads-1)/Nthreads;
double deviceStart = clock();
kernelAddVectors <<<Nblocks ,Nthreads >>>(N, d_a, d_b, d_c);
cudaDeviceSynchronize();
double deviceEnd = clock();
double deviceTime = (deviceEnd-deviceStart)/(double) CLOCKS_PER_SEC;
printf("The device took %f seconds to add a and b \n", deviceTime);
printf("The efective bandwidth of the device was: %f GB/s\n", totalMem/(1E9*deviceTime));
printf("The device was %f times faster\n", hostTime/deviceTime);
copyStart = clock();
cudaMemcpy(h_c,d_c,N*sizeof(double),cudaMemcpyDeviceToHost);
copyEnd = clock();
copyTime = (copyEnd-copyStart)/(double) CLOCKS_PER_SEC;
printf("It took %f seconds to copy the data back to the host. \n",copyTime);
printf("The efective bandwidth of the copy was: %f GB/s\n", outMem/(1E9*copyTime));
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
free(h_a);
free(h_b);
free(h_c);
}
|
44
|
#include <stdio.h>
/*
 * Initialize the array values on the host.
 */
void init(int *a, int N)
{
int i;
for (i = 0; i < N; ++i)
{
a[i] = i;
}
}
/*
 * Double each element in parallel on the GPU.
 */
__global__
void doubleElements(int *a, int N)
{
int i;
i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N)
{
a[i] *= 2;
}
}
/*
 * Verify on the host that every element has been doubled.
 */
bool checkElementsAreDoubled(int *a, int N)
{
int i;
for (i = 0; i < N; ++i)
{
if (a[i] != i*2) return false;
}
return true;
}
int main()
{
int N = 100;
int *a;
size_t size = N * sizeof(int);
  /*
   * Refactor this memory allocation so that the pointer `a`
   * is usable on both the host and the device.
   */
a = (int *)malloc(size);
init(a, N);
size_t threads_per_block = 10;
size_t number_of_blocks = 10;
  /*
   * This launch will not work until the pointer `a` is available to the device.
   */
doubleElements<<<number_of_blocks, threads_per_block>>>(a, N);
cudaDeviceSynchronize();
bool areDoubled = checkElementsAreDoubled(a, N);
printf("All elements were doubled? %s\n", areDoubled ? "TRUE" : "FALSE");
  /*
   * Refactor to free memory that was allocated for access
   * by both the host and the device.
   */
free(a);
}
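/*
 * The refactor the comments above ask for, as a minimal sketch: unified
 * (managed) memory makes the same pointer valid on both host and device.
 *
 *   int *a;
 *   cudaMallocManaged(&a, size);   // replaces a = (int *)malloc(size);
 *   ...
 *   cudaFree(a);                   // replaces free(a);
 */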
|
45
|
/*
============================================================================
Name : LAB3.cu
Author : Kineibe
Version :
Copyright : Your copyright notice
 Description : 1-D heat equation solved by explicit time stepping (the project template's "CUDA compute reciprocals" naming survives below)
============================================================================
*/
#include <iostream>
#include <numeric>
#include <stdlib.h>
#include <string>
#include <fstream>
#include <sstream>
using namespace std;
static void CheckCudaErrorAux (const char *, unsigned, const char *, cudaError_t);
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
#define H_T 0.0001
#define H_X 0.5
#define TOTAL_TIME 10
#define EPSILON 0.001
#define RIGHT_COND 1
#define LEFT_COND 0
#define BLOCK_SIZE_AMOUNT 256
const double A = H_T / (H_X * H_X);
const double B = 2 * A + 1;
double countSum(int k, double* t, int size) {
if (k == 0) {
return t[k] * 1;
} else if (k == size - 1) {
return -1 * t[k - 1] / H_X + t[k] / H_X;
} else {
		return -1 * A * t[k - 1] + B * t[k] - A * t[k + 1]; // row of the tridiagonal system; dividing t[k] by B here broke the Jacobi fixed point
}
}
double iterationPart(double prev, double multiplier, double f, double sum) {
return prev + (f - sum) / multiplier;
}
void iteration(double* t_prev, int size, double* f, double* t_result) {
for (int i = 0; i < size; ++i) {
double a;
if (i == 0)
a = 1;
else if (i == size - 1)
a = 1 / H_X;
else
a = B;
double sum = countSum(i, t_prev, size);
double newT = iterationPart(t_prev[i], a, f[i], sum);
t_result[i] = newT;
}
}
bool condition(double* t_prev, double* t_result, int size) {
double result = 0;
	for (int i = 0; i < size; ++i) {
		double d = t_prev[i] - t_result[i];
		result += (d < 0 ? -d : d); // abs() here resolved to the int overload and truncated the doubles
	}
return result < EPSILON;
}
void iterationManager(double* t_prev, int size, double* f, double* t_target) {
bool check = true;
double* t_result = new double[size];
do {
iteration(t_prev, size, f, t_result);
check = condition(t_prev, t_result, size);
double* temp = t_result;
t_result = t_prev;
t_prev = temp;
} while(!check);
for (int i = 0; i < size; ++i) {
t_target[i] = t_prev[i];
}
delete[] t_result;
}
void printMas(double* arr, int size) {
for (int i = 0; i < size; ++i) {
cout << arr[i] << ' ';
}
cout << endl;
}
void model(int size) {
double* t = new double[size];
for (int i = 0; i < size; ++i) {
t[i] = 0;
}
double* t_next = new double[size];
double* f = new double[size];
f[0] = LEFT_COND;
f[size - 1] = RIGHT_COND;
// int iterationAmount = TOTAL_TIME / H_T;
int iterationAmount = 10;
for (int i = 0; i < iterationAmount; ++i) {
cout << "Iteration num " << i << endl;
for (int i = 1; i < size - 1; ++i) {
f[i] = t[i];
}
cout << "F array" << endl;
printMas(f, size);
iterationManager(t, size, f, t_next);
printMas(t_next, size);
double* temp = t_next;
t_next = t;
t = temp;
}
delete[] t_next;
delete[] f;
delete[] t;
}
/**
 * CUDA kernel that advances the 1-D heat equation by one explicit
 * finite-difference time step (the "reciprocal" name is template leftover)
 */
__global__ void reciprocalKernel(float *data, float *newData, unsigned vectorSize) {
unsigned idx = blockIdx.x*blockDim.x+threadIdx.x;
if (idx < vectorSize) {
if (idx == vectorSize - 1) {
newData[idx] = RIGHT_COND * H_T + data[idx];
} else if (idx == 0) {
newData[idx] = LEFT_COND;
} else {
newData[idx] = data[idx] + (data[idx - 1] - 2 * data[idx] + data[idx + 1]) * H_T / (H_X * H_X);
}
}
}
/**
* Host function that copies the data and launches the work on GPU
*/
void gpuReciprocal(float *data, unsigned size)
{
cudaEvent_t GPUstart, GPUstop;
float GPUtime = 0.0f;
float *rc = new float[size];
float *gpuOldData;
float *gpuNewData;
int iterationAmount = TOTAL_TIME / H_T;
static const int BLOCK_SIZE = BLOCK_SIZE_AMOUNT;
const int blockCount = 1000;
CUDA_CHECK_RETURN(cudaMalloc((void **)&gpuOldData, sizeof(float)*size));
CUDA_CHECK_RETURN(cudaMalloc((void **)&gpuNewData, sizeof(float)*size));
CUDA_CHECK_RETURN(cudaMemcpy(gpuOldData, data, sizeof(float)*size, cudaMemcpyHostToDevice));
cudaEventCreate(&GPUstart);
cudaEventCreate(&GPUstop);
for (int i = 0; i < iterationAmount; ++i) {
cudaEventRecord(GPUstart, 0);
if (i % 2 == 0) {
reciprocalKernel<<<blockCount, BLOCK_SIZE>>> (gpuOldData, gpuNewData, size);
cudaEventRecord(GPUstop, 0);
CUDA_CHECK_RETURN(cudaMemcpy(rc, gpuNewData, sizeof(float)*size, cudaMemcpyDeviceToHost));
} else {
reciprocalKernel<<<blockCount, BLOCK_SIZE>>> (gpuNewData, gpuOldData, size);
cudaEventRecord(GPUstop, 0);
CUDA_CHECK_RETURN(cudaMemcpy(rc, gpuOldData, sizeof(float)*size, cudaMemcpyDeviceToHost));
}
cudaEventSynchronize(GPUstop);
float temp;
cudaEventElapsedTime(&temp, GPUstart, GPUstop);
GPUtime += temp;
//
// for (int i = 0; i < size; ++i) {
// std::cout << "t[" << i << "] = " << rc[i] << std::endl;
// }
// std::cout << std::endl;
}
printf("GPU time : %.3f ms\n", GPUtime);
CUDA_CHECK_RETURN(cudaFree(gpuOldData));
CUDA_CHECK_RETURN(cudaFree(gpuNewData));
}
void initialize(float *data, unsigned size)
{
for (unsigned i = 0; i < size; ++i)
data[i] = 0;
}
void cpuIteration(float *data, float *newData, unsigned vectorSize) {
for (int idx = 0; idx < vectorSize; ++idx) {
if (idx == vectorSize - 1) {
newData[idx] = RIGHT_COND * H_T + data[idx];
} else if (idx == 0) {
newData[idx] = LEFT_COND;
} else {
newData[idx] = data[idx] + (data[idx - 1] - 2 * data[idx] + data[idx + 1]) * H_T / (H_X * H_X);
}
}
}
void cpuReciprocal(float *data, unsigned size)
{
float *rc = new float[size];
float *oldData = new float[size];
initialize(oldData, size); // start from zeroed data, as the GPU path does
float* result;
clock_t CPUstart, CPUstop;
float CPUtime = 0.0f;
int iterationAmount = TOTAL_TIME / H_T;
for (int i = 0; i < iterationAmount; ++i) {
CPUstart = clock();
if (i % 2 == 0) {
cpuIteration(oldData, rc, size);
result = rc;
} else {
cpuIteration(rc, oldData, size);
result = oldData;
}
CPUstop = clock();
CPUtime += 1000.*(CPUstop - CPUstart) / CLOCKS_PER_SEC;
//
// for (int i = 0; i < size; ++i) {
// std::cout << "t[" << i << "] = " << result[i] << std::endl;
// }
// std::cout << std::endl;
}
printf("CPU time : %.3f ms\n", CPUtime);
}
bool checkShodimost() {
return true;
}
int main(void)
{
static const int WORK_SIZE = 256000;
float *data = new float[WORK_SIZE];
model(5);
/* Free memory */
delete[] data;
return 0;
}
/**
* Check the return value of the CUDA runtime API call and exit
* the application if the call has failed.
*/
static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, cudaError_t err)
{
if (err == cudaSuccess)
return;
std::cerr << statement<<" returned " << cudaGetErrorString(err) << "("<<err<< ") at "<<file<<":"<<line << std::endl;
exit (1);
}
|
46
|
// includes, system
#include <cuda_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
// includes, kernels
#include "vector_reduction_kernel.cu"
// For simplicity, just to get the idea in this MP, we're fixing the problem size to 512 elements.
#define NUM_ELEMENTS 512*1
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
float computeOnDevice(float* h_data, int array_mem_size);
extern "C"
void computeGold( float* reference, float* idata, const unsigned int len);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
cudaSetDevice(0);
runTest( argc, argv);
return EXIT_SUCCESS;
}
////////////////////////////////////////////////////////////////////////////////
//! Run naive scan test
////////////////////////////////////////////////////////////////////////////////
void runTest( int argc, char** argv)
{
int num_elements = NUM_ELEMENTS;
const unsigned int array_mem_size = sizeof( float) * num_elements;
// Allocate host memory to store the input data
float* h_data = (float*) malloc( array_mem_size);
// initialize the input data on the host to be integer values
// between 0 and 1000
for( unsigned int i = 0; i < num_elements; ++i)
h_data[i] = floorf(1000*(rand()/(float)RAND_MAX));
// Function to compute the reference solution on CPU using a C sequential version of the algorithm
// It is written in the file "vector_reduction_gold.cpp". The Makefile compiles this file too.
float reference = 0.0f;
computeGold(&reference , h_data, num_elements);
// Function to compute the solution on GPU using a call to a CUDA kernel (see body below)
// The kernel is written in the file "vector_reduction_kernel.cu". The Makefile also compiles this file.
float result = computeOnDevice(h_data, num_elements);
// We can use an epsilon of 0 since values are integral and in a range that can be exactly represented
float epsilon = 0.0f;
unsigned int result_regtest = (abs(result - reference) <= epsilon);
printf( "Test %s\n", (1 == result_regtest) ? "CORRECTO: Coinciden los resultados de la CPU y la GPU" : "INCORRECTO: Los resultados calculados en paralelo en la GPU no coinciden con los obtenidos secuencialmente en la CPU");
printf( "device: %f host: %f\n", result, reference);
// cleanup memory
free( h_data);
}
// Function to call the CUDA kernel on the GPU.
// Takes h_data from the host, copies it to the device, sets up grid and
// thread dimensions, executes the kernel, and returns the reduced value.
float computeOnDevice(float* h_data, int num_elements)
{
float* d_data = NULL;
float result;
// Memory allocation on device side (size in bytes, not element count)
cudaMalloc((void**)&d_data, num_elements * sizeof(float));
// Copy from host memory to device memory
cudaMemcpy(d_data, h_data, num_elements * sizeof(float), cudaMemcpyHostToDevice);
int threads = (num_elements/2) + num_elements%2;
// Invoke the kernel
reduction<<<1,threads>>>(d_data,num_elements);
// Copy from device memory back to host memory
cudaMemcpy(&result, d_data, sizeof(float), cudaMemcpyDeviceToHost );
cudaFree(d_data);
cudaDeviceReset();
return result;
}
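/*
 * The file "vector_reduction_kernel.cu" is not part of this sample, so the
 * `reduction` kernel launched above is not shown. The sketch below is an
 * assumed implementation consistent with the launch configuration (a single
 * block of num_elements/2 threads, result left in d_data[0]); it is kept in
 * a comment because the real kernel is already pulled in by the #include.
 *
 * __global__ void reduction(float *g_data, int n)
 * {
 *     __shared__ float sdata[512]; // NUM_ELEMENTS
 *     int tid = threadIdx.x;
 *     // each thread folds two elements into shared memory
 *     sdata[tid] = g_data[tid] +
 *                  ((tid + blockDim.x < n) ? g_data[tid + blockDim.x] : 0.0f);
 *     __syncthreads();
 *     // tree reduction within the block
 *     for (int s = blockDim.x / 2; s > 0; s >>= 1) {
 *         if (tid < s) sdata[tid] += sdata[tid + s];
 *         __syncthreads();
 *     }
 *     if (tid == 0) g_data[0] = sdata[0];
 * }
 */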
|
47
|
/*
* Kernel for calulating the element-wise product of two matrices
* m, n --> dimensions of matrices A, B, C
*/
extern "C" {
__global__ void hadamard(int m, int n, double *A, int lda, double *B, int ldb, double *C, int ldc)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= m || j >= n) return;
C[i + j*ldc] = A[i + j*lda] * B[i + j*ldb];
}
}
/*
* Matrix sum, parameters as above
*/
extern "C" {
__global__ void matrix_sum(int m, int n, double *A, int lda, double *B, int ldb, double *C, int ldc)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= m || j >= n) return;
C[i + j*ldc] = A[i + j*lda] + B[i + j*ldb];
}
}
/*
* Copy that allows us to move around pieces of a matrix
*/
extern "C" {
__global__ void copy(int m, int n, double *dst, int lddst, double *src, int ldsrc)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= m || j >= n) return;
dst[i + j*lddst] = src[i + j*ldsrc];
}
}
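/*
 * Usage sketch (assumed, not part of the original file): these kernels index
 * column-major storage, so for full m x n matrices stored contiguously the
 * leading dimensions lda/ldb/ldc are simply m. A hypothetical launch of the
 * Hadamard product would be:
 *
 *   dim3 block(16, 16);
 *   dim3 grid((m + block.x - 1) / block.x, (n + block.y - 1) / block.y);
 *   hadamard<<<grid, block>>>(m, n, d_A, m, d_B, m, d_C, m);
 */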
|
48
|
#include <cuda_runtime.h>
#include <stdio.h>
#include <assert.h>
#include <iostream>
#include <stdlib.h>
#include <unistd.h>
extern "C" __global__
void memcpy_kernel(unsigned char* __restrict__ output, const unsigned char* __restrict__ input){
output += (blockIdx.x<<13)|(threadIdx.x<<2);
input += (blockIdx.x<<13)|(threadIdx.x<<2);
*((float* )&output[0]) = *((float* )&input[0]);
*((float* )&output[0x400]) = *((float* )&input[0x400]);
*((float* )&output[0x800]) = *((float* )&input[0x800]);
*((float* )&output[0xc00]) = *((float* )&input[0xc00]);
*((float* )&output[0x1000]) = *((float* )&input[0x1000]);
*((float* )&output[0x1400]) = *((float* )&input[0x1400]);
*((float* )&output[0x1800]) = *((float* )&input[0x1800]);
*((float* )&output[0x1c00]) = *((float* )&input[0x1c00]);
}
#define CALL(cmd) \
do {\
cudaError_t cuda_error = cmd;\
if (cuda_error != cudaSuccess) { \
std::cout<<"'"<<cudaGetErrorString(cuda_error)<<"'("<<cuda_error<<")"<<" at "<<__FILE__<<":"<<__LINE__<<std::endl;\
exit(EXIT_FAILURE);\
}\
} while(0)
#define WARMUP 20
#define LOOP 100
static inline void b2s(size_t bytes, char * str){
if(bytes<1024){
sprintf(str, "%luB", bytes);
}else if(bytes<(1024*1024)){
double b= (double)bytes/1024.0;
sprintf(str, "%.2fKB", b);
}else if(bytes<(1024*1024*1024)){
double b= (double)bytes/(1024.0*1024);
sprintf(str, "%.2fMB", b);
}else{
double b= (double)bytes/(1024.0*1024*1024);
sprintf(str, "%.2fGB", b);
}
}
static inline int env_get_int(const char * var_name, int def_v)
{
char * v = getenv(var_name);
int r = def_v;
if(v)
r = atoi(v);
return r;
}
static inline float get_rand(){
static int inited = 0;
float v;
if(!inited){ srand(time(NULL)); inited = 1; }
v = rand() % 1000 + 1;
return v / 1000.0f;
}
static inline int valid_vec(const float * vec_a, const float * vec_b, int num)
{
int err_cnt = 0;
for(int i=0;i<num;i++){
if(vec_a[i] != vec_b[i])
err_cnt++;
}
return err_cnt;
}
int main() {
cudaSetDevice(0);
unsigned char *A, *B;
const int dwords = env_get_int("DWORDS",64*3*224*224);
float * h_A = (float*)malloc(dwords*sizeof(float));
float * h_B = (float*)malloc(dwords*sizeof(float));
for (int i = 0; i < dwords; ++i) h_A[i] = get_rand();
CALL(cudaMalloc(&A, dwords * sizeof(float)));
CALL(cudaMalloc(&B, dwords * sizeof(float)));
CALL(cudaMemcpy(A, h_A, dwords * sizeof(float), cudaMemcpyHostToDevice));
// benchmark kernel
int bx = 256;
int gx = (dwords+255)>>11;
assert(dwords/(bx*8*4));
cudaEvent_t start_ev, stop_ev;
CALL(cudaEventCreate(&start_ev));
CALL(cudaEventCreate(&stop_ev));
for(int i=0;i<WARMUP;i++)
memcpy_kernel<<<gx, bx>>>(B, A);
CALL(cudaEventRecord(start_ev, 0));
for(int i=0;i<LOOP;i++)
memcpy_kernel<<<gx, bx>>>(B, A);
CALL(cudaEventRecord( stop_ev, 0 ));
CALL(cudaEventSynchronize(stop_ev));
float ms;
CALL(cudaEventElapsedTime(&ms,start_ev, stop_ev));
ms/=LOOP;
CALL(cudaMemcpy(h_B, B, dwords * sizeof(float), cudaMemcpyDeviceToHost));
//if(valid_vec(h_A, h_B, dwords) != 0) printf("not valid copy!\n");
sleep(1);
// benchmark memcpy api
for(int i=0;i<WARMUP;i++)
CALL(cudaMemcpy(B, A, dwords * sizeof(float), cudaMemcpyDeviceToDevice));
CALL(cudaEventRecord( start_ev, 0));
for(int i=0;i<LOOP;i++)
CALL(cudaMemcpy(B, A, dwords * sizeof(float), cudaMemcpyDeviceToDevice));
CALL(cudaEventRecord( stop_ev, 0 ));
CALL(cudaEventSynchronize(stop_ev));
float ms_api;
CALL(cudaEventElapsedTime(&ms_api,start_ev, stop_ev));
ms_api/=LOOP;
char str[64];
b2s(dwords*sizeof(float), str);
printf("%s, bandwidth_kernel:%.3f(GB/s), bandwidth_api:%.3f(GB/s)\n", str, ((double)dwords*sizeof(float)*2)/((double)ms/1000)/1000000000.0,
((double)dwords*sizeof(float)*2)/((double)ms_api/1000)/1000000000.0 );
free(h_A);
free(h_B);
CALL(cudaFree(A));
CALL(cudaFree(B));
}
|
49
|
# include<stdio.h>
__global__ void mykernel()
{
printf("hello world for GPU\n");
}
int main()
{
mykernel<<<1, 10>>>();
cudaDeviceSynchronize();
return 0;
}
|
50
|
#include "cuda_runtime.h" // A small gpu volumetric path tracer in 200 lines
#include "device_launch_parameters.h" // Jerry Guo (c) CGV TU Delft
#include "math_constants.h" // Based on smallvpt and cu-smallpt
#include "curand_kernel.h" // Compile: nvcc
#include <stdlib.h> // Usage: cusmallvpt [#SPP]
#include <stdio.h> // Result: image.ppm
enum Refl_t { DIFF, SPEC, REFR };
inline void HandleError(cudaError_t err) {
if (cudaSuccess != err) { printf("%s\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); }
}
struct Vec { // position, also color (r,g,b)
float x, y, z;
__host__ __device__ explicit Vec() { x = 0.f; y = 0.f; z = 0.f; }
__host__ __device__ explicit Vec(float v) { x = v; y = v; z = v; }
__host__ __device__ explicit Vec(float x_ = 0.f, float y_ = 0.f, float z_ = 0.f) { x = x_; y = y_; z = z_; }
Vec(const Vec& vec) noexcept = default;
Vec(Vec&& vec) noexcept = default;
~Vec() = default;
__device__ Vec& operator=(const Vec& b) { this->x = b.x; this->y = b.y; this->z = b.z; return *this; }
__device__ const Vec operator+(const Vec& b) const { return Vec(x + b.x, y + b.y, z + b.z); }
__device__ const Vec operator-(const Vec& b) const { return Vec(x - b.x, y - b.y, z - b.z); }
__host__ __device__ const Vec operator*(float b) const { return Vec(x * b, y * b, z * b); }
__device__ const Vec mult(const Vec& b) const { return Vec(x * b.x, y * b.y, z * b.z); }
__device__ float len() const { return sqrt(x * x + y * y + z * z); }
__device__ Vec& norm() { float inv_len = 1.f / len(); this->x *= inv_len; this->y *= inv_len; this->z *= inv_len; return *this; }
__device__ float dot(const Vec& b) const { return x * b.x + y * b.y + z * b.z; } // cross:
__device__ Vec operator%(Vec& b) { return Vec(y * b.z - z * b.y, z * b.x - x * b.z, x * b.y - y * b.x); }
__device__ Vec operator%(const Vec& b) { return Vec(y * b.z - z * b.y, z * b.x - x * b.z, x * b.y - y * b.x); }
};
__device__ inline float len(const Vec& v) { return sqrt(v.x*v.x + v.y*v.y + v.z*v.z); }
__device__ inline Vec norm(const Vec& v) { float inv_len = 1.f / len(v); return Vec(v.x * inv_len, v.y * inv_len, v.z * inv_len); }
struct Ray {
Vec o, d;
__host__ __device__ explicit Ray() : o(Vec(0.f, 0.f, 0.f)), d(Vec(0.f, 0.f, 0.f)) {}
__host__ __device__ explicit Ray(Vec o_, Vec d_) noexcept : o(o_), d(d_) {}
Ray(const Ray& ray) noexcept = default;
Ray(Ray&& ray) noexcept = default;
~Ray() = default;
__device__ Ray& operator=(const Ray& r) { this->o = r.o; this->d = r.d; return *this; }
};
struct Sphere {
float rad;
Vec p, e, c;
Refl_t refl;
__host__ __device__ explicit Sphere(float rad_, Vec p_, Vec e_, Vec c_, Refl_t refl_) :
rad(rad_), p(p_), e(e_), c(c_), refl(refl_) {}
__device__ float intersect(const Ray& r, float* tin = NULL, float* tout = NULL) const {
Vec op = p - r.o;
float t, eps = 1e-4, b = op.dot(r.d), det = b * b - op.dot(op) + rad * rad;
if (det < 0.f) return 0; else det = sqrt(det);
if (tin && tout) { *tin = (b - det <= 0.f) ? 0.f : b - det; *tout = b + det; }
return (t = b - det) > eps ? t : ((t = b + det) > eps ? t : 0.f);
}
};
__host__ __device__ inline float clamp(float x) { return x < 0.f ? 0.f : x>1.f ? 1.f : x; }
__host__ __device__ inline int toInt(float x) { return int(pow(clamp(x), 1.f / 2.2f) * 255.f + .5f); }
__device__ inline bool intersect(const Sphere* spheres, size_t n_sphere, const Ray& r, float& t, int& id, float tmax = 1e20) {
float d, inf = t = tmax;
for (int i = int(n_sphere); i--;) if ((d = spheres[i].intersect(r)) && d < t) { t = d; id = i; }
return t < inf;
}
__device__ inline float sampleSegment(float epsilon, float sigma, float smax) {
return -log(1.f - epsilon * (1.f - exp(-sigma * smax))) / sigma;
}
__device__ inline Vec sampleSphere(float e1, float e2) {
float z = 1.f - 2.f * e1, sint = sqrt(1.f - z * z);
return Vec(cos(2.f * CUDART_PI_F * e2) * sint, sin(2.f * CUDART_PI_F * e2) * sint, z);
}
__device__ inline Vec sampleHG(float g, float e1, float e2) {
float s = 1.f-2.f*e1,cost=(s+2.f*g*g*g*(-1.0+e1)*e1+g*g*s+2.f*g*(1.f-e1+e1*e1))/((1.f+g*s)*(1.f+g*s)),sint=sqrt(1.f-cost*cost);
return Vec(cos(2.f * CUDART_PI_F * e2) * sint, sin(2.f * CUDART_PI_F * e2) * sint, cost);
}
__device__ inline void generateOrthoBasis(Vec& u, Vec& v, Vec w) {
Vec coVec = w;
if (fabs(w.x) <= fabs(w.y))
if (fabs(w.x) <= fabs(w.z)) coVec = Vec(0.f, -w.z, w.y); else coVec = Vec(-w.y, w.x, 0.f);
else if (fabs(w.y) <= fabs(w.z)) coVec = Vec(-w.z, 0.f, w.x); else coVec = Vec(-w.y, w.x, 0.f);
coVec.norm(); u = w % coVec, v = w % u;
}
__device__ inline float scatter(const Ray& r, Ray* sRay, float tin, float tout, float& s, const float& sigma_s, curandState_t* rand_state) {
s = sampleSegment(curand_uniform(rand_state), sigma_s, tout - tin);
Vec x = r.o + r.d * tin + r.d * s;
Vec dir = sampleHG(-0.5f, curand_uniform(rand_state), curand_uniform(rand_state));
Vec u(0.f, 0.f, 0.f), v(0.f, 0.f, 0.f);
generateOrthoBasis(u, v, r.d);
dir = u * dir.x + v * dir.y + r.d * dir.z;
if (sRay) *sRay = Ray(x, dir);
return (1.0f - exp(-sigma_s * (tout - tin)));
}
__device__ Vec radiance(const Sphere* spheres, size_t n_sphere, const Ray& r, int _depth, curandState_t* rand_state) {
Ray ray = r;
Vec L(0.f, 0.f, 0.f);
Vec B(1.f, 1.f, 1.f);
int depth = _depth;
float tnear, tfar, scaleBy = 1.f, absorption = 1.f;
const Sphere homoMedium(300.f, Vec(50.f, 50.f, 80.f), Vec(0.f, 0.f, 0.f), Vec(0.f, 0.f, 0.f), DIFF);
const float sigma_s = 0.009f, sigma_a = 0.006f, sigma_t = sigma_s + sigma_a;
while (1) {
float t; // distance to intersection
int id = 0; // id of intersected object
if (homoMedium.intersect(ray, &tnear, &tfar) > 0) {
Ray sRay;
float s, ms = scatter(ray, &sRay, tnear, tfar, s, sigma_s, rand_state), prob_s = ms;
scaleBy = 1.f / (1.f - prob_s);
if (curand_uniform(rand_state) <= prob_s) {// Sample surface or volume?
if (!intersect(spheres, n_sphere, ray, t, id, tnear + s)) {
B = B * ms * (1.f - prob_s); ray = sRay; ++depth; continue;
}
scaleBy = 1.f;
} else if (!intersect(spheres, n_sphere, ray, t, id)) return L;
if (t >= tnear) {
float dist = (t > tfar ? tfar - tnear : t - tnear);
absorption = exp(-sigma_t * dist);
}
} else if (!intersect(spheres, n_sphere, ray, t, id)) return L;
const Sphere& obj = spheres[id];
Vec x = r.o + r.d * t, n = Vec(x - obj.p).norm(), nl = n.dot(ray.d) < 0 ? n : n * -1, f = obj.c, Le = obj.e;
float p = f.x > f.y && f.x > f.z ? f.x : f.y > f.z ? f.y : f.z;
if (++depth > 5) if (curand_uniform(rand_state) < p) B = B * (1 / p); else return L;
if (n.dot(nl) > 0 || obj.refl != REFR) { B = B * absorption; Le = obj.e * absorption; } else scaleBy = 1.f;
// Accumulate luminance and throughtput
L = L + B.mult(Le); B = B.mult(f * scaleBy); ++depth;
switch (obj.refl) {
case SPEC: { ray = Ray(x, r.d - n * 2 * n.dot(r.d)); break; }
case REFR: {
ray = Ray(x, r.d - n * 2 * n.dot(r.d)); bool into = n.dot(nl) > 0;
float nc = 1, nt = 1.5, nnt = into ? nc / nt : nt / nc, ddn = r.d.dot(nl), cos2t;
if ((cos2t = 1 - nnt * nnt * (1 - ddn * ddn)) < 0) break;
Vec tdir = Vec(r.d*nnt-n*((into?1:-1)*(ddn*nnt+sqrt(cos2t)))).norm();
float a=nt-nc,b=nt+nc,R0=a*a/(b*b),c = 1 - (into ? -ddn : tdir.dot(n));
float Re=R0+(1-R0)*c*c*c*c*c, Tr=1-Re,P=.25+.5*Re,RP=Re/P,TP = Tr / (1 - P);
if (curand_uniform(rand_state) < P) B=B*RP; else { ray=Ray(x,tdir); B=B*TP; }
break;
}
default: {
float r1=2*CUDART_PI_F*curand_uniform(rand_state),r2=curand_uniform(rand_state),r2s = sqrt(r2);
Vec w = nl, u = Vec((fabs(w.x) > .1 ? Vec(0.f, 1.f, 0.f) : Vec(1.f, 1.f, 1.f)) % w).norm(), v = w % u;
Vec d = Vec(u * cos(r1) * r2s + v * sin(r1) * r2s + w * sqrt(1 - r2)).norm();
ray = Ray(x, d);
}
}
}
}
__global__ void render_kernel(const Sphere* spheres, const size_t n_sphere, Vec* Ls, size_t w, size_t h, int spp) {
const size_t x = threadIdx.x + blockIdx.x * blockDim.x;
const size_t y = threadIdx.y + blockIdx.y * blockDim.y;
const size_t offset = x + y * blockDim.x * gridDim.x;
const float inv_spp = 1.0f / float(spp);
if (x >= w || y >= h) return;
curandState rand_state; curand_init(offset, 0u, 0u, &rand_state);
Ray cam(Vec(50.f, 52.f, 285.f), norm(Vec(0.f, -0.042612f, -1.f)));
const float fov = 0.5135f; Vec cx = Vec(w * fov / h, 0.0f, 0.0f);
Vec cy = norm(Vec(cx % cam.d)) * fov; size_t i = (h - 1u - y) * w + x;
for (size_t sy = 0u; sy < 2u; ++sy) for (size_t sx = 0u; sx < 2u; ++sx) {
Vec L(0.f, 0.f, 0.f);
for (size_t s = 0u; s < spp; ++s) {
float u1 = 2.f * curand_uniform(&rand_state);
float u2 = 2.f * curand_uniform(&rand_state);
float dx = (u1 < 1.f) ? sqrt(u1) - 1.f : 1.f - sqrt(2.f - u1);
float dy = (u2 < 1.f) ? sqrt(u2) - 1.f : 1.f - sqrt(2.f - u2);
Vec d = cx * (((sx+0.5+dx)*0.5+x)/w-0.5)+cy*(((sy+0.5+dy)*0.5+y)/h-0.5)+cam.d;
Ray pRay(cam.o + d * 140.f, d.norm());
L = L + radiance(spheres, n_sphere, pRay, 0, &rand_state) * inv_spp;
}
Ls[i] = Ls[i] + Vec(0.25f * clamp(L.x), 0.25f * clamp(L.y), 0.25f * clamp(L.z));
}
}
cudaError_t Render(int w, int h, unsigned int spp = 100) {
const size_t n_sphere = 4;
Sphere spheres[n_sphere] = {//Scene: radius, position, emission, color, material
Sphere(26.5f, Vec(27.f, 18.5f, 78.f),Vec(0.f, 0.f, 0.f),Vec(1.f,1.f,1.f)*.75f,SPEC),//Mirr
Sphere(12.f, Vec(70.f, 43.f, 78.f), Vec(0.f, 0.f, 0.f), Vec(0.27f,0.8f,0.8f), REFR),//Glas
Sphere(8.f, Vec(55.f, 87.f, 78.f), Vec(0.f, 0.f, 0.f), Vec(1,1,1) * .75f, DIFF), //Lite
Sphere(4.f, Vec(55.f, 80.f, 78.f), Vec(10.f,10.f,10.f), Vec(0.f, 0.f, 0.f), DIFF) //Lite
};
HandleError(cudaSetDevice(0));
const size_t n_pixels = size_t(w * h);
Sphere* spheres_device;
HandleError(cudaMalloc((void**)&spheres_device, sizeof(spheres)));
HandleError(cudaMemcpy(spheres_device, spheres, sizeof(spheres), cudaMemcpyHostToDevice));
Vec* film_device;
HandleError(cudaMalloc((void**)&film_device, sizeof(Vec) * n_pixels));
HandleError(cudaMemset(film_device, 0, sizeof(Vec) * n_pixels));
const dim3 nblocks(w / 16, h / 16);
const dim3 nthreads(16, 16);
render_kernel <<< nblocks, nthreads >>> (spheres_device, n_sphere, film_device, w, h, spp);
Vec* film = (Vec*)malloc(n_pixels * sizeof(Vec));
HandleError(cudaMemcpy(film, film_device, sizeof(Vec) * n_pixels, cudaMemcpyDeviceToHost));
HandleError(cudaFree(spheres_device));
HandleError(cudaFree(film_device));
FILE* f = fopen("image.ppm", "w"); // Write image to PPM file.
fprintf(f, "P3\n%d %d\n%d\n", w, h, 255);
for (int i=0;i<w*h;i++) fprintf(f,"%d %d %d ",toInt(film[i].x),toInt(film[i].y),toInt(film[i].z));
free(film); return cudaSuccess;
}
int main(int argc, char* argv[]) {
int w = 1024, h = 768, spp = argc == 2 ? atoi(argv[1]) / 4 : 100;
Render(w, h, spp); return 0;
}
|
51
|
#include "includes.h"
__global__ void addVectors( float *d_A, float *d_B, float *d_C, int size)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size)
{
d_C[i] = d_A[i] + d_B[i];
}
}
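/*
 * Host-side usage sketch (assumed; "includes.h" is not shown in this sample):
 * one thread per element, with the usual round-up grid size.
 *
 *   int size = 1 << 20;
 *   float *d_A, *d_B, *d_C;
 *   cudaMalloc(&d_A, size * sizeof(float));
 *   cudaMalloc(&d_B, size * sizeof(float));
 *   cudaMalloc(&d_C, size * sizeof(float));
 *   // ...copy inputs into d_A and d_B with cudaMemcpy...
 *   int block = 256;
 *   int grid = (size + block - 1) / block;
 *   addVectors<<<grid, block>>>(d_A, d_B, d_C, size);
 *   cudaDeviceSynchronize();
 */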
|
52
|
extern "C"
__global__ void calcDir(// Dots props
float* pX,
float* pY,
float* pZ,
//Tree specs
// per Block
int* dotIndexes,
int* stBl0, int* nPtBl0,
int* stBl1, int* nPtBl1,
float* avgPX,
float* avgPY,
float* avgPZ,
// per GPU Block
int* idBl, int* offsBl,
// output values, per block
int* idFurthest, float* dMax
/*float* pX,float* pY,float* pZ,
float* avgPX, float* avgPY, float* avgPZ,
int* lockBlock, float* dMax,
int* idFurthest,
int* id_in, int* id_bl_in*/
)
{
extern __shared__ int array[];
float* posAVGBlock = (float*)&array[5];
float* dMaxPt = (float*)&posAVGBlock[3];
int* iMaxPt = (int*)&dMaxPt[blockDim.x];
// Fetch block data
int iGPUBlock=blockIdx.x;
int iThread=threadIdx.x;
int idBloc;
if (iThread==0) {
idBloc=idBl[iGPUBlock];
array[0]=offsBl[iGPUBlock];
array[1]=stBl0[idBloc];
array[2]=nPtBl0[idBloc];
array[3]=stBl1[idBloc];
array[4]=nPtBl1[idBloc];
posAVGBlock[0]=avgPX[idBloc];
posAVGBlock[1]=avgPY[idBloc];
posAVGBlock[2]=avgPZ[idBloc];
}
__syncthreads();
int offsPt = array[0];
int startIndexBl0 = array[1];
int nPtBlock0 = array[2];
int startIndexBl1 = array[3]; // useless in fact
int nPtBlock1 = array[4];
int nPts = nPtBlock0 + nPtBlock1;
int ptToBeComputed = iThread+offsPt;
float mx=posAVGBlock[0]; // these are float averages; int would truncate
float my=posAVGBlock[1];
float mz=posAVGBlock[2];
if (ptToBeComputed<nPts) {
int id_pt=dotIndexes[startIndexBl0+ptToBeComputed];
float xval=(pX[id_pt]-mx);
float yval=(pY[id_pt]-my);
float zval=(pZ[id_pt]-mz);
dMaxPt[iThread]=xval*xval+yval*yval+zval*zval;
iMaxPt[iThread]=id_pt;
} else {
dMaxPt[iThread]=-1;
iMaxPt[iThread]=-1;
}
__syncthreads();
// All data copied to shared mem; reduce to the furthest point.
// (Assumed completion: the original kernel ended here without writing its
// outputs. Requires blockDim.x to be a power of two.)
for (int s = blockDim.x / 2; s > 0; s >>= 1) {
if (iThread < s && dMaxPt[iThread + s] > dMaxPt[iThread]) {
dMaxPt[iThread] = dMaxPt[iThread + s];
iMaxPt[iThread] = iMaxPt[iThread + s];
}
__syncthreads();
}
if (iThread == 0) {
idFurthest[iGPUBlock] = iMaxPt[0];
dMax[iGPUBlock] = dMaxPt[0];
}
}
|
53
|
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
__global__ void vecAdd(float* h_a, float* h_b, float* h_c, int n)
{
int id = blockIdx.x*blockDim.x+threadIdx.x;
//check if it is in bound
if(id<n)
h_c[id] = h_a[id]+ h_b[id];
}
int main(int argc, char* argv[])
{
//size of vectors
int n= 1000;
float *h_a;//ip
float *h_b;//ip
float *h_c;//op
float *d_a;//ip
float *d_b;//ip
float *d_c;//op
int size = n * sizeof(float);
//allocating memory on host
h_a = (float*)malloc(size);
h_b = (float*)malloc(size);
h_c = (float*)malloc(size);
//allocating memory for each vector on GPU
cudaMalloc((void **) &d_a, size);
cudaMalloc((void **) &d_b, size);
cudaMalloc((void **) &d_c, size);
//initialize vectors on host
int i;
for(i = 0; i<n; i++)
{
h_a[i] = sin(i)*sin(i);
h_b[i] = cos(i)*cos(i);
}
/*printf("h_a: \n");
for(i=0; i<n; i++)
printf("%.1f\n", h_a[i]);
printf("\n");
printf("h_b: \n");
for(i=0; i<n; i++)
printf("%.1f\n", h_b[i]);
printf("\n");
*/
//copy host vectors to device
cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, size, cudaMemcpyHostToDevice);
int threadPerBlocks, blockCount;
//block size
threadPerBlocks = 1024;
//grid size
blockCount = (int)ceil((float)n/threadPerBlocks);
//executing kernel (grid size first, then block size)
vecAdd<<<blockCount, threadPerBlocks>>>(d_a, d_b, d_c, n);
//copy array back to host
cudaMemcpy(h_c, d_c, size, cudaMemcpyDeviceToHost);
float sum = 0;
for(i=0; i<n; i++)
sum += h_c[i];
printf("Final result is: %f\n", sum/n);
//release device memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
//releasing host memory
free(h_a);
free(h_b);
free(h_c);
return 0;
}
|
54
|
#include "includes.h"
__global__ void vectorReduce(const float *global_input_data, float *global_output_data, const int numElements)
{
__shared__ float sdata[10];
__shared__ int sindice[10];
// Note: the fixed shared-array size means this kernel assumes blockDim.x <= 10,
// and the halving loop below further assumes a power-of-two block size.
int tid = threadIdx.x;
int i = blockIdx.x * (blockDim.x ) + threadIdx.x;
sdata[tid] = global_input_data[i];
sindice[tid] = tid;
__syncthreads();
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (tid < s ) {
if (sdata[tid] > sdata[tid + s]) {
sdata[tid] = sdata[tid + s];
sindice[tid] = sindice[tid + s];
}
}
// barrier must sit outside the divergent branch so every thread reaches it
__syncthreads();
}
__syncthreads();
if (tid == 0) {
global_output_data[0] = sdata[0];
}
if (tid == 1) {
global_output_data[1] = sindice[0];
}
}
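/*
 * Usage sketch (assumed): given the fixed shared-array size, the kernel is
 * meant for a single block of at most 10 threads; output[0] receives the
 * minimum value and output[1] the index of the winning thread.
 *
 *   vectorReduce<<<1, 8>>>(d_in, d_out, 8);
 *   cudaMemcpy(h_out, d_out, 2 * sizeof(float), cudaMemcpyDeviceToHost);
 */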
|
55
|
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
__global__ void convertToFloat(float *d_out, int *d_in, int n){
// grid-wide index: 2000 elements exceed the 1024-thread-per-block limit,
// so the launch below uses multiple blocks
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) d_out[i] = (float)d_in[i];
}
double time_diff(struct timeval x , struct timeval y){
double x_ms , y_ms , diff;
x_ms = (double)x.tv_sec*1000000 + (double)x.tv_usec;
y_ms = (double)y.tv_sec*1000000 + (double)y.tv_usec;
diff = (double)y_ms - (double)x_ms;
return diff;
}
int main(int argc, char ** argv) {
const int lenInts = 2000; // const so the stack arrays below have a constant size
int ints[2000] = {4, 9, 6, 7, 7, 5, 7, 0, 6, 0, 0, 9, 7, 8, 1, 2, 7, 7, 3, 9, 4, 5, 9, 3, 6, 7, 5, 6, 0, 4, 0, 5, 4, 6, 9, 1, 3, 4, 2, 9, 5, 6, 2, 5, 7, 1, 5, 8, 9, 8, 9, 9, 2, 7, 5, 0, 7, 6, 2, 8, 7, 0, 1, 1, 2, 5, 9, 2, 8, 7, 0, 3, 9, 2, 8, 6, 0, 4, 3, 6, 4, 9, 3, 8, 9, 4, 0, 6, 1, 6, 7, 0, 8, 6, 5, 2, 1, 8, 9, 3, 0, 4, 4, 5, 6, 0, 0, 0, 4, 5, 1, 1, 0, 8, 7, 8, 9, 1, 3, 0, 3, 3, 8, 1, 0, 4, 6, 0, 7, 3, 5, 3, 5, 3, 7, 6, 2, 7, 9, 7, 9, 6, 9, 0, 1, 0, 5, 0, 7, 2, 8, 3, 4, 0, 6, 1, 6, 3, 5, 4, 0, 6, 1, 3, 1, 9, 5, 4, 3, 3, 9, 8, 0, 6, 6, 6, 7, 2, 8, 5, 6, 8, 8, 1, 5, 0, 7, 0, 6, 7, 9, 4, 2, 2, 6, 2, 0, 9, 3, 6, 5, 0, 3, 3, 8, 2, 2, 9, 1, 3, 4, 5, 9, 8, 4, 7, 2, 1, 7, 2, 3, 3, 3, 4, 3, 6, 5, 5, 0, 6, 5, 0, 1, 4, 0, 2, 9, 7, 3, 2, 6, 3, 0, 7, 7, 1, 1, 4, 2, 3, 0, 7, 9, 7, 8, 0, 0, 5, 0, 6, 4, 7, 5, 4, 1, 3, 3, 5, 0, 1, 2, 9, 4, 4, 2, 8, 8, 7, 1, 2, 9, 4, 6, 6, 2, 0, 4, 8, 6, 1, 7, 9, 1, 4, 5, 9, 8, 3, 0, 6, 2, 8, 3, 0, 6, 2, 6, 1, 3, 6, 0, 2, 9, 9, 1, 5, 0, 8, 7, 4, 5, 4, 3, 8, 0, 2, 2, 0, 1, 0, 5, 3, 6, 4, 4, 9, 0, 7, 5, 7, 1, 9, 0, 5, 2, 9, 6, 2, 7, 9, 0, 8, 0, 8, 9, 7, 8, 8, 6, 8, 1, 0, 3, 5, 3, 0, 8, 3, 2, 1, 2, 3, 3, 9, 9, 4, 8, 6, 1, 1, 0, 7, 1, 9, 0, 4, 1, 3, 7, 0, 8, 3, 7, 2, 0, 8, 9, 1, 6, 1, 0, 5, 2, 1, 5, 5, 7, 7, 2, 8, 5, 1, 5, 9, 7, 0, 9, 6, 4, 6, 3, 1, 9, 6, 4, 7, 2, 4, 2, 2, 2, 7, 9, 1, 0, 5, 9, 0, 6, 1, 9, 5, 5, 2, 9, 9, 3, 3, 7, 7, 9, 5, 5, 1, 7, 6, 0, 1, 7, 0, 7, 3, 1, 4, 1, 9, 4, 0, 0, 5, 1, 3, 7, 8, 7, 3, 7, 8, 8, 8, 9, 0, 1, 0, 9, 5, 3, 5, 0, 1, 2, 4, 7, 0, 9, 9, 3, 2, 6, 4, 7, 0, 7, 8, 1, 3, 3, 2, 6, 0, 2, 2, 0, 6, 0, 4, 5, 1, 4, 7, 4, 3, 6, 5, 3, 8, 3, 3, 7, 5, 4, 9, 4, 4, 2, 1, 9, 7, 9, 1, 4, 4, 3, 5, 9, 2, 0, 1, 1, 3, 5, 1, 0, 0, 8, 8, 0, 6, 9, 9, 5, 2, 5, 6, 0, 7, 7, 4, 5, 0, 7, 0, 3, 2, 4, 2, 6, 7, 7, 5, 6, 4, 3, 2, 5, 3, 2, 5, 8, 0, 1, 2, 1, 4, 3, 4, 7, 4, 2, 2, 8, 5, 4, 1, 4, 2, 1, 4, 7, 1, 4, 7, 0, 1, 3, 0, 2, 7, 9, 2, 8, 7, 9, 7, 9, 2, 1, 7, 8, 0, 6, 9, 5, 8, 7, 0, 5, 2, 3, 2, 3, 1, 7, 8, 9, 7, 2, 6, 3, 1, 3, 2, 9, 5, 8, 2, 4, 1, 3, 5, 4, 4, 0, 9, 1, 6, 7, 0, 3, 9, 4, 7, 7, 5, 4, 4, 9, 6, 2, 2, 3, 9, 3, 1, 2, 3, 5, 1, 1, 2, 1, 7, 4, 3, 3, 7, 4, 8, 1, 4, 2, 0, 0, 3, 2, 2, 5, 7, 3, 0, 7, 9, 9, 0, 7, 1, 0, 0, 9, 5, 9, 6, 7, 4, 5, 2, 9, 8, 4, 4, 1, 6, 6, 3, 9, 1, 4, 7, 4, 6, 2, 5, 1, 8, 3, 2, 5, 8, 3, 3, 4, 1, 2, 4, 0, 9, 9, 0, 1, 4, 4, 0, 2, 2, 7, 8, 7, 3, 5, 3, 1, 5, 1, 1, 8, 8, 2, 6, 6, 7, 9, 1, 6, 4, 2, 6, 7, 3, 9, 7, 1, 2, 1, 7, 1, 7, 7, 2, 7, 2, 5, 7, 6, 8, 7, 2, 8, 1, 8, 6, 5, 1, 2, 4, 0, 4, 4, 3, 7, 6, 7, 1, 8, 7, 5, 2, 3, 5, 4, 8, 7, 8, 8, 7, 0, 5, 9, 2, 7, 7, 8, 6, 4, 3, 5, 7, 0, 0, 9, 5, 5, 4, 8, 1, 9, 4, 2, 6, 6, 3, 3, 7, 6, 1, 5, 1, 5, 8, 7, 8, 5, 2, 4, 4, 9, 4, 5, 6, 1, 0, 5, 4, 8, 2, 1, 7, 5, 5, 5, 8, 0, 8, 7, 4, 9, 1, 5, 9, 3, 2, 7, 6, 6, 2, 4, 9, 2, 7, 2, 8, 4, 1, 5, 1, 1, 0, 6, 1, 3, 0, 7, 1, 4, 0, 3, 3, 6, 1, 0, 3, 6, 2, 7, 5, 2, 0, 9, 1, 8, 8, 9, 1, 3, 9, 4, 4, 1, 8, 3, 9, 5, 3, 9, 4, 1, 1, 9, 2, 9, 2, 4, 3, 4, 7, 1, 0, 9, 4, 4, 6, 2, 8, 7, 3, 7, 9, 5, 7, 4, 6, 3, 3, 4, 5, 5, 6, 5, 1, 6, 8, 6, 2, 8, 1, 6, 9, 6, 0, 3, 6,4, 9, 6, 7, 7, 5, 7, 0, 6, 0, 0, 9, 7, 8, 1, 2, 7, 7, 3, 9, 4, 5, 9, 3, 6, 7, 5, 6, 0, 4, 0, 5, 4, 6, 9, 1, 3, 4, 2, 9, 5, 6, 2, 5, 7, 1, 5, 8, 9, 8, 9, 9, 2, 7, 5, 0, 7, 6, 2, 8, 7, 0, 1, 1, 2, 5, 9, 2, 8, 7, 0, 3, 9, 2, 8, 6, 0, 4, 3, 6, 4, 9, 3, 8, 9, 4, 0, 6, 1, 6, 7, 0, 8, 6, 5, 2, 1, 8, 9, 3, 0, 4, 4, 5, 6, 0, 0, 0, 4, 5, 1, 1, 0, 8, 7, 8, 9, 1, 3, 0, 3, 3, 8, 1, 0, 4, 6, 0, 7, 3, 5, 3, 5, 3, 7, 6, 2, 7, 9, 7, 9, 6, 9, 0, 1, 0, 5, 0, 7, 2, 8, 3, 4, 0, 6, 1, 6, 3, 5, 4, 0, 6, 1, 3, 1, 9, 5, 4, 3, 3, 9, 8, 0, 6, 6, 6, 7, 2, 8, 
5, 6, 8, 8, 1, 5, 0, 7, 0, 6, 7, 9, 4, 2, 2, 6, 2, 0, 9, 3, 6, 5, 0, 3, 3, 8, 2, 2, 9, 1, 3, 4, 5, 9, 8, 4, 7, 2, 1, 7, 2, 3, 3, 3, 4, 3, 6, 5, 5, 0, 6, 5, 0, 1, 4, 0, 2, 9, 7, 3, 2, 6, 3, 0, 7, 7, 1, 1, 4, 2, 3, 0, 7, 9, 7, 8, 0, 0, 5, 0, 6, 4, 7, 5, 4, 1, 3, 3, 5, 0, 1, 2, 9, 4, 4, 2, 8, 8, 7, 1, 2, 9, 4, 6, 6, 2, 0, 4, 8, 6, 1, 7, 9, 1, 4, 5, 9, 8, 3, 0, 6, 2, 8, 3, 0, 6, 2, 6, 1, 3, 6, 0, 2, 9, 9, 1, 5, 0, 8, 7, 4, 5, 4, 3, 8, 0, 2, 2, 0, 1, 0, 5, 3, 6, 4, 4, 9, 0, 7, 5, 7, 1, 9, 0, 5, 2, 9, 6, 2, 7, 9, 0, 8, 0, 8, 9, 7, 8, 8, 6, 8, 1, 0, 3, 5, 3, 0, 8, 3, 2, 1, 2, 3, 3, 9, 9, 4, 8, 6, 1, 1, 0, 7, 1, 9, 0, 4, 1, 3, 7, 0, 8, 3, 7, 2, 0, 8, 9, 1, 6, 1, 0, 5, 2, 1, 5, 5, 7, 7, 2, 8, 5, 1, 5, 9, 7, 0, 9, 6, 4, 6, 3, 1, 9, 6, 4, 7, 2, 4, 2, 2, 2, 7, 9, 1, 0, 5, 9, 0, 6, 1, 9, 5, 5, 2, 9, 9, 3, 3, 7, 7, 9, 5, 5, 1, 7, 6, 0, 1, 7, 0, 7, 3, 1, 4, 1, 9, 4, 0, 0, 5, 1, 3, 7, 8, 7, 3, 7, 8, 8, 8, 9, 0, 1, 0, 9, 5, 3, 5, 0, 1, 2, 4, 7, 0, 9, 9, 3, 2, 6, 4, 7, 0, 7, 8, 1, 3, 3, 2, 6, 0, 2, 2, 0, 6, 0, 4, 5, 1, 4, 7, 4, 3, 6, 5, 3, 8, 3, 3, 7, 5, 4, 9, 4, 4, 2, 1, 9, 7, 9, 1, 4, 4, 3, 5, 9, 2, 0, 1, 1, 3, 5, 1, 0, 0, 8, 8, 0, 6, 9, 9, 5, 2, 5, 6, 0, 7, 7, 4, 5, 0, 7, 0, 3, 2, 4, 2, 6, 7, 7, 5, 6, 4, 3, 2, 5, 3, 2, 5, 8, 0, 1, 2, 1, 4, 3, 4, 7, 4, 2, 2, 8, 5, 4, 1, 4, 2, 1, 4, 7, 1, 4, 7, 0, 1, 3, 0, 2, 7, 9, 2, 8, 7, 9, 7, 9, 2, 1, 7, 8, 0, 6, 9, 5, 8, 7, 0, 5, 2, 3, 2, 3, 1, 7, 8, 9, 7, 2, 6, 3, 1, 3, 2, 9, 5, 8, 2, 4, 1, 3, 5, 4, 4, 0, 9, 1, 6, 7, 0, 3, 9, 4, 7, 7, 5, 4, 4, 9, 6, 2, 2, 3, 9, 3, 1, 2, 3, 5, 1, 1, 2, 1, 7, 4, 3, 3, 7, 4, 8, 1, 4, 2, 0, 0, 3, 2, 2, 5, 7, 3, 0, 7, 9, 9, 0, 7, 1, 0, 0, 9, 5, 9, 6, 7, 4, 5, 2, 9, 8, 4, 4, 1, 6, 6, 3, 9, 1, 4, 7, 4, 6, 2, 5, 1, 8, 3, 2, 5, 8, 3, 3, 4, 1, 2, 4, 0, 9, 9, 0, 1, 4, 4, 0, 2, 2, 7, 8, 7, 3, 5, 3, 1, 5, 1, 1, 8, 8, 2, 6, 6, 7, 9, 1, 6, 4, 2, 6, 7, 3, 9, 7, 1, 2, 1, 7, 1, 7, 7, 2, 7, 2, 5, 7, 6, 8, 7, 2, 8, 1, 8, 6, 5, 1, 2, 4, 0, 4, 4, 3, 7, 6, 7, 1, 8, 7, 5, 2, 3, 5, 4, 8, 7, 8, 8, 7, 0, 5, 9, 2, 7, 7, 8, 6, 4, 3, 5, 7, 0, 0, 9, 5, 5, 4, 8, 1, 9, 4, 2, 6, 6, 3, 3, 7, 6, 1, 5, 1, 5, 8, 7, 8, 5, 2, 4, 4, 9, 4, 5, 6, 1, 0, 5, 4, 8, 2, 1, 7, 5, 5, 5, 8, 0, 8, 7, 4, 9, 1, 5, 9, 3, 2, 7, 6, 6, 2, 4, 9, 2, 7, 2, 8, 4, 1, 5, 1, 1, 0, 6, 1, 3, 0, 7, 1, 4, 0, 3, 3, 6, 1, 0, 3, 6, 2, 7, 5, 2, 0, 9, 1, 8, 8, 9, 1, 3, 9, 4, 4, 1, 8, 3, 9, 5, 3, 9, 4, 1, 1, 9, 2, 9, 2, 4, 3, 4, 7, 1, 0, 9, 4, 4, 6, 2, 8, 7, 3, 7, 9, 5, 7, 4, 6, 3, 3, 4, 5, 5, 6, 5, 1, 6, 8, 6, 2, 8, 1, 6, 9, 6, 0, 3, 6};
float h_intsAsFloats[lenInts];
float *d_intsAsFloats;
int * d_ints;
float serial_intsAsFloats[lenInts];
struct timeval start, before , after;
gettimeofday(&before , NULL);
for (int i = 0; i < lenInts; i++){
serial_intsAsFloats[i] = (float) ints[i];
}
gettimeofday(&after , NULL);
printf("Serial time : %.0lf us\n\n" , time_diff(before , after) );
gettimeofday(&before , NULL);
start = before; // total parallel time is measured from here
cudaMalloc((void **) &d_intsAsFloats, lenInts*sizeof(float));
gettimeofday(&after , NULL);
printf("Parallel cudaMalloc : %.0lf us\n" , time_diff(before , after) );
gettimeofday(&before , NULL);
cudaMalloc((void **) &d_ints, lenInts*sizeof(int));
gettimeofday(&after , NULL);
printf("Parallel cudaMalloc : %.0lf us\n" , time_diff(before , after) );
gettimeofday(&before , NULL);
cudaMemcpy(d_ints, ints, lenInts*sizeof(int), cudaMemcpyHostToDevice);
gettimeofday(&after , NULL);
printf("Parallel cudaMemcpy : %.0lf us\n" , time_diff(before , after) );
gettimeofday(&before , NULL);
convertToFloat<<<(lenInts + 255)/256, 256>>>(d_intsAsFloats, d_ints, lenInts);
gettimeofday(&after , NULL);
printf("Parallel calling kernal : %.0lf us\n" , time_diff(before , after) );
gettimeofday(&before , NULL);
cudaMemcpy(h_intsAsFloats, d_intsAsFloats, lenInts*sizeof(float), cudaMemcpyDeviceToHost);
gettimeofday(&after , NULL);
printf("Parallel cudaMemcpy : %.0lf us\n" , time_diff(before , after) );
gettimeofday(&before , NULL);
cudaFree(d_ints);
gettimeofday(&after , NULL);
printf("Parallel cudaFree : %.0lf us\n" , time_diff(before , after) );
gettimeofday(&before , NULL);
cudaFree(d_intsAsFloats);
gettimeofday(&after , NULL);
printf("Parallel cudaFree : %.0lf us\n" , time_diff(before , after) );
printf("Parallel total: %.0lf us\n" , time_diff(start , after) );
return 0;
}
|
56
|
#include <stdio.h>
// Number of threads
#define NT 1024
// Structure to hold the 2D Points
typedef struct
{
double x;
double y;
}
point;
// Structure to store the metric center result
typedef struct
{
double distance;
int pointIndex;
}
result;
// Function to calculate distance between two points
__device__ double pointDistance(point *aPoint, point *bPoint)
{
double distance;
distance = sqrt(((aPoint->x - bPoint->x) * (aPoint->x - bPoint->x)) + ((aPoint->y - bPoint->y) * (aPoint->y - bPoint->y)));
return distance;
}
// Compare two distances
__device__ int compareDistance(double a, double b)
{
if(a < b) return -1;
if(a > b) return 1;
return 0;
}
// Assign the values of one result struct to another result struct
__device__ void assignResult(result *a, result *b)
{
a->pointIndex = b->pointIndex;
a->distance = b->distance;
}
// Function to reduce the block's result
__device__ void reduceBlockResult(result *blockResult, result *newResult)
{
// Store this block's result in the devResult array at this block's index only if the new result
// is better than the old result of this block.
if((blockResult->distance == -100.00 && blockResult->pointIndex == -1) || (compareDistance(blockResult->distance, newResult->distance) == 1))
{
assignResult(blockResult, newResult);
}
}
// Array holding the result of each thread in a block
__shared__ result shrResult [NT];
// Kernel function to calculate the metric center
extern "C" __global__ void metricCenter(point *pts, result *devResult, int n)
{
int thr, size, block, noOfBlocks;
result thrResult, tempResult;
block = blockIdx.x;
noOfBlocks = gridDim.x;
thr = threadIdx.x;
size = NT;
// Calculate the distance from this block's points to one of the other points.
for(int i = block; i < n; i += noOfBlocks)
{
thrResult.distance = -1.0;
for(int j = thr; j < n; j += size)
{
tempResult.distance = pointDistance(&pts[i], &pts[j]);
// Keep only the point whose distance is maximum from this block's point
if(compareDistance(tempResult.distance, thrResult.distance) == 1)
{
tempResult.pointIndex = i;
assignResult(&thrResult, &tempResult);
}
}
assignResult(&shrResult[thr], &thrResult);
// Reduce the results of all threads in this block
__syncthreads();
for(int m = NT/2; m > 0 ; m >>= 1)
{
if(thr < m)
{
if(compareDistance(shrResult[thr].distance, shrResult[thr+m].distance) == -1)
{
assignResult(&shrResult[thr], &shrResult[thr+m]);
}
}
__syncthreads();
}
// If this is the 1st thread of the block, it will now have the reduced result of this block.
if (thr == 0)
{
reduceBlockResult(&devResult[blockIdx.x], &shrResult[0]);
}
}
}
|
57
|
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <time.h>
__global__ void vAdd(int* A, int* B, int* C, int num_elements){
//Thread position
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < num_elements){
C[i] = A[i] + B[i];
}
}
void sumarVectores(int* A, int* B, int* C, int num_elements){
//Plain sequential loop on the CPU
for(int i=0; i<num_elements; i++){
C[i] = A[i] + B[i];
}
}
int main(){
int num_elements = 100000;
//Allocate HOST memory
int * h_A = (int*)malloc(num_elements * sizeof(int));
int * h_B = (int*)malloc(num_elements * sizeof(int));
int * h_C = (int*)malloc(num_elements * sizeof(int));
//Initialize the vector elements
for(int i=0; i<num_elements; i++){
h_A[i] = 1;
h_B[i] = i;
}
cudaError_t err;
int size = num_elements * sizeof(int);
int * d_A = NULL;
err = cudaMalloc((void **)&d_A, size);
int * d_B = NULL;
err = cudaMalloc((void **)&d_B, size);
int * d_C = NULL;
err = cudaMalloc((void **)&d_C, size);
//Copy to the GPU (DEVICE)
err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
err = cudaMemcpy(d_C, h_C, size, cudaMemcpyHostToDevice);
int HilosPorBloque = 512;
int BloquesPorGrid = (num_elements + HilosPorBloque -1) / HilosPorBloque;
//Launch the kernel and time it
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
vAdd<<<BloquesPorGrid, HilosPorBloque>>>(d_A, d_B, d_C, num_elements);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
float tiempo_reserva_host;
cudaEventElapsedTime(&tiempo_reserva_host, start, stop);
printf("Tiempo de suma vectores DEVICE: %f\n", tiempo_reserva_host);
cudaEventDestroy(start);
cudaEventDestroy(stop);
//Copy vector C back to the CPU
err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
//Run the same sum on the CPU
cudaEvent_t start1, stop1;
cudaEventCreate(&start1);
cudaEventCreate(&stop1);
cudaEventRecord(start1, 0);
sumarVectores(h_A, h_B, h_C, num_elements);
cudaEventRecord(stop1,0);
cudaEventSynchronize(stop1);
float tiempo_reserva_host1;
cudaEventElapsedTime(&tiempo_reserva_host1, start1, stop1);
printf("Tiempo de suma vectores HOST: %f\n", tiempo_reserva_host1);
cudaEventDestroy(start1);
cudaEventDestroy(stop1);
/*for(int i=0; i<num_elements; i++){
printf("%i", h_C[i]);
printf("\n");
}*/
cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
free(h_A); free(h_B); free(h_C);
return 0;
}
|
58
|
#include <stdio.h>
#include <stdint.h> // int32_t
__global__
void saxpy(int n, float *x, float *y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) y[i] = x[i] + y[i];
}
void cuda_array_culc_add_float(float* x, float* y, int32_t N)
{
float *d_x, *d_y;
cudaMalloc(&d_x, N*sizeof(float));
cudaMalloc(&d_y, N*sizeof(float));
cudaMemcpy(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, N*sizeof(float), cudaMemcpyHostToDevice);
// Element-wise add on N elements (SAXPY with a fixed at 1)
saxpy<<<(N+255)/256, 256>>>(N, d_x, d_y);
cudaMemcpy(y, d_y, N*sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_x);
cudaFree(d_y);
}
|
59
|
//
// Created by songzeceng on 2020/11/26.
//
#include "cuda_runtime.h"
#include "stdio.h"
#define N 64
#define TPB 32
float scale(int i, int n) {
return ((float ) i) / (n - 1);
}
__device__ float distance(float x1, float x2) {
return sqrt((x2 - x1) * (x2 - x1));
}
__global__ void distanceKernel(float *d_out, float *d_in, float ref) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
float x = d_in[i];
d_out[i] = distance(x, ref);
}
int main() {
float ref = 0.5f;
float *in;
float *out;
cudaMallocManaged(&in, N * sizeof(float ));
cudaMallocManaged(&out, N * sizeof(float ));
for (int i = 0; i < N; ++i) {
in[i] = scale(i, N);
}
distanceKernel<<<N / TPB, TPB>>>(out, in, ref);
cudaDeviceSynchronize();
for (int i = 0; i < N; ++i) {
printf("%.2f\t", out[i]);
}
printf("\n");
cudaFree(in);
cudaFree(out);
return 0;
}
|
60
|
#include <iostream>
#include <ctime>
__global__ void matMulKernel(float* matA, float* matB, float* matC, int rows, int cols)
{
dim3 gIdx;
gIdx.y = blockIdx.y * blockDim.y + threadIdx.y;
gIdx.x = blockIdx.x * blockDim.x + threadIdx.x;
float sum = 0;
if(gIdx.x < cols && gIdx.y < rows)
{
for(int i = 0; i < rows; ++i)
{
sum += matA[gIdx.y * cols + i] * matB[i * cols + gIdx.x];
}
matC[gIdx.y * cols + gIdx.x] = sum;
}
}
void printMat(float* mat, int rows, int cols)
{
for(int i = 0; i < rows; ++i)
{
for(int j = 0; j < cols; ++j)
{
int index = i * cols + j;
std::cout << mat[index] << " ";
}
std::cout << "\n";
}
}
int main(int argc, char** argv)
{
if(argc != 2)
{
std::cout << "Usage: " << argv[0] << " <DIM>" << std::endl;
exit(1);
}
int matDim = atoi(argv[1]);
const int NUM_COLS = matDim;
const int NUM_ROWS = matDim;
//allocate host mem for input matrices
float* matA_h = new float[NUM_ROWS * NUM_COLS];
float* matB_h = new float[NUM_ROWS * NUM_COLS];
//fill input matrices
for(int i = 0; i < NUM_ROWS; ++i)
{
for(int j = 0; j < NUM_COLS; ++j)
{
int index = i * NUM_COLS + j;
matA_h[index] = index;
//scale matrix (factor 2)
matB_h[index] = (i == j) ? 2 : 0;
}
}
//allocate dev mem for input matrices
float* matA_d;
float* matB_d;
int matSize = NUM_ROWS * NUM_COLS * sizeof(float);
cudaMalloc(&matA_d, matSize);
cudaMalloc(&matB_d, matSize);
//copy input matrices to device
cudaMemcpy(matA_d, matA_h, matSize, cudaMemcpyHostToDevice);
cudaMemcpy(matB_d, matB_h, matSize, cudaMemcpyHostToDevice);
//allocate dev mem for output matrix
float* matC_d;
cudaMalloc(&matC_d, matSize);
cudaMemset(matC_d, 0, matSize);
//determine block and grid size
dim3 bDim(16, 16);
dim3 gDim;
gDim.x = (NUM_COLS + 16 - 1) / 16; //ceil(num_cols/16)
gDim.y = (NUM_ROWS + 16 - 1) / 16; //ceil(num_rows/16)
cudaEvent_t start, stop;
//record start event
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
//launch kernel
matMulKernel<<<gDim, bDim>>>(matA_d, matB_d, matC_d, NUM_ROWS, NUM_COLS);
//record stop event
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsed;
cudaEventElapsedTime(&elapsed, start, stop);
//allocate host mem for output matrix
float* matC_h = new float[NUM_ROWS * NUM_COLS];
//copy output matrix from dev to host
cudaMemcpy(matC_h, matC_d, matSize, cudaMemcpyDeviceToHost);
//print output matrix
printMat(matC_h, NUM_ROWS, NUM_COLS);
std::cout << std::endl << "Compute time: " << elapsed << "ms" << std::endl;
cudaFree(matA_d); cudaFree(matB_d); cudaFree(matC_d);
delete[] matA_h; delete[] matB_h; delete[] matC_h;
}
|
61
|
/**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
 * Collatz conjecture verification, adapted from the CUDA vector-addition
 * sample (hence the NVIDIA header above).
 */
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
/**
 * CUDA Kernel Device code
 *
 * Checks the Collatz conjecture for every odd number below maxNumber: each
 * thread iterates n -> 3n+1 (odd) / n/2 (even) until the value drops below
 * its starting point.
 */
__global__ void
verifyCollatz(int64_t maxNumber)
{
int timesToRunGrid = maxNumber / (blockDim.x * gridDim.x) + 1;
int64_t number = 0;
int64_t i = 0;
for (int64_t gridRunNumber = 0; gridRunNumber < timesToRunGrid; ++gridRunNumber) {
// odd numbers only
number = 2 * (blockDim.x * gridDim.x * gridRunNumber + blockDim.x * blockIdx.x + threadIdx.x) + 1;
i = number;
if (number > 2 && number < maxNumber) {
while (i >= number) {
if (i & 0x1) {
/* odd case */
i = i * 3 + 1;
} else {
/* even case */
i = i >> 1;
}
}
}
}
}
/**
* Host main routine
*/
int
main()
{
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
int64_t maxNumber = 256ll * 256ll * 256ll * 256ll;
// Launch the Collatz verification kernel
int threadsPerBlock = 256;
int blocksPerGrid = 256;
// use CUDA's built-in heuristics to get max performance
cudaOccupancyMaxPotentialBlockSize(
&blocksPerGrid,
&threadsPerBlock,
(void*) verifyCollatz,
0, 0);
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
verifyCollatz<<<blocksPerGrid, threadsPerBlock>>>(maxNumber);
err = cudaGetLastError();
cudaDeviceSynchronize();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch collatz kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Done\n");
return 0;
}
|
62
|
extern "C"
{
__global__ void tx1mx_32(const int lengthX, const float *t, const float *x, float *z)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i<lengthX)
{
z[i] += t[i]*x[i]*(1.0f-x[i]); // keep the arithmetic in single precision
}
}
}
|
63
|
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <pthread.h>
#include <unistd.h>
#include <ctype.h>
struct ThreadStruct {
float *a, *b, *c;
int size, elapsed_time;
};
__global__ void vectorMultGPU(float *a, float *b, float *c, int n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < n)
{
c[i] = a[i] * b[i];
i+= blockDim.x * gridDim.x;
}
}
void vectorMultCPU(float *a, float *b, float *c, int n)
{
int i;
for (i = 0; i < n; ++i)
{
c[i] = a[i] * b[i];
}
}
void *threadCPU(void *threadarg)
{
time_t curTime, baseTime;
struct ThreadStruct *data;
data = (struct ThreadStruct*) threadarg;
baseTime = curTime = time(NULL);
while(curTime < baseTime + data->elapsed_time) //Runs for elapsed_time seconds (default 10)
{
vectorMultCPU(data->a, data->b, data->c, data->size);
curTime = time(NULL);
}
return NULL;
}
int main(int argc, char **argv)
{
int cores = 4;
int size = 100000;
int elapsed_time = 10;
int option;
while ((option = getopt (argc, argv, "s:t:c:")) != -1)
{
switch (option)
{
case 's':
size = atoi(optarg);
break;
case 't':
elapsed_time = atoi(optarg);
break;
case 'c':
cores = atoi(optarg);
break;
case '?':
if (optopt == 's' || optopt == 't' || optopt == 'c')
fprintf (stderr, "Option -%c requires an argument.\n", optopt);
else if (isprint (optopt))
fprintf (stderr, "Unknown option `-%c'.\n", optopt);
else
fprintf (stderr,
"Unknown option character `\\x%x'.\n",
optopt);
return 1;
default:
abort ();
}
}
pthread_t *thread_arr = (pthread_t*)malloc(cores*sizeof(pthread_t));
float *a, *b, *c, *GPUout;
float *d_a, *d_b, *d_c;
int i;
a = (float*)malloc(size*sizeof(float));
b = (float*)malloc(size*sizeof(float));
c = (float*)malloc(size*sizeof(float));
GPUout = (float*)malloc(size*sizeof(float));
cudaMalloc(&d_a, size*sizeof(float));
cudaMalloc(&d_b, size*sizeof(float));
cudaMalloc(&d_c, size*sizeof(float));
for(i = 0; i < size; ++i)
{
a[i] = b[i] = i;
c[i] = 0;
}
cudaMemcpy(d_a, a, size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_c, c, size*sizeof(float), cudaMemcpyHostToDevice);
time_t curTime, baseTime;
struct ThreadStruct Threaddata = {a, b, c, size, elapsed_time};
for (i = 0; i < cores; ++i)
pthread_create(&thread_arr[i], NULL, threadCPU, (void *) &Threaddata);
baseTime = curTime = time(NULL);
while(curTime < baseTime + elapsed_time)
{
cudaDeviceSynchronize();
vectorMultGPU<<< (size+511)/512, 512 >>>(d_a, d_b, d_c, size);
curTime = time(NULL);
}
for (i = 0; i < cores; ++i)
pthread_join(thread_arr[i],NULL);
cudaMemcpy(GPUout, d_c, size*sizeof(float), cudaMemcpyDeviceToHost);
free(a);
free(b);
free(c);
free(GPUout);
free(thread_arr);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
printf("Test Complete\n");
return 0;
}
|
64
|
#include <curand.h>
#include <curand_kernel.h>
#define DIM 1600
#define PI 3.14159265
__global__ void Rotate(uchar4 *ptr, unsigned char *R_input, unsigned char *G_input,
unsigned char *B_input, size_t i_size, float a,
unsigned long col, unsigned long row)
{
int x = threadIdx.x + (blockIdx.x * blockDim.x);
int y = threadIdx.y + (blockIdx.y * blockDim.y);
int offset = x + y * blockDim.x * gridDim.x;
x = x - (blockDim.x * gridDim.x / 2);
y = y - (blockDim.y * gridDim.y / 2);
unsigned char* f_r, *f_g, *f_b;
int ximg = (x*cos(a) + y*sin(a)) + (col/2), yimg = (y*cos(a) - x*sin(a)) + (row/2);
if (ximg < col && yimg < row) {
f_r = (unsigned char*)((char*)R_input + yimg*i_size);
f_g = (unsigned char*)((char*)G_input + yimg*i_size);
f_b = (unsigned char*)((char*)B_input + yimg*i_size);
ptr[offset].x = f_r[ximg];
ptr[offset].y = f_g[ximg];
ptr[offset].z = f_b[ximg];
ptr[offset].w = 255;
} else{
ptr[offset].x = 0;
ptr[offset].y = 0;
ptr[offset].z = 0;
ptr[offset].w = 255;
}
}
__global__ void Scale(unsigned char *R_input, unsigned char *G_input,unsigned char *B_input,
unsigned char *R_output, unsigned char *G_output,unsigned char *B_output,
size_t i_size, size_t pitch2, float s,
unsigned long col, unsigned long row){
float x = threadIdx.x + (blockIdx.x * blockDim.x);
float y = threadIdx.y + (blockIdx.y * blockDim.y);
int offset = x + y * pitch2;
x = x - (DIM / 2);
y = y - (DIM / 2);
unsigned char* f_r, *f_g, *f_b;
x /= s; y /= s;
int ximg = x + (col/2), yimg = y + (row/2);
if (ximg < (col - 1) && yimg < (row - 1)) {
f_r = (unsigned char*)((char*)R_input + yimg*i_size);
f_g = (unsigned char*)((char*)G_input + yimg*i_size);
f_b = (unsigned char*)((char*)B_input + yimg*i_size);
float cx = x - floor(x);
float cy = y - floor(y);
float R1 = f_r[ximg]*(1 - cx) + f_r[ximg + 1]*(cx);
float R2 = f_r[ximg + i_size]*(1 - cx) + f_r[ximg + 1 + i_size]*(cx);
R_output[offset] = R1*(1 - cy) + R2*(cy);
R1 = f_g[ximg]*(1 - cx) + f_g[ximg + 1]*(cx);
R2 = f_g[ximg + i_size]*(1 - cx) + f_g[ximg + 1 + i_size]*(cx);
G_output[offset] = R1*(1 - cy) + R2*(cy);
R1 = f_b[ximg]*(1 - cx) + f_b[ximg + 1]*(cx);
R2 = f_b[ximg + i_size]*(1 - cx) + f_b[ximg + 1 + i_size]*(cx);
B_output[offset] = R1*(1 - cy) + R2*(cy);
}else{
R_output[offset] = 0;
G_output[offset] = 0;
B_output[offset] = 0;
}
}
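/*
 * Usage sketch (assumed, not part of the original file): i_size is the row
 * pitch in bytes of the per-channel planes, as produced by cudaMallocPitch,
 * and the grid covers the DIM x DIM output. Hypothetically:
 *
 *   unsigned char *d_R; size_t pitch;
 *   cudaMallocPitch(&d_R, &pitch, col, row);   // one plane per channel
 *   dim3 block(16, 16), grid(DIM / 16, DIM / 16);
 *   Rotate<<<grid, block>>>(d_ptr, d_R, d_G, d_B, pitch, angle, col, row);
 */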
|
65
|
inline __device__ float operator*(float3 a, float3 b) {
return a.x * b.x + a.y * b.y + a.z * b.z;
}
inline __device__ float dot(float3 a, float3 b) {
return a.x * b.x + a.y * b.y + a.z * b.z;
}
inline __device__ float3 operator*(float3 a, float b) {
return make_float3(a.x * b, a.y * b, a.z * b);
}
inline __device__ float3 operator*(float b, float3 a) {
return make_float3(a.x * b, a.y * b, a.z * b);
}
inline __device__ float3 operator/(float3 a, float b) {
return make_float3(a.x / b, a.y / b, a.z / b);
}
inline __device__ float3 operator+(float3 a, float3 b) {
return make_float3(a.x + b.x, a.y + b.y, a.z + b.z);
}
inline __device__ float3 operator+(float3 a, float b) {
return make_float3(a.x + b, a.y + b, a.z + b);
}
inline __device__ float3 operator+(float b, float3 a) {
return make_float3(a.x + b, a.y + b, a.z + b);
}
inline __device__ float3 operator-(float3 a, float3 b) {
return make_float3(a.x - b.x, a.y - b.y, a.z - b.z);
}
inline __device__ float3 operator-(float3 a, float b) {
return make_float3(a.x - b, a.y - b, a.z - b);
}
/*inline __device__ float3 operator-(float b, float3 a){
return make_float3(a.x-b,a.y-b,a.z-b);
}*/
inline __device__ float length(float3 a) { return norm3df(a.x, a.y, a.z); }
inline __device__ float distance(float3 a, float3 b) {
return norm3df(a.x - b.x, a.y - b.y, a.z - b.z);
}
inline __device__ float clamp(float x, float a, float b) {
return fmaxf(a, fminf(b, x));
}
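/*
 * Usage sketch (assumed): with these helpers, device code can treat float3
 * like a small vector type, e.g.
 *
 *   __global__ void normalize_dirs(float3 *v, int n) {
 *       int i = blockIdx.x * blockDim.x + threadIdx.x;
 *       if (i < n) v[i] = v[i] / length(v[i]);
 *   }
 */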
|
66
|
/***************************************************************************//**
* \file LHS1.cu
* \author Christopher Minar (minarc@oregonstate.edu)
* \brief kernels to generate the left hand side for the intermediate velocity solve
*/
#include "LHS1.h"
namespace kernels
{
__global__
void LHS1_mid_luo_X(int *row, int *col, double *val, int *ghostTagsUV, double *dx, double *dy, double dt, double nu, int nx, int ny)
{
if (threadIdx.x + blockDim.x * blockIdx.x >= (nx-1)*ny)
return;
int i = threadIdx.x + blockDim.x * blockIdx.x,
I = i % (nx-1),
J = i / (nx-1);
if (I == 0 || I == nx-2 || J == 0 || J == ny-1)
return;
//int numE = i*5;
// top row - corner mid sides current row
int numE = (nx-1)*4 - 2 + (J-1)*(5*(nx-1) - 2) + I*5 - 1;
double temp = 1;
//EAST
row[numE] = i;
col[numE] = i+1;
val[numE] = -0.5*dt*nu*(1/(dx[I+1]*(dx[I+1]+dx[I])*0.5));
temp += 0.5*dt*nu*(1/(dx[I+1]*(dx[I+1]+dx[I])*0.5));
numE++;
//WEST
row[numE] = i;
col[numE] = i-1;
val[numE] = -0.5*dt*nu*(1/(dx[I]*(dx[I+1]+dx[I])*0.5));
temp += 0.5*dt*nu*(1/(dx[I]*(dx[I+1]+dx[I])*0.5));
numE++;
//NORTH
row[numE] = i;
col[numE] = i+(nx-1);
val[numE] = -0.5*dt*nu*(1/(dy[J]*(dy[J+1]+dy[J])*0.5));
temp += 0.5*dt*nu*(1/(dy[J]*(dy[J+1]+dy[J])*0.5));
numE++;
//SOUTH
row[numE] = i;
col[numE] = i-(nx-1);
val[numE] = -0.5*dt*nu*(1/(dy[J]*(dy[J-1]+dy[J])*0.5));
temp += 0.5*dt*nu*(1/(dy[J]*(dy[J-1]+dy[J])*0.5));
numE++;
//CENTER
row[numE] = i;
col[numE] = i;
val[numE] = temp;
numE++;
}
__global__
void LHS1_mid_luo_Y(int *row, int *col, double *val, int *ghostTagsUV, double *dx, double *dy, double dt, double nu, int nx, int ny)
{
if (threadIdx.x + blockDim.x * blockIdx.x >= nx*(ny-1))
return;
int ip = threadIdx.x + blockDim.x * blockIdx.x,
I = ip % nx,
J = ip / nx,
i = ip + (nx-1)*ny;
if (I == 0 || I == nx-1 || J == 0 || J == ny-2)
return;
int numE = (nx-1)*ny*5 - 2*ny-2*(nx-1) + nx*4-2 + (J-1)*(nx*5 - 2) + I*5 - 1;
double temp = 1;
//EAST
row[numE] = i;
col[numE] = i+1;
val[numE] = -0.5*dt*nu*(1/(dx[I]*(dx[I]+dx[I+1])*0.5));
temp += 0.5*dt*nu*(1/(dx[I]*(dx[I]+dx[I+1])*0.5));
numE++;
//WEST
row[numE] = i;
col[numE] = i-1;
val[numE] = -0.5*dt*nu*(1/(dx[I]*(dx[I]+dx[I-1])*0.5));
temp += 0.5*dt*nu*(1/(dx[I]*(dx[I]+dx[I-1])*0.5));
numE++;
//NORTH
row[numE] = i;
col[numE] = i + nx;
val[numE] = -0.5*dt*nu*(1/(dy[J+1]*(dy[J]+dy[J+1])*0.5));
temp += 0.5*dt*nu*(1/(dy[J+1]*(dy[J]+dy[J+1])*0.5));
numE++;
//SOUTH
row[numE] = i;
col[numE] = i-nx;
val[numE] = -0.5*dt*nu*(1/(dy[J]*(dy[J]+dy[J+1])*0.5));
temp += 0.5*dt*nu*(1/(dy[J]*(dy[J]+dy[J+1])*0.5));
numE++;
//CENTER
row[numE] = i;
col[numE] = i;
val[numE] = temp;
numE++;
}
}//end kernel
|
67
|
#include <iostream>
#include <math.h>
#include <time.h>
#include <stdlib.h>
#include <random>
#include <vector>
#include <chrono>
#include <deque>
#include <algorithm>
#include <iterator>
#include <curand.h>
#include <curand_kernel.h>
#define BLOCK_SIZE 1024
__global__ void min_reduce(int *arr, const int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
int j = n-i-1;
int x = arr[i];
int y = arr[j];
arr[i] = x < y ? x:y;
}
}
int get_min_val(int *min_arr, int n) {
while (n > 1) {
min_reduce<<<(n + BLOCK_SIZE - 1)/BLOCK_SIZE, BLOCK_SIZE>>>(min_arr, n);
n = (n+1)/2;
}
cudaDeviceSynchronize();
return min_arr[0];
}
void random_vector(int *arr, const int n, const int min_val=0.0, const int max_val=1000.0) {
static std::random_device rd;
static std::mt19937 mte(rd());
std::uniform_int_distribution<int> dist(min_val, max_val);
for (int i = 0; i < n; i++) {
arr[i] = dist(mte);
}
}
bool check_correctness(int *arr, int pred, int n) {
int min_el = 1 << 30;
for (int i = 0; i < n; i++) {
if (arr[i] < min_el) {
min_el = arr[i];
}
}
return pred == min_el;
}
int main(void) {
int n = 1 << 25;
int *arr, *temp;
cudaMallocManaged(&arr, n*sizeof(int));
random_vector(arr, n, 0, 10000);
temp = new int[n];
std::copy(arr, arr+n, temp);
auto t1 = std::chrono::high_resolution_clock::now();
int min_el = get_min_val(arr, n);
auto t2 = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::milliseconds>( t2 - t1 ).count();
std::cout << duration << std::endl;
t1 = std::chrono::high_resolution_clock::now();
std::cout << check_correctness(temp, min_el, n) << std::endl;
t2 = std::chrono::high_resolution_clock::now();
duration = std::chrono::duration_cast<std::chrono::milliseconds>( t2 - t1 ).count();
std::cout << duration << std::endl;
delete[] temp;
cudaFree(arr);
return 0;
}
|