serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
12,101 | //
// Created by alex on 7/15/20.
//
#include "Processor.cuh"
// Passes a CUDA status through unchanged on success; on any error, prints the
// runtime's message to stderr and aborts via assert.
inline cudaError_t checkCuda(cudaError_t result)
{
    if (result == cudaSuccess)
        return result;
    fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
    assert(result == cudaSuccess);
    return result;
}
// Grid-stride kernel: for each message i in flow[0..flowLength), adds the
// number of zero-valued bytes in flow[i]->buffer to sum[i]. Assumes the
// flow pointer array and the messages it points to are device-accessible,
// and that sum[] holds the caller's intended starting counts.
__global__ void gpu_count_zeros(Message** flow, int* sum, int flowLength)
{
    const int first  = blockDim.x * blockIdx.x + threadIdx.x;
    const int stride = blockDim.x * gridDim.x;
    for (int msg = first; msg < flowLength; msg += stride)
    {
        Message* cur = flow[msg];
        for (int byte = 0; byte < cur->bufferSize; byte++)
        {
            if (cur->buffer[byte] == 0)
                sum[msg] += 1;
        }
    }
}
// Host reference: adds to `sum` the number of zero-valued bytes found across
// the first flowLength messages of `flow`.
void cpu_count_zeros(Message** flow, int& sum, int flowLength)
{
    for (int msg = 0; msg < flowLength; msg++)
    {
        Message* cur = flow[msg];
        for (int b = 0; b < cur->bufferSize; b++)
        {
            sum += (cur->buffer[b] == 0) ? 1 : 0;
        }
    }
}
// Binds this processor to the transport it will pop messages from.
Processor::Processor(ITransport* t) : transport(t) {}
// Drains messages from the transport and counts zero bytes on the GPU until
// at least minMessageToProcess messages have been handled, then reports
// totals and exits the process.
//
// Fixes vs. previous revision:
//  - kernel launch had <<<threadsPerBlock, numberOfBlocks>>> (execution
//    configuration arguments swapped); now <<<numberOfBlocks, threadsPerBlock>>>.
//  - the message-pointer array is a managed allocation: the kernel
//    dereferences it on the device, and it is prefetched / cudaFree'd, none
//    of which is valid for a host stack array.
//  - the payload staging buffer `d` is now freed before exit.
void Processor::procCountZerosGPU(int minMessageToProcess) {
    timer t;

    int deviceId;
    int numberOfSMs;
    checkCuda( cudaGetDevice(&deviceId));
    checkCuda( cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId));

    size_t threadsPerBlock = 256;
    size_t numberOfBlocks = 32 * numberOfSMs; // enough blocks to saturate the device

    int msgCountReturned = 0;
    int processedMessages = 0;
    int sum = 0; // running total of zero bytes across all processed messages

    // Managed array of message pointers: filled by the transport on the host,
    // dereferenced by gpu_count_zeros on the device.
    Message** m = nullptr;
    size_t msgBlockSize = MSG_BLOCK_SIZE * sizeof(Message*);
    checkCuda( cudaMallocManaged(&m, msgBlockSize));

    uint8_t* d = nullptr; // payload staging area (managed)
    size_t msgDataSize = MSG_MAX_SIZE * MSG_BLOCK_SIZE;
    checkCuda( cudaMallocManaged(&d, msgDataSize));

    int* blockSum; // per-message zero counts written by the kernel
    size_t sumArraySize = MSG_BLOCK_SIZE * sizeof(int);
    checkCuda( cudaMallocManaged(&blockSum, sumArraySize));

    while (processedMessages < minMessageToProcess) {
        if (0 != transport->pop(m, MSG_BLOCK_SIZE, msgCountReturned, eTransportDest::DEVICE)) {
            exit(EXIT_FAILURE);
        }
        // Hint the pointer array toward the device before the launch.
        cudaMemPrefetchAsync(m, msgBlockSize, deviceId);
        if (msgCountReturned > 0) // only launch when there is new work
        {
            std::cerr << "\rProcessed " << processedMessages << " messages";
            gpu_count_zeros<<<numberOfBlocks, threadsPerBlock>>>(m, blockSum, msgCountReturned);
            checkCuda( cudaGetLastError() );
            checkCuda( cudaDeviceSynchronize() ); // wait for GPU threads to complete
            cudaMemPrefetchAsync(blockSum, sumArraySize, cudaCpuDeviceId);
            for (int k = 0; k < msgCountReturned; k++)
            {
                sum += blockSum[k]; // fold the per-message counts into the accumulator
                blockSum[k] = 0;    // reset for the next batch
            }
            processedMessages += msgCountReturned;
        }
        msgCountReturned = 0;
    }

    checkCuda( cudaFree(m));
    checkCuda( cudaFree(d));
    checkCuda( cudaFree(blockSum));
    std::cout << "\n Processing Completed: " << std::endl;
    std::cout << "\t processed " << processedMessages << " in " << t.seconds_elapsed() << " sec" << std::endl;
    std::cout << "\t total zero's in messages = " << sum << std::endl;
    exit(EXIT_SUCCESS);
}
// Host-only variant: pops message batches and tallies zero bytes on the CPU
// until minMessageToProcess messages have been handled, then reports and
// exits the process (never actually returns).
int Processor::procCountZerosCPU(int minMessageToProcess) {
    timer t;
    Message* batch[MSG_BLOCK_SIZE];
    int popped = 0;
    int zeroTotal = 0;
    int handled = 0;

    while (handled < minMessageToProcess) {
        if (transport->pop(batch, MSG_BLOCK_SIZE, popped, eTransportDest::HOST) != 0) {
            exit(EXIT_FAILURE);
        }
        if (popped > 0) // only count when new messages arrived
        {
            std::cerr << "\rProcessed " << handled << " messages";
            cpu_count_zeros(batch, zeroTotal, popped);
            handled += popped;
        }
        popped = 0;
    }

    std::cout << "\nProcessing Completed: " << std::endl;
    std::cout << "\t processed " << handled << " in " << t.seconds_elapsed() << " sec" << std::endl;
    std::cout << "\t total zero's in messages = " << zeroTotal << std::endl;
    exit(EXIT_SUCCESS);
}
// Pops and discards messages (no per-byte work) until minMessageToProcess
// have been seen — useful for measuring raw transport throughput.
void Processor::procDropMsg(int minMessageToProcess) {
    timer t;
    Message* batch[MSG_BLOCK_SIZE];
    int popped = 0;
    int dropped = 0;

    while (dropped < minMessageToProcess) {
        if (transport->pop(batch, MSG_BLOCK_SIZE, popped, eTransportDest::HOST) != 0) {
            exit(EXIT_FAILURE);
        }
        if (popped > 0) // new messages arrived; just count them
        {
            std::cerr << "\rProcessed " << dropped << " messages";
            dropped += popped;
        }
        popped = 0;
    }

    std::cout << "\nProcessing Completed: " << std::endl;
    std::cout << "\t processed " << dropped << " in " << t.seconds_elapsed() << " sec" << std::endl;
    exit(EXIT_SUCCESS);
}
// Pops batches and prints the first 32 bytes of up to minMessageToProcess
// messages per batch, until that many messages have been seen in total.
// Exits the process when done (never actually returns).
int Processor::procPrintMessages(int minMessageToProcess) {
    Message* batch[MSG_BLOCK_SIZE];
    int totalSeen = 0;
    int popped = 0;

    do {
        if (transport->pop(batch, MSG_BLOCK_SIZE, popped, eTransportDest::HOST) != 0) {
            exit(EXIT_FAILURE);
        }
        totalSeen += popped;
        const int toPrint = min(popped, minMessageToProcess);
        std::cout << "Printing first bytes of " << toPrint << " messages" << std::endl;
        for (int i = 0; i < toPrint; i++)
        {
            transport->printMessage(batch[i], 32);
            std::cout << std::endl;
        }
    } while (totalSeen < minMessageToProcess);

    std::cout << "Processing Completed: found " << totalSeen << " messages" << std::endl;
    exit(EXIT_SUCCESS);
}
|
12,102 | #include "includes.h"
// Update step: g[idx] += -lambda * div(z), where the divergence of the field
// (zx, zy) is taken with backward differences and the boundary terms are
// dropped (zero-flux edges). One thread per (px, py) pixel.
__global__ void solution_stencil(float *zx, float * zy, float *g, float lambda, int nx, int ny)
{
    const int px = blockIdx.x * blockDim.x + threadIdx.x;
    const int py = blockIdx.y * blockDim.y + threadIdx.y;
    if (px < nx && py < ny)
    {
        const int idx = px + py * nx;
        float div = 0;
        if (px < (nx - 1)) div += zx[idx];       // forward neighbour exists
        if (px > 0)        div -= zx[idx - 1];   // backward neighbour exists
        if (py < (ny - 1)) div += zy[idx];
        if (py > 0)        div -= zy[idx - nx];
        // identical arithmetic to the usual g -= lambda * div form
        g[idx] = -div * lambda + g[idx];
    }
} |
12,103 | #include<iostream>
#include<cuda.h>
// Minimal compilation smoke test: performs no device work and exits cleanly.
int main()
{
    return 0;
}
|
12,104 | #include "includes.h"
// ERROR CHECKING MACROS //////////////////////////////////////////////////////
// Monte-Carlo ecosystem simulation: one thread per sample path (ii). Each
// path holds two noPatches-wide population vectors in pathPops (slots ii*2
// and ii*2+1, used as current/next buffers). Per year: (1) redistribute
// populations through the sparse movement/mortality matrix mmm (CSC-style:
// rowIdx + elemsPerCol), (2) evolve the mean growth rate grMean with a
// mean-reverting + Brownian + jump term, (3) apply logistic growth per patch
// against capacity caps[kk]. eps[ii] receives the path's final total
// population summed over patches.
// NOTE(review): rgr/brownians/jumpSizes/jumps are laid out per-path,
// per-year ([ii*nYears + jj]) as the indexing shows; speciesParams slot
// meanings (0=initial mean, 1=long-run mean, 2=vol, 4=reversion, 5=jump
// scale, 6=jump prob, 7=rgr scale) are inferred from use — confirm against
// the host-side packing code.
__global__ void mteKernel(int noPaths, int nYears, int noPatches, float timeStep, float* rgr, float* brownians, float* jumpSizes, float* jumps, float* speciesParams, float *initPops, float* caps, float*mmm, int* rowIdx, int* elemsPerCol, float* pathPops, float* eps) {
// Global index for finding the thread number
int ii = blockIdx.x*blockDim.x + threadIdx.x;
// Only perform matrix multiplication sequentially for now. Later, if
// so desired, we can use dynamic parallelism because the card in the
// machine has CUDA compute capability 3.5
if (ii < noPaths) {
//extern __shared__ float s[];
// Initialise the prevailing population vector
for (int jj = 0; jj < noPatches; jj++) {
pathPops[(ii*2)*noPatches+jj] = initPops[jj];
}
float grMean = speciesParams[0];
for (int jj = 0; jj < nYears; jj++) {
// Movement and mortality. This component is very slow without
// using shared memory. As we do not know the size of the patches
// at compile time, we need to be careful how much shared memory we
// allocate. For safety, we assume that we will have less than
// 64KB worth of patch data in the mmm matrix. Using single
// precision floating point numbers, this means that we can only
// have up to 8,000 patches. As this number is extremely large, we
// set a limit outside this routine to have at most 300 patches.
for (int kk = 0; kk < noPatches; kk++) {
pathPops[(ii*2+1)*noPatches+kk] = 0.0;
}
// Sparse matrix-vector product: `iterator` walks the packed value
// array in column order; elemsPerCol[kk] entries belong to column kk.
int iterator = 0;
for (int kk = 0; kk < noPatches; kk++) {
for (int ll = 0; ll < elemsPerCol[kk]; ll++) {
pathPops[(ii*2+1)*noPatches+kk] += pathPops[(ii*2)*
noPatches+rowIdx[iterator]]*mmm[iterator];
iterator++;
}
}
// UPDATE: NEED TO IMPLEMENT SHARED MEMORY AS WELL
// DEPRECATED - TO BE DELETED AT LATER STAGE
// Load the correct slice of the mmm matrix for each
// destination patch. Use the thread index as a helper to do
// this. Wait for all information to be loaded in before
// proceeding. We need to tile the mmm matrix here to obtain
// a sufficient speed up.
// for (int kk = 0; kk < noTiles; kk++) {
// int currDim = tileDim;
// if (threadIdx.x < noPatches) {
// // First, allocate the memory for this tile
// if (kk == noTiles-1) {
// currDim = (int)(noTiles*tileDim == noPatches) ?
// (int)tileDim : (int)(noPatches - kk*tileDim);
// }
// for (int ll = 0; ll < currDim; ll++) {
// s[ll*noPatches + threadIdx.x] = mmm[kk*noPatches*
// tileDim + ll*noPatches + threadIdx.x];
// }
// }
// __syncthreads();
// // Now increment the populations for this path
// for (int kk = 0; kk < currDim; kk++) {
// for (int ll = 0; ll < noPatches; ll++) {
// pathPops[(ii*2+1)*noPatches+kk] += pathPops[(ii*2)*
// noPatches+ll]*s[kk*noPatches + ll];
// }
// }
// }
// for (int kk = 0; kk < noPatches; kk++) {
// for (int ll = 0; ll < noPatches; ll++) {
//// pathPops[(ii*2+1)*noPatches+kk] += pathPops[(ii*2)*
//// noPatches+ll]*s[ll];
// pathPops[(ii*2+1)*noPatches+kk] += pathPops[(ii*2)*
// noPatches+ll]*mmm[kk*noPatches+ll];
// }
// }
// matrixMultiplicationKernel<<<noBlocks,noThreadsPerBlock>>>(pathPops
// + (ii*2)*noPatches, mmm, pathPops + (ii*2+1)*noPatches, 1,
// noPatches, noPatches);
// cudaDeviceSynchronize();
// __syncthreads();
// Natural birth and death
// Adjust the global growth rate mean for this species at this
// time step for this path.
float jump = (jumps[ii*nYears + jj] < speciesParams[6]) ? 1.0f :
0.0f;
float meanP = speciesParams[1];
float reversion = speciesParams[4];
float brownian = brownians[ii*nYears + jj]*speciesParams[2];
float jumpSize = jumpSizes[ii*nYears + jj]*pow(speciesParams[5],2)
- pow(speciesParams[5],2)/2;
grMean = grMean + reversion*(meanP - grMean)*timeStep + grMean
*brownian + (exp(jumpSize) - 1)*grMean*jump;
// Logistic growth per patch; the "next" buffer becomes the new
// "current" buffer for year jj+1.
for (int kk = 0; kk < noPatches; kk++) {
float gr = speciesParams[7]*rgr[ii*(nYears*noPatches) + jj*
noPatches + kk]*grMean + grMean;
pathPops[(ii*2)*noPatches+kk] = pathPops[(ii*2+1)*noPatches+kk]
*(1.0f + gr*(caps[kk]-pathPops[(ii*2+1)*noPatches+kk])/
caps[kk]);
}
}
// Final path statistic: total post-movement population across patches.
eps[ii] = 0.0f;
for (int jj = 0; jj < noPatches; jj++) {
eps[ii] += pathPops[(ii*2+1)*noPatches+jj];
}
}
} |
12,105 | /*
* Multiplying a 2D matrix using CUDA
*/
#include<stdio.h>
#include<stdlib.h>
#include<assert.h>
#define BLOCK_SIZE 16
// C = A * B where A is m x n, B is n x k, C is m x k, all row-major;
// one thread per output element.
// Fix: row/col were computed as blockIdx + blockDim * threadIdx (operands
// swapped), so with the 16x16-block launch in main() most output elements
// were never computed and many threads mapped to the same index.
__global__ void gpu_matrix_mul( int *a, int *b, int *c, int m, int n, int k){
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (col < k && row < m) {  // guard the grid's over-coverage at the edges
        int sum = 0;
        for (int i = 0; i < n; i++) {
            sum += a[row*n + i] * b[i*k + col];
        }
        c[row * k + col] = sum;
    }
}
// Host reference multiply: h_result = h_a (m x n) * h_b (n x k), row-major.
void cpu_matrix_mult(int *h_a, int *h_b, int *h_result, int m, int n, int k) {
    for (int row = 0; row < m; ++row) {
        for (int col = 0; col < k; ++col) {
            int acc = 0;
            for (int idx = 0; idx < n; ++idx) {
                acc += h_a[row * n + idx] * h_b[idx * k + col];
            }
            h_result[row * k + col] = acc;
        }
    }
}
/*
 * Driver: multiplies two 1024x1024 int matrices on the GPU (timed with CUDA
 * events), then repeats the product on the CPU for comparison.
 *
 * Fixes vs. previous revision:
 *  - the GPU timing printf passed six arguments for five conversions, so %f
 *    consumed an int (undefined behaviour); the argument list is corrected.
 *  - matrix B is initialised over its actual n x k extent (was n x n; the
 *    two only coincided because n == k here).
 *  - deprecated cudaThreadSynchronize() replaced with cudaDeviceSynchronize().
 *  - device/pinned-host allocations and the CUDA events are released.
 */
int main(){
    int m,n,k; // m=rows of 1st , n= cols of 1st and rows of 2nd , k = cols of 2nd
    srand(3333);
    m = 1024;
    n = 1024;
    k = 1024;

    // Allocate pinned host memory; h_cc stores the CPU reference result
    int *h_a, *h_b, *h_c, *h_cc;
    cudaMallocHost((void **) &h_a , sizeof(int)*m*n);
    cudaMallocHost((void **) &h_b , sizeof(int)*n*k);
    cudaMallocHost((void **) &h_c , sizeof(int)*m*k);
    cudaMallocHost((void **) &h_cc , sizeof(int)*m*k);

    // random-initialise matrix A (m x n)
    for(int i = 0; i < m; ++i){
        for(int j = 0; j < n; ++j){
            h_a[i*n + j] = rand() % 1024;
        }
    }
    // random-initialise matrix B (n x k)
    for(int i = 0; i < n; ++i){
        for(int j = 0; j < k; ++j){
            h_b[i*k + j] = rand() % 1024;
        }
    }

    float gpu_elapsed_time_ms;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);

    // Allocate device memory and upload the operands
    int *d_a, *d_b, *d_c;
    cudaMalloc((void **) &d_a, sizeof(int)*m*n);
    cudaMalloc((void **) &d_b, sizeof(int)*n*k);
    cudaMalloc((void **) &d_c, sizeof(int)*m*k);
    cudaMemcpy(d_a, h_a, sizeof(int)*m*n, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, sizeof(int)*n*k, cudaMemcpyHostToDevice);

    // ceil-div grid so the whole m x k output is covered
    unsigned int grid_rows = (m + BLOCK_SIZE - 1) / BLOCK_SIZE;
    unsigned int grid_cols = (k + BLOCK_SIZE - 1) / BLOCK_SIZE;
    dim3 dimGrid(grid_cols, grid_rows);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    gpu_matrix_mul<<<dimGrid, dimBlock>>>(
        d_a, d_b, d_c, m, n, k
    );
    cudaMemcpy(h_c, d_c, sizeof(int)*m*k, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
    printf("Time Elapsed on matrix multiplication of %dx%d . %dx%d on GPU : %fms.\n\n", m, n, n, k, gpu_elapsed_time_ms);

    float cpu_elapsed_time_ms;
    cudaEventRecord(start, 0);
    cpu_matrix_mult(h_a, h_b, h_cc, m, n, k);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&cpu_elapsed_time_ms, start, stop);
    printf("Time elapsed on matrix multiplication of %dx%d . %dx%d on CPU: %f ms.\n\n", m, n, n, k, cpu_elapsed_time_ms);

    // Release all resources before exit
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cudaFreeHost(h_a);
    cudaFreeHost(h_b);
    cudaFreeHost(h_c);
    cudaFreeHost(h_cc);
    return 0;
}
|
12,106 | /* Best config: */
/* [('tile_r', [1, 1, 32, 2]), ('tile_y', [8, 1, 2, 4]), ('tile_x', [16, 1, 1, 4]), ('tile_rs', [32, 2, 1]), ('auto_unroll_max_step', 0), ('unroll_explicit', 1)],,None,50730168 */
/* Finish loading 23776 records */
/* Time cost of this operator: 0.000020 */
// Auto-generated (TVM autotuned) contraction kernel: accumulates U * K into
// C using a 64-float U tile and a 128-float K tile in shared memory, with a
// 32-element per-thread accumulator (4 x 4 x 2 register tile per thread).
// NOTE(review): machine-generated code — all index arithmetic is schedule-
// specific and assumes the exact launch geometry the autotuner chose
// (blockDim.y/z and grid shape implied by the tile constants); do not edit
// indices by hand.
extern "C"
__global__
void default_function_kernel0( float* __restrict__ U, float* __restrict__ K, float* __restrict__ C) {
float C_local[32];
__shared__ float U_shared[64];
__shared__ float K_shared[128];
float U_shared_local[16];
float K_shared_local[2];
// Zero the per-thread register accumulator tile.
for (int x_c_init = 0; x_c_init < 4; ++x_c_init) {
for (int y_c_init = 0; y_c_init < 4; ++y_c_init) {
for (int r_c_init = 0; r_c_init < 2; ++r_c_init) {
C_local[((x_c_init * 8) + (y_c_init * 2)) + r_c_init] = 0.0f;
}
}
}
// Outer reduction: stage one U slice and one K slice per iteration.
for (int sdim_outer = 0; sdim_outer < 32; ++sdim_outer) {
__syncthreads();
int ty = (int)threadIdx.y;
int tz_2 = (int)threadIdx.z * 2;
int tz_strd = ((int)threadIdx.z / 8) * 4096;
int tz_strd_2 = ((int)threadIdx.z % 8) * 64;
int bx_16384 = (((int)blockIdx.x) * 16384);
int by_512 = (int)blockIdx.y * 512;
U_shared[tz_2 + ty] = U[(bx_16384 + tz_strd + by_512 + tz_strd_2) + ty + (sdim_outer * 2)];
for (int ax0_ax1_fused_inner_inner_inner = 0; ax0_ax1_fused_inner_inner_inner < 2; ++ax0_ax1_fused_inner_inner_inner) {
K_shared[(((((int)threadIdx.z) * 4) + (((int)threadIdx.y) * 2)) + ax0_ax1_fused_inner_inner_inner)] = K[((((sdim_outer * 128) + (((int)threadIdx.z) * 4)) + (((int)threadIdx.y) * 2)) + ax0_ax1_fused_inner_inner_inner)];
}
__syncthreads();
// Inner reduction over the staged tiles via register sub-tiles.
for (int sdim_inner_outer = 0; sdim_inner_outer < 2; ++sdim_inner_outer) {
for (int ax1 = 0; ax1 < 4; ++ax1) {
for (int ax2 = 0; ax2 < 4; ++ax2) {
U_shared_local[((ax1 * 4) + ax2)] = U_shared[((((ax1 * 16) + (((int)threadIdx.y) * 8)) + (ax2 * 2)) + sdim_inner_outer)];
}
}
for (int ax11 = 0; ax11 < 2; ++ax11) {
K_shared_local[ax11] = K_shared[(((sdim_inner_outer * 64) + (((int)threadIdx.z) * 2)) + ax11)];
}
for (int x_c = 0; x_c < 4; ++x_c) {
for (int y_c = 0; y_c < 4; ++y_c) {
for (int r_c = 0; r_c < 2; ++r_c) {
C_local[(((x_c * 8) + (y_c * 2)) + r_c)] = (C_local[(((x_c * 8) + (y_c * 2)) + r_c)] + (U_shared_local[((x_c * 4) + y_c)] * K_shared_local[r_c]));
}
}
}
}
}
// Write the register tile back to global C.
for (int x_inner_inner_inner = 0; x_inner_inner_inner < 4; ++x_inner_inner_inner) {
for (int y_inner_inner_inner = 0; y_inner_inner_inner < 4; ++y_inner_inner_inner) {
for (int r_inner_inner_inner = 0; r_inner_inner_inner < 2; ++r_inner_inner_inner) {
C[(((((((((int)blockIdx.x) * 16384) + (x_inner_inner_inner * 4096)) + (((int)blockIdx.y) * 512)) + (((int)threadIdx.y) * 256)) + (y_inner_inner_inner * 64)) + (((int)threadIdx.z) * 2)) + r_inner_inner_inner)] = C_local[(((x_inner_inner_inner * 8) + (y_inner_inner_inner * 2)) + r_inner_inner_inner)];
}
}
}
}
|
12,107 | #include "includes.h"
// Despite the name, this kernel SWAPS (not copies) complex values: each
// thread exchanges one interleaved (re, im) pair between subarray `a` and a
// window of `c` offset by (ox, oy). Threads map linearly over an
// (sx/2) x (sy/2) complex sub-grid.
// NOTE(review): both ids and idd use sx as the row pitch, even though the
// comment promises "different strides" and mx/my/mz are accepted but unused
// — presumably idd was meant to use the destination pitch (mx?); confirm
// against the caller.
// NOTE(review): the bounds check compares px against sx, but px < sx/2 by
// construction, so that half of the guard looks vacuous — likely intended
// px >= (sx/2). TODO confirm.
__global__ void array_copy(float*a, float * c, int mx, int my, int mz, int sx,int sy,int sz, int ox, int oy, int oz) // copies between two memories with different strides
{
int pnum=blockIdx.x*blockDim.x+threadIdx.x; // which source array element do I have to deal with?
int px=pnum%(sx/2); // my x pos of a complex number in the subarray
int py=pnum/(sx/2); // my y pos of a complex number
if(px>=sx || py >= (sy/2)) return; // not in range ... quit
int ids=2*(px+py*sx); /// offset to array start in floats
int idd=2*((ox+px)+(oy+py)*sx);
// echange two values using a tmp
float tmpR = c[idd];
float tmpI = c[idd+1];
c[idd]=a[ids]; // (float)(ox+px); //
c[idd+1]=a[ids+1]; // (float)(oy+py); //
a[ids]=tmpR;
a[ids+1]=tmpI;
} |
12,108 | /*
Name: Matthew Matze
Date: 10/4/2016
Class: csc4310
Location: ~/csc4310/add4
General Summary of Program
The program is designed to take one input file holding a matrix of
integers and add the second, third, and fourth quardrant to the first.
After the addition the first quadrant is outputed to the output file.
To Compile:
nvcc addquad.cu
To Execute:
a.out <inputfile >outputfile
*/
#include<cuda.h>
#include<stdio.h>
#include<stdlib.h>
void load(int *matrix, int n);
/*
* The load function puts the matrix from the file into a 1-d array of ints.
*
* Precondition: The file has the row/column dimension on the first line
* which has already been read in. On the following lines it will have that
* number of rows and columns of integers to be read in. The next parameter
* is an empty array of integers large enough to hold the contents of the
* input file. Lastly we have the row/column value in the final in parameter
*
* Postcondition: After Execution the file has been completely read through
* and the integer array is now fully loaded from the provided input file
*/
__global__ void kerneladd4(int *matrix, int n);
/*
* The kerneladd4 function adds the contents of quadrant 2, 3, and 4 into the
* contents of quadrant 1.
*
* Precondition: The matrix array is filled with the values of the matrix
* ready to be processed. The integer holds the size of the rows/columns
*
* Postcondition: After execution the contents of the first quadrant of the
* matrix will be equal to the all four quadrants added together.
*/
void add4(int *matrix, int n);
/*
* The add4 function sets the size of the grid and block and calls the kerneladd4
* function.
*
* Precondition: The matrix array is filled with the values of the matrix
* ready to be processed. The integer holds the size of the rows/columns
*
* Postcondition: After execution the contents of the first quadrant of the
* matrix will be equal to the all four quadrants added together.
*/
void outfunc(int *output, int n);
/*
* The output function outputs the matrix in the form of a 1-d array to the
* output file.
*
* Precondition: The first parameter is the array of the integers we have
* already processed and the second is the row/column dimension
*
* Postcondition: After Execution the output file is loaded with the first
* quadrant of the output array.
*/
/*
 * Reads an n x n matrix from stdin, folds quadrants 2-4 into quadrant 1 on
 * the GPU, and writes quadrant 1 to stdout.
 *
 * Fixes vs. previous revision: the matrix was allocated with sizeof(int *)
 * (a pointer size, ~2x over-allocation on 64-bit) instead of sizeof(int);
 * malloc is now checked and the buffer is freed before exit.
 */
int main(int argc, char *argv[]){
    int *matrix;
    int n;

    scanf("%d", &n);
    // Allocate memory for the n x n element matrix
    matrix = (int*) malloc((size_t)n * n * sizeof(int));
    if (matrix == NULL) {
        fprintf(stderr, "malloc failed for %d x %d matrix\n", n, n);
        return 1;
    }
    load(matrix, n);
    // add all quadrants of the matrix into the first
    add4(matrix, n);
    // Output the first quadrant
    outfunc(matrix, n);
    free(matrix);
    return 0;
}
// Fills `matrix` with n*n whitespace-separated integers read from stdin
// (row-major order).
void load(int *matrix, int n){
    const int total = n * n;
    for (int idx = 0; idx < total; idx++) {
        scanf("%d", &matrix[idx]);
    }
}
// One thread per quadrant-1 element: folds the other three quadrants of the
// n x n matrix into quadrant 1. Elements are addressed as loc = row + col*n
// (x-thread index is the fast-moving dimension).
// NOTE(review): the offsets +n/2 (sideways quadrant) and +n*n/2 (lower
// quadrants) assume this row+col*n convention matches how load() filled the
// buffer (plain sequential scanf order) — confirm the intended orientation;
// outfunc() uses the mirrored i + j*n convention.
__global__ void kerneladd4(int *matrix, int n){
int row = threadIdx.x + blockDim.x * blockIdx.x;
int col = threadIdx.y + blockDim.y * blockIdx.y;
if((row<n/2)&&col<(n/2)){
int loc=row+(col*n);
matrix[loc]+=matrix[loc+(n/2)]
+matrix[loc+(n*n/2)]+matrix[loc+(n*n/2)+(n/2)];
}
}
/*
 * Copies the matrix to the device, launches kerneladd4 over 8x8 blocks, and
 * copies the summed result back into `matrix`.
 *
 * Fixes vs. previous revision:
 *  - byte size used sizeof(int *) instead of sizeof(int) (over-allocated and
 *    over-copied ~2x on 64-bit hosts);
 *  - removed the stray "if(size%8) DimGrid.x++" — it tested the BYTE count,
 *    and the grid is already rounded up with ceil (the kernel bounds-guards
 *    any excess threads anyway).
 */
void add4(int *matrix, int n){
    size_t size = (size_t)n * n * sizeof(int);
    int *output;
    cudaMalloc((void **) &output, size);
    cudaMemcpy(output, matrix, size, cudaMemcpyHostToDevice);
    // ceil(n/8) blocks per dimension covers the whole matrix; the kernel
    // itself restricts work to the n/2 x n/2 first quadrant.
    dim3 DimGrid((int)ceil((double)n/8.0), (int)ceil((double)n/8.0), 1);
    dim3 DimBlock(8, 8, 1);
    kerneladd4<<<DimGrid, DimBlock>>>(output, n);
    cudaMemcpy(matrix, output, size, cudaMemcpyDeviceToHost);
    cudaFree(output);
}
// Writes the dimension n followed by the first (n/2 x n/2) quadrant of the
// matrix to stdout, addressing elements as loc = i + j*n (same convention
// the kernel used).
void outfunc(int *output, int n){
    printf("%d\n", n);
    const int half = n / 2;
    for (int j = 0; j < half; j++) {
        for (int i = 0; i < half; i++) {
            printf("%d ", output[i + (j * n)]);
        }
        printf("\n");
    }
}
|
12,109 | // ###
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Winter Semester 2015/2016, March 15 - April 15
// ###
// ###
#include <cuda_runtime.h>
#include <iostream>
using namespace std;
// cuda error checking
#define CUDA_CHECK cuda_check(__FILE__,__LINE__)
// Reports any pending CUDA error (with the caller's file/line, supplied by
// the CUDA_CHECK macro) and terminates the program; no-op on success.
void cuda_check(string file, int line)
{
    cudaError_t e = cudaGetLastError();
    if (e == cudaSuccess)
        return;
    cout << endl << file << ", line " << line << ": " << cudaGetErrorString(e) << " (" << e << ")" << endl;
    exit(1);
}
// Device-side helper: returns the sum of two floats.
__device__ float add(float a, float b) {
    const float result = a + b;
    return result;
}
// Elementwise vector addition: d_c[i] = d_a[i] + d_b[i] for all i < n.
__global__ void add_global(float *d_a, float *d_b, float *d_c, int n) {
    const int i = threadIdx.x + blockDim.x * blockIdx.x;
    if (i >= n) return; // guard the grid's tail
    d_c[i] = add(d_a[i], d_b[i]);
}
// Adds two n-element vectors on the CPU and on the GPU and prints both
// results for comparison.
// Fixes vs. previous revision: the grid was hard-coded to a single 32-thread
// block (only correct while n <= 32) — it is now derived from n; and every
// CUDA call is followed by CUDA_CHECK, as the exercise notes require.
int main(int argc, char **argv)
{
    // alloc and init input arrays on host (CPU)
    int n = 20;
    float *a = new float[n];
    float *b = new float[n];
    float *c = new float[n];
    for(int i=0; i<n; i++)
    {
        a[i] = i;
        b[i] = (i%5)+1;
        c[i] = 0;
    }

    // CPU computation
    for(int i=0; i<n; i++) c[i] = a[i] + b[i];

    // print result
    cout << "CPU:"<<endl;
    for(int i=0; i<n; i++) cout << i << ": " << a[i] << " + " << b[i] << " = " << c[i] << endl;
    cout << endl;

    // init c
    for(int i=0; i<n; i++) c[i] = 0;

    // GPU computation
    // allocate GPU memory
    size_t nbytes = n*sizeof(float);
    float *d_a = NULL;
    float *d_b = NULL;
    float *d_c = NULL;
    cudaMalloc(&d_a, nbytes); CUDA_CHECK;
    cudaMalloc(&d_b, nbytes); CUDA_CHECK;
    cudaMalloc(&d_c, nbytes); CUDA_CHECK;
    cudaMemset(d_c, 0, nbytes); CUDA_CHECK;

    // copy host memory to device
    cudaMemcpy( d_a, a, nbytes, cudaMemcpyHostToDevice ); CUDA_CHECK;
    cudaMemcpy( d_b, b, nbytes, cudaMemcpyHostToDevice ); CUDA_CHECK;

    // launch kernel: ceil-div grid so all n elements are covered for any n
    dim3 block = dim3(32,1,1);
    dim3 grid = dim3((n + block.x - 1) / block.x, 1, 1);
    add_global <<<grid,block>>> (d_a, d_b, d_c, n);
    CUDA_CHECK;

    // copy device memory to host (blocking copy also synchronizes the kernel)
    cudaMemcpy( c, d_c, nbytes, cudaMemcpyDeviceToHost ); CUDA_CHECK;

    // free GPU arrays
    cudaFree(d_a); CUDA_CHECK;
    cudaFree(d_b); CUDA_CHECK;
    cudaFree(d_c); CUDA_CHECK;

    // print result
    cout << "GPU:"<<endl;
    for(int i=0; i<n; i++) cout << i << ": " << a[i] << " + " << b[i] << " = " << c[i] << endl;
    cout << endl;

    // free CPU arrays
    delete[] a;
    delete[] b;
    delete[] c;
}
|
12,110 | //
// Created by postaron on 04/04/2019.
//
#include "images.cuh"
|
12,111 | #include "includes.h"
// Circular convolution of length len:
//   target[idx] = sum_i source[i] * kernel[(len + len/2 + idx - i) % len]
// The len + len/2 bias keeps the modulo argument non-negative. One thread
// per output sample.
__global__ void convKernel(const float* source, const float* kernel, float* target, const int len) {
    const int outIdx = blockIdx.x * blockDim.x + threadIdx.x;
    if (outIdx >= len) return;
    float acc = 0.0f;
    for (int tap = 0; tap < len; ++tap) {
        acc += source[tap] * kernel[(len + len / 2 + outIdx - tap) % len]; // positive modulo
    }
    target[outIdx] = acc;
} |
12,112 | #include <cmath>
#include <cuda_runtime_api.h>
#include "../include/MatrixOps.cuh"
namespace blas1{
namespace cudaBlas {
// Naive transpose of a width x height row-major matrix: one thread per
// element, out[col][row] (height-pitched) = in[row][col].
__global__ void naiveTranspose(float *input, float *output, size_t width, size_t height) {
    const unsigned int r = blockIdx.y * blockDim.y + threadIdx.y;
    const unsigned int c = blockIdx.x * blockDim.x + threadIdx.x;
    if (r >= height || c >= width) return;
    output[c * height + r] = input[r * width + c];
}
// Naive element-wise copy of a width x height row-major matrix, one thread
// per element (bandwidth baseline for the tiled variants below).
__global__ void naiveCopy(float *input, float *output, size_t width, size_t height) {
    const unsigned int r = blockIdx.y * blockDim.y + threadIdx.y;
    const unsigned int c = blockIdx.x * blockDim.x + threadIdx.x;
    if (r >= height || c >= width) return;
    const unsigned int idx = r * width + c;
    output[idx] = input[idx];
}
// Tiled transpose through a 32x32 shared-memory tile: loads are row-coalesced
// from input, stores are row-coalesced into output (which is height-pitched).
// NOTE(review): indexes the tile directly with threadIdx and uses
// BLOCK_SIZE in place of blockDim — assumes a (32, 32) thread block; other
// shapes would read unwritten tile entries. TODO confirm launch config.
__global__ void transpose_32(float *input, float *output, size_t width, size_t height) {
const unsigned int BLOCK_SIZE = 32;
// The +1 column pad avoids shared-memory bank conflicts on the
// transposed (column-wise) reads after the barrier.
__shared__ float sharedMem[BLOCK_SIZE][BLOCK_SIZE + 1]; // Bank conflict
unsigned int column_index = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int row_index = blockIdx.y * blockDim.y + threadIdx.y;
if ((column_index < width) && (row_index < height)) {
unsigned element_index = row_index * width + column_index;
sharedMem[threadIdx.y][threadIdx.x] = input[element_index];
}
__syncthreads();
unsigned int local_row = blockIdx.y * BLOCK_SIZE + threadIdx.x; // coalesced access
unsigned int local_column = blockIdx.x * BLOCK_SIZE + threadIdx.y; // coalesced access
if ((local_row < height) && (local_column < width)) {
output[local_column * height + local_row] = sharedMem[threadIdx.x][threadIdx.y];
}
}
// 16x16-tile variant of transpose_32 (same scheme, smaller tile).
// NOTE(review): like its sibling, this assumes blockDim == (16, 16); tile
// indexing and the BLOCK_SIZE-based output indices are only consistent for
// that shape. TODO confirm launch config.
__global__ void transpose_16(float *input, float *output, size_t width, size_t height) {
const unsigned int BLOCK_SIZE = 16;
// +1 pad defeats bank conflicts on the transposed reads below.
__shared__ float sharedMem[BLOCK_SIZE][BLOCK_SIZE + 1]; // Bank conflict
unsigned int column_index = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int row_index = blockIdx.y * blockDim.y + threadIdx.y;
if ((column_index < width) && (row_index < height)) {
unsigned element_index = row_index * width + column_index;
sharedMem[threadIdx.y][threadIdx.x] = input[element_index];
}
__syncthreads();
unsigned int local_row = blockIdx.y * BLOCK_SIZE + threadIdx.x; // coalesced access
unsigned int local_column = blockIdx.x * BLOCK_SIZE + threadIdx.y; // coalesced access
if ((local_row < height) && (local_column < width)) {
output[local_column * height + local_row] = sharedMem[threadIdx.x][threadIdx.y];
}
}
// Transpose using a 32x32 tile processed by a 32x8 thread block: each thread
// handles TILE_SIZE/BLOCK_ROWS = 4 rows of the tile.
// NOTE(review): `width`/`heights` are accepted but unused and there are no
// bounds checks — the row pitch is taken as gridDim.x * TILE_SIZE, so the
// matrix dimensions must be exact multiples of the tile/grid coverage.
// TODO confirm callers guarantee this.
__global__ void transposeRowBlock_32(float *input, float *output, size_t width, size_t heights) {
const unsigned int TILE_SIZE = 32;
const unsigned int BLOCK_ROWS = 8;
// +1 pad avoids bank conflicts on the transposed reads below.
__shared__ float tile[TILE_SIZE][TILE_SIZE + 1];
unsigned int x = blockIdx.x * TILE_SIZE + threadIdx.x;
unsigned int y = blockIdx.y * TILE_SIZE + threadIdx.y;
auto w = gridDim.x * TILE_SIZE;
for (int j = 0; j < TILE_SIZE; j += BLOCK_ROWS) {
tile[threadIdx.y + j][threadIdx.x] = input[(y + j) * w + x];
}
__syncthreads();
auto local_row = blockIdx.y * TILE_SIZE + threadIdx.x;
auto local_column = blockIdx.x * TILE_SIZE + threadIdx.y;
#pragma unroll
for (int j = 0; j < TILE_SIZE; j += BLOCK_ROWS) {
output[(local_column + j) * w + local_row] = tile[threadIdx.x][threadIdx.y + j];
}
}
// 16x16-tile variant of transposeRowBlock_32 with the same 8-row stepping:
// each thread handles TILE_SIZE/BLOCK_ROWS = 2 rows of the tile.
// NOTE(review): as in the _32 variant, `width`/`heights` are unused, there
// are no bounds checks, and the pitch is gridDim.x * TILE_SIZE — dimensions
// must exactly match the grid coverage. TODO confirm callers guarantee this.
__global__ void transposeRowBlock_16(float *input, float *output, size_t width, size_t heights) {
const unsigned int TILE_SIZE = 16;
const unsigned int BLOCK_ROWS = 8;
// +1 pad avoids bank conflicts on the transposed reads below.
__shared__ float tile[TILE_SIZE][TILE_SIZE + 1];
unsigned int x = blockIdx.x * TILE_SIZE + threadIdx.x;
unsigned int y = blockIdx.y * TILE_SIZE + threadIdx.y;
auto w = gridDim.x * TILE_SIZE;
for (int j = 0; j < TILE_SIZE; j += BLOCK_ROWS) {
tile[threadIdx.y + j][threadIdx.x] = input[(y + j) * w + x];
}
__syncthreads();
auto local_row = blockIdx.y * TILE_SIZE + threadIdx.x;
auto local_column = blockIdx.x * TILE_SIZE + threadIdx.y;
#pragma unroll
for (int j = 0; j < TILE_SIZE; j += BLOCK_ROWS) {
output[(local_column + j) * w + local_row] = tile[threadIdx.x][threadIdx.y + j];
}
}
// Matrix copy staged through a 32x32 shared tile with swapped thread/tile
// indices on the write side (copy benchmark mirroring the transpose's
// access pattern; the output layout is identical to the input).
// NOTE(review): assumes blockDim == (32, 32) so that the threadIdx-swapped
// tile read pairs with the BLOCK_SIZE-based output index. TODO confirm
// launch config.
__global__ void MatrixCopy_32(float *input, float *output, size_t width, size_t height) {
const unsigned int BLOCK_SIZE = 32;
// +1 pad defeats bank conflicts on the swapped-index reads below.
__shared__ float sharedMem[BLOCK_SIZE][BLOCK_SIZE + 1]; // Bank conflict
unsigned int column_index = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int row_index = blockIdx.y * blockDim.y + threadIdx.y;
if ((column_index < width) && (row_index < height)) {
unsigned element_index = row_index * width + column_index;
sharedMem[threadIdx.y][threadIdx.x] = input[element_index];
}
__syncthreads();
unsigned int local_row = blockIdx.y * BLOCK_SIZE + threadIdx.x; // coalesced access
unsigned int local_column = blockIdx.x * BLOCK_SIZE + threadIdx.y; // coalesced access
if ((local_row < height) && (local_column < width)) {
output[local_row * width + local_column] = sharedMem[threadIdx.x][threadIdx.y];
}
}
// 16x16-tile variant of MatrixCopy_32: copies a width x height matrix staged
// through a shared tile with swapped thread indices on the write side.
// Fix: BLOCK_SIZE was left at 32 (copy/paste from MatrixCopy_32). With the
// 16x16 thread blocks this _16 variant is written for, the
// blockIdx * BLOCK_SIZE output indices then addressed the wrong tile; it is
// now 16, matching transpose_16 / transposeRowBlock_16.
__global__ void MatrixCopy_16(float *input, float *output, size_t width, size_t height) {
    const unsigned int BLOCK_SIZE = 16;
    __shared__ float sharedMem[BLOCK_SIZE][BLOCK_SIZE + 1]; // +1 pad: bank-conflict avoidance
    unsigned int column_index = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int row_index = blockIdx.y * blockDim.y + threadIdx.y;
    if ((column_index < width) && (row_index < height)) {
        unsigned element_index = row_index * width + column_index;
        sharedMem[threadIdx.y][threadIdx.x] = input[element_index];
    }
    __syncthreads();
    unsigned int local_row = blockIdx.y * BLOCK_SIZE + threadIdx.x; // coalesced access
    unsigned int local_column = blockIdx.x * BLOCK_SIZE + threadIdx.y; // coalesced access
    if ((local_row < height) && (local_column < width)) {
        output[local_row * width + local_column] = sharedMem[threadIdx.x][threadIdx.y];
    }
}
}
} |
12,113 | #include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#define N 300
#define NUM_THREADS 16
struct timeval start, end;
// c = a * b for N x N row-major int matrices; one thread per output element.
__global__ void matmul(int* a, int* b, int* c) {
    const int r = blockIdx.y * blockDim.y + threadIdx.y;
    const int cidx = blockIdx.x * blockDim.x + threadIdx.x;
    if (r < N && cidx < N) {
        int acc = 0;
        for (int k = 0; k < N; k++) {
            acc += a[r * N + k] * b[k * N + cidx];
        }
        c[r * N + cidx] = acc;
    }
}
// Multiplies two random N x N int matrices on the GPU, prints the checksum
// of the result and the wall-clock kernel time.
// Fixes vs. previous revision: the three N*N int arrays (~360 KB each) lived
// on main's stack, which overflows platforms with ~1 MB stacks — they are
// heap-allocated now; deprecated cudaThreadSynchronize() replaced with
// cudaDeviceSynchronize(); host buffers are freed before exit.
int main() {
    int* a = new int[N*N];
    int* b = new int[N*N];
    int* c = new int[N*N];
    int *dev_a, *dev_b, *dev_c;
    cudaMalloc((void**)&dev_a, N*N*sizeof(int));
    cudaMalloc((void**)&dev_b, N*N*sizeof(int));
    cudaMalloc((void**)&dev_c, N*N*sizeof(int));
    srand(5);
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            a[i*N+j] = rand();
            b[i*N+j] = rand();
            c[i*N+j] = 0;
        }
    }
    cudaMemcpy(dev_a, a, N*N*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, N*N*sizeof(int), cudaMemcpyHostToDevice);
    // ceil-div grid so all N x N outputs are covered
    dim3 threads(NUM_THREADS, NUM_THREADS);
    dim3 blocks((N+NUM_THREADS-1)/NUM_THREADS, (N+NUM_THREADS-1)/NUM_THREADS);
    gettimeofday(&start, NULL);
    matmul<<<blocks, threads>>>(dev_a, dev_b, dev_c);
    cudaDeviceSynchronize();
    gettimeofday(&end, NULL);
    cudaMemcpy(c, dev_c, N*N*sizeof(int), cudaMemcpyDeviceToHost);
    // find sum
    int sum = 0;
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            sum += c[i*N+j];
        }
    }
    std::cout << "sum is " << sum << std::endl;
    printf("Seconds elapsed: %f\n",
           (end.tv_sec*1000000.0 + end.tv_usec - start.tv_sec*1000000.0 -
            start.tv_usec) / 1000000.0);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    delete[] a;
    delete[] b;
    delete[] c;
    return 0;
}
|
12,114 | #include <algorithm>
#include <cassert>
#include <cstdlib>
#include <functional>
#include <iostream>
#include <vector>
using namespace std;
__global__ void matrixMul(const int *__restrict a, const int *__restrict b,int *__restrict c, int N);
void verify_result(vector<int> &a, vector<int> &b, vector<int> &c, int N);
void afficheMatrix(vector<int>& m,int line, int colone);
// Driver: multiplies two random 8x8 int matrices on the GPU, verifies the
// result against a CPU reference, and prints all three matrices.
int main()
{
// N and the thread count are powers of two, so BLOCKS = N / THREADS
// divides evenly and the kernel needs no tail handling for this launch.
int N = 1 << 3;
size_t bytes = N * N * sizeof(int);
// CPU
vector<int> h_a(N * N);
vector<int> h_b(N * N);
vector<int> h_c(N * N);
generate(h_a.begin(), h_a.end(), []() { return rand() % 100; });
generate(h_b.begin(), h_b.end(), []() { return rand() % 100; });
// GPU
int *d_a, *d_b, *d_c;
cudaMalloc(&d_a, bytes);
cudaMalloc(&d_b, bytes);
cudaMalloc(&d_c, bytes);
// CPU ---> GPU
cudaMemcpy(d_a, h_a.data(), bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b.data(), bytes, cudaMemcpyHostToDevice);
int THREADS = 1 << 2;
int BLOCKS = N / THREADS;
dim3 threads(THREADS, THREADS);
dim3 blocks(BLOCKS, BLOCKS);
matrixMul<<<blocks, threads>>>(d_a, d_b, d_c, N);
// GPU ---> CPU (blocking copy; also waits for the kernel to finish)
cudaMemcpy(h_c.data(), d_c, bytes, cudaMemcpyDeviceToHost);
verify_result(h_a, h_b, h_c, N);
cout << "terminé avec succès"<<endl;
afficheMatrix(h_a,N,N);
afficheMatrix(h_b,N,N);
afficheMatrix(h_c,N,N);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
}
// c = a * b for N x N row-major int matrices; one thread per output element.
// Fixes vs. previous revision: added a bounds guard so launches whose grid
// over-covers N (N not a multiple of the block size) cannot write out of
// range, and the dot product accumulates in a register instead of
// read-modify-writing c[] in global memory every iteration.
__global__ void matrixMul(const int *__restrict a, const int *__restrict b,int *__restrict c, int N)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= N || col >= N)
        return;
    int acc = 0;
    for (int k = 0; k < N; k++)
        acc += a[row * N + k] * b[k * N + col];
    c[row * N + col] = acc;
}
// Checks that c equals the product a * b (all N x N, row-major) by
// recomputing every element on the host; asserts on the first mismatch.
void verify_result(std::vector<int> &a, std::vector<int> &b, std::vector<int> &c, int N)
{
    for (int row = 0; row < N; row++) {
        for (int col = 0; col < N; col++) {
            int expected = 0;
            for (int k = 0; k < N; k++)
                expected += a[row * N + k] * b[k * N + col];
            assert(expected == c[row * N + col]);
        }
    }
}
// Print a line x colone row-major matrix to stdout, one row per line,
// followed by a separator rule.
// Fix vs original: every column printed m[i] (the row's first element of the
// flat array) instead of m[i * colone + j].
void afficheMatrix(vector<int>& m,int line, int colone)
{
    for (int i = 0; i < line; i++)
    {
        for (int j = 0; j < colone; j++)
        {
            cout << m[i * colone + j] << " ";
        }
        cout << endl;
    }
    cout << "\n_______________________________________" << endl;
}
|
12,115 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define BLOCK_SIZE 16
// P = M * N for Width x Width row-major float matrices; one thread computes
// one output element. Out-of-range threads exit immediately.
__global__ void MatrixMulKernel(float *M, float *N, float *P, int Width)
{
    int Row = blockIdx.y * blockDim.y + threadIdx.y;
    int Col = blockIdx.x * blockDim.x + threadIdx.x;
    if (Row >= Width || Col >= Width)
        return;
    float acc = 0;
    for (int k = 0; k < Width; ++k)
        acc += M[Row * Width + k] * N[k * Width + Col];
    P[Row * Width + Col] = acc;
}
// CPU reference: P = M * N for Width x Width row-major float matrices.
// Fix vs original: the accumulator was declared `int tmp = 0.0;`, so every
// partial float product was truncated to an integer before accumulation,
// producing wrong results for any non-integral data.
void cpu_matrix_mult(float *M, float *N, float *P, int Width) {
    for (int i = 0; i < Width; ++i)
    {
        for (int j = 0; j < Width; ++j)
        {
            float tmp = 0.0f;
            for (int k = 0; k < Width; ++k)
            {
                tmp += M[i * Width + k] * N[k * Width + j];
            }
            P[i * Width + j] = tmp;
        }
    }
}
// Benchmark square float matrix multiply (Width x Width) on GPU vs CPU and
// validate the results element-wise.
// Fixes vs original: the D2H copy used sizeof(int) for a float buffer (same
// size today, but a latent type bug), and the deprecated
// cudaThreadSynchronize() is replaced by cudaDeviceSynchronize().
int main()
{
    int Width =1024;
    srand(3333);
    // Pinned host buffers for faster transfers.
    float *h_a=0, *h_b=0, *h_c=0, *h_cc=0;
    cudaMallocHost((void **) &h_a, sizeof(float)*Width*Width);
    cudaMallocHost((void **) &h_b, sizeof(float)*Width*Width);
    cudaMallocHost((void **) &h_c, sizeof(float)*Width*Width);
    cudaMallocHost((void **) &h_cc, sizeof(float)*Width*Width);
    if(h_a==0 || h_b==0 || h_c==0 || h_cc==0)
    {
        printf("No asignacion de memoria\n");
        return 1;
    }
    // Random inputs in [0, 1023].
    for (int i = 0; i < Width; ++i) {
        for (int j = 0; j < Width; ++j) {
            h_a[i * Width + j] = rand()%1024;
        }
    }
    for (int i = 0; i < Width; ++i) {
        for (int j = 0; j < Width; ++j) {
            h_b[i * Width + j] = rand()%1024;
        }
    }
    float gpu_time_ms, cpu_time_ms;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    float *d_a=0, *d_b=0, *d_c=0;
    cudaMalloc((void **) &d_a, sizeof(float)*Width*Width);
    cudaMalloc((void **) &d_b, sizeof(float)*Width*Width);
    cudaMalloc((void **) &d_c, sizeof(float)*Width*Width);
    if(d_a==0 || d_b==0 || d_c==0)
    {
        printf("No asignacion Gpu\n");
        return 1;
    }
    cudaMemcpy(d_a, h_a, sizeof(float)*Width*Width, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, sizeof(float)*Width*Width, cudaMemcpyHostToDevice);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid((int)ceil(float(Width)/dimBlock.x), (int)ceil(float(Width)/dimBlock.y));
    MatrixMulKernel<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, Width);
    // sizeof(float), not sizeof(int): the buffers hold floats.
    cudaMemcpy(h_c, d_c, sizeof(float)*Width*Width, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&gpu_time_ms, start, stop);
    printf("Tiempo transcurrido en GPU: %f ms.\n\n", gpu_time_ms);
    // CPU version
    cudaEventRecord(start, 0);
    cpu_matrix_mult(h_a, h_b, h_cc, Width);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&cpu_time_ms, start, stop);
    printf("Tiempo transcurrido en CPU: %f ms.\n\n", cpu_time_ms);
    // Validate results.
    // NOTE(review): exact float equality; GPU FMA contraction can introduce
    // rounding differences vs the CPU loop -- confirm a tolerance is not needed.
    int all_ok = 1;
    for (int i = 0; i < Width; ++i)
    {
        for (int j = 0; j < Width; ++j)
        {
            if(h_c[i*Width + j] != h_cc[i*Width + j])
            {
                all_ok = 0;
            }
        }
    }
    if(all_ok)
    {
        printf("Todo bien!!, speedup = %f\n", cpu_time_ms / gpu_time_ms);
    }
    else
    {
        printf("Error\n");
    }
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cudaFreeHost(h_a);
    cudaFreeHost(h_b);
    cudaFreeHost(h_c);
    cudaFreeHost(h_cc);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
|
12,116 | #include <cuda_runtime.h>
#include <stdio.h>
#include <sys/time.h>
// Wall-clock time in seconds (microsecond resolution) via gettimeofday.
double cpuSecond() {
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec + (double)now.tv_usec * 1.e-6;
}
// Element-wise C = A + B over an nx*ny row-major matrix, one thread per element.
// Fix vs original: adds the same bounds guard MatAdd2D already has -- without
// it, any launch whose grid overshoots nx*ny writes out of bounds.
__global__
void MatAdd1D(int* A, int* B, int* C, int nx, int ny)
{
    int ix = threadIdx.x + blockIdx.x * blockDim.x;
    int iy = threadIdx.y + blockIdx.y * blockDim.y;
    int idx = iy * nx + ix;
    if (ix < nx && iy < ny)
        C[idx] = A[idx] + B[idx];
}
// Element-wise C = A + B over an nx*ny row-major matrix; 2D grid of 2D blocks,
// one thread per element with a bounds guard.
__global__
void MatAdd2D(int* A, int* B, int* C, int nx, int ny)
{
    int col = threadIdx.x + blockIdx.x * blockDim.x;
    int row = threadIdx.y + blockIdx.y * blockDim.y;
    if (col >= nx || row >= ny)
        return;
    int flat = row * nx + col;
    C[flat] = A[flat] + B[flat];
}
// Element-wise C = A + B; 1D grid of 1D blocks over columns, with each thread
// walking every row of its column.
__global__
void MatAdd1D1D(int* A, int* B, int* C, int nx, int ny)
{
    unsigned int col = threadIdx.x + blockIdx.x * blockDim.x;
    if (col >= nx)
        return;
    for (int row = 0; row < ny; ++row)
    {
        int flat = row * nx + col;
        C[flat] = A[flat] + B[flat];
    }
}
// Element-wise C = A + B; 2D grid of 1D blocks: blockIdx.y selects the row,
// the x dimension covers columns.
__global__
void MatAdd2D1D(int* A, int* B, int* C, int nx, int ny)
{
    unsigned int col = threadIdx.x + blockIdx.x * blockDim.x;
    unsigned int row = blockIdx.y;
    if (col < nx && row < ny)
    {
        unsigned int flat = row * nx + col;
        C[flat] = A[flat] + B[flat];
    }
}
// CPU reference: C = A + B element-wise over an nx*ny row-major matrix.
void sumMatrixOnHost (int *A, int *B, int *C, const int nx, const int ny)
{
    for (int row = 0; row < ny; row++)
    {
        int base = row * nx;
        for (int col = 0; col < nx; col++)
        {
            C[base + col] = A[base + col] + B[base + col];
        }
    }
}
// Compare the CPU reference against the GPU result element-wise and report
// the first mismatch (silent when the arrays agree).
// Fix vs original: the arrays hold ints but were printed with %f, which is
// undefined behavior; the epsilon compare on integers reduced to inequality.
void checkResult(int *hostRef, int *gpuRef, const int N)
{
    for (int i = 0; i < N; i++)
    {
        if (hostRef[i] != gpuRef[i])
        {
            printf("host %d gpu %d ", hostRef[i], gpuRef[i]);
            printf("Arrays do not match.\n\n");
            break;
        }
    }
}
// Fill m[0..size) with pseudo-random digits in [0, 10).
void GenerateMatrix(int* m, int size)
{
    for (int idx = 0; idx < size; ++idx)
        m[idx] = rand() % 10;
}
// Compare four launch geometries (1D blocks per row, 2D grid, 1D grid of 1D
// blocks, 2D grid of 1D blocks) for element-wise matrix addition against a
// CPU reference, timing each variant.
// Fixes vs original: the 2D variant copied from and freed d_C1D (already
// freed above) instead of its own buffer d_C2D, and cpu_C was never freed.
int main( void ) {
double time1, time2, time3, time4;
// size of matrix
unsigned int nx = 1<<10; // columns
unsigned int ny = 1<<10; // rows
int size = nx*ny;
size_t sizeBytes = size * sizeof(int);
int* h_A = (int*)malloc(sizeBytes);
int* h_B = (int*)malloc(sizeBytes);
int* h_C = (int*)malloc(sizeBytes);
int* cpu_C = (int*)malloc(sizeBytes);
GenerateMatrix(h_A, size);
GenerateMatrix(h_B, size);
sumMatrixOnHost(h_A, h_B, cpu_C, nx, ny);
int* d_A;
int* d_B;
cudaMalloc((void**)&d_A, sizeBytes);
cudaMalloc((void**)&d_B, sizeBytes);
cudaMemcpy(d_A, h_A, sizeBytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, sizeBytes, cudaMemcpyHostToDevice);
printf("Started succesfylly\n");
/* Variant: 1D (one block per row, one thread per column) */
int* d_C1D;
cudaMalloc((void**)&d_C1D, sizeBytes);
int BlockPerGrid = ny; // rows
int ThreadsPerBlock = nx; // columns
cudaDeviceSynchronize();
time1 = cpuSecond();
MatAdd1D<<< BlockPerGrid, ThreadsPerBlock >>>(d_A, d_B, d_C1D, nx, ny);
cudaDeviceSynchronize();
time1 = cpuSecond() - time1;
printf("MattAdd1D <<<(%d, %d)>>> elapsed %f ms\n", BlockPerGrid,
ThreadsPerBlock, time1);
cudaMemcpy(h_C, d_C1D, sizeBytes, cudaMemcpyDeviceToHost);
checkResult(cpu_C, h_C, size);
cudaFree(d_C1D);
/* Variant: 2D grid of 2D blocks */
int* d_C2D;
cudaMalloc((void**)&d_C2D, sizeBytes);
int dimx = 32;
int dimy = 16;
dim3 block(dimx, dimy);
dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
cudaDeviceSynchronize();
time2 = cpuSecond();
MatAdd2D<<<grid, block>>>(d_A, d_B, d_C2D, nx, ny);
cudaDeviceSynchronize();
time2 = cpuSecond() - time2;
printf("MattAdd2D <<<(%d, %d), (%d, %d)>>> elapsed %f ms\n", grid.x,
grid.y,
block.x, block.y, time2);
// Fixed: copy from and free d_C2D (the original reused the freed d_C1D here).
cudaMemcpy(h_C, d_C2D, sizeBytes, cudaMemcpyDeviceToHost);
checkResult(cpu_C, h_C, size);
cudaFree(d_C2D);
/* Variant: 1D grid of 1D blocks */
int* d_C1D1D;
cudaMalloc((void**)&d_C1D1D, sizeBytes);
block = dim3{128,1};
grid = dim3{(nx+block.x-1)/block.x,1};
cudaDeviceSynchronize();
time3 = cpuSecond();
MatAdd1D1D <<<grid, block>>> (d_A, d_B, d_C1D1D, nx, ny);
cudaDeviceSynchronize();
time3 = cpuSecond() - time3;
printf("MatAdd1D1D <<<(%d, %d), (%d, %d)>>> elapsed %f ms\n", grid.x,
grid.y,
block.x, block.y, time3);
cudaMemcpy(h_C, d_C1D1D, sizeBytes, cudaMemcpyDeviceToHost);
checkResult(cpu_C, h_C, size);
cudaFree(d_C1D1D);
/* Variant: 2D grid of 1D blocks */
int* d_C2D1D;
cudaMalloc((void**)&d_C2D1D, sizeBytes);
block = dim3{256};
grid = dim3{(nx + block.x - 1) / block.x,ny};
cudaDeviceSynchronize();
time4 = cpuSecond();
MatAdd2D1D <<<grid, block>>> (d_A, d_B, d_C2D1D, nx, ny);
cudaDeviceSynchronize();
time4 = cpuSecond() - time4;
printf("MatAdd2D1D <<<(%d, %d), (%d, %d)>>> elapsed %f ms\n", grid.x,
grid.y,
block.x, block.y, time4);
cudaMemcpy(h_C, d_C2D1D, sizeBytes, cudaMemcpyDeviceToHost);
checkResult(cpu_C, h_C, size);
cudaFree(d_C2D1D);
cudaFree(d_A);
cudaFree(d_B);
free(h_A);
free(h_B);
free(h_C);
free(cpu_C);
return 0;
}
|
12,117 | /* *
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#define END_OF_VEC -1
// Histogram helper: increments table[p] for each index p in the
// END_OF_VEC-terminated index vector pointt.
void make_distance_makecompositiontable_p( short *table, int *pointt )
{
    for (int point = *pointt++; point != END_OF_VEC; point = *pointt++)
        table[point]++;
}
|
12,118 | #include "includes.h"
// target[r] = sum of row r of the nrow x ncol row-major matrix m.
// One thread per row; threads past nrow fall through the guard.
__global__ void _mat_sum_row(float *m, float *target,int nrow, int ncol){
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= nrow)
        return;
    float acc = 0;
    int base = row * ncol;
    for (int c = 0; c < ncol; ++c) {
        acc += m[base + c];
    }
    target[row] = acc;
}
12,119 | #include<bits/stdc++.h>
#include<device_launch_parameters.h>
#include<cuda_runtime.h>
using namespace std;
#define SIZE 256
#define SSIZE SIZE*4 // sizeof(int)
// Block-level tree reduction (sum): each block writes the sum of its
// blockDim.x inputs to v_r[blockIdx.x].
// NOTE(review): no tid < n guard -- assumes the launch exactly covers v.
// SSIZE (SIZE*4) over-allocates shared memory; only blockDim.x slots are used.
__global__ void sum_reduction(int *v,int *v_r){
__shared__ int partial_sum[SSIZE];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// stage this thread's element into shared memory
partial_sum[threadIdx.x] = v[tid];
__syncthreads();
// halve the active range each step, folding the upper half into the lower
for(int s = blockDim.x/2;s>0;s=s/2){
if(threadIdx.x < s){
partial_sum[threadIdx.x] += partial_sum[threadIdx.x+s];
}
__syncthreads();
}
// thread 0 publishes the block's partial sum
if(threadIdx.x ==0){
v_r[blockIdx.x] = partial_sum[0];
}
}
// Block-level tree reduction (max): v_r[blockIdx.x] = max of the block's
// blockDim.x inputs. Same layout assumptions as sum_reduction above:
// launch must exactly cover v (no bounds guard).
__global__ void max_reduction(int *v,int *v_r){
__shared__ int partial_sum[SSIZE];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
partial_sum[threadIdx.x] = v[tid];
__syncthreads();
// pairwise max, halving the active range each step
for(int s = blockDim.x/2;s>0;s=s/2){
if(threadIdx.x < s){
partial_sum[threadIdx.x] = max(partial_sum[threadIdx.x],partial_sum[threadIdx.x+s]);
}
__syncthreads();
}
if(threadIdx.x ==0){
v_r[blockIdx.x] = partial_sum[0];
}
}
// Per-block sum of squared deviations from *mean (the variance numerator):
// v_r[blockIdx.x] = sum over the block of (v[i] - *mean)^2.
// NOTE(review): the float difference is squared and stored back into the int
// shared array, truncating each term toward zero -- confirm that is intended.
__global__ void variance(int *v,int *v_r,float *mean){
__shared__ int partial_sum[SSIZE];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
partial_sum[threadIdx.x] = v[tid];
__syncthreads();
// square the deviation in place (float math truncated back to int)
partial_sum[threadIdx.x] = (partial_sum[threadIdx.x] - *mean) * (partial_sum[threadIdx.x] - *mean);
__syncthreads();
// tree reduction over the block
for(int s = blockDim.x/2;s>0;s=s/2){
if(threadIdx.x < s){
partial_sum[threadIdx.x] += partial_sum[threadIdx.x+s];
}
__syncthreads();
}
if(threadIdx.x ==0){
v_r[blockIdx.x] = partial_sum[0];
}
}
// Block-level tree reduction (min): v_r[blockIdx.x] = min of the block's
// blockDim.x inputs. Launch must exactly cover v (no bounds guard).
__global__ void min_reduction(int *v,int *v_r){
__shared__ int partial_sum[SSIZE];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
partial_sum[threadIdx.x] = v[tid];
__syncthreads();
// pairwise min, halving the active range each step
for(int s = blockDim.x/2;s>0;s=s/2){
if(threadIdx.x < s){
partial_sum[threadIdx.x] = min(partial_sum[threadIdx.x],partial_sum[threadIdx.x+s]);
}
__syncthreads();
}
if(threadIdx.x ==0){
v_r[blockIdx.x] = partial_sum[0];
}
}
// Populate v[0..n) with pseudo-random values in [0, 1000).
void inititialise(int* v,int n){
    int idx = 0;
    while (idx < n) {
        v[idx++] = rand() % 1000;
    }
}
// Compute the variance numerator of n random ints on CPU and GPU and compare
// their run times.
// Fixes vs original: `minimum / n` was integer division, truncating the mean
// used by both CPU and GPU paths; the sum variable was misleadingly named
// `minimum`; and every host/device buffer was leaked.
int main(){
int n = SIZE*SIZE;
float elapsed_cpu, elapsed_gpu;
clock_t t1, t2;
int thread_block_size = SIZE;
int num_blocks = n / thread_block_size;
int *h_v,*d_v,*h_v_r,*d_v_r;
float *d_mean;
h_v = (int*)malloc(n*sizeof(int));
cudaMalloc(&d_v,n*sizeof(int));
h_v_r = (int*)malloc(num_blocks*sizeof(int));
cudaMalloc(&d_v_r,num_blocks*sizeof(int));
cudaMalloc((void**)&d_mean,sizeof(float));
inititialise(h_v,n);
// Sum of all elements.
int total = 0;
for(int i =0;i<n;i++){
total = total+h_v[i];
}
//cout<<total<<endl;
// Float division: integer division here truncated the mean.
float mean = (float)total / n;
int var = 0;
t1 = clock();
for(int i =0;i<n;i++){
var = var + (h_v[i]-mean)*(h_v[i]-mean);
}
cout<<var<<endl;
t2 = clock();
elapsed_cpu = ((float)t2 - (float)t1) / CLOCKS_PER_SEC * 1000; //cpu elapsed time in ms
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
cudaMemcpy(d_v,h_v,n*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(d_mean,&mean,sizeof(float),cudaMemcpyHostToDevice);
// Stage 1: per-block squared-deviation sums; stage 2: reduce the partials.
variance<<<num_blocks,thread_block_size>>>(d_v,d_v_r,d_mean);
sum_reduction<<<1,thread_block_size>>>(d_v_r,d_v_r);
cudaMemcpy(h_v_r,d_v_r,thread_block_size*sizeof(int),cudaMemcpyDeviceToHost);
cout<<h_v_r[0]<<endl;
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_gpu, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cout<<elapsed_cpu<<endl;
cout<<elapsed_gpu<<endl;
cout<<"speedup"<<elapsed_cpu/elapsed_gpu<<endl;
// Release host and device buffers (the original leaked all of them).
free(h_v);
free(h_v_r);
cudaFree(d_v);
cudaFree(d_v_r);
cudaFree(d_mean);
return 0;
}
|
12,120 | #include "includes.h"
// Logically negate row (idx-1) of a row-major bool matrix with N columns,
// using a grid-stride loop so any launch geometry covers all N elements.
// Fix vs original: the loop index was a signed int compared against size_t N,
// a signed/unsigned mismatch that overflows for N > INT_MAX.
__global__ void Not( bool * x, size_t idx, size_t N)
{
    size_t base = (idx - 1) * N;
    size_t stride = (size_t)blockDim.x * gridDim.x;
    for (size_t i = (size_t)blockIdx.x * blockDim.x + threadIdx.x; i < N; i += stride)
    {
        x[base + i] = ! x[base + i] ;
    }
    return;
}
12,121 | #include "includes.h"
// Fill v[i] = i for every i < size.
// Fixes vs original: the kernel used only threadIdx.x (so it was only correct
// for a single-block launch) and ignored the `size` parameter entirely,
// performing no bounds check.
__global__ void fill( int * v, std::size_t size )
{
    // Global thread id works for any 1D grid, not just one block.
    std::size_t tid = (std::size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if ( tid < size )
        v[ tid ] = (int)tid;
}
12,122 | #include <time.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
// One explicit-Euler step, one independent ODE per thread:
// y_i <- y_i + delta * (4*t_i - y_i + 3 + i), guarded for myID < N.
__global__ void GPUEuler2(float *y, float t_i, float delta,int N) {
int myID = threadIdx.x + blockDim.x * blockIdx.x;
if(myID < N) {
y[myID] = y[myID] + delta * (4*t_i - y[myID]+3+myID);
}
}
// Time an explicit-Euler sweep on the GPU for problem sizes 10^4 .. 10^8.
// Fixes vs original: malloc'd sizeof(float)*m+1 BYTES (operator-precedence
// bug) for an array of m+1 floats, overflowing the heap buffer roughly 4x;
// the per-iteration CUDA events are now destroyed as well.
int main(int argc, char** argv) {
int hilos2b = 256,bloque2b;
float tiempoGPU2b, t_i2b;
float *dev_e2b, *hst_y2b;
cudaEvent_t start2b, end2b;
printf("seccion 2.b\n");
for (int j=4;j<9;j++){
int m=pow(10,j);
// (m+1) floats -- parenthesized; the original allocated sizeof(float)*m+1 bytes.
hst_y2b = (float*) malloc(sizeof(float)*(m+1));
cudaMalloc((void**) &dev_e2b,(m+1)*sizeof(float));
bloque2b = ceil((float) (m+1) /hilos2b);
for(int i=0;i<m+1;i++){
hst_y2b[i]=i;
}
cudaEventCreate(&start2b);
cudaEventCreate(&end2b);
cudaEventRecord(start2b,0);
cudaMemcpy(dev_e2b, hst_y2b, (m+1)*sizeof(float), cudaMemcpyHostToDevice);
float n=powf(10,3);
// 1001 Euler steps over t in [0, 1]
for (int i=0;i<n+1;i++){
t_i2b = i/n;
GPUEuler2<<<bloque2b,hilos2b>>>(dev_e2b,t_i2b,1/n,m+1);
}
cudaEventRecord(end2b,0);
cudaEventSynchronize(end2b);
cudaEventElapsedTime(&tiempoGPU2b,start2b,end2b);
printf("%f\n",tiempoGPU2b);
cudaEventDestroy(start2b);
cudaEventDestroy(end2b);
cudaFree(dev_e2b);
free(hst_y2b);
}
return 0;
}
|
12,123 | #define INTERVAL 16777216
#define ITEMS_IN_THREAD 16
#define THREADS_PER_BLOCK 16
#include <stdio.h>
#include <stdlib.h>
#include <ctime>
#include <iostream>
#include <random>
#include <vector>
using namespace std;
// Monte-Carlo pi: write 1 to dev_threads_num[tid] when the point
// (dev_randX[tid], dev_randY[tid]) falls inside the unit quarter-circle,
// else 0. One thread per sample; the launch must exactly cover the arrays.
__global__ void count_pi_1( float *dev_randX, float *dev_randY, int *dev_threads_num ) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x ;
    float x = dev_randX[tid];
    float y = dev_randY[tid];
    dev_threads_num[tid] = (x * x + y * y <= 1.0) ? 1 : 0;
}
// Monte-Carlo estimate of pi on the GPU; times the full pipeline with events.
// Fixes vs original: removed the unused `test` scratch buffers (an extra
// 64 MB device allocation plus a pointless D2H copy) and released every
// host/device allocation, which were all leaked.
int main()
{
    vector<float> randX(INTERVAL);
    vector<float> randY(INTERVAL);
    srand((unsigned)time(NULL));
    // Uniform samples in [0, 1] x [0, 1].
    for (int i = 0; i < INTERVAL; i++) {
        randX[i] = float(rand()) / RAND_MAX;
        randY[i] = float(rand()) / RAND_MAX;
    }
    // start GPU timing
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    // send data to GPU
    size_t size = INTERVAL * sizeof(float);
    float *dev_randX;
    float *dev_randY;
    cudaMalloc((void**)&dev_randX, size);
    cudaMalloc((void**)&dev_randY, size);
    cudaMemcpy(dev_randX, &randX.front(), size, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_randY, &randY.front(), size, cudaMemcpyHostToDevice);
    int block_num = INTERVAL / THREADS_PER_BLOCK ;
    int *dev_threads_num;
    cudaMalloc((void**)&dev_threads_num, INTERVAL * sizeof(int) );
    count_pi_1 <<<block_num, THREADS_PER_BLOCK >>> ( dev_randX, dev_randY, dev_threads_num );
    int* threads_num = (int*)malloc( INTERVAL * sizeof(int) );
    cudaMemcpy(threads_num, dev_threads_num, INTERVAL * sizeof(int), cudaMemcpyDeviceToHost);
    // count hits on the host
    int g_count = 0;
    for (int i = 0; i < INTERVAL ; i++) {
        g_count += threads_num[i];
    }
    // end GPU timing
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float t_gpu1;
    cudaEventElapsedTime(&t_gpu1, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // pi ~= 4 * (hits inside the quarter circle) / samples
    float g_num = float(g_count) * 4.0 / INTERVAL;
    cout << "GPU_1 Time" << endl;
    cout << g_num << endl;
    cout << "time = " << t_gpu1 << " ms" << endl;
    // release resources (the original leaked everything)
    cudaFree(dev_randX);
    cudaFree(dev_randY);
    cudaFree(dev_threads_num);
    free(threads_num);
    return 0;
}
12,124 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
// Interleaved addressing with divergent branching
// Block-level sum reduction, interleaved addressing with divergent branching:
// each block reduces blockDim.x elements of d_in into d_out[blockIdx.x].
// Requires blockDim.x * sizeof(int) dynamic shared memory and a launch that
// exactly covers d_in (no bounds guard).
__global__ void reduce_kernel0(int *d_out, int *d_in)
{
extern __shared__ int s_data[];
// thread ID inside the block
unsigned int tid = threadIdx.x;
// global ID across all blocks
unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x;
// Copy elements from global memory into per-block shared memory
s_data[tid] = d_in[gid];
// Ensure all elements have been copied into shared memory
__syncthreads();
// s = 1, 2, 4, 8, ..... blockDim.x / 2
for (unsigned int s = 1; s < blockDim.x; s <<= 1) {
// tid % (s << 1) leaves ever more lanes of each warp idle -> divergence
if (tid % (s << 1) == 0) {
s_data[tid] += s_data[tid + s];
}
// Ensure all threads in the block finish add in this round
__syncthreads();
}
// write the reduction sum back to the global memory
if (tid == 0) {
d_out[blockIdx.x] = s_data[0];
}
}
// Interleaved addressing with bank conflicts
// Block-level sum reduction, interleaved addressing without per-thread modulo:
// the strided index removes the divergent `tid %` test of kernel0 but makes
// active threads hit shared memory with a growing stride (the classic
// "bank conflict" variant). Same launch contract as reduce_kernel0.
__global__ void reduce_kernel1(int *d_out, int *d_in)
{
extern __shared__ int s_data[];
// thread ID inside the block
unsigned int tid = threadIdx.x;
// global ID across all blocks
unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x;
// Copy elements from global memory into per-block shared memory
s_data[tid] = d_in[gid];
// Ensure all elements have been copied into shared memory
__syncthreads();
// s = 1, 2, 4, 8, ..... blockDim.x / 2
for (unsigned int s = 1; s < blockDim.x; s <<= 1) {
// low-numbered threads do the work at stride 2s
int index = tid * s * 2;
if (index + s < blockDim.x) {
s_data[index] += s_data[index + s];
}
// Ensure all threads in the block finish add in this round
__syncthreads();
}
if (tid == 0) {
d_out[blockIdx.x] = s_data[0];
}
}
// Sequential addressing
// Block-level sum reduction, sequential addressing: the active threads are a
// contiguous prefix (tid < s) and read contiguous shared-memory slots, which
// avoids both the divergence of kernel0 and the strided access of kernel1.
// Same launch contract as reduce_kernel0.
__global__ void reduce_kernel2(int *d_out, int *d_in)
{
extern __shared__ int s_data[];
// thread ID inside the block
unsigned int tid = threadIdx.x;
// global ID across all blocks
unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x;
// Copy elements from global memory into per-block shared memory
s_data[tid] = d_in[gid];
// Ensure all elements have been copied into shared memory
__syncthreads();
// s = blockDim.x / 2, ....., 8, 4, 2, 1
for (unsigned int s = (blockDim.x >> 1); s >= 1; s >>= 1) {
if (tid < s) {
s_data[tid] += s_data[tid + s];
}
// Ensure all threads in the block finish add in this round
__syncthreads();
}
if (tid == 0) {
d_out[blockIdx.x] = s_data[0];
}
}
// First add during global load
// Block-level sum reduction, "first add during global load": each thread sums
// TWO global elements while filling shared memory, so each block consumes
// 2 * blockDim.x elements of d_in. Callers must halve the thread count (and
// dynamic shared memory) accordingly -- see the launches in reduce().
__global__ void reduce_kernel3(int *d_out, int *d_in)
{
extern __shared__ int s_data[];
// thread ID inside the block
unsigned int tid = threadIdx.x;
// global ID across all blocks (each block spans 2 * blockDim.x inputs)
unsigned int gid = blockIdx.x * blockDim.x * 2 + threadIdx.x;
// perform first level of reduction,
// reading from global memory, writing to shared memory
s_data[tid] = d_in[gid] + d_in[gid + blockDim.x];
// Ensure all elements have been copied into shared memory
__syncthreads();
// s = blockDim.x / 2, ....., 8, 4, 2, 1
for (unsigned int s = (blockDim.x >> 1); s >= 1; s >>= 1) {
if (tid < s) {
s_data[tid] += s_data[tid + s];
}
// Ensure all threads in the block finish add in this round
__syncthreads();
}
if (tid == 0) {
d_out[blockIdx.x] = s_data[0];
}
}
// True iff n is a positive power of two.
// Fix vs original: 0 & (0 - 1) == 0, so the original wrongly reported 0 as a
// power of two (negative inputs were also accepted for some bit patterns).
inline bool is_power_of_2(int n)
{
    return (n > 0) && ((n & (n - 1)) == 0);
}
// input: array (in host memory), array size, expected result, kernel function ID and iterations
// Reduce h_in (length array_size, must be a power of two) on the GPU `iters`
// times using the kernel selected by kernel_id (0-3), then compare the result
// with expected_result, printing a message on mismatch.
// Fix vs original: the device pointers were uninitialized, so the early
// `goto out` paths handed garbage pointers to cudaFree; they now start as
// NULL (cudaFree(NULL) is a documented no-op).
void reduce(int *h_in, int array_size, int expected_result, int kernel_id, int iters)
{
    // # of threads per block; must be a power of two for the tree reductions
    int threads = 1 << 10;
    // # of blocks in total (also must stay <= 1024 for the 2nd-stage launch)
    int blocks = 1;
    // GPU memory pointers -- NULL so every goto path can free them safely
    int *d_in = NULL, *d_intermediate = NULL, *d_out = NULL;
    // final result in host memory
    int h_out;
    if (!h_in || array_size <= 0 || !is_power_of_2(array_size))
        goto out;
    if (array_size > threads)
        blocks = array_size / threads;
    // allocate GPU memory
    if (cudaMalloc((void**) &d_in, array_size * sizeof(int)) != cudaSuccess
        || cudaMalloc((void**) &d_intermediate, blocks * sizeof(int)) != cudaSuccess
        || cudaMalloc((void**) &d_out, sizeof(int)) != cudaSuccess)
        goto out;
    // copy the input array from the host memory to the GPU memory
    cudaMemcpy(d_in, h_in, array_size * sizeof(int), cudaMemcpyHostToDevice);
    // run many times
    for (int i = 0; i < iters; i++) {
        switch (kernel_id) {
        // Interleaved addressing with divergent branching
        case 0:
            // first stage reduce
            reduce_kernel0<<<blocks, threads, threads * sizeof(int)>>>(d_intermediate, d_in);
            // second stage reduce
            reduce_kernel0<<<1, blocks, blocks * sizeof(int)>>>(d_out, d_intermediate);
            break;
        // Interleaved addressing with bank conflicts
        case 1:
            reduce_kernel1<<<blocks, threads, threads * sizeof(int)>>>(d_intermediate, d_in);
            reduce_kernel1<<<1, blocks, blocks * sizeof(int)>>>(d_out, d_intermediate);
            break;
        // Sequential addressing
        case 2:
            reduce_kernel2<<<blocks, threads, threads * sizeof(int)>>>(d_intermediate, d_in);
            reduce_kernel2<<<1, blocks, blocks * sizeof(int)>>>(d_out, d_intermediate);
            break;
        // First add during global load: each thread consumes two elements,
        // so both stages launch with half the threads / shared memory
        case 3:
            reduce_kernel3<<<blocks, threads / 2 , threads / 2 * sizeof(int)>>>(d_intermediate, d_in);
            reduce_kernel3<<<1, blocks / 2, blocks / 2 * sizeof(int)>>>(d_out, d_intermediate);
            break;
        default:
            printf("Invalid kernel function ID %d\n", kernel_id);
            goto out;
        }
    }
    // copy the result from the GPU memory to the host memory
    cudaMemcpy(&h_out, d_out, sizeof(int), cudaMemcpyDeviceToHost);
    if (h_out != expected_result) {
        printf("Wrong result: %d (expected) %d (actual)\n", expected_result, h_out);
    }
out:
    // free GPU memory (safe for NULL pointers)
    cudaFree(d_in);
    cudaFree(d_intermediate);
    cudaFree(d_out);
}
// generate a random integer in [min, max]
// Pseudo-random integer in [min, max] via the "buckets" technique (avoids the
// modulo-bias of rand() % span); returns 0 when min > max.
// Fix vs original: for min == max the bucket size evaluated RAND_MAX + 1 in
// int arithmetic, which overflows (UB); the division is now done unsigned.
inline int random_range(int min, int max)
{
    if (min > max)
        return 0;
    unsigned int bucket = (unsigned int)RAND_MAX / (unsigned int)(max - min + 1) + 1u;
    return min + (int)(rand() / bucket);
}
// Driver: build a random array, reduce it on the GPU with the chosen kernel,
// and report the average kernel time over `iters` runs.
// Fix vs original: h_in was a 4 MB (1<<20 ints) automatic array, which
// overflows the default stack on many systems; it is now heap-allocated,
// checked, and freed. The timing events are also destroyed.
int main(int argc, char **argv)
{
    if (argc != 3) {
        printf("%s [kernel ID] [iterations]\n", argv[0]);
        exit(EXIT_FAILURE);
    }
    int kernel_id = atoi(argv[1]);
    int iters = atoi(argv[2]);
    if (iters <= 0 || kernel_id < 0) {
        printf("Invalid input\n");
        exit(EXIT_FAILURE);
    }
    const int ARRAY_SIZE = 1 << 20;
    int *h_in = (int *)malloc(ARRAY_SIZE * sizeof(int));
    if (!h_in) {
        printf("Invalid input\n");
        exit(EXIT_FAILURE);
    }
    int sum = 0;
    // initialize random number generator
    srand(time(NULL));
    int min = 0, max = 10;
    for (int i = 0; i < ARRAY_SIZE; i++) {
        // generate a random int in a range
        h_in[i] = random_range(min, max);
        sum += h_in[i];
    }
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    reduce(h_in, ARRAY_SIZE, sum, kernel_id, iters);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float elapsed_time;
    cudaEventElapsedTime(&elapsed_time, start, stop);
    elapsed_time /= iters;
    printf("Average time elapsed: %f ms\n", elapsed_time);
    free(h_in);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
12,125 |
#include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
#define BLOCKSIZE 256
// Block-level sum reduction of c_a (length N) into c_suma[blockIdx.x], with a
// fully unrolled tree for BLOCKSIZE == 256. Out-of-range lanes contribute 0.
// The final five steps omit __syncthreads() and rely on the volatile shared
// array plus warp-synchronous execution within the last 32 threads.
// NOTE(review): on Volta+ independent thread scheduling this implicit warp
// synchrony is not guaranteed -- consider __syncwarp() between those steps.
__global__ void vectorReductionKernel(int N, int *c_a, int *c_suma){
volatile __shared__ int s_a[BLOCKSIZE]; // size must be specified at compile time
int threadIndex = threadIdx.x;
int blockIndex = blockIdx.x;
int threadCount = blockDim.x;
int n = threadIndex + threadCount*blockIndex;
// check if n is in [0,N); pad the tail block with zeros
if(n<N){
s_a[threadIndex] = c_a[n];
}else{
s_a[threadIndex] = 0;
}
// barrier for all threads in thread block
__syncthreads(); // make sure 256 values stored in shared
if(threadIndex<128)
s_a[threadIndex] += s_a[threadIndex+128];
__syncthreads();
if(threadIndex<64)
s_a[threadIndex] += s_a[threadIndex+64];
__syncthreads();
// from here on only one warp is active; no barriers (warp-synchronous)
if(threadIndex<32)
s_a[threadIndex] += s_a[threadIndex+32];
// __syncthreads();
if(threadIndex<16)
s_a[threadIndex] += s_a[threadIndex+16];
// __syncthreads();
if(threadIndex<8)
s_a[threadIndex] += s_a[threadIndex+8];
// __syncthreads();
if(threadIndex<4)
s_a[threadIndex] += s_a[threadIndex+4];
// __syncthreads();
if(threadIndex<2)
s_a[threadIndex] += s_a[threadIndex+2];
// __syncthreads();
if(threadIndex<1)
s_a[threadIndex] += s_a[threadIndex+1];
// thread 0 publishes the block's partial sum
if(threadIndex==0)
c_suma[blockIndex] = s_a[0];
}
// Sum N ones on the GPU via the block reduction, finish the reduction on the
// host, and report elapsed time plus effective memory bandwidth.
// Fixes vs original: h_a/h_b are freed, the CUDA events are destroyed, and
// main returns a value.
int main(int argc, char **argv){
int N = 100000000;
int threadsPerBlock = BLOCKSIZE;
int blocks = ( N+threadsPerBlock-1)/threadsPerBlock;
// ON HOST
int *h_a = (int*) malloc(N*sizeof(int));
int *h_b = (int*) malloc(blocks*sizeof(int));
int n;
for(n=0;n<N;++n){
h_a[n] = 1;
}
// ON DEVICE
int *c_a, *c_b;
cudaMalloc(&c_a, N*sizeof(int));
cudaMalloc(&c_b, blocks*sizeof(int));
cudaMemcpy(c_a, h_a, N*sizeof(int), cudaMemcpyHostToDevice);
cudaEvent_t start, end;
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start);
// INITIATE KERNEL ON DEVICE
vectorReductionKernel <<< blocks, threadsPerBlock >>> (N, c_a, c_b);
cudaEventRecord(end);
cudaEventSynchronize(end);
float elapsed;
cudaEventElapsedTime(&elapsed, start, end);
elapsed /= 1000.f; // ms -> s
printf("elapsed time = %g\n", elapsed);
int NbytesRead = N*sizeof(int);
int NbytesWritten = blocks*sizeof(int);
float bandwidth = (NbytesRead + NbytesWritten)/elapsed;
printf("bandwidth %g GB/s\n", bandwidth/1.e9);
// COPY DATA FROM DEVICE TO HOST
cudaMemcpy(h_b, c_b, blocks*sizeof(int), cudaMemcpyDeviceToHost);
// finish the reduction over the per-block partial sums on the host
int reda = 0;
for(n=0;n<blocks;++n)
reda += h_b[n];
printf("sum entries a is %d\n", reda);
// PRINT ENTRIES
for(n=0;n<5;++n){
printf("suma[%d] = %d\n", n, h_b[n]);
}
cudaDeviceSynchronize();
cudaEventDestroy(start);
cudaEventDestroy(end);
cudaFree(c_a);
cudaFree(c_b);
free(h_a);
free(h_b);
return 0;
}
|
12,126 | #include <iostream>
#include <math.h>
__constant__ int constant_values[100];
// Add all 100 __constant__ ints to d_array[idx], one thread per element.
// NOTE(review): no bounds guard -- every launched thread writes d_array[idx],
// so the allocation must cover the full launch width, not just the logical size.
__global__ void test_kernel(int* d_array)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
for (int i=0; i<100; i++) {
d_array[idx] = d_array[idx] + constant_values[i];
}
return;
}
// Demo: repeatedly upload 100 random ints to __constant__ memory and launch
// test_kernel to accumulate them into a device array.
// Fixes vs original: `size` was already a byte count (100*sizeof(int)) but
// was multiplied by sizeof(int) again at every use, and the grid
// (100/BLOCK_SIZE == 1 block of 64 threads) covered only 64 of the 100
// elements. The allocation is rounded up to the launch width because
// test_kernel has no bounds guard.
int main(int argc, char** argv)
{
    std::cout << "Starting" << std::endl;
    const int NUM_ELEMENTS = 100;
    int BLOCK_SIZE = 64;
    // Round the grid up so every element is covered.
    int numBlocks = (NUM_ELEMENTS + BLOCK_SIZE - 1) / BLOCK_SIZE;
    // test_kernel writes at every launched thread index, so allocate the full
    // launch width rather than just NUM_ELEMENTS.
    size_t bytes = (size_t)numBlocks * BLOCK_SIZE * sizeof(int);
    int* d_array;
    int h_angle[360];
    cudaError_t cudaStatus;
    //std::srand(std::time(0));
    // Device allocation, zero-initialized.
    cudaMalloc((void**)&d_array, bytes);
    cudaMemset(d_array, 0, bytes);
    // Host-side constant data, re-uploaded (and kernel re-run) each iteration.
    for (int i = 0; i < 100; i++) {
        h_angle[i] = std::rand();
        // Copy data into CUDA constant memory
        cudaMemcpyToSymbol(constant_values, h_angle, sizeof(int) * 100);
        test_kernel<<<numBlocks, BLOCK_SIZE>>>(d_array);
        // Check kernel launch errors (other checks omitted as in the original)
        cudaStatus = cudaGetLastError();
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "Error: %s\n", cudaGetErrorString(cudaStatus));
            cudaFree(d_array);
            return 1;
        }
    }
    // free device memory
    cudaFree(d_array);
    std::cout << "Finishing" << std::endl;
    return 0;
}
|
12,127 | // This is a generated file, do not edit it!
#pragma once
#include <stdint.h>
// Per-thread PRNG state. The field names suggest a combined three-component
// Tausworthe + LCG hybrid generator; the update routine lives elsewhere --
// confirm there before relying on that interpretation.
typedef struct RandomState {
uint32_t Taus1;
uint32_t Taus2;
uint32_t Taus3;
uint32_t Lcg;
} RandomState;
|
12,128 | #include "init.cuh"
// Randomly initialize all network parameters (conv filters, fc1, fc2 weights
// and biases), seeding rand() from the current time.
// Relies on globals and get_rand() declared in init.cuh; get_rand's argument
// appears to be a fan-in used to scale the random value -- confirm in init.cuh.
void init_params()
{
srand((unsigned)time(NULL));
// convolution kernels and their biases
for(int i=0;i<CONV_W_NUM;i++)
{
for(int j=0;j<CONV_W_SIZE;j++)
for(int k=0;k<CONV_W_SIZE;k++)
conv_w[i][j][k]=get_rand(CONV_W_SIZE*CONV_W_SIZE);
conv_b[i]=get_rand(CONV_W_SIZE*CONV_W_SIZE);
}
// first fully-connected layer (over pooled conv outputs) and its biases
for(int i=0;i<FC1_SIZE;i++)
{
for(int j=0;j<CONV_W_NUM;j++)
for(int k=0;k<POOL_SIZE;k++)
for(int l=0;l<POOL_SIZE;l++)
fc1_w[i][j][k][l]=get_rand(POOL_SIZE*POOL_SIZE*CONV_W_NUM);
fc1_b[i]=get_rand(POOL_SIZE*POOL_SIZE*CONV_W_NUM);
}
// second fully-connected layer and its biases
for(int i=0;i<FC2_SIZE;i++)
{
for(int j=0;j<FC1_SIZE;j++)
fc2_w[i][j]=get_rand(FC1_SIZE);
fc2_b[i]=get_rand(FC1_SIZE);
}
}
extern "C"
// Element-wise combination of two length-n float vectors into output, with a
// bounds guard on the global thread index.
// NOTE(review): the name says "multiply" but the body computes a[i] + b[i]
// (addition). Confirm against callers whether the name or the operator is the
// actual bug before changing either.
__global__ void multiply(long n, float *a, float *b, float *output) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n) {
output[i] = a[i] + b[i];
}
}
12,130 | #include "includes.h"
// Copies pIn into dynamic shared memory, adds pIn[0] to every slot, and
// writes the result to out, i.e. out[t] = pIn[t] + pIn[0].
// Expects a single block with blockDim.x == element count and
// blockDim.x * sizeof(float) dynamic shared memory; `sum` is unused here.
// NOTE(review): each thread atomically updates only its own slot, so the
// atomicAdd is uncontended -- presumably a demonstration of the intrinsic.
__global__ void ReturnFloat( float *sum, float *out, const float *pIn )
{
extern __shared__ float s[];
s[threadIdx.x] = pIn[threadIdx.x];
__syncthreads();
(void) atomicAdd( &s[threadIdx.x], *pIn );
__syncthreads();
out[threadIdx.x] = s[threadIdx.x];
}
12,131 |
// basic int main function stored here for safe keeping.
/*
GpuTimer timer;
// size
int size = 10000;
// number of blocks we will be using
int blocks = size / 512 + 1;
// threads per block, = size of problem / number of blocks
int threads = size / blocks;
// host data
int* host_data = new int[size];
// initialize host
zero_initialize(host_data, size);
// device data
int* device_data = new int[size];
// allocate for device
allocate<int>(&device_data, size);
// initialize device
index_initialize <<< blocks, threads >>>(device_data);
// square device
//square<int><<< 1, size >>> (device_data);
// copy device to host
copy_to_host<int>(host_data, device_data, size);
cudaDeviceReset();
// print contents
print_array(host_data, size);
*/
|
12,132 | /*
============================================================================
Name : NavierStokesSolver.cu
Author : Matt Kennedy
Version : 1.0
Description : Solve the Navier-Stokes over a flat plate
============================================================================
*/
#include <iostream>
#include <numeric>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
using namespace std;
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a CUDA error with file/line context to stderr; exits the process
// with the error code when abort is true (the default). Invoked through the
// gpuErrchk() wrapper macro.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// ================================================================
// ================================================================
// define any configuration parameters needed by both CPU and GPU
// ================================================================
// ================================================================
// note that we'll probably get segfaults if the cpu and gpu variables are set to different values!
// (they didn't seem to be happy trying to set one from the other, so need to manually change both for now)
// (and host code isn't happy about reading from a device global variable)
int jmax = 70;
int kmax = 70;
__constant__ int jmax_d = 70;
__constant__ int kmax_d = 70;
double plateLength = 0.00001;
__constant__ double plateLength_d = 0.00001;
double CFL = 0.2; // can fudge this to help stability
__constant__ double CFL_d = 0.2; // can fudge this to help stability
double M0 = 4.0;
double dx = plateLength / (jmax - 1); // uniform for now
//double dy = plateLength / (kmax - 1);
double dy = 1.1869 * pow(10.0,-7); // calculated from boundary layer
__constant__ double u0_d = 1361.12;
// ================================================================
// ================================================================
// define any global variables needed by the GPU
// ================================================================
// ================================================================
__constant__ double gam_d = 1.4; // seems "gamma" is a protected name from the numeric library
__constant__ double Pr_d = 0.71; // Prandtl number
__constant__ double R_d = 287.0; // specific gas constant
__constant__ double Cv_d = 0.7171*1000; // specific heat capacity of air
__constant__ double Cp_d = 1.006 * 1000; // specific heat capacity of air
__constant__ double mu0_d = 1.7894E-5; // dynamic viscosity of air
__constant__ double T0_d = 288.16;
__constant__ double p0_d = 101325.0;
// ================================================================
// ================================================================
// define any global variables needed by the CPU
// ================================================================
// ================================================================
double a0 = 340.28;
double u0 = M0*a0;
double p0 = 101325.0;
double T0 = 288.16;
double v0 = 0;
double gam = 1.4; // seems "gamma" is a protected name from the numeric library
double Pr = 0.71; // Prandtl number
double R = 287.0; // specific gas constant
double Cv = 0.7171*1000; // specific heat capacity of air
double Cp = 1.006 * 1000; // specific heat capacity of air
double rho0 = p0 / (R * T0);
double e0 = T0 * Cv;
double mu0 = 1.7894E-5; // dynamic viscosity of air
double Re = rho0 * u0 * plateLength / mu0;
// Assemble the conserved-variable vector Q = (rho, rho*u, rho*v, Et) at grid
// point (j,k) from the primitive fields u, v, p, T, using the ideal-gas
// relations rho = p/(R*T) and e = Cv*T. Q is a 4 x jmax x kmax array stored
// as four stacked 2D planes of the flattened layout.
__device__ void calc_Q(double* Q_d, double* u_d, double* v_d, double* p_d, double* T_d, int j, int k) {
int ind2 = j*kmax_d + k; // flattened index for our 2d arrays
int ind3_0 = (j + 0*jmax_d)*kmax_d + k; // flattened index for the first dim of our 3d arrays
int ind3_1 = (j + 1*jmax_d)*kmax_d + k; // stack them like extra rows
int ind3_2 = (j + 2*jmax_d)*kmax_d + k;
int ind3_3 = (j + 3*jmax_d)*kmax_d + k;
double rho_val = p_d[ind2] / (R_d * T_d[ind2]); // ideal gas law
double e_val = Cv_d * T_d[ind2]; // internal energy from temperature
// total energy: internal plus kinetic, per unit volume
double Et_val = rho_val * (e_val + 0.5*(u_d[ind2]*u_d[ind2] + v_d[ind2]*v_d[ind2]));
Q_d[ind3_0] = rho_val;
Q_d[ind3_1] = rho_val * u_d[ind2];
Q_d[ind3_2] = rho_val * v_d[ind2];
Q_d[ind3_3] = Et_val;
}
__device__ void heatFluxParameters(double* T_d, double mu_val, bool isPredictor, int j, int k, double dx, double dy, double* q) {
    // Conduction heat-flux components at (j,k), written to q[0] (x) and q[1] (y).
    // Temperature derivatives use one-sided differences in the OPPOSITE
    // direction of the MacCormack sweep (backward on the predictor, forward on
    // the corrector), falling back to the only available neighbor on the
    // domain edges. Thermal conductivity comes from k = mu * Cp / Pr.
    double dTdx;
    double dTdy;
    if (isPredictor) { // scheme is forward, make this backward
        if (j > 0)
            dTdx = (T_d[j*kmax_d + k] - T_d[(j-1)*kmax_d + k])/dx;
        else if (j == 0) // left edge: forced forward difference
            dTdx = (T_d[(j+1)*kmax_d + k] - T_d[j*kmax_d + k])/dx;
        if (k > 0)
            dTdy = (T_d[j*kmax_d + k] - T_d[j*kmax_d + (k-1)])/dy;
        else if (k == 0) // bottom edge: forced forward difference
            dTdy = (T_d[j*kmax_d + k + 1] - T_d[j*kmax_d + k])/dy;
    }
    else { // scheme is backward, make this forward
        if (j < jmax_d-1)
            dTdx = (T_d[(j+1)*kmax_d + k] - T_d[j*kmax_d + k])/dx;
        else if (j == jmax_d - 1) // right edge: forced backward difference
            dTdx = (T_d[j*kmax_d + k] - T_d[(j-1)*kmax_d + k]) / dx;
        if (k < kmax_d-1)
            dTdy = (T_d[j*kmax_d+k+1] - T_d[j*kmax_d + k]) / dy;
        else if (k == kmax_d - 1) // top edge: forced backward difference
            dTdy = (T_d[j*kmax_d + k] - T_d[j*kmax_d + k-1]) / dy;
    }
    double k_cond = mu_val * Cp_d / Pr_d; // thermal conductivity from the Prandtl relation
    // Fourier's law: q = -k * grad(T)
    q[0] = -k_cond * dTdx;
    q[1] = -k_cond * dTdy;
}
__device__ void shearParameters(double* u_d, double* v_d, double mu, bool isPredictor, int j, int k, double dx, double dy, double* shears) {
    // Viscous shear-stress components at a single location (j,k); inputs are
    // assumed to be entire (flattened jmax x kmax) matrices. Output layout:
    //   shears[0] = txx, shears[1] = tyy,
    //   shears[2] = txy used in the F (x-flux) vector,
    //   shears[3] = txy used in the G (y-flux) vector.
    // The flux-normal derivatives use one-sided differences opposite the
    // MacCormack sweep (suffix _FB); the transverse derivatives use central
    // differences (suffix _C), degrading to one-sided at the domain edges.
    double dvdx_FB;
    double dudx_FB;
    double dvdy_FB;
    double dudy_FB;
    double dvdx_C;
    double dudx_C;
    double dvdy_C;
    double dudy_C;
    // calculate the forward or backward differenced versions
    if (isPredictor) {
        // want opposite direction from scheme step differencing
        // scheme is forward, make this backward
        if (j > 0) {
            dvdx_FB = (v_d[j*kmax_d + k] - v_d[(j-1)*kmax_d + k])/dx;
            dudx_FB = (u_d[j*kmax_d + k] - u_d[(j-1)*kmax_d + k])/dx;
        }
        else {
            dvdx_FB = (v_d[(j+1)*kmax_d + k] - v_d[j*kmax_d + k])/dx; // except first point forward
            dudx_FB = (u_d[(j+1)*kmax_d + k] - u_d[j*kmax_d + k])/dx; // except first point forward
        }
        if (k > 0) {
            dudy_FB = (u_d[j*kmax_d+k] - u_d[j*kmax_d+k-1])/dy;
            dvdy_FB = (v_d[j*kmax_d+k] - v_d[j*kmax_d+k-1])/dy;
        }
        else {
            dudy_FB = (u_d[j*kmax_d+k+1] - u_d[j*kmax_d+k])/dy; // except first point forward
            dvdy_FB = (v_d[j*kmax_d+k+1] - v_d[j*kmax_d+k])/dy; // except first point forward
        }
    }
    else {
        // scheme is backward, make this forward
        if (j < jmax_d - 1) {
            dvdx_FB = (v_d[(j+1)*kmax_d + k] - v_d[j*kmax_d + k])/dx;
            dudx_FB = (u_d[(j+1)*kmax_d + k] - u_d[j*kmax_d + k])/dx;
        }
        else {
            dvdx_FB = (v_d[j*kmax_d+k] - v_d[(j-1)*kmax_d + k])/dx; // except jmax backward
            dudx_FB = (u_d[j*kmax_d+k] - u_d[(j-1)*kmax_d + k])/dx; // except jmax backward
        }
        if (k < kmax_d-1) {
            dudy_FB = (u_d[j*kmax_d + k+1] - u_d[j*kmax_d + k])/dy;
            dvdy_FB = (v_d[j*kmax_d + k+1] - v_d[j*kmax_d + k])/dy;
        }
        else {
            dudy_FB = (u_d[j*kmax_d + k] - u_d[j*kmax_d + k-1])/dy; // except kmax backward
            dvdy_FB = (v_d[j*kmax_d + k] - v_d[j*kmax_d + k-1])/dy; // except kmax backward
        }
    }
    // and then we want centeral differenced versions (one-sided at the edges)
    if (j == 0) {
        dvdx_C = (v_d[(j+1)*kmax_d + k] - v_d[j*kmax_d + k])/dx;
        dudx_C = (u_d[(j+1)*kmax_d + k] - u_d[j*kmax_d + k])/dx;
    }
    else if (j == jmax_d - 1)
    {
        dvdx_C = (v_d[j*kmax_d + k] - v_d[(j-1)*kmax_d + k])/dx;
        dudx_C = (u_d[j*kmax_d + k] - u_d[(j-1)*kmax_d + k])/dx;
    }
    else {
        dvdx_C = (v_d[(j+1)*kmax_d + k] - v_d[(j-1)*kmax_d + k])/(2*dx);
        dudx_C = (u_d[(j+1)*kmax_d + k] - u_d[(j-1)*kmax_d + k])/(2*dx);
    }
    if (k == 0) {
        dudy_C = (u_d[j*kmax_d + k+1] - u_d[j*kmax_d + k])/dy;
        dvdy_C = (v_d[j*kmax_d + k+1] - v_d[j*kmax_d + k])/dy;
    }
    else if (k == kmax_d-1) {
        dudy_C = (u_d[j*kmax_d + k] - u_d[j*kmax_d + k-1])/dy;
        dvdy_C = (v_d[j*kmax_d + k] - v_d[j*kmax_d + k-1])/dy;
    }
    else {
        dudy_C = (u_d[j*kmax_d + k+1] - u_d[j*kmax_d + k-1])/(2*dy);
        dvdy_C = (v_d[j*kmax_d + k+1] - v_d[j*kmax_d + k-1])/(2*dy);
    }
    // these come from page 65 and 66 in Anderson
    double lambda = -(2.0/3.0) * mu; // second viscosity coefficient estimated by Stokes
    // use the forward/backward du/dx and central dv/dy for both F and G
    double txx = lambda * ( dudx_FB + dvdy_C ) + 2 * mu * dudx_FB;
    // use the forward/backward dv/dy and central du/dx for both F and G
    double tyy = lambda * ( dudx_C + dvdy_FB ) + 2 * mu * dvdy_FB;
    double txy_F = mu * ( dvdx_FB + dudy_C );
    double txy_G = mu * ( dvdx_C + dudy_FB );
    shears[0] = txx;
    shears[1] = tyy;
    shears[2] = txy_F;
    shears[3] = txy_G;
}
__device__ void calc_FG(double* F_d, double* G_d, double* u_d, double* v_d, double* p_d, double* T_d, bool isPredictor, int j, int k, double dx, double dy) {
    // Assemble the x-direction flux vector F and y-direction flux vector G at
    // (j,k), including pressure, viscous shear (shearParameters) and
    // conduction heat flux (heatFluxParameters). isPredictor selects the
    // one-sided differencing direction used inside those helpers. The 3D
    // outputs are stored as 4 stacked jmax x kmax planes.
    int ind2 = j*kmax_d + k; // flattened index for our 2d arrays
    int ind3_0 = (j + 0*jmax_d)*kmax_d + k; // flattened index for the first dim of our 3d arrays
    int ind3_1 = (j + 1*jmax_d)*kmax_d + k; // stack them like extra rows
    int ind3_2 = (j + 2*jmax_d)*kmax_d + k;
    int ind3_3 = (j + 3*jmax_d)*kmax_d + k;
    double rho_val = p_d[ind2] / (R_d * T_d[ind2]); // ideal gas law
    double e_val = Cv_d * T_d[ind2]; // energy of air based on temp
    double Et_val = rho_val * (e_val + 0.5*(u_d[ind2]*u_d[ind2] + v_d[ind2]*v_d[ind2]));
    double mu_val = mu0_d * pow(T_d[ind2] / T0_d, 1.5) * (T0_d + 110)/(T_d[ind2] + 110); // sutherlands law
    double q[2];      // heat-flux components (qx, qy)
    double shears[4]; // txx, tyy, txy_F, txy_G
    heatFluxParameters(T_d, mu_val, isPredictor, j, k, dx, dy, q);
    shearParameters(u_d, v_d, mu_val, isPredictor, j, k, dx, dy, shears);
    // and unpack these for easier use
    double qx = q[0];
    double qy = q[1];
    double txx = shears[0];
    double tyy = shears[1];
    double txy_F = shears[2];
    double txy_G = shears[3];
    // x-flux: mass, x-momentum, y-momentum, energy
    F_d[ind3_0] = rho_val * u_d[ind2];
    F_d[ind3_1] = rho_val * pow(u_d[ind2],2) + p_d[ind2] - txx;
    F_d[ind3_2] = rho_val * u_d[ind2]*v_d[ind2] - txy_F;
    F_d[ind3_3] = (Et_val + p_d[ind2]) * u_d[ind2] - u_d[ind2] * txx - v_d[ind2] * txy_F + qx;
    // y-flux: mass, x-momentum, y-momentum, energy
    G_d[ind3_0] = rho_val * v_d[ind2];
    G_d[ind3_1] = rho_val * u_d[ind2] * v_d[ind2] - txy_G;
    G_d[ind3_2] = rho_val * pow(v_d[ind2],2) + p_d[ind2] - tyy;
    G_d[ind3_3] = (Et_val + p_d[ind2]) * v_d[ind2] - u_d[ind2] * txy_G - v_d[ind2] * tyy + qy;
}
__device__ void MacCormackPredictorUniform(double* Q_pred_d, double* Q_d, double* F_d, double* G_d, double dt, int j, int k, double dx, double dy) {
    // MacCormack predictor step on a uniform grid: forward-difference the
    // fluxes F (in j/x) and G (in k/y) and advance Q explicitly into
    // Q_pred_d. Boundary points are left untouched (handled by the BC
    // kernels).
    // DO MACCORMACKS FOR INTERIOR POINTS ONLY
    if (j == 0 || k == 0 || j == jmax_d-1 || k == kmax_d-1)
        return;
    // have each thread calculate all 4 dimensions at a single loc
    double flux;
    for (int dim=0; dim<4; dim++) {
        int ind_this = (j + dim*jmax_d)*kmax_d + k;
        int ind_nextJ = (j+1 + dim*jmax_d)*kmax_d + k; // forward neighbor in j
        int ind_nextK = (j + dim*jmax_d)*kmax_d + k+1; // forward neighbor in k
        flux = (F_d[ind_nextJ] - F_d[ind_this])/dx + (G_d[ind_nextK] - G_d[ind_this])/dy;
        Q_pred_d[ind_this] = Q_d[ind_this] - dt * flux;
    }
}
__device__ void MacCormackCorrectorUniform(double* Q_pred_d, double* Q_d, double* F_d, double* G_d, double dt, int j, int k, double dx, double dy) {
    // MacCormack corrector step on a uniform grid: backward-difference the
    // fluxes and average with the predictor result, updating Q_d in place.
    // Boundary points are left untouched (handled by the BC kernels).
    // DO MACCORMACKS FOR INTERIOR POINTS ONLY
    if (j == 0 || k == 0 || j == jmax_d-1 || k == kmax_d-1)
        return;
    // have each thread calculate all 4 dimensions at a single (j,k) location
    double flux;
    for (int dim=0; dim<4; dim++) {
        int ind_this = (j + dim*jmax_d)*kmax_d + k;
        int ind_prevJ = (j-1 + dim*jmax_d)*kmax_d + k; // backward neighbor in j
        int ind_prevK = (j + dim*jmax_d)*kmax_d + k-1; // backward neighbor in k
        flux = (F_d[ind_this] - F_d[ind_prevJ])/dx + (G_d[ind_this] - G_d[ind_prevK])/dy;
        Q_d[ind_this] = 0.5*( Q_d[ind_this] + Q_pred_d[ind_this] - dt*flux );
    }
}
__device__ void primativesFromQ(double* Q_d, double* rho_d, double* u_d, double* v_d, double* p_d, double* T_d, double* e_d, int j, int k) {
    // Recover the primitive fields (rho, u, v, p, T, e) at (j,k) from the
    // conserved vector Q = [rho, rho*u, rho*v, Et]. (Function name keeps the
    // file's existing "primatives" spelling; callers depend on it.)
    int ind2 = j*kmax_d + k; // flattened index for our 2d arrays
    int ind3_0 = (j + 0*jmax_d)*kmax_d + k; // flattened index for the first dim of our 3d arrays
    int ind3_1 = (j + 1*jmax_d)*kmax_d + k; // stack them like extra rows
    int ind3_2 = (j + 2*jmax_d)*kmax_d + k;
    int ind3_3 = (j + 3*jmax_d)*kmax_d + k;
    rho_d[ind2] = Q_d[ind3_0];
    u_d[ind2] = Q_d[ind3_1] / Q_d[ind3_0];
    v_d[ind2] = Q_d[ind3_2] / Q_d[ind3_0];
    // internal energy = total energy per unit mass minus kinetic energy
    e_d[ind2] = Q_d[ind3_3] / Q_d[ind3_0] - 0.5*( pow(u_d[ind2], 2) + pow(v_d[ind2], 2) );
    T_d[ind2] = e_d[ind2] / Cv_d;               // e = Cv * T inverted
    p_d[ind2] = Q_d[ind3_0] * R_d * T_d[ind2];  // ideal gas law
}
__device__ void enforceBC_nonSurface(double* u_d, double* v_d, double* p_d, double* T_d, int j, int k) {
    // Boundary conditions everywhere EXCEPT the plate surface:
    //   leading edge (0,0): no-slip with freestream pressure/temperature,
    //   inflow (j==0) and upper boundary (k==kmax-1): freestream values,
    //   outflow (j==jmax-1): linear extrapolation from the two interior columns.
    // Surface points are handled afterwards by enforceBC_surface so the last
    // surface point can be interpolated from already-updated values (matters
    // when the surface runs all the way to the outflow).
    int ind = j*kmax_d + k;
    if ( j == 0 && k == 0) { // leading edge
        u_d[ind] = 0;
        v_d[ind] = 0;
        p_d[ind] = p0_d;
        T_d[ind] = T0_d;
    }
    else if (j == 0 || k == kmax_d-1) { // inflow from upstream OR upper boundary
        u_d[ind] = u0_d;
        v_d[ind] = 0;
        p_d[ind] = p0_d;
        T_d[ind] = T0_d;
    }
    else if (j == jmax_d-1) { // outflow -- extrapolate from interior values
        int ind1 = (j-1)*kmax_d + k;
        int ind2 = (j-2)*kmax_d + k;
        u_d[ind] = 2*u_d[ind1] - u_d[ind2];
        v_d[ind] = 2*v_d[ind1] - v_d[ind2];
        p_d[ind] = 2*p_d[ind1] - p_d[ind2];
        T_d[ind] = 2*T_d[ind1] - T_d[ind2];
    }
}
__device__ void enforceBC_surface(double* u_d, double* v_d, double* p_d, double* T_d, int j, int k) {
    // Plate-surface (k == 0) boundary conditions, skipping the leading edge
    // (j == 0): no-slip velocity, pressure linearly extrapolated from the two
    // interior rows, and temperature copied from the first interior row
    // (zero normal gradient). Run AFTER enforceBC_nonSurface so the last
    // surface point uses already-updated interior values.
    if (k != 0 || j <= 0)
        return;
    const int wall = j*kmax_d + k;
    const int in1  = j*kmax_d + 1;
    const int in2  = j*kmax_d + 2;
    u_d[wall] = 0;
    v_d[wall] = 0;
    p_d[wall] = 2*p_d[in1] - p_d[in2];
    T_d[wall] = T_d[in1];
}
__global__ void iterateScheme_part1(double* x_d, double* y_d, double* u_d, double* v_d, double* p_d, double* T_d, double* rho_d, double* e_d, double* Q_d, double* Q_pred_d, double* F_d, double* G_d, double dx, double dy, double dt) {
    // Stage 1: assemble Q and the predictor-step flux vectors F and G at each
    // grid point (forward differencing selected via isPredictor=true inside
    // calc_FG). The scheme is split into separate kernel launches because
    // there is no easy grid-wide sync inside a single kernel.
    const int j = blockIdx.x * blockDim.x + threadIdx.x;
    const int k = blockIdx.y * blockDim.y + threadIdx.y;
    if (j >= jmax_d || k >= kmax_d)
        return; // off-grid thread
    calc_Q(Q_d, u_d, v_d, p_d, T_d, j, k);
    calc_FG(F_d, G_d, u_d, v_d, p_d, T_d, true, j, k, dx, dy);
}
__global__ void iterateScheme_part2(double* x_d, double* y_d, double* u_d, double* v_d, double* p_d, double* T_d, double* rho_d, double* e_d, double* Q_d, double* Q_pred_d, double* F_d, double* G_d, double dx, double dy, double dt) {
    // Stage 2: MacCormack predictor sweep, then unpack the predicted
    // conserved variables back into the primitive fields.
    const int j = blockIdx.x * blockDim.x + threadIdx.x;
    const int k = blockIdx.y * blockDim.y + threadIdx.y;
    if (j >= jmax_d || k >= kmax_d)
        return; // off-grid thread
    MacCormackPredictorUniform(Q_pred_d, Q_d, F_d, G_d, dt, j, k, dx, dy);
    primativesFromQ(Q_pred_d, rho_d, u_d, v_d, p_d, T_d, e_d, j, k);
}
__global__ void iterateScheme_part3(double* x_d, double* y_d, double* u_d, double* v_d, double* p_d, double* T_d, double* rho_d, double* e_d, double* Q_d, double* Q_pred_d, double* F_d, double* G_d, double dx, double dy, double dt) {
    // Stage 3: non-surface boundary conditions on the predicted primitives.
    const int j = blockIdx.x * blockDim.x + threadIdx.x;
    const int k = blockIdx.y * blockDim.y + threadIdx.y;
    if (j >= jmax_d || k >= kmax_d)
        return; // off-grid thread
    enforceBC_nonSurface(u_d, v_d, p_d, T_d, j, k);
}
__global__ void iterateScheme_part4(double* x_d, double* y_d, double* u_d, double* v_d, double* p_d, double* T_d, double* rho_d, double* e_d, double* Q_d, double* Q_pred_d, double* F_d, double* G_d, double dx, double dy, double dt) {
    // Stage 4: plate-surface boundary conditions on the predicted primitives
    // (runs after stage 3 so interior values are already updated).
    const int j = blockIdx.x * blockDim.x + threadIdx.x;
    const int k = blockIdx.y * blockDim.y + threadIdx.y;
    if (j >= jmax_d || k >= kmax_d)
        return; // off-grid thread
    enforceBC_surface(u_d, v_d, p_d, T_d, j, k);
}
__global__ void iterateScheme_part5(double* x_d, double* y_d, double* u_d, double* v_d, double* p_d, double* T_d, double* rho_d, double* e_d, double* Q_d, double* Q_pred_d, double* F_d, double* G_d, double dx, double dy, double dt) {
    // Stage 5: rebuild F and G from the predicted primitives for the
    // corrector sweep (backward differencing via isPredictor=false).
    const int j = blockIdx.x * blockDim.x + threadIdx.x;
    const int k = blockIdx.y * blockDim.y + threadIdx.y;
    if (j >= jmax_d || k >= kmax_d)
        return; // off-grid thread
    calc_FG(F_d, G_d, u_d, v_d, p_d, T_d, false, j, k, dx, dy);
}
__global__ void iterateScheme_part6(double* x_d, double* y_d, double* u_d, double* v_d, double* p_d, double* T_d, double* rho_d, double* e_d, double* Q_d, double* Q_pred_d, double* F_d, double* G_d, double dx, double dy, double dt) {
    // Stage 6: MacCormack corrector sweep (updates Q_d in place), then unpack
    // the corrected conserved variables into the primitive fields.
    const int j = blockIdx.x * blockDim.x + threadIdx.x;
    const int k = blockIdx.y * blockDim.y + threadIdx.y;
    if (j >= jmax_d || k >= kmax_d)
        return; // off-grid thread
    MacCormackCorrectorUniform(Q_pred_d, Q_d, F_d, G_d, dt, j, k, dx, dy);
    primativesFromQ(Q_d, rho_d, u_d, v_d, p_d, T_d, e_d, j, k);
}
__global__ void iterateScheme_part7(double* x_d, double* y_d, double* u_d, double* v_d, double* p_d, double* T_d, double* rho_d, double* e_d, double* Q_d, double* Q_pred_d, double* F_d, double* G_d, double dx, double dy, double dt) {
    // Stage 7: non-surface boundary conditions on the corrected primitives.
    const int j = blockIdx.x * blockDim.x + threadIdx.x;
    const int k = blockIdx.y * blockDim.y + threadIdx.y;
    if (j >= jmax_d || k >= kmax_d)
        return; // off-grid thread
    enforceBC_nonSurface(u_d, v_d, p_d, T_d, j, k);
}
__global__ void iterateScheme_part8(double* x_d, double* y_d, double* u_d, double* v_d, double* p_d, double* T_d, double* rho_d, double* e_d, double* Q_d, double* Q_pred_d, double* F_d, double* G_d, double dx, double dy, double dt) {
    // Stage 8: plate-surface boundary conditions on the corrected primitives.
    const int j = blockIdx.x * blockDim.x + threadIdx.x;
    const int k = blockIdx.y * blockDim.y + threadIdx.y;
    if (j >= jmax_d || k >= kmax_d)
        return; // off-grid thread
    enforceBC_surface(u_d, v_d, p_d, T_d, j, k);
}
double BoundaryLayerThickness() {
    // Flat-plate boundary-layer thickness estimate at the end of the plate:
    // delta = 5 * L / sqrt(Re), using the global plate length and Reynolds
    // number.
    const double delta = 5 * plateLength / sqrt(Re);
    return delta;
}
void setupGrid(double* x, double* y) {
    // Fill the coordinate arrays with a uniformly spaced grid:
    // x[j] = j*dx (jmax entries), y[k] = k*dy (kmax entries).
    for (int j = 0; j < jmax; ++j)
        x[j] = dx * j;
    for (int k = 0; k < kmax; ++k)
        y[k] = dy * k;
}
void initializePrimatives(double* u, double* v, double* p, double* T, double* rho, double* e) {
    // Seed every grid point of the host primitive fields with the freestream
    // state (u0, v0, p0, T0, rho0, e0). Arrays are flattened as [j*kmax + k].
    for (int j = 0; j < jmax; ++j) {
        for (int k = 0; k < kmax; ++k) {
            const int ind = j*kmax + k;
            u[ind] = u0;
            v[ind] = v0;
            p[ind] = p0;
            T[ind] = T0;
            rho[ind] = rho0;
            e[ind] = e0;
        }
    }
}
void applyBC_UpperPlate(double* u, double* v, double* p, double* T) {
    // Host-side boundary conditions for the flat-plate domain.
    // NOTE: later sections intentionally overwrite corner points written by
    // earlier ones (e.g. the inflow column and the surface row both touch
    // (0,0)); the section order below must be preserved.
    // leading edge (j=0, k=0)
    u[0] = 0;
    v[0] = 0;
    p[0] = p0;
    T[0] = T0;
    // inflow column (j=0, all k): freestream
    for (int k = 0; k < kmax; ++k) {
        u[k] = u0;
        v[k] = 0;
        p[k] = p0;
        T[k] = T0;
    }
    // upper boundary row (all j, k=kmax-1): freestream
    for (int j = 0; j < jmax; ++j) {
        const int top = j*kmax + kmax-1;
        u[top] = u0;
        v[top] = 0;
        p[top] = p0;
        T[top] = T0;
    }
    // outflow column (j=jmax-1, all k): linear extrapolation from the two
    // interior columns
    const int last = (jmax-1)*kmax;
    const int prev1 = (jmax-2)*kmax;
    const int prev2 = (jmax-3)*kmax;
    for (int k = 0; k < kmax; ++k) {
        u[last + k] = 2*u[prev1 + k] - u[prev2 + k];
        v[last + k] = 2*v[prev1 + k] - v[prev2 + k];
        p[last + k] = 2*p[prev1 + k] - p[prev2 + k];
        T[last + k] = 2*T[prev1 + k] - T[prev2 + k];
    }
    // plate surface (all j, k=0): no-slip, extrapolated pressure, temperature
    // copied from the first interior row
    for (int j = 0; j < jmax; ++j) {
        const int wall = j*kmax;
        u[wall] = 0;
        v[wall] = 0;
        p[wall] = 2*p[wall + 1] - p[wall + 2];
        T[wall] = T[wall + 1];
    }
}
double calc_dt(double* u, double* v, double* p, double* T, double dx, double dy) {
    // Stable time step for the explicit MacCormack scheme (host-side).
    // Pass 1 finds the maximum viscous term v' over the grid; pass 2 takes
    // the minimum CFL-limited dt over the grid and scales it by CFL.
    // (Could be a device reduction later to avoid copying u/v/p/T back every
    // iteration, but kept on the host for now.)
    double rho_val;
    double mu_val;
    double temp_val;
    double vprime = -INFINITY;
    for (int j=0; j<jmax; j++) {
        for (int k=0; k<kmax; k++) {
            int ind = j*kmax + k;
            rho_val = p[ind] / (R * T[ind]);
            mu_val = mu0 * pow(T[ind]/T0, 1.5) * (T0 + 110)/(T[ind] + 110); // Sutherland's law
            // BUG FIX: (4/3) was integer division and evaluated to 1,
            // silently dropping the 4/3 factor from the viscous term.
            temp_val = (4.0/3.0) * mu_val * (gam * mu_val / Pr) / rho_val; // find the max of this
            if (temp_val > vprime)
                vprime = temp_val;
        }
    }
    double spaceUnit = pow( 1/(dx*dx) + 1/(dy*dy), 0.5 );
    double term1;
    double term2;
    double term3;
    double term4;
    double dt_cfl;
    double dt = INFINITY;
    for (int j=0; j<jmax; j++) {
        for (int k=0; k<kmax; k++) {
            int ind = j*kmax + k;
            rho_val = p[ind] / (R * T[ind]);
            // BUG FIX: use fabs -- plain abs() can resolve to the integer
            // overload and truncate the velocities.
            term1 = fabs( u[ind] ) / dx;            // convective, x
            term2 = fabs( v[ind] ) / dy;            // convective, y
            term3 = pow( gam*p[ind]/rho_val, 0.5 ) * spaceUnit; // acoustic (speed of sound)
            term4 = 2 * vprime * pow(spaceUnit, 2); // viscous
            dt_cfl = 1/(term1 + term2 + term3 + term4);
            if (CFL*dt_cfl < dt)
                dt = CFL*dt_cfl;
        }
    }
    return dt;
}
void arrayToCSV(double* values, const char* filename, int numDims) {
    // Dump a (numDims x jmax x kmax) array to a text file: each output line is
    // one j-row of kmax comma-prefixed values; a "Dimension Starting" marker
    // separates the numDims planes.
    // (Parameter is now const char* so passing string literals is well-formed;
    // existing callers are unaffected.)
    FILE *fp;
    fp = fopen(filename, "w+");
    if (fp == NULL) { // BUG FIX: fopen result was never checked
        fprintf(stderr, "arrayToCSV: could not open %s for writing\n", filename);
        return;
    }
    for (int dim=0; dim<numDims; dim++) {
        for (int j=0; j<jmax; j++) {
            for (int k=0; k<kmax; k++) {
                fprintf(fp, ", %f", values[(j + dim*jmax)*kmax + k]);
            }
            fprintf(fp, "\n");
        }
        if (dim < numDims-1)
            fprintf(fp, "Dimension Starting: %i\n", dim+1);
    }
    fclose(fp); // BUG FIX: the stream was never closed (leak / unflushed output)
}
int main(void)
{
    // Driver for the GPU MacCormack flat-plate solver:
    //   1. allocate and initialize host fields and boundary conditions
    //   2. mirror everything onto the device
    //   3. iterate the 8-stage scheme, recomputing dt on the host each step
    //   4. dump the final fields to CSV files
    double* x = (double*)malloc(jmax*sizeof(double));
    double* y = (double*)malloc(kmax*sizeof(double));
    double* u = (double*)malloc( jmax*kmax*sizeof(double) );
    double* v = (double*)malloc( jmax*kmax*sizeof(double) );
    double* p = (double*)malloc( jmax*kmax*sizeof(double) );
    double* T = (double*)malloc( jmax*kmax*sizeof(double) );
    double* rho = (double*)malloc( jmax*kmax*sizeof(double) );
    double* e = (double*)malloc( jmax*kmax*sizeof(double) );
    // BUG FIX: setupGrid was never called, so x and y held uninitialized
    // memory when copied to the device.
    setupGrid(x, y);
    initializePrimatives(u, v, p, T, rho, e);
    applyBC_UpperPlate(u, v, p, T);
    // technically only needed in GPU memory, but keep host copies so we can
    // copy back intermediate results for debugging; no initialization needed
    // since they are fully computed on the device every iteration
    double* Q = (double*)malloc( 4*jmax*kmax*sizeof(double));
    double* Q_pred = (double*)malloc( 4*jmax*kmax*sizeof(double));
    double* F = (double*)malloc( 4*jmax*kmax*sizeof(double));
    double* G = (double*)malloc( 4*jmax*kmax*sizeof(double));
    // initialize and allocate device variables
    double* x_d;
    double* y_d;
    double* u_d;
    double* v_d;
    double* p_d;
    double* T_d;
    double* rho_d;
    double* e_d;
    double* Q_d;
    double* Q_pred_d;
    double* F_d;
    double* G_d;
    // BUG FIX: x and y are 1-D arrays of jmax and kmax entries; the old code
    // allocated and copied jmax*kmax doubles for each, reading far past the
    // ends of the host buffers. Also: every CUDA call is now checked with
    // gpuErrchk instead of assigning the result to an unused variable.
    gpuErrchk( cudaMalloc((void**)&x_d, jmax*sizeof(double)) );
    gpuErrchk( cudaMalloc((void**)&y_d, kmax*sizeof(double)) );
    gpuErrchk( cudaMalloc((void**)&u_d, jmax*kmax*sizeof(double)) );
    gpuErrchk( cudaMalloc((void**)&v_d, jmax*kmax*sizeof(double)) );
    gpuErrchk( cudaMalloc((void**)&p_d, jmax*kmax*sizeof(double)) );
    gpuErrchk( cudaMalloc((void**)&T_d, jmax*kmax*sizeof(double)) );
    gpuErrchk( cudaMalloc((void**)&rho_d, jmax*kmax*sizeof(double)) );
    gpuErrchk( cudaMalloc((void**)&e_d, jmax*kmax*sizeof(double)) );
    // these are all 3d arrays (4 stacked jmax x kmax planes)
    gpuErrchk( cudaMalloc((void**)&Q_d, 4*jmax*kmax*sizeof(double)) );
    gpuErrchk( cudaMalloc((void**)&Q_pred_d, 4*jmax*kmax*sizeof(double)) );
    gpuErrchk( cudaMalloc((void**)&F_d, 4*jmax*kmax*sizeof(double)) );
    gpuErrchk( cudaMalloc((void**)&G_d, 4*jmax*kmax*sizeof(double)) );
    gpuErrchk( cudaMemcpy(x_d, x, jmax*sizeof(double), cudaMemcpyHostToDevice) );
    gpuErrchk( cudaMemcpy(y_d, y, kmax*sizeof(double), cudaMemcpyHostToDevice) );
    gpuErrchk( cudaMemcpy(u_d, u, jmax*kmax*sizeof(double), cudaMemcpyHostToDevice) );
    gpuErrchk( cudaMemcpy(v_d, v, jmax*kmax*sizeof(double), cudaMemcpyHostToDevice) );
    gpuErrchk( cudaMemcpy(p_d, p, jmax*kmax*sizeof(double), cudaMemcpyHostToDevice) );
    gpuErrchk( cudaMemcpy(T_d, T, jmax*kmax*sizeof(double), cudaMemcpyHostToDevice) );
    gpuErrchk( cudaMemcpy(rho_d, rho, jmax*kmax*sizeof(double), cudaMemcpyHostToDevice) );
    gpuErrchk( cudaMemcpy(e_d, e, jmax*kmax*sizeof(double), cudaMemcpyHostToDevice) );
    // one thread per (j,k) grid point; the +1 block covers the remainder
    dim3 threadsPerBlock(16,16);
    dim3 numBlocks(jmax/threadsPerBlock.x + 1, kmax/threadsPerBlock.y + 1);
    // the stages run as separate kernel launches so each one sees a
    // grid-wide consistent view of the previous stage's output
    int maxIter = 1000;
    for (int iter=0; iter<maxIter; iter++) {
        printf("Calculating iteration %i / %i\n", iter+1, maxIter);
        // CFL time step from the host copies of the primitives
        double dt = calc_dt(u, v, p, T, dx, dy);
        // calculate F, G, and Q
        iterateScheme_part1<<<numBlocks, threadsPerBlock>>> (x_d, y_d, u_d, v_d, p_d, T_d, rho_d, e_d, Q_d, Q_pred_d, F_d, G_d, dx, dy, dt);
        gpuErrchk( cudaPeekAtLastError() );
        gpuErrchk( cudaDeviceSynchronize() );
        // calculate MacCormack's Predictor and get back primatives out of Q
        iterateScheme_part2<<<numBlocks, threadsPerBlock>>> (x_d, y_d, u_d, v_d, p_d, T_d, rho_d, e_d, Q_d, Q_pred_d, F_d, G_d, dx, dy, dt);
        gpuErrchk( cudaPeekAtLastError() );
        gpuErrchk( cudaDeviceSynchronize() );
        // enforce boundary conditions at non-surface points
        iterateScheme_part3<<<numBlocks, threadsPerBlock>>> (x_d, y_d, u_d, v_d, p_d, T_d, rho_d, e_d, Q_d, Q_pred_d, F_d, G_d, dx, dy, dt);
        gpuErrchk( cudaPeekAtLastError() );
        gpuErrchk( cudaDeviceSynchronize() );
        // enforce boundary conditions at surface points
        iterateScheme_part4<<<numBlocks, threadsPerBlock>>> (x_d, y_d, u_d, v_d, p_d, T_d, rho_d, e_d, Q_d, Q_pred_d, F_d, G_d, dx, dy, dt);
        gpuErrchk( cudaPeekAtLastError() );
        gpuErrchk( cudaDeviceSynchronize() );
        // update F and G for corrected primatives
        iterateScheme_part5<<<numBlocks, threadsPerBlock>>> (x_d, y_d, u_d, v_d, p_d, T_d, rho_d, e_d, Q_d, Q_pred_d, F_d, G_d, dx, dy, dt);
        gpuErrchk( cudaPeekAtLastError() );
        gpuErrchk( cudaDeviceSynchronize() );
        // calculate MacCormack's Corrector and get back primatives out of Q
        iterateScheme_part6<<<numBlocks, threadsPerBlock>>> (x_d, y_d, u_d, v_d, p_d, T_d, rho_d, e_d, Q_d, Q_pred_d, F_d, G_d, dx, dy, dt);
        gpuErrchk( cudaPeekAtLastError() );
        gpuErrchk( cudaDeviceSynchronize() );
        // enforce boundary conditions at non-surface points
        iterateScheme_part7<<<numBlocks, threadsPerBlock>>> (x_d, y_d, u_d, v_d, p_d, T_d, rho_d, e_d, Q_d, Q_pred_d, F_d, G_d, dx, dy, dt);
        gpuErrchk( cudaPeekAtLastError() );
        gpuErrchk( cudaDeviceSynchronize() );
        // enforce boundary conditions at surface points
        iterateScheme_part8<<<numBlocks, threadsPerBlock>>> (x_d, y_d, u_d, v_d, p_d, T_d, rho_d, e_d, Q_d, Q_pred_d, F_d, G_d, dx, dy, dt);
        gpuErrchk( cudaPeekAtLastError() );
        gpuErrchk( cudaDeviceSynchronize() );
        // refresh the host primitives used by calc_dt next iteration
        gpuErrchk( cudaMemcpy(u, u_d, jmax*kmax*sizeof(double), cudaMemcpyDeviceToHost) );
        gpuErrchk( cudaMemcpy(v, v_d, jmax*kmax*sizeof(double), cudaMemcpyDeviceToHost) );
        gpuErrchk( cudaMemcpy(p, p_d, jmax*kmax*sizeof(double), cudaMemcpyDeviceToHost) );
        gpuErrchk( cudaMemcpy(T, T_d, jmax*kmax*sizeof(double), cudaMemcpyDeviceToHost) );
    }
    // pull back the final flux/state arrays for output
    gpuErrchk( cudaMemcpy(F, F_d, 4*jmax*kmax*sizeof(double), cudaMemcpyDeviceToHost) );
    gpuErrchk( cudaMemcpy(G, G_d, 4*jmax*kmax*sizeof(double), cudaMemcpyDeviceToHost) );
    gpuErrchk( cudaMemcpy(Q, Q_d, 4*jmax*kmax*sizeof(double), cudaMemcpyDeviceToHost) );
    arrayToCSV(u, "u.csv", 1);
    arrayToCSV(v, "v.csv", 1);
    arrayToCSV(p, "p.csv", 1);
    arrayToCSV(T, "T.csv", 1);
    arrayToCSV(F, "F.csv", 4);
    arrayToCSV(G, "G.csv", 4);
    arrayToCSV(Q, "Q.csv", 4);
    free(x);
    free(y);
    free(u);
    free(v);
    free(p);
    free(T);
    free(rho);
    free(e);
    free(Q);
    free(Q_pred);
    free(F);
    free(G);
    cudaFree(x_d);
    cudaFree(y_d);
    cudaFree(u_d);
    cudaFree(v_d);
    cudaFree(p_d);
    cudaFree(T_d);
    cudaFree(rho_d);
    cudaFree(e_d);
    cudaFree(Q_d);
    cudaFree(Q_pred_d);
    cudaFree(F_d);
    cudaFree(G_d);
    printf("Finishing!\n");
    return 0;
}
|
12,133 | #include "includes.h"
__global__ void MarkMerges(int size, int* desiredMerges, int* merging, int* mergesToMake, int* incomplete) {
    // One thread per aggregate. An aggregate with a real candidate partner
    // (desiredMerges[idx] >= 0) merges only when the choice is mutual; the
    // merge is recorded as higher-index-into-lower-index. A non-mutual
    // candidate sets incomplete[0] so the caller knows another pass is needed.
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= size)
        return;
    const int partner = desiredMerges[idx];
    if (partner < 0)
        return; // no potential merger for this aggregate
    if (desiredMerges[partner] != idx) {
        // partner wants someone else -- flag for another round
        incomplete[0] = 1;
        return;
    }
    // mutual agreement: record the merge as higher index merging into lower
    if (partner > idx)
        mergesToMake[partner] = idx;
    else
        mergesToMake[idx] = partner;
    // mark both aggregates as participating in a merge
    merging[idx] = 1;
    merging[partner] = 1;
}
12,134 | #include <cuda.h>
#include <bits/stdc++.h>
#define BLOCK_SIZE 32
using namespace std;
void fill_matrix_random(int *mat, int rows, int cols){
    // Fill a row-major rows x cols matrix with pseudo-random values in [0, 5).
    const int total = rows * cols;
    for (int idx = 0; idx < total; ++idx)
        mat[idx] = rand() % 5;
}
bool check_matrix(int *A, int *B, int rows, int cols){
    // Element-wise equality of two row-major rows x cols matrices.
    const int total = rows * cols;
    for (int idx = 0; idx < total; ++idx){
        if (A[idx] != B[idx])
            return false;
    }
    return true;
}
void print_matrix(int *mat, int rows, int cols){
    // Pretty-print a row-major rows x cols matrix between separator lines.
    cout << "------------" << endl;
    for (int r = 0; r < rows; ++r){
        for (int c = 0; c < cols; ++c)
            cout << mat[r * cols + c] << " ";
        cout << endl;
    }
    cout << "------------" << endl;
}
void mat_mul_seq(int *m_A, int *m_B, int *m_C, int A_rows, int A_cols, int B_rows, int B_cols){
    // Sequential reference matrix multiply: C = A * B, row-major storage.
    // (B_rows is accepted for signature symmetry; the inner dimension used
    // is A_cols.)
    for (int r = 0; r < A_rows; ++r){
        for (int c = 0; c < B_cols; ++c){
            int acc = 0;
            for (int t = 0; t < A_cols; ++t)
                acc += m_A[r * A_cols + t] * m_B[t * B_cols + c];
            m_C[r * B_cols + c] = acc;
        }
    }
}
__global__ void mat_mul_kernel(int *m_A, int *m_B, int *m_C, int A_rows, int A_cols, int B_rows, int B_cols){
    // Naive GPU matrix multiply: each thread computes one element of
    // C = A * B (row-major). Threads past the output bounds do nothing.
    const int r = threadIdx.y + blockIdx.y * blockDim.y;
    const int c = threadIdx.x + blockIdx.x * blockDim.x;
    if (r >= A_rows || c >= B_cols)
        return;
    int acc = 0;
    for (int t = 0; t < A_cols; ++t)
        acc += m_A[r * A_cols + t] * m_B[t * B_cols + c];
    m_C[r * B_cols + c] = acc;
}
__global__ void mat_mul_kernel_tiled(int *m_A, int *m_B, int *m_C, int A_rows, int A_cols, int B_rows, int B_cols){
    // Tiled GPU matrix multiply: C = A * B using BLOCK_SIZE x BLOCK_SIZE
    // shared-memory tiles. Out-of-range threads load zeros into the tiles so
    // that every thread in the block reaches both __syncthreads() barriers.
    __shared__ int s_A[BLOCK_SIZE][BLOCK_SIZE], s_B[BLOCK_SIZE][BLOCK_SIZE];
    int blockRow = blockIdx.y;
    int blockCol = blockIdx.x;
    int threadRow = threadIdx.y;
    int threadCol = threadIdx.x;
    int row = blockRow * BLOCK_SIZE + threadRow;
    int col = blockCol * BLOCK_SIZE + threadCol;
    int sum = 0;
    for (int sm = 0; sm < ceil(A_cols / float (BLOCK_SIZE)); sm++){
        // stage one tile of A and one tile of B, zero-padding past the edges
        if (row < A_rows && (sm * BLOCK_SIZE + threadCol) < A_cols){
            s_A[threadRow][threadCol] = m_A[(row) * A_cols + (sm * BLOCK_SIZE + threadCol)];
        } else{
            s_A[threadRow][threadCol] = 0;
        }
        if (col < B_cols && (threadRow + sm * BLOCK_SIZE) < B_rows){
            s_B[threadRow][threadCol] = m_B[(threadRow + sm * BLOCK_SIZE) * B_cols + (col)];
        } else{
            s_B[threadRow][threadCol] = 0;
        }
        __syncthreads(); // tiles fully loaded before any thread reads them
        for (int i = 0; i < BLOCK_SIZE; i++){
            sum += s_A[threadRow][i] * s_B[i][threadCol];
        }
        __syncthreads(); // finish reading before the next load overwrites
    }
    // BUG FIX: the result was written inside the tile loop, storing a partial
    // sum to global memory on every iteration; write once after the loop.
    if (row < A_rows && col < B_cols)
        m_C[row * B_cols + col] = sum;
}
void mat_mul_con(int *m_A, int *m_B, int *m_C, int A_rows, int A_cols, int B_rows, int B_cols){
    // Host wrapper for the naive GPU multiply: C = A * B, where C is
    // A_rows x B_cols. Allocates device buffers, copies inputs, launches,
    // and copies the result back.
    int A_size = A_rows * A_cols * sizeof(int);
    int B_size = B_rows * B_cols * sizeof(int);
    int C_size = A_rows * B_cols * sizeof(int);
    int *d_A, *d_B, *d_C;
    //1. Allocate memory for d_A, etc. on the device (cudaMalloc)
    cudaMalloc(&d_A, A_size);
    cudaMalloc(&d_B, B_size);
    cudaMalloc(&d_C, C_size);
    //2. Copy Data from host to d_A, etc. (cudaMemcpy)
    cudaMemcpy(d_A, m_A, A_size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, m_B, B_size, cudaMemcpyHostToDevice);
    //3. Kernel launch. FIX: the grid is sized to the OUTPUT matrix
    // (B_cols wide, A_rows tall); the old max(A_*, B_*) sizing launched
    // extra blocks that could never pass the kernel's bounds check.
    dim3 dimGrid(ceil(B_cols / float(BLOCK_SIZE)), ceil(A_rows / float(BLOCK_SIZE)), 1);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, 1);
    mat_mul_kernel<<<dimGrid, dimBlock>>> (d_A, d_B, d_C, A_rows, A_cols, B_rows, B_cols);
    cudaDeviceSynchronize();
    //4. Copy d_C back to the host and free device memory
    cudaMemcpy (m_C, d_C, C_size, cudaMemcpyDeviceToHost);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
void mat_mul_con_tiled(int *m_A, int *m_B, int *m_C, int A_rows, int A_cols, int B_rows, int B_cols){
    // Host wrapper for the tiled GPU multiply: C = A * B, where C is
    // A_rows x B_cols. Same flow as mat_mul_con but launches the
    // shared-memory tiled kernel.
    int A_size = A_rows * A_cols * sizeof(int);
    int B_size = B_rows * B_cols * sizeof(int);
    int C_size = A_rows * B_cols * sizeof(int);
    int *d_A, *d_B, *d_C;
    //1. Allocate memory for d_A, etc. on the device (cudaMalloc)
    cudaMalloc(&d_A, A_size);
    cudaMalloc(&d_B, B_size);
    cudaMalloc(&d_C, C_size);
    //2. Copy Data from host to d_A, etc. (cudaMemcpy)
    cudaMemcpy(d_A, m_A, A_size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, m_B, B_size, cudaMemcpyHostToDevice);
    //3. Kernel launch. FIX: the grid is sized to the OUTPUT matrix
    // (B_cols wide, A_rows tall); the old max(A_*, B_*) sizing launched
    // extra blocks that only ever loaded zero tiles and stored nothing.
    dim3 dimGrid(ceil(B_cols / float(BLOCK_SIZE)), ceil(A_rows / float(BLOCK_SIZE)), 1);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, 1);
    mat_mul_kernel_tiled<<<dimGrid, dimBlock>>> (d_A, d_B, d_C, A_rows, A_cols, B_rows, B_cols);
    cudaDeviceSynchronize();
    //4. Copy d_C back to the host and free device memory
    cudaMemcpy (m_C, d_C, C_size, cudaMemcpyDeviceToHost);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
int main(int argc, char **argv){
    // Benchmark driver: for matrix sizes i = step, 2*step, ..., max_number,
    // multiplies an (i+offset_A) x i matrix by an i x (i+offset_B) matrix
    // three ways (sequential CPU, naive GPU, tiled GPU), verifies the GPU
    // results against the CPU reference, and logs per-size timings to the
    // *.mio files (x.mio holds the sizes).
    if (argc < 5){
        cout << "Usage: ./mul max_number step offset_A, offset_B" << endl;
        return 0;
    }
    const int max_number = atoi(argv[1]),
        step = atoi(argv[2]),
        offset_A = atoi(argv[3]),
        offset_B = atoi(argv[4]);
    srand (time(NULL));
    // output streams: sizes plus one timing series per implementation
    ofstream x("x.mio"),
        y_seq("y_seq.mio"),
        y_con("y_con.mio"),
        y_con_tiled("y_con_tiled.mio");
    clock_t begin, end;
    double elapsed_secs;
    for (int i = step; i <= max_number; i += step){
        // A: (i+offset_A) x i, B: i x (i+offset_B);
        // C holds the CPU reference, D the GPU results
        int *A, *B, *C, *D;
        A = (int*) malloc((i + offset_A) * i * sizeof(int));
        B = (int*) malloc((i + offset_B) * i * sizeof(int));
        C = (int*) malloc((i + offset_A) * (i + offset_B) * sizeof(int));
        D = (int*) malloc((i + offset_A) * (i + offset_B) * sizeof(int));
        x << i << endl;
        fill_matrix_random(A, i + offset_A, i);
        fill_matrix_random(B, i, i + offset_B);
        // sequential CPU reference (timed)
        begin = clock();
        mat_mul_seq(A, B, C, i + offset_A, i, i, i + offset_B);
        end = clock();
        elapsed_secs = double(end - begin) / CLOCKS_PER_SEC;
        y_seq << elapsed_secs << endl;
        // naive GPU version (timed, includes transfers)
        begin = clock();
        mat_mul_con(A, B, D, i + offset_A, i, i, i + offset_B);
        end = clock();
        elapsed_secs = double(end - begin) / CLOCKS_PER_SEC;
        y_con << elapsed_secs << endl;
        cout << "-----------" << endl << "Not Tiled: ";
        if (check_matrix(C, D, i + offset_A, i + offset_B))
            cout << "All good" << endl;
        else
            cout << "Something Went Wrong" << endl;
        cout << "-----------" << endl;
        // tiled GPU version (timed, includes transfers); D is reused
        begin = clock();
        mat_mul_con_tiled(A, B, D, i + offset_A, i, i, i + offset_B);
        end = clock();
        elapsed_secs = double(end - begin) / CLOCKS_PER_SEC;
        y_con_tiled << elapsed_secs << endl;
        cout << "Tiled: ";
        if (check_matrix(C, D, i + offset_A, i + offset_B))
            cout << "All good" << endl;
        else
            cout << "Something Went Wrong ARow " << i + offset_A << " ACol " << i << " BCol " << i + offset_B << endl;
        cout << "-----------" << endl;
        free(A);
        free(B);
        free(C);
        free(D);
    }
    return 0;
}
|
12,135 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define CLOCKS_PAR_SEC 1000000l
typedef int vector_t;
#define N 8192
/************************************************************************/
/* Example */
/************************************************************************/
__global__ void add_matrix(vector_t *a, vector_t *b, vector_t *c)
{
    // Element-wise sum c[i] = a[i] + b[i]; one thread per element.
    // NOTE(review): no bounds guard -- the launch must supply exactly as many
    // threads as elements (the caller launches N total).
    const int i = threadIdx.x + blockIdx.x * blockDim.x;
    c[i] = a[i] + b[i];
}
/************************************************************************/
/* Main */
/************************************************************************/
int main(int argc, char* argv[])
{
    // Compare GPU vs CPU element-wise vector addition over N elements:
    // times the GPU path (transfers + kernel) with CUDA events, verifies the
    // result, then times 1000 CPU passes with clock().
    vector_t *a = new vector_t[N];
    vector_t *b = new vector_t[N];
    vector_t *c = new vector_t[N];
    // known inputs so the result can be checked: 3 + 2 == 5 everywhere
    for ( int i = 0; i < N; ++i )
    {
        a[i] = 3;
        b[i] = 2;
    }
    vector_t *ad, *bd, *cd;
    const int size = N*sizeof(vector_t);
    cudaMalloc( (vector_t**)&ad, size );
    cudaMalloc( (vector_t**)&bd, size );
    cudaMalloc( (vector_t**)&cd, size );
    /* execution-time measurement (GPU side, via CUDA events) */
    cudaEvent_t start, stop;
    float time;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    /* copy the input data to the GPU (included in the timed region) */
    cudaMemcpy( ad, a, size, cudaMemcpyHostToDevice );
    cudaMemcpy( bd, b, size, cudaMemcpyHostToDevice );
    // N/512 threads per block x 512 blocks = exactly N threads
    // NOTE(review): the names are swapped relative to their roles -- dimBlock
    // is passed as the grid and dimGrid as the block size; total coverage is
    // still exactly N threads, which the guard-less kernel relies on.
    dim3 dimBlock ( N/512, 1 );
    dim3 dimGrid ( 512, 1 );
    /* run the operation on the GPU */
    add_matrix<<<dimGrid, dimBlock>>>( ad, bd, cd);
    cudaMemcpy( c, cd, size, cudaMemcpyDeviceToHost );
    /* end of the execution-time measurement */
    cudaEventRecord(stop, 0);
    cudaEventSynchronize( stop );
    cudaEventElapsedTime(&time, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree( ad );
    cudaFree( bd );
    cudaFree( cd );
    /* verify the results */
    for (int i=0; i<N; i++)
    {
        if (c[i] != 5)
        {
            printf("erreur à l'adresse %d \n", i);
            printf("c[%d] = %d \n", i, c[i] );
            getchar();
            return 0;
        }
    }
    /* report the GPU execution time */
    printf("temps écoule sur GPU : %f ms \n", time);
    /**********************************************
    run the same operation on the CPU
    **********************************************/
    int j=0;
    clock_t t1, t2;
    double tempsCPU;
    t1 = clock();
    /* run the operation on the CPU (1000 passes, averaged below) */
    for (j=0; j<1000; j++)
    {
        for (int i=0; i<N; i++)
            c[i] = a[i] + b[i];
    }
    t2 = clock();
    tempsCPU = (double)difftime(t2, t1)/(double)CLOCKS_PAR_SEC;
    /* report the per-pass CPU execution time */
    printf("temps écoule sur CPU: %f ms \n", tempsCPU * 1000.0 / j);
    getchar();
    delete[] a;
    delete[] b;
    delete[] c;
    return EXIT_SUCCESS;
}
|
12,136 | #include <cuda.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <math.h>
// to build on Titan V:
// nvcc -arch=sm_70 --ptxas-options=-v -o vanilladeriv vanilladeriv.cu;
#ifdef USE_DOUBLE
#define dfloat double
#else
#define dfloat float
#endif
#ifndef POLYNOMIAL_ORDER
#define POLYNOMIAL_ORDER 4
#endif
// note the order of the fields below is also assumed in the code.
const int64_t _nstate = 5;
const int64_t _R = 0, _U = 1, _V = 2, _W = 3, _E = 4;
const int64_t _nvgeo = 14;
const int64_t _XIx = 0;
const int64_t _ETAx = 1;
const int64_t _ZETAx = 2;
const int64_t _XIy = 3;
const int64_t _ETAy = 4;
const int64_t _ZETAy = 5;
const int64_t _XIz = 6;
const int64_t _ETAz = 7;
const int64_t _ZETAz = 8;
const int64_t _MJ = 9;
const int64_t _MJI = 10;
const int64_t _x = 11;
const int64_t _y = 12;
const int64_t _z = 13;
#define grav ((dfloat) 9.81)
#define gdm1 ((dfloat) 0.4)
// Volume RHS kernel for the 3-D compressible Euler equations on Nq^3
// tensor-product elements. One block per element, blockDim = (Nq, Nq);
// each thread owns one (i,j) column of Nq nodes in the ZETA direction and
// accumulates its RHS in registers. Shared memory holds the differentiation
// matrix and the XI/ETA flux planes of the current k-slab.
// State layout in Q/rhs: [_R,_U,_V,_W,_E] planes of Np entries per element;
// geometric factors in vgeo use the _XI*/_ETA*/_ZETA*/_MJ/_MJI/_x.._z planes.
template <int64_t Nq, int64_t Np, int64_t nvar>
__global__ void volumerhs(dfloat * __restrict__ rhs,
const dfloat * __restrict__ Q,
const dfloat * __restrict__ vgeo,
const dfloat gravity,
const dfloat * __restrict__ D,
const int64_t nelem){
__shared__ dfloat s_D[Nq][Nq];
__shared__ dfloat s_F[Nq][Nq][_nstate];
__shared__ dfloat s_G[Nq][Nq][_nstate];
// Per-thread RHS accumulators for the whole ZETA column of Nq nodes.
dfloat r_rhsR[Nq];
dfloat r_rhsU[Nq];
dfloat r_rhsV[Nq];
dfloat r_rhsW[Nq];
dfloat r_rhsE[Nq];
int64_t e = blockIdx.x;
int64_t j = threadIdx.y;
int64_t i = threadIdx.x;
// Stage the Nq x Nq differentiation matrix once per block.
s_D[j][i] = D[j*Nq+i];
#pragma unroll Nq
for(int64_t k=0;k<Nq;++k){
r_rhsR[k] = 0;
r_rhsU[k] = 0;
r_rhsV[k] = 0;
r_rhsW[k] = 0;
r_rhsE[k] = 0;
}
// March the k-slabs; each iteration fills s_F/s_G for slab k and adds the
// ZETA-direction contribution of slab k to every node of the column.
#pragma unroll Nq
for(int64_t k=0;k<Nq;++k){
__syncthreads();
// Load this node's geometric factors into registers.
int64_t gid = i + j*Nq + k*Nq*Nq + e*Np*_nvgeo;
dfloat MJ = vgeo[gid + _MJ*Np];
dfloat XIx = vgeo[gid + _XIx*Np];
dfloat XIy = vgeo[gid + _XIy*Np];
dfloat XIz = vgeo[gid + _XIz*Np];
dfloat ETAx = vgeo[gid + _ETAx*Np];
dfloat ETAy = vgeo[gid + _ETAy*Np];
dfloat ETAz = vgeo[gid + _ETAz*Np];
dfloat ZETAx = vgeo[gid + _ZETAx*Np];
dfloat ZETAy = vgeo[gid + _ZETAy*Np];
dfloat ZETAz = vgeo[gid + _ZETAz*Np];
dfloat z = vgeo[gid + _z*Np];
// Load the conserved state at this node.
int64_t qid = i + j*Nq + k*Nq*Nq + e*Np*nvar;
dfloat R = Q[qid + _R*Np];
dfloat U = Q[qid + _U*Np];
dfloat V = Q[qid + _V*Np];
dfloat W = Q[qid + _W*Np];
dfloat E = Q[qid + _E*Np];
// Pressure from the ideal-gas EOS with a gravitational potential term.
dfloat P = gdm1*(E - (U*U + V*V + W*W)/(2*R) - R*gravity*z);
dfloat Rinv = 1 / R;
// Physical flux components in x, y, z for each conserved variable.
dfloat fluxR_x = U;
dfloat fluxU_x = Rinv * U * U + P;
dfloat fluxV_x = Rinv * U * V;
dfloat fluxW_x = Rinv * U * W;
dfloat fluxE_x = Rinv * U * (E + P);
dfloat fluxR_y = V;
dfloat fluxU_y = Rinv * V * U;
dfloat fluxV_y = Rinv * V * V + P;
dfloat fluxW_y = Rinv * V * W;
dfloat fluxE_y = Rinv * V * (E + P);
dfloat fluxR_z = W;
dfloat fluxU_z = Rinv * W * U;
dfloat fluxV_z = Rinv * W * V;
dfloat fluxW_z = Rinv * W * W + P;
dfloat fluxE_z = Rinv * W * (E + P);
// Contravariant fluxes: F (XI direction) and G (ETA direction) go to
// shared memory since they are differentiated across threads below.
s_F[i][j][ _R] = MJ * (XIx * fluxR_x + XIy * fluxR_y + XIz * fluxR_z);
s_F[i][j][ _U] = MJ * (XIx * fluxU_x + XIy * fluxU_y + XIz * fluxU_z);
s_F[i][j][ _V] = MJ * (XIx * fluxV_x + XIy * fluxV_y + XIz * fluxV_z);
s_F[i][j][ _W] = MJ * (XIx * fluxW_x + XIy * fluxW_y + XIz * fluxW_z);
s_F[i][j][ _E] = MJ * (XIx * fluxE_x + XIy * fluxE_y + XIz * fluxE_z);
s_G[i][j][ _R] = MJ * (ETAx * fluxR_x + ETAy * fluxR_y + ETAz * fluxR_z);
s_G[i][j][ _U] = MJ * (ETAx * fluxU_x + ETAy * fluxU_y + ETAz * fluxU_z);
s_G[i][j][ _V] = MJ * (ETAx * fluxV_x + ETAy * fluxV_y + ETAz * fluxV_z);
s_G[i][j][ _W] = MJ * (ETAx * fluxW_x + ETAy * fluxW_y + ETAz * fluxW_z);
s_G[i][j][ _E] = MJ * (ETAx * fluxE_x + ETAy * fluxE_y + ETAz * fluxE_z);
// The ZETA-direction flux H stays in registers: only this thread's
// column needs it.
dfloat r_HR = MJ * (ZETAx * fluxR_x + ZETAy * fluxR_y + ZETAz * fluxR_z);
dfloat r_HU = MJ * (ZETAx * fluxU_x + ZETAy * fluxU_y + ZETAz * fluxU_z);
dfloat r_HV = MJ * (ZETAx * fluxV_x + ZETAy * fluxV_y + ZETAz * fluxV_z);
dfloat r_HW = MJ * (ZETAx * fluxW_x + ZETAy * fluxW_y + ZETAz * fluxW_z);
dfloat r_HE = MJ * (ZETAx * fluxE_x + ZETAy * fluxE_y + ZETAz * fluxE_z);
// ZETA derivative: slab k contributes D[n][k]*H to every node n of the column.
// one shared access per 10 flops
#pragma unroll Nq
for(int64_t n=0;n<Nq;++n){
dfloat Dnk = s_D[n][k];
r_rhsR[n] += Dnk * r_HR;
r_rhsU[n] += Dnk * r_HU;
r_rhsV[n] += Dnk * r_HV;
r_rhsW[n] += Dnk * r_HW;
r_rhsE[n] += Dnk * r_HE;
}
// Gravity source term on the vertical momentum.
r_rhsW[k] -= MJ * R * gravity;
__syncthreads();
// XI and ETA derivatives within the k-slab (reads other threads' s_F/s_G).
#pragma unroll Nq
for(int64_t n=0;n<Nq;++n){
dfloat Dni = s_D[n][i];
dfloat Dnj = s_D[n][j];
r_rhsR[k] += Dni * s_F[n][j][_R];
r_rhsR[k] += Dnj * s_G[i][n][_R];
r_rhsU[k] += Dni * s_F[n][j][_U];
r_rhsU[k] += Dnj * s_G[i][n][_U];
r_rhsV[k] += Dni * s_F[n][j][_V];
r_rhsV[k] += Dnj * s_G[i][n][_V];
r_rhsW[k] += Dni * s_F[n][j][_W];
r_rhsW[k] += Dnj * s_G[i][n][_W];
r_rhsE[k] += Dni * s_F[n][j][_E];
r_rhsE[k] += Dnj * s_G[i][n][_E];
}
}
// Scale by the inverse mass matrix and accumulate into the global RHS.
#pragma unroll Nq
for(int64_t k=0;k<Nq;++k){
int64_t gid = i + j*Nq + k*Nq*Nq + e*Np*_nvgeo;
dfloat MJI = vgeo[gid + _MJI*Np];
int64_t qid = i + j*Nq + k*Nq*Nq + e*Np*nvar;
rhs[qid+_U*Np] += MJI*r_rhsU[k];
rhs[qid+_V*Np] += MJI*r_rhsV[k];
rhs[qid+_W*Np] += MJI*r_rhsW[k];
rhs[qid+_R*Np] += MJI*r_rhsR[k];
rhs[qid+_E*Np] += MJI*r_rhsE[k];
}
}
// Allocate a zero-initialized host array and a matching device array of N
// dfloats, fill the host copy with base + uniform[0,1)*range (drand48), and
// copy it to the device. Outputs: *q (host pointer), *c_q (device pointer).
void randArray(int64_t N, dfloat base, dfloat range, dfloat **q, dfloat **c_q){
    *q = (dfloat*) calloc(N, sizeof(dfloat));
    cudaMalloc(c_q, N*sizeof(dfloat));
    for(int64_t n = 0; n < N; ++n){
        (*q)[n] = base + drand48()*range;
    }
    cudaMemcpy(*c_q, *q, N*sizeof(dfloat), cudaMemcpyHostToDevice);
}
// Driver: build random (but seeded) state, geometry, and differentiation
// matrix, then launch the volumerhs kernel once over nelem elements.
int main(int argc, char **argv){
srand48(1234);
const int64_t N = POLYNOMIAL_ORDER;
const int64_t nelem = 4000;
const int64_t Nq = N+1;
const int64_t Np = Nq*Nq*Nq;
const int64_t Ntotal = Np*nelem*_nstate;
dfloat *Q, *c_Q;
randArray(Ntotal, 0., 1., &Q, &c_Q);
// Shift density and energy away from zero so pressure/1/R stay sane.
for(int64_t e=0;e<nelem;++e){
for(int64_t n=0;n<Np;++n){
int64_t idR = n + _R*Np + e*_nstate*Np;
int64_t idE = n + _E*Np + e*_nstate*Np;
Q[idR] += 2.;
Q[idE] += 20.;
}
}
// Re-upload Q since the host copy was modified after randArray's copy.
cudaMemcpy(c_Q, Q, nelem*_nstate*Np*sizeof(dfloat), cudaMemcpyHostToDevice);
const int64_t Gtotal = Np*nelem*_nvgeo;
dfloat *vgeo, *c_vgeo;
randArray(Gtotal, 0, 1., &vgeo, &c_vgeo);
// Make sure the entries of the mass matrix satisfy the inverse relation
for(int64_t e=0;e<nelem;++e){
for(int64_t n=0;n<Np;++n){
int64_t idMJ = n + _MJ*Np + e*_nvgeo*Np;
int64_t idMJI = n + _MJI*Np + e*_nvgeo*Np;
vgeo[idMJ] += 3;
vgeo[idMJI] = 1./vgeo[idMJ];
}
}
cudaMemcpy(c_vgeo, vgeo, nelem*_nvgeo*Np*sizeof(dfloat), cudaMemcpyHostToDevice);
dfloat *D, *c_D;
randArray(Nq*Nq, 1., 1., &D, &c_D);
dfloat *rhs, *c_rhs;
srand48(1234);
randArray(Ntotal, 1., 1., &rhs, &c_rhs);
// NOTE(review): B2 (an Nq^3 block) is declared but never used — the launch
// uses the Nq x Nq block B3; confirm B2 is leftover from a 3-D-block variant.
dim3 G(nelem,1,1);
dim3 B2(Nq,Nq,Nq);
dim3 B3(Nq,Nq,1);
volumerhs<Nq, Np, _nstate> <<< G, B3 >>> (c_rhs, c_Q, c_vgeo, grav, c_D, nelem);
cudaDeviceSynchronize();
// exit(0) ends the benchmark immediately; host/device buffers are
// reclaimed by the OS/driver rather than freed explicitly.
exit(0);
return 0;
}
|
12,137 | #include <iostream>
#include <ostream>
#include <sstream>
#include <iomanip>
#include <stdio.h>
#include <vector>
#include <fstream>
const unsigned int field_size = 53;
const unsigned int step = 100;
__device__ unsigned int d_field_size;
__device__ float d_dx;
__device__ float d_a;
__device__ float d_w;
__device__ float d_beta;
__device__ float d_tau;
__device__ float d_r0;
// One explicit-Euler time step of the Allen-Cahn phase-field equation on the
// interior points of the d_field_size x d_field_size grid:
//   phi_new = phi + tau * (a^2 * laplacian(phi) + 4w*phi*(1-phi)*(phi-1/2+beta))
// One thread per grid point; boundary/halo points are skipped (set_bc fills
// them). Reads d_phase, writes d_phase_tmp (caller swaps the buffers).
__global__ void calc_step(float *d_phase, float *d_phase_tmp) {
    int x_i = blockIdx.x * blockDim.x + threadIdx.x;
    int y_i = blockIdx.y * blockDim.y + threadIdx.y;
    if (x_i <= 0 || x_i >= d_field_size - 1 || y_i <= 0 || y_i >= d_field_size - 1) return;
    int i = y_i * d_field_size + x_i;
    float ddx = d_dx * d_dx;
    // FIX: use float literals throughout — the original used double literals
    // (2., 4., .5), silently promoting every expression to double precision.
    float rpx = (d_phase[i + 1] - 2.0f * d_phase[i] + d_phase[i - 1]) / ddx;
    float rpy = (d_phase[i + d_field_size] - 2.0f * d_phase[i] + d_phase[i - d_field_size]) / ddx;
    // Diffusive term: a^2 * 5-point Laplacian.
    float dpi1 = d_a * d_a * (rpx + rpy);
    // Double-well potential derivative with driving-force offset beta.
    float dpi2 = 4.0f * d_w * d_phase[i] * (1.0f - d_phase[i]) * (d_phase[i] - 0.5f + d_beta);
    float dpi = dpi1 + dpi2;
    d_phase_tmp[i] = d_phase[i] + d_tau * dpi;
}
// Apply zero-gradient (Neumann) boundary conditions by copying the adjacent
// interior row/column into each border cell. Launch with field_size-2
// threads in one block: thread x handles interior column/row index x+1.
// field_size is the host-side compile-time constant (usable in device code
// because it is an integral const); corners are never written.
__global__ void set_bc(float *field) {
int x_i = blockIdx.x * blockDim.x + threadIdx.x;
if ( x_i >= field_size - 2) return;
int i = x_i + 1;
// top
field[i] = field[i+field_size];
// bottom
field[field_size * (field_size - 1) + i] = field[field_size * (field_size - 2) + i];
// left
field[field_size * i] = field[field_size * i + 1];
// right
field[field_size * (i + 1) - 1] = field[field_size * (i + 1) - 2];
return;
}
// Initialize the interior of the phase field with a tanh profile: phi ~ 1
// inside a circle of radius d_r0 centered at the (1,1) grid corner, ~0
// outside, with interface width set by d_w and d_a. One thread per point;
// boundary cells are left for set_bc.
__global__ void init_field(float *field) {
int x_i = blockIdx.x * blockDim.x + threadIdx.x;
int y_i = blockIdx.y * blockDim.y + threadIdx.y;
if (x_i <= 0 || x_i >= d_field_size - 1 || y_i <= 0 || y_i >= d_field_size - 1) {
return;
}
int i = y_i * d_field_size + x_i;
// Physical coordinates of this interior point (origin at the first
// interior cell).
float y = (y_i - 1) * d_dx;
float x = (x_i - 1) * d_dx;
// Signed distance from the initial circular interface.
float r = sqrt(x*x + y*y) - d_r0;
field[i] = .5 * (1. - tanh(sqrt(2. * d_w) / (2. * d_a) * r));
// Debug dump of every initialized value (device printf is slow; presumably
// left in intentionally for verification — TODO confirm before production use).
printf("%f\n", field[i]);
return;
}
// Write the interior of the phase field for step n to
// "datas/step_<zero-padded n>.dat" in gnuplot-friendly "y x phi" rows.
// Returns false if the file cannot be opened, true on success.
bool save(float *phase, unsigned int n) {
    try {
        std::ofstream file;
        std::ostringstream filename;
        // Zero-pad the step index so files sort lexicographically.
        filename << "datas/step_" << std::setfill('0') << std::right << std::setw(std::log10(step)+1) << n << ".dat";
        file.open(filename.str(), std::ios_base::app);
        // FIX: the original ignored open failures (ofstream does not throw by
        // default), silently writing nothing and still returning true.
        if (!file.is_open()) {
            std::cout << "failed to open " << filename.str() << std::endl;
            return false;
        }
        file << "#x #y #phase" << std::endl;
        // remove boundaries
        for (unsigned int y_i = 1; y_i < field_size - 1; y_i++) {
            for (unsigned int x_i = 1; x_i < field_size - 1; x_i++) {
                file << y_i << ' ' << x_i << ' ' << phase[y_i * field_size + x_i] << std::endl;
            }
            file << std::endl;
        }
        file.close();
    } catch(char *str) {
        std::cout << str << std::endl;
        return false;
    }
    return true;
}
// Phase-field (Allen-Cahn) solidification simulation: derive the model
// parameters from physical constants, initialize a circular solid seed on
// the GPU, then time-step the field, saving each step to disk.
int main() {
unsigned int N = field_size * field_size;
float *phase; // phase field for host
float *d_phase, *d_phase_tmp; // phase field for device
phase = (float *)malloc(N * sizeof(float));
const float dx = 5e-7;
// Interfacial energy
float gamma = 1.;
// Interface width
float delta = 4. * dx;
// Interface mobility
float M = 4e-14;
// Interface region fraction
float lambda = .1;
// Gradient coefficient
float b = 2. * std::atanh(1.-2.*lambda);
float a = std::sqrt(3. * delta * gamma / b);
// Energy barrier height
float w = 6. * gamma * b / delta;
// Phase-field mobility
float M_phi = M * std::sqrt(2. * w) / (6. * a);
// Time step (stability-limited)
float dt = dx * dx / (5. * M_phi * a * a);
printf("Time Step: %.3e[s]\n", dt);
// Initial radius of the solid phase
float r0 = .5 * (field_size - 1) * dx;
float beta = .5;
float tau = M_phi * dt;
// Publish the model constants to device __constant__-style globals.
cudaMemcpyToSymbol(d_field_size, &field_size, sizeof(unsigned int));
cudaMemcpyToSymbol(d_dx, &dx, sizeof(float));
cudaMemcpyToSymbol(d_a, &a, sizeof(float));
cudaMemcpyToSymbol(d_w, &w, sizeof(float));
cudaMemcpyToSymbol(d_beta, &beta, sizeof(float));
cudaMemcpyToSymbol(d_tau, &tau, sizeof(float));
cudaMemcpyToSymbol(d_r0, &r0, sizeof(float));
// allocate memory to GPU
cudaMalloc((void**)&d_phase, N * sizeof(float));
cudaMalloc((void**)&d_phase_tmp, N * sizeof(float));
int threadsPerBlock = 32;
int blocksInGrid = (field_size + threadsPerBlock -1)/threadsPerBlock;
dim3 blocks(threadsPerBlock, threadsPerBlock);
dim3 grid(blocksInGrid, blocksInGrid);
// NOTE(review): `phase` is uninitialized here; init_field/set_bc overwrite
// everything except the four corner cells — confirm corners are unused.
cudaMemcpy(d_phase, phase, N * sizeof(float), cudaMemcpyHostToDevice);
init_field<<<grid, blocks>>>(d_phase);
set_bc<<<1, field_size - 2>>>(d_phase);
// Main time-stepping loop
for (unsigned int n = 0; n < step; n++) {
printf("step: %d\n", n);
// Pull the current field back to the host and persist it.
cudaMemcpy(phase, d_phase, N * sizeof(float), cudaMemcpyDeviceToHost);
save(phase, n);
calc_step<<<grid, blocks>>>(d_phase, d_phase_tmp);
// Swap (implemented as a device-to-device copy rather than a pointer swap)
cudaMemcpy(d_phase, d_phase_tmp, N * sizeof(float), cudaMemcpyDeviceToDevice);
set_bc<<<1, field_size - 2>>>(d_phase);
}
free(phase);
cudaFree(d_phase);
cudaFree(d_phase_tmp);
return 0;
}
|
12,138 | /*
* EDDL Library - European Distributed Deep Learning Library.
* Version: 0.7
* copyright (c) 2020, Universidad Politécnica de Valencia (UPV), PRHLT Research Centre
* Date: April 2020
* Author: PRHLT Research Centre, UPV, (rparedes@prhlt.upv.es), (jon@prhlt.upv.es)
* All rights reserved
*/
#include <string.h>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <cuda.h>
// Elementwise select (like numpy.where): C[i] = A[i] if condition[i] is
// nonzero, else B[i]. One thread per element with a tail guard.
__global__ void gpu_where(float *condition, float *A, float *B, float *C, long int size){
    long int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= size) return;
    C[idx] = ((bool) condition[idx]) ? A[idx] : B[idx];
}
|
12,139 | // Copyright (c) 2013 Damond Howard
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt
//CUDA Kernels
//test kernel
// Test kernel: increments the integer pointed to by a.
extern "C" __global__ void kernel1(int *a)
{
    // BUG FIX: the original did `a++`, which only advances the kernel's local
    // copy of the pointer — a no-op for the caller. Increment the value.
    (*a)++;
}
//vector addition kernel
// Elementwise c = a + b over the first n elements; one thread per element,
// with a tail guard for grids larger than n.
extern "C" __global__ void
vector_add(float *a, float *b, float *c, int n)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n)
        return;
    c[idx] = a[idx] + b[idx];
}
|
12,140 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#define BLOCKSIZE 1024
#define MAXIT 359
#define TOTROWS (BLOCKSIZE*8)
#define TOTCOLS (BLOCKSIZE*8)
#define NOTSETLOC -1 // for cells that are not fixed
#define SETLOC 1 // for cells that are fixed
#define EPSILON 0.1
#define QMAX(x,y) (((x) > (y))? (x): (y))
int *lkeepgoing;
float *iplate;
float *oplate;
float *fixed;
float *tmp;
int ncols, nrows;
double When();
void Compute();
// Allocate the plate arrays on the device, run the Jacobi solve 10 times,
// and report the average wall-clock time per run.
int main(int argc, char *argv[])
{
    double t0, tottime;
    ncols = TOTCOLS;
    nrows = TOTROWS;
    int i=0;
    cudaMalloc((void **) &lkeepgoing, nrows * ncols * sizeof(int));
    cudaMalloc((void **) &iplate, nrows * ncols * sizeof(float));
    cudaMalloc((void **) &oplate, nrows * ncols * sizeof(float));
    cudaMalloc((void **) &fixed, nrows * ncols * sizeof(float));
    // FIX: the allocations above were never checked; a failed cudaMalloc
    // previously produced garbage timings instead of an error.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "CUDA allocation failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    fprintf(stderr,"Memory allocated\n");
    t0 = When();
    /* Now proceed with the Jacobi algorithm */
    for(i=0;i<10;i++) {
        Compute();
    }
    tottime = (When() - t0)/10;
    printf("Total Time is: %lf sec.\n", tottime);
    // FIX: release device memory (was leaked until process exit). Compute()
    // only swaps the iplate/oplate pointers, so both frees stay valid.
    cudaFree(lkeepgoing);
    cudaFree(iplate);
    cudaFree(oplate);
    cudaFree(fixed);
    return 0;
}
// Initialize the plate arrays. One block per row; each thread fills a
// contiguous chunk of ncols/blockDim.x cells. Defaults: not fixed, 50
// degrees, keepgoing=1. Then fixed cells are set: left/right columns and
// top row at 0; bottom row at 100; row 400 columns [0,330) at 100; and the
// single cell (200, 500) at 100.
__global__ void InitArrays(float *ip, float *op, float *fp, int *kp, int ncols)
{
    int i;
    float *fppos, *oppos, *ippos;
    int *kppos;
    int blockOffset;    // start index of this block's row in the flat arrays
    int rowStartPos;    // this thread's offset within the row
    int colsPerThread;  // chunk size per thread
    blockOffset = blockIdx.x * ncols;
    rowStartPos = threadIdx.x * (ncols/blockDim.x);
    colsPerThread = ncols/blockDim.x;
    ippos = ip + blockOffset + rowStartPos;
    fppos = fp + blockOffset + rowStartPos;
    oppos = op + blockOffset + rowStartPos;
    kppos = kp + blockOffset + rowStartPos;
    // Default interior state.
    for (i = 0; i < colsPerThread; i++) {
        fppos[i] = NOTSETLOC; // Not Fixed
        ippos[i] = 50;
        oppos[i] = 50;
        kppos[i] = 1;         // Keep Going
    }
    // Left boundary column fixed at 0.
    if (rowStartPos == 0) {
        fppos[0] = SETLOC;
        ippos[0] = 0;
        oppos[0] = 0;
        kppos[0] = 0;
    }
    // Right boundary column fixed at 0.
    if (rowStartPos + colsPerThread >= ncols) {
        fppos[colsPerThread-1] = SETLOC;
        ippos[colsPerThread-1] = 0;
        oppos[colsPerThread-1] = 0;
        kppos[colsPerThread-1] = 0;
    }
    // BUG FIX: the three row tests below originally compared blockOffset
    // (always a multiple of ncols) against row numbers (ncols-1, 400, 200),
    // so they could never match. Rows are identified by blockIdx.x.
    // Top row fixed at 0.
    if (blockIdx.x == 0) {
        for (i = 0; i < colsPerThread; i++) {
            fppos[i] = SETLOC;
            ippos[i] = 0;
            oppos[i] = 0;
            kppos[i] = 0;
        }
    }
    // Bottom row fixed at 100.
    if (blockIdx.x == gridDim.x - 1) {
        for (i = 0; i < colsPerThread; i++) {
            fppos[i] = SETLOC;
            ippos[i] = 100;
            oppos[i] = 100;
            kppos[i] = 0;
        }
    }
    // Row 400, columns [0, 330): fixed at 100.
    if (blockIdx.x == 400 && rowStartPos < 330) {
        int end = (rowStartPos + colsPerThread > 330) ? (330 - rowStartPos)
                                                      : colsPerThread;
        for (i = 0; i < end; i++) {
            fppos[i] = SETLOC;
            ippos[i] = 100;
            oppos[i] = 100;
            kppos[i] = 0;
        }
    }
    // Single fixed cell at (row 200, column 500); exactly one thread owns it.
    if (blockIdx.x == 200 && rowStartPos <= 500 && 500 < rowStartPos + colsPerThread) {
        i = 500 - rowStartPos;
        fppos[i] = SETLOC;
        ippos[i] = 100;
        oppos[i] = 100;
        kppos[i] = 0;
    }
}
// Weighted Jacobi sweep: for every non-fixed cell compute
// (left + right + up + down + 4*center)/8 from oplate into iplate.
// One block per row; each thread updates a contiguous chunk of
// ncols/blockDim.x cells. The current row of oplate is staged into shared
// memory (TOTCOLS floats) so left/right neighbors hit shared, not global.
__global__ void doCalc(float *iplate, float *oplate, float *fplate, int ncols)
{
/* Compute the 5 point stencil for my region */
int i;
int rowStartPos;
int blockOffset;
float *ippos, *oppos, *fppos;
__shared__ float oldRow[TOTCOLS];
blockOffset = blockIdx.x * ncols;
rowStartPos = threadIdx.x * (ncols/blockDim.x);
int colsPerThread = ncols/blockDim.x;
ippos = iplate + blockOffset + rowStartPos;
oppos = oplate + blockOffset + rowStartPos;
fppos = fplate + blockOffset + rowStartPos;
// Stage this row of oplate into shared memory.
for(i=0;i<colsPerThread;i++) {
oldRow[rowStartPos+i] = oplate[blockOffset+rowStartPos+i];
}
__syncthreads();
for(i=0; i<colsPerThread; i++) {
if(fppos[i] != SETLOC) {
// NOTE(review): oldRow[rowStartPos+i-1] and oppos[i-ncols]/oppos[i+ncols]
// index outside the row/array for edge cells; this appears to rely on all
// boundary cells being SETLOC (see InitArrays) — confirm.
ippos[i] = (oldRow[rowStartPos+i-1]+oldRow[rowStartPos+i+1]+oppos[i-ncols] + oppos[i+ncols] + 4*oldRow[rowStartPos+i])/8;
}
}
}
// Convergence check: for every non-fixed cell, set lkeepgoing to 1 if the
// cell still differs from the average of its four neighbors by more than the
// threshold (0.1), else 0. Same row/block/thread decomposition as doCalc;
// the current row of iplate is staged into shared memory.
// NOTE(review): the `oplate` parameter is unused — presumably kept for
// signature symmetry with doCalc; confirm.
__global__ void doCheck(float *iplate, float *oplate, float *fixed, int *lkeepgoing, int ncols)
{
int i;
int rowStartPos;
int blockOffset;
float *ippos, *fppos;
int *kppos;
__shared__ float currentRow[TOTCOLS];
blockOffset = blockIdx.x * ncols;
rowStartPos = threadIdx.x * (ncols/blockDim.x);
int colsPerThread = ncols/blockDim.x;
ippos = iplate + blockOffset + rowStartPos;
fppos = fixed + blockOffset + rowStartPos;
kppos = lkeepgoing + blockOffset + rowStartPos;
// Stage this row of iplate into shared memory.
for(i=0;i<colsPerThread;i++) {
currentRow[rowStartPos+i] = iplate[rowStartPos + blockOffset + i];
}
__syncthreads();
for(i=0;i<colsPerThread;i++) {
if(fppos[i] != SETLOC) {
// NOTE(review): like doCalc, edge-cell neighbor indices go out of range
// and are only safe because boundary cells are SETLOC — confirm.
if(fabsf(currentRow[rowStartPos+i]-(currentRow[rowStartPos+i-1]+currentRow[rowStartPos+i+1]+ippos[i-ncols]+ippos[i+ncols])/4) > 0.1) {
kppos[i] = 1;
}
else {
kppos[i] = 0;
}
}
}
}
// Sum all nrows ints of idata into *single using a single thread block.
// Requires blockDim.x == 1024 and dynamic shared memory of
// blockDim.x * sizeof(int) (the unrolled steps hard-code offsets 512..32).
__global__ void reduceSingle(int *idata, int *single, int nrows)
{
    int i;
    extern __shared__ int parts[];
    // Each thread accumulates a strided slice of the input.
    parts[threadIdx.x] = 0;
    for (i = threadIdx.x; i < nrows; i+=blockDim.x) {
        parts[threadIdx.x] += idata[i];
    }
    // BUG FIX: a barrier is required between filling parts[] and the first
    // reduction step below, which reads other threads' entries; without it
    // the tid<512 step races with threads still accumulating.
    __syncthreads();
    int tid = threadIdx.x;
    // Unrolled tree reduction down to 32 partials.
    if (tid < 512) { parts[tid] += parts[tid + 512];}
    __syncthreads();
    if (tid < 256) { parts[tid] += parts[tid + 256];}
    __syncthreads();
    if (tid < 128) { parts[tid] += parts[tid + 128];}
    __syncthreads();
    if (tid < 64) { parts[tid] += parts[tid + 64];}
    __syncthreads();
    if (tid < 32) { parts[tid] += parts[tid + 32];}
    __syncthreads();
    // Thread 0 serially folds the remaining 32 partials.
    if(threadIdx.x == 0) {
        *single = 0;
        for(i = 0; i < 32; i++) {
            *single += parts[i];
        }
    }
}
// Single-block sum of idata[0..ncols-1] into *single. Dynamic shared memory:
// blockDim.x * sizeof(int). Assumes ncols is a multiple of blockDim.x.
// Interleaved-addressing tree reduction (modulo variant — divergent).
__global__ void iReduceSingle(int *idata, int *single, int ncols) {
    int i;
    unsigned int tid = threadIdx.x;
    extern __shared__ int sdata[];
    int colsPerThread = ncols/blockDim.x;
    /* BUG FIX: startPos was `blockDim.x + threadIdx.x`, so every thread read
       an overlapping window near element blockDim.x and most of the input
       was never summed. Each thread owns a contiguous chunk instead. */
    unsigned int startPos = tid * colsPerThread;
    int myPart = 0;
    for(i=0;i<colsPerThread;i++) {
        myPart += idata[startPos+i];
    }
    sdata[tid] = myPart;
    __syncthreads();
    unsigned int s;
    for(s=1;s<blockDim.x;s*=2){
        if(tid%(2*s) == 0){
            sdata[tid]+=sdata[tid+s];
        }
        __syncthreads();
    }
    if(tid==0)*single=sdata[0];
}
// Single-block sum of idata[0..ncols-1] into *single. Dynamic shared memory:
// blockDim.x * sizeof(int). Assumes ncols is a multiple of blockDim.x.
// Interleaved tree reduction with a strided index (avoids the modulo
// divergence of iReduceSingle). This is the variant Compute() launches.
__global__ void iReduceSingle2(int *idata, int *single, unsigned int ncols) {
    int i;
    unsigned int tid = threadIdx.x;
    extern __shared__ int sdata[];
    int colsPerThread = ncols/blockDim.x;
    /* BUG FIX: startPos was `blockDim.x + threadIdx.x` (an overlapping window
       near element blockDim.x); partition the input into contiguous
       per-thread chunks so every element is summed exactly once. */
    unsigned int startPos = tid * colsPerThread;
    int myPart = 0;
    for(i=0;i<colsPerThread;i++) {
        myPart += idata[startPos+i];
    }
    sdata[tid] = myPart;
    __syncthreads();
    unsigned int s;
    for(s=1;s<blockDim.x;s*=2) {
        int index = 2*s*tid;
        if(index<blockDim.x) {
            sdata[index] += sdata[index+s];
        }
        __syncthreads();
    }
    if(tid==0)*single=sdata[0];
}
// Single-block sum of idata[0..ncols-1] into *single. Dynamic shared memory:
// blockDim.x * sizeof(int). Assumes ncols is a multiple of blockDim.x and
// blockDim.x is a power of two. Sequential-addressing tree reduction
// (conflict-free variant).
__global__ void sReduceSingle(int *idata,int *single,unsigned int ncols) {
    int i;
    unsigned int tid = threadIdx.x;
    extern __shared__ int sdata[];
    int colsPerThread = ncols/blockDim.x;
    /* BUG FIX: startPos was `blockDim.x + threadIdx.x` (overlapping reads,
       most elements skipped); use contiguous per-thread chunks. */
    unsigned int startPos = tid * colsPerThread;
    int myPart = 0;
    for(i=0;i<colsPerThread;i++) {
        myPart += idata[startPos+i];
    }
    sdata[tid] = myPart;
    __syncthreads();
    unsigned int s;
    for(s=blockDim.x/2;s>0;s>>=1) {
        if(tid<s) {
            sdata[tid] += sdata[tid+s];
        }
        __syncthreads();
    }
    if(tid==0)*single=sdata[0];
}
// Per-row sum without shared memory: block b sums row b of idata (ncols
// ints) into odata[b]. Each thread first folds its contiguous chunk into the
// chunk's first element (in place, in global memory), then thread 0 adds up
// the per-thread leading elements. Note: this destroys idata's row contents.
__global__ void reduceSum(int *idata, int *odata, unsigned int ncols)
{
// Reduce rows to the first element in each row
int i;
int blockOffset;
int rowStartPos;
int colsPerThread;
int *mypart;
// Each block gets a row, each thread will reduce part of a row
// Calculate the offset of the row
blockOffset = blockIdx.x * ncols;
// Calculate our offset into the row
rowStartPos = threadIdx.x * (ncols/blockDim.x);
// The number of cols per thread
colsPerThread = ncols/blockDim.x;
mypart = idata + blockOffset + rowStartPos;
// Sum all of the elements in my thread block and put them
// into the first column spot
for (i = 1; i < colsPerThread; i++) {
mypart[0] += mypart[i];
}
__syncthreads(); // Wait for everyone to complete
// Now reduce all of the threads in my block into the first spot for my row
if(threadIdx.x == 0) {
odata[blockIdx.x] = 0;
for(i = 0; i < blockDim.x; i++) {
odata[blockIdx.x] += mypart[i*colsPerThread];
}
}
// We cant synchronize between blocks, so we will have to start another kernel
}
// Per-row sum: block b sums row b of idata (ncols ints) into odata[b].
// Dynamic shared memory: blockDim.x * sizeof(int). Assumes ncols is a
// multiple of blockDim.x. Interleaved (modulo) tree reduction.
__global__ void iReduceSum(int *idata, int *odata, unsigned int ncols) {
    int i;
    unsigned int tid = threadIdx.x;
    extern __shared__ int sdata[];
    int colsPerThread = ncols/blockDim.x;
    /* BUG FIX: the row offset must come from blockIdx (one block per row) —
       it was threadIdx.x*(ncols/blockDim.x) — and the in-row thread offset
       was `blockDim.x + threadIdx.x`; together the kernel summed the wrong,
       overlapping elements regardless of block. */
    int blockOffset = blockIdx.x * ncols;
    unsigned int startPos = tid * colsPerThread;
    int myPart = 0;
    for(i=0;i<colsPerThread;i++) {
        myPart += idata[blockOffset+startPos+i];
    }
    sdata[tid] = myPart;
    __syncthreads();
    unsigned int s;
    for(s=1;s<blockDim.x;s*=2){
        if(tid%(2*s) == 0){
            sdata[tid]+=sdata[tid+s];
        }
        __syncthreads();
    }
    if(tid==0)odata[blockIdx.x]=sdata[0];
}
// Per-row sum: block b sums row b of idata (ncols ints) into odata[b].
// Dynamic shared memory: blockDim.x * sizeof(int). Assumes ncols is a
// multiple of blockDim.x. Interleaved tree reduction with strided indexing
// (no modulo divergence). This is the variant Compute() launches.
__global__ void iReduceSum2(int *idata, int *odata, unsigned int ncols) {
    int i;
    unsigned int tid = threadIdx.x;
    extern __shared__ int sdata[];
    int colsPerThread = ncols/blockDim.x;
    /* BUG FIX: the row offset must come from blockIdx (one block per row) —
       it was threadIdx.x*(ncols/blockDim.x) — and the in-row thread offset
       was `blockDim.x + threadIdx.x`. Use the block's row base plus a
       contiguous per-thread chunk so every row element is summed once. */
    int blockOffset = blockIdx.x * ncols;
    unsigned int startPos = tid * colsPerThread;
    int myPart = 0;
    for(i=0;i<colsPerThread;i++) {
        myPart += idata[blockOffset+startPos+i];
    }
    sdata[tid] = myPart;
    __syncthreads();
    unsigned int s;
    for(s=1;s<blockDim.x;s*=2) {
        int index = 2*s*tid;
        if(index<blockDim.x) {
            sdata[index] += sdata[index+s];
        }
        __syncthreads();
    }
    if(tid==0)odata[blockIdx.x]=sdata[0];
}
// Per-row sum: block b sums row b of idata (ncols ints) into odata[b].
// Dynamic shared memory: blockDim.x * sizeof(int). Assumes ncols is a
// multiple of blockDim.x and blockDim.x is a power of two.
// Sequential-addressing tree reduction (bank-conflict-free variant).
__global__ void sReduceSum(int *idata,int *odata,unsigned int ncols) {
    int i;
    unsigned int tid = threadIdx.x;
    extern __shared__ int sdata[];
    int colsPerThread = ncols/blockDim.x;
    /* BUG FIX: the row offset must come from blockIdx (one block per row) —
       it was threadIdx.x*(ncols/blockDim.x) — and the in-row thread offset
       was `blockDim.x + threadIdx.x`. */
    int blockOffset = blockIdx.x * ncols;
    unsigned int startPos = tid * colsPerThread;
    int myPart = 0;
    for(i=0;i<colsPerThread;i++) {
        myPart += idata[blockOffset+startPos+i];
    }
    sdata[tid] = myPart;
    __syncthreads();
    unsigned int s;
    for(s=blockDim.x/2;s>0;s>>=1) {
        if(tid<s) {
            sdata[tid] += sdata[tid+s];
        }
        __syncthreads();
    }
    if(tid==0)odata[blockIdx.x]=sdata[0];
}
// One full Jacobi solve: initialize the arrays, then iterate
// calc -> check -> two-stage keepgoing reduction for MAXIT iterations,
// swapping the input/output plate pointers each time.
// NOTE(review): `keepgoing` is copied back each iteration but never used to
// break the loop — the iteration count is fixed at MAXIT (presumably for
// benchmarking); confirm that early exit is intentionally disabled.
void Compute()
{
int *keepgoing_single;
int *keepgoing_sums;
int keepgoing;
int blocksize = BLOCKSIZE;
int iteration;
double t0, tottime;
// double start = When();
ncols = TOTCOLS;
nrows = TOTROWS;
// One block per row
InitArrays<<< nrows, blocksize >>>(iplate, oplate, fixed, lkeepgoing, ncols);
// Device scratch: one int for the final flag, one per row for partial sums.
cudaMalloc((void **)&keepgoing_single, 1 * sizeof(int));
keepgoing = 1;
cudaMalloc((void **)&keepgoing_sums, nrows * sizeof(int));
int *peek = (int *)malloc(nrows*sizeof(int));
for (iteration = 0; (iteration < MAXIT); iteration++)
{
// t0 = When();
doCalc<<< nrows, blocksize >>>(iplate, oplate, fixed, ncols);
// fprintf(stderr,"calc: %f\n",When()-t0);
// t0 = When();
doCheck<<< nrows, blocksize >>>(iplate, oplate, fixed, lkeepgoing, ncols);
// fprintf(stderr,"check: %f\n",When()-t0);
// t0 = When();
// Stage 1: reduce each row's keepgoing flags to one sum per row.
iReduceSum2<<< nrows, blocksize, blocksize*sizeof(int)>>>(lkeepgoing, keepgoing_sums, ncols);
// fprintf(stderr,"reduce: %f\n",When()-t0);
// cudaMemcpy(peek, keepgoing_sums, nrows*sizeof(int), cudaMemcpyDeviceToHost);
// fprintf(stderr, "after cudaMemcpy \n");
// int i;
// for(i = 0; i < nrows; i++) {
// fprintf(stderr, "%d, ",peek[i]);
// }
// Now we have the sum for each row in the first column,
// reduce to one value
// t0 = When();
// int timeit;
// for(timeit = 0; timeit < 10000; timeit++){
// t0 = When();
// Stage 2: reduce the per-row sums to a single flag count.
iReduceSingle2<<<1, blocksize, blocksize*sizeof(int)>>>(keepgoing_sums, keepgoing_single, nrows);
// fprintf(stderr,"reduceSingle: %f\n",When()-t0);
// }
// tottime = When()-t0;
keepgoing = 0;
// Blocking copy — also serves as the per-iteration device sync.
cudaMemcpy(&keepgoing, keepgoing_single, 1 * sizeof(int), cudaMemcpyDeviceToHost);
// tottime = When() - start;
// fprintf(stderr, "keepgoing = %d time %f\n", keepgoing, tottime);
//fprintf(stderr, "keepgoint[100]: %d\n", lkeepgoing[100]);
/* swap the new value pointer with the old value pointer */
tmp = oplate;
oplate = iplate;
iplate = tmp;
}
free(peek);
cudaFree(keepgoing_single);
cudaFree(keepgoing_sums);
fprintf(stderr,"Finished in %d iterations\n", iteration);
}
/* Return the current time in seconds, using a double precision number. */
/* Current wall-clock time in seconds (microsecond resolution) as a double. */
double When()
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return (double)tv.tv_sec + 1e-6 * (double)tv.tv_usec;
}
|
12,141 | // JacobiRel.cu
#include <stdio.h>
#include <assert.h>
#include <cuda.h>
// One 16x16 thread block per interior tile of the n x m grid. Computes a
// 9-point weighted Jacobi update of `a` into `newa` (weights w0 center,
// w1 edges, w2 corners), then tree-reduces the 256 per-thread |change|
// values and writes each block's maximum into lchange[block].
// Assumes blockDim == (16,16) and that n-2 and m-2 are multiples of 16.
__global__ void kernelOne(float* a, float* newa, float* lchange, int n, int m,
float w0, float w1, float w2 )
{
    int ti = threadIdx.x, tj = threadIdx.y; /* local indices */
    int i = blockIdx.x*16+ti+1, j = blockIdx.y*16+tj+1; /* global */
    newa[j*m+i] = w0 * a[j*m+i] +
        w1 * (a[j*m+i-1] + a[(j-1)*m+i] + a[j*m+i+1] + a[(j+1)*m+i]) +
        w2 * (a[(j-1)*m+i-1] + a[(j+1)*m+i-1] + a[(j-1)*m+i+1] + a[(j+1)*m+i+1]);
    __shared__ float mychange[16*16];
    /* BUG FIX: the original read newa[j*m-i] (minus), comparing an unrelated
     * element against a[j*m+i]; the change is |newa[j*m+i] - a[j*m+i]|. */
    mychange[ti+16*tj] = fabsf(newa[j*m+i]-a[j*m+i]);
    __syncthreads();
    /* reduce all "change" values for this thread block to a single value;
     * nn is uniform across the block, so the barrier in the loop is safe */
    int nn = 256;
    while( (nn >>= 1) > 0 ){
        if( ti+tj*16 < nn )
            mychange[ti+tj*16] = fmaxf( mychange[ti+tj*16],
                                        mychange[ti+tj*16+nn]);
        __syncthreads();
    }
    /* store this thread block's "change" */
    if( ti==0 && tj==0 )
        lchange[blockIdx.x+gridDim.x*blockIdx.y] = mychange[0];
}
// Reduce the n per-block change values in lchange[0..n-1] to a single
// maximum, left in lchange[0]. Launch with exactly one block of 256 threads.
__global__ void kernelTwo( float* lchange, int n )
{
    __shared__ float mychange[256];
    float mych;
    int i = threadIdx.x, m;
    /* BUG FIX: guard all global reads against n. The original read
     * lchange[i] unconditionally (out of bounds when n < 256) and strode
     * with `while (m <= n)`, reading up to lchange[i+n] (out of bounds).
     * 0.0f is a safe identity: the inputs are fabsf() values, so >= 0. */
    mych = (i < n) ? lchange[i] : 0.0f;
    for (m = i + 256; m < n; m += 256) {
        mych = fmaxf(mych, lchange[m]);
    }
    mychange[i] = mych;
    __syncthreads();
    /* standard shared-memory max tree over the 256 partials */
    n = 256;
    while( (n >>= 1) > 0 ){
        if(i<n) mychange[i] = fmaxf(mychange[i],mychange[i+n]);
        __syncthreads();
    }
    if(i==0) lchange[0] = mychange[0];
}
// Read grid dimensions m and n, run one Jacobi relaxation sweep on the GPU,
// and print the resulting grid plus the maximum per-element change.
int main(void)
{
    float change;
    float *newa, *a;
    float *da, *dnewa, *lchange;
    int i, j, m, n;
    printf("Please give m and n: ");
    // FIX: the scanf result was ignored; bad input previously fed garbage
    // dimensions into the allocations below.
    if (scanf("%d %d",&m,&n) != 2) {
        fprintf(stderr, "expected two integers\n");
        return 1;
    }
    size_t memsize = sizeof(float)*n*m;
    a = (float *)malloc(memsize);
    newa = (float *)malloc(memsize);
    cudaMalloc( &da, memsize );
    cudaMalloc( &dnewa, memsize );
    cudaMalloc( &lchange, ((n-2)/16)*((m-2)/16)*sizeof(float) );
    // Initialize row i of a to the value i.
    for (i=0; i<m; i++){
        for (j=0; j<n; j++){
            a[i*n+j] = i;
        }
    }
    cudaMemcpy( da, a, memsize, cudaMemcpyHostToDevice );
    dim3 threads( 16, 16 );
    dim3 blocks( (n-2)/16, (m-2)/16 );
    kernelOne<<<blocks,threads>>>( da, dnewa, lchange, n, m, 1.0, 1.0, 1.0 );
    kernelTwo<<<1,256>>>( lchange, ((n-2)/16)*((m-2)/16) );
    cudaMemcpy( newa, dnewa, memsize, cudaMemcpyDeviceToHost );
    // FIX: copy sizeof(float) bytes, not a hard-coded 4.
    cudaMemcpy( &change, lchange, sizeof(float), cudaMemcpyDeviceToHost );
    // check results
    printf("Change: %f\n", change);
    for (i=0; i<m; i++)
        for (j=0; j<n; j++)
            printf("%f\n", newa[i*n+j]);
    cudaFree( da );
    cudaFree( dnewa );
    cudaFree( lchange );
    // FIX: release the host buffers too (previously leaked).
    free(a);
    free(newa);
}
|
12,142 | #include "includes.h"
// Growing-neural-gas style topology update when a new node r is inserted
// between nodes q and f: remove the q<->f edge, connect q<->r and f<->r
// (with fresh ages), decay the local errors of q and f by alfa, and seed r's
// error from the (decayed) errors of its neighbors. The update is inherently
// serial, so exactly one thread performs it regardless of launch size.
__global__ void NewNodeConnectionKernel( int f, int q, int r, int *activityFlag, int *connection, int *age, float *localError, float alfa, int maxCells, float errorFraction )
{
    // Flat thread index across a 2-D grid of 1-D blocks.
    const int gid = blockDim.x * blockIdx.y * gridDim.x
                  + blockDim.x * blockIdx.x
                  + threadIdx.x;
    if (gid >= 1) return;

    activityFlag[r] = 1;
    // Remove the old edge q<->f (both directions of the adjacency matrix).
    connection[q * maxCells + f] = 0;
    age[q * maxCells + f] = 0;
    connection[f * maxCells + q] = 0;
    age[f * maxCells + q] = 0;
    // New edge q<->r with age 0.
    connection[q * maxCells + r] = 1;
    age[q * maxCells + r] = 0;
    connection[r * maxCells + q] = 1;
    age[r * maxCells + q] = 0;
    // New edge f<->r with age 0.
    connection[f * maxCells + r] = 1;
    age[f * maxCells + r] = 0;
    connection[r * maxCells + f] = 1;
    age[r * maxCells + f] = 0;
    // Decay neighbor errors, then derive the new node's error from them.
    localError[q] -= alfa * localError[q];
    localError[f] -= alfa * localError[f];
    localError[r] = errorFraction * (localError[q] + localError[f]);
}
12,143 | #include <stdio.h>
#include <cuda.h>
int devCount;
int myid;
int ihavecuda;
int deviceselector=0;
// Enumerate CUDA devices: record whether any exist in the global ihavecuda
// flag and print each device's name and index.
int main(void) {
    cudaGetDeviceCount(&devCount);
    ihavecuda = (devCount == 0) ? 0 : 1;
    if (!ihavecuda) {
        printf("Devcount %4d NONE\n", devCount);
    } else {
        printf("Devcount %4d\n", devCount);
        for (int i = 0; i < devCount; ++i) {
            cudaDeviceProp devProp;
            cudaGetDeviceProperties(&devProp, i);
            printf(" devprop name %s i=(%d) \n ", devProp.name, i);
        }
    }
}
|
12,144 | #include <stdio.h>
#include <string.h>
#include <math.h>
#include <stdlib.h>
#include <sys/time.h>
#include "cuda_runtime.h"
// Print the first l elements of xi, space-separated, followed by a newline.
void printVect(float* xi, int l) {
    for (int idx = 0; idx < l; idx++) {
        printf("%8.10f ", xi[idx]);
    }
    printf("\n");
}
// Print an n x n row-major matrix, one row per line.
void printMat(float* mat, int n) {
    for (int r = 0; r < n; r++) {
        for (int c = 0; c < n; c++) {
            printf("%8.10f ", mat[r*n+c]);
        }
        printf("\n");
    }
}
// Fill xi/yi with the coordinates of an m-by-m interior grid with spacing h:
// point (i,j), for i,j in 1..m, is stored row-major as (i*h, j*h).
// xi and yi must each hold m*m floats.
void computePoints(float* xi, float* yi, float h, int m) {
    int idx = 0;
    for (int i = 1; i <= m; i++) {
        for (int j = 1; j <= m; j++) {
            xi[idx] = i*h;
            yi[idx] = j*h;
            idx++;
        }
    }
}
// Dot product of kt and df over the first n entries (the regression
// prediction k^T * alpha). Returns 0 for n <= 0.
float computeRes(float* kt, float* df, int n) {
    float acc = 0;
    for (int idx = 0; idx < n; idx++) {
        acc += kt[idx]*df[idx];
    }
    return acc;
}
// Gaussian (RBF) kernel vector between the query point (xp, yp) and every
// training point: kt[j] = exp(-||(xp,yp) - (xi[j],yi[j])||^2).
// One block per training point (block index = j); n is unused here.
__global__ void computeKT(float* xi, float* yi, float* kt, int n, float xp, float yp) {
    const int j = blockIdx.x;
    const float dx = xp - xi[j];
    const float dy = yp - yi[j];
    kt[j] = exp(-1.0*(dx*dx + dy*dy));
}
// Build the n x n Gaussian (RBF) kernel matrix a[i*n+j] = exp(-||p_i-p_j||^2)
// with 0.01 added on the diagonal (ridge regularization). Launched as a
// single block; each thread fills numRows = n/blockDim.x consecutive rows.
// NOTE(review): if n is not a multiple of blockDim.x, the trailing
// n % blockDim.x rows are never written — confirm callers guarantee
// divisibility (main launches with a user-supplied blockSize).
__global__ void computeK(float* xi, float* yi, float* a, int n) {
int numRows = n/blockDim.x;
//xi,yi 1*4 0,4
for(int k=0;k<numRows;k++){
int i = threadIdx.x*numRows+k;
if(i>=n)
break;
for(int j=0;j<n;j++){
int idx = i*n + j;
// Squared Euclidean distance between points i and j, negated for exp.
a[idx] = exp(-1.0*((xi[i]-xi[j])*(xi[i]-xi[j]) + (yi[i]-yi[j])*(yi[i]-yi[j])));
if(i==j) {
a[idx] = a[idx] + 0.01;
}
}
}
}
// Sample the target function at each point: f_i = 1 - ||p_i - (0.5,0.5)||^2
// plus uniform noise in [-0.05, 0.05] drawn from rand(). Call srand() first
// for reproducible noise.
void computeF(float* xi, float* yi, float* f, int n) {
    for (int i = 0; i < n; i++) {
        // Scaled, centered sample of rand() in [-0.05, 0.05].
        float d = (float)0.1 * ((((float)rand()) / (float)RAND_MAX)-0.5);
        f[i] = (float)1.0 - ((xi[i]-0.5) * (xi[i]-0.5) + (yi[i]-0.5) * (yi[i]-0.5)) + d;
    }
}
// In-place LU factorization (Doolittle, no pivoting) of the n x n row-major
// matrix a, using a single thread block with cyclic row partitioning:
// thread t owns rows t, t+blockDim.x, t+2*blockDim.x, ... After the kernel,
// the strict lower triangle holds the multipliers (unit-diagonal L) and the
// upper triangle holds U. Assumes n is a multiple of blockDim.x.
// NOTE(review): the barrier between writing the column-k multipliers and
// reading them in the trailing-submatrix update is commented out
// (`//__syncthreads();`). A thread only reads multipliers of rows it owns,
// so this may be intentional under the cyclic partition — but it is a race
// if any thread reads another thread's a[i][k]; confirm with racecheck.
__global__ void LU(float *a, int n, int blockSize) {
// Normal parallel
// int k, y;
// int numRows = n/blockDim.x;
// int start = threadIdx.x;
//
// for(k=0;k<n-1;k++){
//
// for(y=0;y<numRows;y++){
// int i = threadIdx.x*numRows + y;
// if(i>k&&i<n){
// int Aik = i*n + k, Akk = n*k + k;
// a[Aik] = a[Aik]/a[Akk];
// }
// }
// __syncthreads();
//
// int l, z;
// for(l=0; l<numRows; l++) {
// int i = threadIdx.x*numRows + l;
//
// if(i>k && i<n) {
// for(z=k+1; z<n; z++) {
// int Aiz = i*n + z, Aik = i*n + k, Akz = k*n + z;
// // printf("i= %d z= %d\n", i, z);
// // printf("Aik= %d Akz= %d Aiz=%d\n", Aik, Akz, Aiz);
// // printf("threadIdx= %d\n", threadIdx.x);
// a[Aiz] = a[Aiz] - a[Aik]*a[Akz];
// }
// }
// }
// __syncthreads();
// }
// cyclic row partion method
int k, y;
int numRows = n/blockDim.x;
int start = threadIdx.x;
// Eliminate one pivot column per outer iteration.
for(k=0;k<n-1;k++){
// Compute the column-k multipliers for the rows this thread owns.
for(y=0;y<numRows;y++){
int i = y*blockDim.x+start;
if(i>k&&i<n){
int Aik = i*n + k, Akk = n*k + k;
a[Aik] = a[Aik]/a[Akk];
}
}
//__syncthreads();
// Update the trailing submatrix rows this thread owns.
int l, z;
for(l=0; l<numRows; l++) {
int i = l*blockDim.x+start;
if(i>k && i<n) {
for(z=k+1; z<n; z++) {
int Aiz = i*n + z, Aik = i*n + k, Akz = k*n + z;
a[Aiz] = a[Aiz] - a[Aik]*a[Akz];
}
}
}
__syncthreads();
}
}
//solve L
// Forward substitution: solve L*y = b in place, where L is the
// unit-lower-triangular factor stored in the strict lower triangle of the
// LU-factored matrix (diagonal implicitly 1). x holds b on entry, y on exit.
// Single-block launch; rows are partitioned in contiguous per-thread chunks.
// Assumes n is a multiple of blockDim.x. One barrier per eliminated column.
__global__ void solveL(float *x, float *L, int n) {
int i, k;
int numRows = n/blockDim.x;
for (i = 1; i < n; i++) {
// Subtract column i-1's contribution from all rows j >= i.
for(k=0;k<numRows; k++){
int j = threadIdx.x*numRows + k;
int LIdx = j*n + i - 1;
if(j>=i&&j<n)
x[j] = x[j] - L[LIdx]*x[i-1];
}
__syncthreads();
}
}
//solve U
// Back substitution: solve U*z = y in place, where U is the upper triangle
// (including diagonal) of the LU-factored matrix. x holds y on entry, the
// solution z on exit. Single-block launch with contiguous per-thread row
// chunks; assumes n is a multiple of blockDim.x.
__global__ void solveU(float *x, float *U, int n) {
int i, k;
int numRows = n/blockDim.x;
for (i = n-1; i > 0; i--) {
// Thread 0 finalizes x[i]; everyone must see it before eliminating.
if(threadIdx.x==0)
x[i] = x[i]/U[i*n+i];
__syncthreads();
// Subtract column i's contribution from all rows j < i.
for(k=0;k<numRows;k++){
int j = threadIdx.x*numRows + k;
int UIdx = j*n + i;
if(j>=0&&j<i)
x[j] = x[j] - U[UIdx]*x[i];
}
__syncthreads();
}
// Finish the first unknown (U[0] is the (0,0) diagonal entry).
if(threadIdx.x==0)
x[0] = x[0]/U[0];
}
// Driver: builds the kernel matrix K for an m*m grid, LU-factorises it on the
// GPU, solves K*w = f by forward/back substitution, and evaluates the
// interpolant at (xp, yp).  Usage: prog m xp yp blockSize
//
// Fixes vs. original: (1) the solver timing recorded the already-destroyed
// `start` event and synchronised the wrong `stop` event, so time1 was garbage;
// (2) argv is now validated before use.
int main(int argc, char **argv)
{
    if (argc != 5) {
        printf("Usage: %s m xp yp blockSize\n", argv[0]);
        return 1;
    }
    int m = atoi(argv[1]);
    float xp = atof(argv[2]);
    float yp = atof(argv[3]);
    int blockSize = atoi(argv[4]);
    int n = m * m;                       // number of grid points
    int size = n * sizeof(float);
    int matSize = n * n * sizeof(float);
    float* xi = (float*)malloc(size);
    float* yi = (float*)malloc(size);
    float* f  = (float*)malloc(size);
    float h = ((float)1.0) / (float)(m + 1);  // grid spacing
    // Initialize points and f on the host.
    computePoints(xi, yi, h, m);
    computeF(xi, yi, f, n);
    cudaSetDevice(0);
    // Device buffers.
    float* dXi; cudaMalloc(&dXi, size);
    float* dYi; cudaMalloc(&dYi, size);
    float* dK;  cudaMalloc(&dK, matSize);
    float* df;  cudaMalloc(&df, size);
    cudaMemcpy(df, f, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dXi, xi, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dYi, yi, size, cudaMemcpyHostToDevice);
    // Build the kernel matrix on the device.
    computeK<<<1, blockSize>>>(dXi, dYi, dK, n);
    printf("Start computing LU\n");
    cudaEvent_t start, stop;
    float time;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    LU<<<1, blockSize>>>(dK, n, blockSize);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf("Time for LU factors routines: %4.10f s \n", time/1000);
    // Solve L*y = f, then U*w = y (in place in df).
    cudaEvent_t start1, stop1;
    float time1;
    cudaEventCreate(&start1);
    cudaEventCreate(&stop1);
    // FIX: record start1/stop1 (the originals referenced the destroyed
    // start/stop events here).
    cudaEventRecord(start1, 0);
    solveL<<<1, blockSize>>>(df, dK, n);
    solveU<<<1, blockSize>>>(df, dK, n);
    cudaEventRecord(stop1, 0);
    cudaEventSynchronize(stop1);
    cudaEventElapsedTime(&time1, start1, stop1);
    cudaEventDestroy(start1);
    cudaEventDestroy(stop1);
    printf("Time for solver routine is: %4.10f s \n", time1/1000);
    float* res = (float*)malloc(size);
    cudaMemcpy(res, df, size, cudaMemcpyDeviceToHost);
    // Evaluate the kernel vector at the query point and form the result.
    float* DkTrans;
    cudaMalloc(&DkTrans, size);
    computeKT<<<n, 1>>>(dXi, dYi, DkTrans, n, xp, yp);
    float* kt = (float*)malloc(size);
    cudaMemcpy(kt, DkTrans, size, cudaMemcpyDeviceToHost);
    float result = computeRes(kt, res, n);
    printf(" result is: %8.10f\n", result);
    // Free all memory.
    free(xi); free(yi); free(f); free(res); free(kt);
    cudaFree(dXi); cudaFree(dYi); cudaFree(dK);
    cudaFree(df); cudaFree(DkTrans);
    return 0;
}
|
12,145 | //raytracer.mustafaisik.net//
#include "memory_handler.cuh"
#include "cuda_utils.cuh"
#include "cuda_runtime.h"
// Allocates data_size bytes of device memory, registers the allocation with
// this handler, and optionally seeds it from an existing host/device buffer.
// Throws std::runtime_error if the allocation yields a null pointer.
Memory MemoryHandler::allocateOnDevice(size_t data_size, const Memory& src)
{
    void* dev_ptr = nullptr;
    HANDLE_ERROR(cudaMalloc((void **)&dev_ptr, data_size));
    if (dev_ptr == nullptr)
    {
        throw std::runtime_error("Error: Device memory allocation cannot be performed");
    }
    Memory memory(Memory::DEVICE, dev_ptr);
    m_pointers.push_back(memory);
    // Seed the new allocation from the source buffer, if one was given.
    if (src.type == Memory::DEVICE)
    {
        HANDLE_ERROR(cudaMemcpy(dev_ptr, src.pointer, data_size, cudaMemcpyDeviceToDevice));
    }
    else if (src.type == Memory::HOST)
    {
        HANDLE_ERROR(cudaMemcpy(dev_ptr, src.pointer, data_size, cudaMemcpyHostToDevice));
    }
    return memory;
}
// Allocates data_size bytes of host memory, registers the allocation with
// this handler, and optionally seeds it from an existing host/device buffer.
// Throws std::runtime_error when malloc fails.
Memory MemoryHandler::allocateOnHost(size_t data_size, const Memory& src)
{
    void* host_ptr = malloc(data_size);
    if (host_ptr == nullptr)
    {
        throw std::runtime_error("Error: Host memory allocation cannot be performed");
    }
    Memory memory(Memory::HOST, host_ptr);
    m_pointers.push_back(memory);
    // Seed the new allocation from the source buffer, if one was given.
    if (src.type == Memory::DEVICE)
    {
        HANDLE_ERROR(cudaMemcpy(host_ptr, src.pointer, data_size, cudaMemcpyDeviceToHost));
    }
    else if (src.type == Memory::HOST)
    {
        HANDLE_ERROR(cudaMemcpy(host_ptr, src.pointer, data_size, cudaMemcpyHostToHost));
    }
    return memory;
}
//Frees the pointer contained by the memory structure only if it is allocated by the MemoryHandler.
//Returns true if it is a successful operation.
// Frees the pointer contained by the memory structure only if it is an
// allocation registered with this MemoryHandler.  Returns true on success.
//
// Fix: the original nulled mem.pointer and then called
// m_pointers.remove(mem), so the removal compared against an entry whose
// pointer had already been changed; erase by iterator removes exactly the
// entry that was matched.
bool MemoryHandler::free(const Memory& memory)
{
    for (auto it = m_pointers.begin(); it != m_pointers.end(); ++it)
    {
        if (*it == memory)
        {
            if (it->type == Memory::DEVICE)
            {
                HANDLE_ERROR(cudaFree(it->pointer));
            }
            else if (it->type == Memory::HOST)
            {
                ::free(it->pointer);
            }
            it->pointer = nullptr;
            m_pointers.erase(it);
            return true;
        }
    }
    return false;
}
// Singleton accessor: the handler is created on first use and destroyed at
// program exit (its destructor then releases all registered allocations).
MemoryHandler& MemoryHandler::Handler()
{
static MemoryHandler handler;
return handler;
}
//Returns true if it is a successful operation.
// Copies data_size bytes from src to dst, choosing the cudaMemcpy direction
// from the two Memory types.  Returns true on success, false if either side
// has an unrecognised memory type.
bool MemoryHandler::copy(const Memory& dst, const Memory& src, size_t data_size)
{
    cudaMemcpyKind direction;
    if (dst.type == Memory::HOST && src.type == Memory::HOST)
    {
        direction = cudaMemcpyHostToHost;
    }
    else if (dst.type == Memory::DEVICE && src.type == Memory::HOST)
    {
        direction = cudaMemcpyHostToDevice;
    }
    else if (dst.type == Memory::HOST && src.type == Memory::DEVICE)
    {
        direction = cudaMemcpyDeviceToHost;
    }
    else if (dst.type == Memory::DEVICE && src.type == Memory::DEVICE)
    {
        direction = cudaMemcpyDeviceToDevice;
    }
    else
    {
        return false;
    }
    HANDLE_ERROR(cudaMemcpy(dst.pointer, src.pointer, data_size, direction));
    return true;
}
// Releases every allocation still registered with the handler at shutdown.
MemoryHandler::~MemoryHandler()
{
    for (auto& entry : m_pointers)
    {
        if (entry.type == Memory::DEVICE)
        {
            HANDLE_ERROR(cudaFree(entry.pointer));
        }
        else if (entry.type == Memory::HOST)
        {
            ::free(entry.pointer);
        }
        entry.pointer = nullptr;  // avoid dangling pointers in the list
    }
}
12,146 | #include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <ctime>
#define WIDTH 7
#define HEIGHT 3
#define THREADS_PER_BLOCK 32
// Reports any CUDA error with its location.  Fixes vs. original: the message
// claimed the failing call was cudaMalloc regardless of the actual call, the
// trailing backslash after the closing brace continued the macro onto the
// next source line, and the bare if was not safe inside unbraced if/else
// (do-while(0) makes it a single statement).
#define CUDA_CHECK(err) do {\
    if((err) != cudaSuccess)\
    {\
        printf("CUDA call returned error %s (code %d) (file %s) (line %d)\n", cudaGetErrorString(err), err, __FILE__, __LINE__);\
    }\
} while(0)
// Tiled matrix transpose: input is width x height (row-major), output is
// height x width.  Launch with square blocks of THREADS_PER_BLOCK x
// THREADS_PER_BLOCK threads and a grid covering the input.
// Fix: the shared tile's inner dimension is padded by 1 so the transposed
// read temp[threadIdx.x][threadIdx.y] does not cause shared-memory bank
// conflicts (standard transpose optimisation; results are unchanged).
__global__ void transpose(int *input, int *output, int width, int height)
{
    __shared__ int temp[THREADS_PER_BLOCK][THREADS_PER_BLOCK + 1];
    int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
    int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
    // Stage one input tile into shared memory (coalesced read).
    if ((xIndex < width) && (yIndex < height)) {
        int id_in = yIndex * width + xIndex;
        temp[threadIdx.y][threadIdx.x] = input[id_in];
    }
    __syncthreads();
    // Swap block coordinates and write the transposed tile (coalesced write).
    xIndex = blockIdx.y * blockDim.y + threadIdx.x;
    yIndex = blockIdx.x * blockDim.x + threadIdx.y;
    if ((xIndex < height) && (yIndex < width)) {
        int id_out = yIndex * height + xIndex;
        output[id_out] = temp[threadIdx.x][threadIdx.y];
    }
}
// Work-efficient Blelloch scan over one row of w elements, executed by a
// single block; each thread loads/stores two elements, so blockDim.x must be
// >= nextpow2/2 and the dynamic shared allocation must hold nextpow2 ints.
// Writes the exclusive prefix sums to output[0..w-1] and the inclusive total
// to output[w].
inline __device__
void PrefixSum(int* output, int* input, int w, int nextpow2)
{
extern __shared__ int temp[];
const int tdx = threadIdx.x;
int offset = 1;
const int tdx2 = 2*tdx;
const int tdx2p = tdx2 + 1;
// Zero-pad the tail beyond w so the power-of-two tree is well defined.
temp[tdx2] = tdx2 < w ? input[tdx2] : 0;
temp[tdx2p] = tdx2p < w ? input[tdx2p] : 0;
// Up-sweep (reduce): build partial sums up a binary tree.
for(int d = nextpow2>>1; d > 0; d >>= 1) {
__syncthreads();
if(tdx < d)
{
int ai = offset*(tdx2p)-1;
int bi = offset*(tdx2+2)-1;
temp[bi] += temp[ai];
}
offset *= 2;
}
// NOTE(review): temp[nextpow2-1] is read by all threads here with no barrier
// after the up-sweep's final add (done by thread 0 only) -- verify there is
// no race for block sizes larger than one warp.
int last = temp[nextpow2 - 1];
if(tdx == 0) temp[nextpow2 - 1] = 0;
// Down-sweep: convert the tree of partial sums into an exclusive scan.
for(int d = 1; d < nextpow2; d *= 2) {
offset >>= 1;
__syncthreads();
if(tdx < d )
{
int ai = offset*(tdx2p)-1;
int bi = offset*(tdx2+2)-1;
int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
if(tdx2 < w) output[tdx2] = temp[tdx2];
if(tdx2p < w) output[tdx2p] = temp[tdx2p];
// The inclusive total goes one slot past the row.
if(tdx2p < w) output[w] = last;
}
// One block per row (blockIdx.y): scans `width` elements of row `row`.
// The output base is shifted back by one element, so the scan occupies
// out[row*width-1 .. row*width+width-1].
// NOTE(review): for row 0 this makes PrefixSum write out[-1], one int before
// the buffer start -- confirm the caller allocates headroom or intends this.
__global__ void KernPrefixSumRows(int *out, int *in, int height, int width)
{
const int row = blockIdx.y;
PrefixSum(out+row*width-1, in+row*width, width, 2*blockDim.x );
}
// Second scan pass over the transposed data: row `row` of `in` (width
// elements) is scanned into `out` starting at (row+1)*(width+1), i.e. the
// first (width+1)-wide output row is skipped (the caller zeroes it).
__global__ void KernPrefixSumRowsTrans(int *out, int *in, int height, int width)
{
const int row = blockIdx.y;
PrefixSum(out+row*(width+1)+(width+1), in+row*width, width, 2*blockDim.x );
}
// Builds a summed-area table on the GPU: scan rows, transpose, zero the
// leading row, scan the transposed rows, transpose back into outT.
// `out` must be host-accessible (e.g. managed memory) because of the memset.
// Fix: the memset sized its row with the file-scope HEIGHT macro instead of
// the `height` parameter; it now uses the parameter so the routine works for
// any input size (identical behaviour for the existing HEIGHT-sized call).
void PrefixSumRows(int *out, int *in, int *outT, int height, int width)
{
    // Threads per scan block: next power of two covering width/2 pairs.
    dim3 blockDim = dim3(1, 1);
    while (blockDim.x < ceil(width / 2.0f)) blockDim.x <<= 1;
    dim3 gridDim = dim3(1, height);
    KernPrefixSumRows<<<gridDim, blockDim, 2 * sizeof(int) * blockDim.x>>>(out, in, height, width);
    cudaDeviceSynchronize();
    // Transpose the row-scanned table.
    dim3 gridSize, blockSize;
    gridSize.x = (int)((width + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK);
    gridSize.y = (int)((height + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK);
    blockSize.x = THREADS_PER_BLOCK;
    blockSize.y = THREADS_PER_BLOCK;
    transpose<<<gridSize, blockSize>>>(out, outT, width, height);
    cudaDeviceSynchronize();
    // Zero the first output row (was: HEIGHT macro).
    memset(out, 0, (height + 1) * sizeof(int));
    // Scan the columns (rows of the transposed table).
    blockDim = dim3(1, 1);
    while (blockDim.x < ceil((height) / 2.0f)) blockDim.x <<= 1;
    gridDim = dim3(1, width);
    KernPrefixSumRowsTrans<<<gridDim, blockDim, 2 * sizeof(int) * blockDim.x>>>(out, outT, width, height);
    cudaDeviceSynchronize();
    // Transpose back so outT holds the final (height+1) x (width+1) table.
    gridSize.x = (int)((height + 1 + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK);
    gridSize.y = (int)((width + 1 + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK);
    blockSize.x = THREADS_PER_BLOCK;
    blockSize.y = THREADS_PER_BLOCK;
    transpose<<<gridSize, blockSize>>>(out, outT, height + 1, width + 1);
    cudaDeviceSynchronize();
}
// CPU reference: builds a (HEIGHT+1) x (WIDTH+1) summed-area table from the
// WIDTH x HEIGHT image using I[y][x] = img + I[y][x-1] - I[y-1][x-1] + I[y-1][x],
// with a zero first row and zero first column.
// Fix: the original duplicated the yy == 1 iteration verbatim before the
// loop; the loop now simply starts at yy = 1 (identical results).
void ComputeIntegrals(const unsigned char *Img, int *Integral) {
    const int SUM_WIDTH_STEP = (WIDTH + 1);
#define SUM_TYPE int
    int iW = WIDTH;      // image dimensions
    int iH = HEIGHT;
    int sW = WIDTH + 1;  // sum (table) row width
    unsigned char *ImgPtr = 0;
    SUM_TYPE *IntegPtr = 0;
    // Write zeros to the first table row.
    memset(Integral, 0, (WIDTH + 1) * sizeof(int));
    for (int yy = 1; yy <= iH; yy++) {
        ImgPtr = (unsigned char *)(Img + WIDTH * (yy - 1));
        IntegPtr = (SUM_TYPE *)(Integral + SUM_WIDTH_STEP * yy);
        SUM_TYPE *IntegPtrA = IntegPtr - 1;       // left neighbour
        SUM_TYPE *IntegPtrB = IntegPtr - sW - 1;  // upper-left neighbour
        SUM_TYPE *IntegPtrC = IntegPtr - sW;      // upper neighbour
        *IntegPtr++ = (SUM_TYPE)0.0;              // first column is zero
        IntegPtrA++;
        IntegPtrB++;
        IntegPtrC++;
        for (int xx = 1; xx < iW; xx++) {
            SUM_TYPE fTemp = (SUM_TYPE)*(ImgPtr++);
            *IntegPtr++ = fTemp
                        + *IntegPtrA++
                        - *IntegPtrB++
                        + *IntegPtrC++;
        }
        // Last column of the row (pointers not advanced past it).
        SUM_TYPE fTemp = (SUM_TYPE)*(ImgPtr);
        *IntegPtr = fTemp
                  + *IntegPtrA
                  - *IntegPtrB
                  + *IntegPtrC;
    }
    printf("\n\n");
}
// Driver: computes the integral image of a WIDTH x HEIGHT image of ones on
// the CPU (ComputeIntegrals) and on the GPU (PrefixSumRows), printing both
// results and wall-clock timings for comparison.
int main() {
unsigned char *Img=0;
int *ImgInt=0;
int *Integral=0;
int *IntegralTransposed=0;
clock_t start, end;
// Managed allocations: readable/writable from both host loops and kernels.
CUDA_CHECK( cudaMallocManaged((void **) &Img, WIDTH*HEIGHT) );
CUDA_CHECK( cudaMallocManaged((void **) &ImgInt, WIDTH*HEIGHT*sizeof(int)) );
CUDA_CHECK( cudaMallocManaged((void **) &Integral, (WIDTH+1)*(HEIGHT+1)*sizeof(int)) );
CUDA_CHECK( cudaMallocManaged((void **) &IntegralTransposed, (WIDTH+1)*(HEIGHT+1)*sizeof(int)) );
// Test image: all ones (the integral at [y][x] is then y*x).
for (int i=0; i<WIDTH*HEIGHT; i++) Img[i] = 1;
for (int i=0; i<WIDTH*HEIGHT; i++) ImgInt[i] = 1;
for (int i=0; i<(WIDTH+1)*(HEIGHT+1); i++) Integral[i] = 1;
for (int i=0; i<(WIDTH+1)*(HEIGHT+1); i++) IntegralTransposed[i] = 1;
// CPU pass.
start = clock();
ComputeIntegrals(Img, Integral);
end = clock();
printf("CPU Time Taken: %f\n", ((double)(end-start))/CLOCKS_PER_SEC);
int *IntegPtr;
unsigned char *ImgPtr;
// Dump the input image.
printf("Input\n\n");
for (int i=0; i<HEIGHT; i++) {
for (int j=0; j<WIDTH; j++) {
ImgPtr = Img + i * WIDTH + j;
printf("%d ", *ImgPtr);
}
printf("\n");
}
printf("\n\n");
printf("Output CPU");
printf("\n\n");
for (int i=0; i<(HEIGHT+1); i++) {
for (int j=0; j<(WIDTH+1); j++) {
IntegPtr = Integral + i * (WIDTH+1) + j;
printf("%d ", *IntegPtr);
}
printf("\n");
}
printf("\n\n");
printf("OUTPUT GPU");
printf("\n\n");
// Reset the table, then run the GPU pass (result lands in IntegralTransposed).
for (int i=0; i<(WIDTH+1)*(HEIGHT+1); i++) Integral[i] = 0;
start = clock();
PrefixSumRows(Integral, ImgInt, IntegralTransposed, HEIGHT, WIDTH);
end = clock();
printf("GPU Time Taken: %f\n", ((double)(end-start))/CLOCKS_PER_SEC);
for (int i=0; i<(HEIGHT+1); i++) {
for (int j=0; j<(WIDTH+1); j++) {
IntegPtr = IntegralTransposed + i * (WIDTH+1) + j;
printf("%d ", *IntegPtr);
}
printf("\n");
}
cudaFree(Img);
cudaFree(ImgInt);
cudaFree(Integral);
cudaFree(IntegralTransposed);
return 0;
}
|
12,147 | // RUN: %run_test hipify "%s" "%t" %hipify_args %clang_args
/*
Copyright (c) 2015-2016 Advanced Micro Devices, Inc. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#include <stdio.h>
#include <cuda_runtime.h>
#define CHECK(cmd) \
{\
cudaError_t error = cmd;\
if (error != cudaSuccess) { \
fprintf(stderr, "error: '%s'(%d) at %s:%d\n", cudaGetErrorString(error), error,__FILE__, __LINE__); \
exit(EXIT_FAILURE);\
}\
}
/*
* Square each element in the array A and write to array C.
*/
// Squares each element of A_d into C_d using a grid-stride loop, so any
// grid/block configuration covers all N elements.
template <typename T>
__global__ void
vector_square(T *C_d, const T *A_d, size_t N)
{
    const size_t stride = blockDim.x * gridDim.x;
    size_t i = blockIdx.x * blockDim.x + threadIdx.x;
    while (i < N) {
        const T a = A_d[i];
        C_d[i] = a * a;
        i += stride;
    }
}
// hipify regression driver: allocates host/device buffers, squares a vector
// on the GPU, and verifies the result on the host.  The "// CHECK:" comments
// are lit-test expectations for the hipified output -- they must stay
// directly above the statements they describe.
int main(int argc, char *argv[])
{
float *A_d, *C_d;
float *A_h, *C_h;
size_t N = 1000000;
size_t Nbytes = N * sizeof(float);
// CHECK: hipDeviceProp_t props;
cudaDeviceProp props;
// CHECK: CHECK(hipGetDeviceProperties(&props, 0/*deviceID*/));
CHECK(cudaGetDeviceProperties(&props, 0/*deviceID*/));
printf ("info: running on device %s\n", props.name);
printf ("info: allocate host mem (%6.2f MB)\n", 2*Nbytes/1024.0/1024.0);
A_h = (float*)malloc(Nbytes);
// CHECK: CHECK(A_h == 0 ? hipErrorMemoryAllocation : hipSuccess );
CHECK(A_h == 0 ? cudaErrorMemoryAllocation : cudaSuccess );
C_h = (float*)malloc(Nbytes);
// CHECK: CHECK(C_h == 0 ? hipErrorMemoryAllocation : hipSuccess );
CHECK(C_h == 0 ? cudaErrorMemoryAllocation : cudaSuccess );
// Fill with Phi + i
for (size_t i=0; i<N; i++)
{
A_h[i] = 1.618f + i;
}
printf ("info: allocate device mem (%6.2f MB)\n", 2*Nbytes/1024.0/1024.0);
// CHECK: CHECK(hipMalloc(&A_d, Nbytes));
// CHECK: CHECK(hipMalloc(&C_d, Nbytes));
CHECK(cudaMalloc(&A_d, Nbytes));
CHECK(cudaMalloc(&C_d, Nbytes));
printf ("info: copy Host2Device\n");
// CHECK: CHECK ( hipMemcpy(A_d, A_h, Nbytes, hipMemcpyHostToDevice));
CHECK ( cudaMemcpy(A_d, A_h, Nbytes, cudaMemcpyHostToDevice));
// Launch configuration for the grid-stride kernel.
const unsigned blocks = 512;
const unsigned threadsPerBlock = 256;
printf ("info: launch 'vector_square' kernel\n");
// CHECK: hipLaunchKernelGGL(vector_square, dim3(blocks), dim3(threadsPerBlock), 0, 0, C_d, A_d, N);
vector_square <<<blocks, threadsPerBlock>>> (C_d, A_d, N);
printf ("info: copy Device2Host\n");
// CHECK: CHECK ( hipMemcpy(C_h, C_d, Nbytes, hipMemcpyDeviceToHost));
CHECK ( cudaMemcpy(C_h, C_d, Nbytes, cudaMemcpyDeviceToHost));
printf ("info: check result\n");
for (size_t i=0; i<N; i++) {
if (C_h[i] != A_h[i] * A_h[i]) {
// CHECK: CHECK(hipErrorUnknown);
CHECK(cudaErrorUnknown);
}
}
printf ("PASSED!\n");
}
|
12,148 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define N 10000
#define BlX 100
// Element-wise vector add: c[i] = a[i] + b[i], one thread per element.
// Fix: added a bounds guard so the kernel stays safe if the grid overshoots
// the N-element arrays (e.g. a block size that does not divide N).
__global__ void Cong(int *a, int *b, int *c) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N)
        *(c + i) = *(a + i) + *(b + i);
}
// Demo driver: builds A[i] = i and B[i] = 2i on the host, adds them on the
// GPU, and prints the first 20 sums (expected 3i).
int main()
{
    int *Ah, *Bh, *Ch, *Ad, *Bd, *Cd;
    const int size = N * sizeof(int);
    // Host buffers.
    Ah = (int*)malloc(size);
    Bh = (int*)malloc(size);
    Ch = (int*)malloc(size);
    // Device buffers.
    cudaMalloc((void**)&Ad, size);
    cudaMalloc((void**)&Bd, size);
    cudaMalloc((void**)&Cd, size);
    for (int i = 0; i < N; i++)
    {
        Ah[i] = i;
        Bh[i] = 2 * i;
    }
    cudaMemcpy(Ad, Ah, size, cudaMemcpyHostToDevice);
    cudaMemcpy(Bd, Bh, size, cudaMemcpyHostToDevice);
    // One thread per element: N/BlX blocks of BlX threads.
    Cong<<<N / BlX, BlX>>>(Ad, Bd, Cd);
    cudaMemcpy(Ch, Cd, size, cudaMemcpyDeviceToHost);
    for (int i = 0; i < 20; i++)
    {
        printf("%d\n", Ch[i]);
    }
    free(Ah); free(Bh); free(Ch);
    cudaFree(Ad); cudaFree(Bd); cudaFree(Cd);
    return 0;
}
12,149 | //#include <cuda.h>
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
// Prints the CUDA error string plus the failing file/line and terminates the
// process; a no-op when err is cudaSuccess.  Use via HANDLE_ERROR(call).
static void HandleError(cudaError_t err,
                        const char *file,
                        int line)
{
    if (err == cudaSuccess)
        return;
    printf("%s in %s at line %d\n", cudaGetErrorString(err),
           file, line);
    exit(EXIT_FAILURE);
}
#define HANDLE_ERROR(err) (HandleError(err, __FILE__, __LINE__))
#define HANDLE_NULL(a) \
{ \
if (a == NULL) \
{ \
printf("Host memory failed in %s at line %d\n", \
__FILE__, __LINE__); \
exit(EXIT_FAILURE); \
} \
}
// Single-thread kernel: stores a + b into the int pointed to by c.
__global__ void add(int a, int b, int *c)
{
    c[0] = a + b;
}
// Chapters 1-3 demo: a trivial kernel launch, device enumeration, and device
// selection via cudaChooseDevice.
// Fixes: CUDA API calls are now checked with HANDLE_ERROR (consistent with
// the rest of the file, which defines and uses it), and the device loop uses
// an int index to match cudaGetDeviceCount's int count.
void chapter_1_to_3()
{
    int c;
    int *device_c;
    HANDLE_ERROR(cudaMalloc((void **)&device_c, sizeof(int)));
    add<<<1, 1>>>(2, 7, device_c);
    HANDLE_ERROR(cudaMemcpy(&c, device_c, sizeof(int), cudaMemcpyDeviceToHost));
    HANDLE_ERROR(cudaFree(device_c));
    printf("2 + 7 = %i\nComputed via CUDA, yaaay.\n", c);
    int cudaDeviceCount;
    HANDLE_ERROR(cudaGetDeviceCount(&cudaDeviceCount));
    printf("This computer contains %i CUDA enabled GPU.\n", cudaDeviceCount);
    std::vector<cudaDeviceProp> deviceInfos;
    for (int i = 0; i < cudaDeviceCount; i++)
    {
        cudaDeviceProp info = {};
        HANDLE_ERROR(cudaGetDeviceProperties(&info, i));
        deviceInfos.push_back(info);
        printf("Loaded info about %s\n", info.name);
    }
    // We can request a device with certain capabilities, e.g. CC >= 6.0:
    cudaDeviceProp requirement;
    memset(&requirement, 0, sizeof(cudaDeviceProp));
    requirement.major = 6;
    requirement.minor = 0;
    int returnedDevice;
    HANDLE_ERROR(cudaChooseDevice(&returnedDevice, &requirement));
    printf("cudaChooseDevice returned: %i\n", returnedDevice);
    HANDLE_ERROR(cudaSetDevice(returnedDevice));
}
constexpr int arrSize = 1000;
// One block per element: block i computes c[i] = a[i] + b[i].
__global__ void device_add(int *a, int *b, int *c)
{
    const int idx = blockIdx.x;
    if (idx < arrSize)
        c[idx] = a[idx] + b[idx];
}
// Chapter 4 demo: element-wise add of two arrays, one block per element.
void chapter_4()
{
    int a[arrSize], b[arrSize], c[arrSize];
    int *device_a, *device_b, *device_c;
    const size_t bytes = arrSize * sizeof(int);
    cudaMalloc((void **)&device_a, bytes);
    cudaMalloc((void **)&device_b, bytes);
    cudaMalloc((void **)&device_c, bytes);
    // a[i] = -i, b[i] = i^2, so c[i] should come back as i^2 - i.
    for (size_t i = 0; i < arrSize; i++)
    {
        a[i] = -i;
        b[i] = i * i;
    }
    cudaMemcpy(device_a, a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(device_b, b, bytes, cudaMemcpyHostToDevice);
    device_add<<<arrSize, 1>>>(device_a, device_b, device_c);
    cudaMemcpy(c, device_c, bytes, cudaMemcpyDeviceToHost);
    for (size_t i = 0; i < arrSize; i++)
    {
        printf("%i + %i = %i\n", a[i], b[i], c[i]);
    }
    cudaFree(device_a);
    cudaFree(device_b);
    cudaFree(device_c);
}
// Entry point: runs the chapter 4 demo (chapters 1-3 left disabled).
int main(void)
{
    //chapter_1_to_3();  // Chapter 1 - 3
    chapter_4();
    return 0;
}
|
12,150 | #include<stdio.h>
#include<stdlib.h>
#include<unistd.h>
#include<stdbool.h>
#include<iostream>
#include<cuda.h>
#include<cuda_runtime.h>
typedef unsigned int uint;
typedef unsigned short ushort;
// Result from last compute of world.
unsigned char *d_resultData=NULL;
// Current state of world.
unsigned char *d_data=NULL;
// Host copy of the world.
unsigned char *h_data=NULL;
// Current width of world.
size_t g_worldWidth=0;
/// Current height of world.
size_t g_worldHeight=0;
/// Current data length (product of width and height)
size_t g_dataLength=0; // g_worldWidth * g_worldHeight
// Records the world geometry in the module globals, allocates the two device
// world buffers, and allocates a zeroed host copy of the world.
static inline void HL_initialiaze( size_t worldWidth, size_t worldHeight )
{
    g_worldWidth  = worldWidth;
    g_worldHeight = worldHeight;
    g_dataLength  = worldWidth * worldHeight;
    const size_t bytes = g_dataLength * sizeof(unsigned char);
    cudaMalloc(&d_data, bytes);
    cudaMalloc(&d_resultData, bytes);
    // calloc gives an all-dead (zeroed) starting world.
    h_data = (unsigned char *)calloc(g_dataLength, sizeof(unsigned char));
}
// Pattern 0: all-dead world.  Intentionally empty -- the calloc in
// HL_initialiaze already zeroed h_data.
static inline void HL_initAllZeros( size_t worldWidth, size_t worldHeight )
{
// calloc init's to all zeros
}
// Pattern 1: mark every cell in the host world alive.
static inline void HL_initAllOnes( size_t worldWidth, size_t worldHeight )
{
    for (size_t idx = 0; idx < g_dataLength; ++idx)
        h_data[idx] = 1;
}
// Pattern 2: a horizontal run of alive cells at columns 10..19 of row 10.
static inline void HL_initOnesInMiddle( size_t worldWidth, size_t worldHeight )
{
    const size_t row10 = 10 * g_worldWidth;
    // Walk row 10 and light only the cells in its [10, 20) column window.
    for (size_t i = row10; i < row10 + g_worldWidth; ++i)
    {
        if (i >= row10 + 10 && i < row10 + 20)
            h_data[i] = 1;
    }
}
// Pattern 3: one alive cell in each of the four corners.
// Fix: the original computed the last row's start as
// worldHeight*(worldWidth-1), which coincides with the correct value only for
// square worlds; the last row actually starts at (worldHeight-1)*worldWidth.
static inline void HL_initOnesAtCorners( size_t worldWidth, size_t worldHeight )
{
    const size_t lastRow = (worldHeight - 1) * worldWidth;
    h_data[0] = 1;                         // upper left
    h_data[worldWidth - 1] = 1;            // upper right
    h_data[lastRow] = 1;                   // lower left
    h_data[lastRow + worldWidth - 1] = 1;  // lower right
}
// Pattern 4: three alive cells around the upper-left corner (wraps to the
// upper-right via the last column), forming a spinner under toroidal rules.
static inline void HL_initSpinnerAtCorner( size_t worldWidth, size_t worldHeight )
{
h_data[0] = 1; // upper left
h_data[1] = 1; // upper left +1
h_data[worldWidth-1]=1; // upper right
}
// Pattern 5: seed six alive cells at the world's centre (the HighLife
// replicator seed: three in a row, three in a column).
static inline void HL_initReplicator( size_t worldWidth, size_t worldHeight )
{
    const size_t cx = worldWidth / 2;
    const size_t cy = worldHeight / 2;
    const size_t centre = cx + cy * worldWidth;
    h_data[centre + 1] = 1;
    h_data[centre + 2] = 1;
    h_data[centre + 3] = 1;
    h_data[cx + (cy + 1) * worldWidth] = 1;
    h_data[cx + (cy + 2) * worldWidth] = 1;
    h_data[cx + (cy + 3) * worldWidth] = 1;
}
// Allocates the world buffers, then seeds the requested start pattern.
// Unknown pattern numbers abort with a message.
static inline void HL_initMaster( unsigned int pattern, size_t worldWidth, size_t worldHeight )
{
    HL_initialiaze(worldWidth, worldHeight);
    if (pattern == 0)
        HL_initAllZeros(worldWidth, worldHeight);
    else if (pattern == 1)
        HL_initAllOnes(worldWidth, worldHeight);
    else if (pattern == 2)
        HL_initOnesInMiddle(worldWidth, worldHeight);
    else if (pattern == 3)
        HL_initOnesAtCorners(worldWidth, worldHeight);
    else if (pattern == 4)
        HL_initSpinnerAtCorner(worldWidth, worldHeight);
    else if (pattern == 5)
        HL_initReplicator(worldWidth, worldHeight);
    else
    {
        printf("Pattern %u has not been implemented \n", pattern);
        exit(-1);
    }
}
// swap the pointers of pA and pB.
// Exchanges the two world pointers (flips the current/result buffers between
// simulation steps).
static inline void HL_swap( unsigned char **pA, unsigned char **pB)
{
    unsigned char *hold = *pB;
    *pB = *pA;
    *pA = hold;
}
// number of alive cells at 3x3 grid (excluding center)
// Counts the alive cells among the eight neighbours of the cell at column x1,
// row offset y1.  x0/x2 are the wrapped left/right column offsets, y0/y2 the
// wrapped row-above/row-below offsets (already multiplied by the row width).
__device__ static inline unsigned int HL_countAliveCells(const unsigned char* data,
size_t x0,
size_t x1,
size_t x2,
size_t y0,
size_t y1,
size_t y2)
{
    unsigned int alive = 0;
    alive += data[x0 + y0]; alive += data[x1 + y0]; alive += data[x2 + y0];
    alive += data[x0 + y1];                         alive += data[x2 + y1];
    alive += data[x0 + y2]; alive += data[x1 + y2]; alive += data[x2 + y2];
    return alive;
}
// Don't Modify this function or your submitty autograding will not work
// Prints the host copy of the world, one row per line, prefixed with the
// iteration number.  The exact output format is checked by the autograder.
// Don't Modify this function or your submitty autograding will not work
static inline void HL_printWorld(size_t iteration)
{
int i, j;
printf("Print World - Iteration %zu \n", iteration);
for( i = 0; i < g_worldHeight; i++)
{
printf("Row %2d: ", i);
for( j = 0; j < g_worldWidth; j++)
{
printf("%u ", (unsigned int)h_data[(i*g_worldWidth) + j]);
}
printf("\n");
}
printf("\n\n");
}
// CUDA kernel
// One HighLife update step: each thread computes the next state of one cell,
// with toroidal wrap-around at the world edges.  Reads d_data, writes
// d_resultData.  Rules: survive on 2 or 3 neighbours, birth on 3 or 6.
// Fix: added a bounds guard so an over-provisioned grid cannot index past
// the world buffers.
__global__ void HL_kernel(const unsigned char* d_data,
                          unsigned int worldWidth,
                          unsigned int worldHeight,
                          unsigned char* d_resultData)
{
    unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= worldWidth * worldHeight)
        return;
    size_t x = index % worldWidth;
    size_t y = index / worldWidth;
    // Row offsets of the rows above/current/below (wrapped).
    size_t y0 = ((y + worldHeight - 1) % worldHeight) * worldWidth;
    size_t y1 = y * worldWidth;
    size_t y2 = ((y + 1) % worldHeight) * worldWidth;
    // Column offsets left/current/right (wrapped).
    size_t x1 = x;
    size_t x0 = (x1 + worldWidth - 1) % worldWidth;
    size_t x2 = (x1 + 1) % worldWidth;
    uint count = HL_countAliveCells(d_data, x0, x1, x2, y0, y1, y2);
    if (d_data[y1 + x1] == 1)
        d_resultData[y1 + x1] = (count == 2 || count == 3) ? 1 : 0;
    else
        d_resultData[y1 + x1] = (count == 3 || count == 6) ? 1 : 0;
}
// Launch the kernel for a number of iterations
// Runs iterationsCount HighLife steps, swapping the current/result buffer
// pointers between steps; *d_data holds the final world on return.
// Fixes: the grid size is now derived from the worldWidth/worldHeight
// parameters instead of the g_dataLength global, and the per-iteration
// cudaDeviceSynchronize was dropped (kernels on the same stream already
// execute in order); one final sync remains.
bool HL_kernelLaunch(unsigned char** d_data,
                     unsigned char** d_resultData,
                     size_t worldWidth,
                     size_t worldHeight,
                     size_t iterationsCount,
                     ushort threadsCount)
{
    const size_t cellCount = worldWidth * worldHeight;
    dim3 threadsPerBlock(threadsCount);
    // Same floor division as the original: assumes threadsCount | cellCount.
    dim3 blocksPerGrid(cellCount / threadsCount);
    for (size_t i = 0; i < iterationsCount; i++) {
        HL_kernel<<<blocksPerGrid, threadsPerBlock>>>(*d_data, worldWidth, worldHeight, *d_resultData);
        HL_swap(d_resultData, d_data);
    }
    cudaDeviceSynchronize();
    return true;
}
// Entry point: parses <pattern> <worldSize> <iterations> <blockSize>, seeds
// the world, runs the simulation on the GPU, and copies the result back.
// Fixes: the usage message claimed 3 arguments although 4 are required, and
// `return true` made main exit with status 1 (failure); it now returns 0.
int main(int argc, char *argv[])
{
    unsigned int pattern = 0;
    unsigned int worldSize = 0;
    unsigned int iterations = 0;
    unsigned int blocksize = 0;
    printf("This is the HighLife running in parallel on a GPU.\n");
    if (argc != 5)
    {
        printf("HighLife requires 4 arguments, 1st is pattern number, 2nd the sq size of the world and 3rd is the number of itterations, 4th is the blocksize e.g. ./highlife 0 32 2 8 \n");
        exit(-1);
    }
    pattern = atoi(argv[1]);
    worldSize = atoi(argv[2]);
    iterations = atoi(argv[3]);
    blocksize = atoi(argv[4]);
    HL_initMaster(pattern, worldSize, worldSize);
    // printf("AFTER INIT IS............\n");
    // HL_printWorld(0);
    // Push the seeded world to the device (1 byte per cell).
    cudaMemcpy(d_data, h_data, g_dataLength, cudaMemcpyHostToDevice);
    HL_kernelLaunch(&d_data, &d_resultData, g_worldWidth, g_worldHeight, iterations, blocksize);
    // Pull the final world back to the host.
    memset(h_data, 0, g_dataLength);
    cudaMemcpy(h_data, d_data, g_dataLength, cudaMemcpyDeviceToHost);
    // printf("######################### FINAL WORLD IS ###############################\n");
    // HL_printWorld(iterations);
    free(h_data);
    cudaFree(d_data);
    cudaFree(d_resultData);
    return 0;
}
|
12,151 | #include "includes.h"
// Maps a flat index into the upscaled output tensor (dims ... x d1 x d2 x d3
// x d4, innermost = d4) to the flat index of the input element it is copied
// from: t is divided by scale_factor_t, z and w by scale_factor_xy.
__device__ int translate_idx(int ii, int d1, int d2, int d3, int d4, int scale_factor_t, int scale_factor_xy)
{
    // Decompose the output index into coordinates, innermost first.
    int w = ii % d4; ii /= d4;
    int z = ii % d3; ii /= d3;
    int t = ii % d2; ii /= d2;
    int y = ii % d1; ii /= d1;
    int x = ii;
    // Collapse each coordinate onto the smaller input grid.
    w /= scale_factor_xy;
    z /= scale_factor_xy;
    t /= scale_factor_t;
    // Input tensor dimensions after downscaling.
    const int in_d2 = d2 / scale_factor_t;
    const int in_d3 = d3 / scale_factor_xy;
    const int in_d4 = d4 / scale_factor_xy;
    // Re-flatten against the input dimensions.
    return (((((x * d1 + y) * in_d2) + t) * in_d3) + z) * in_d4 + w;
}
// Nearest-neighbour upscaling: each thread copies one output element from
// its source input element (via translate_idx).
// NOTE(review): the flat index combines a 2D thread/block layout as
// tx + bx*BX + ty*... -- this assumes a specific launch shape (gridDim.y
// tiling the tail); confirm against the host-side launch configuration.
__global__ void upscale(float *input, float *output, long no_elements, int scale_factor_t, int scale_factor_xy, int d1, int d2, int d3, int d4)
{
// output offset:
long ii = threadIdx.x + blockDim.x * blockIdx.x;
ii += threadIdx.y + blockDim.y * (blockDim.x * gridDim.x) * blockIdx.y;
if (ii >= no_elements) return;
int ipidx = translate_idx(ii, d1, d2, d3, d4, scale_factor_t, scale_factor_xy);
output[ii]=input[ipidx];
}
12,152 | #include <stdio.h>
#include <cuda.h>
#include <iostream>
// Proxy giving host-side read/write access to a single int living in device
// memory; reads and writes go through cudaMemcpy.
class dreference {
public:
    dreference(int *memloc) : memloc(memloc) {}
    // Read via call syntax: dr().
    int operator ()() {
        return getval();
    }
    // Write: copies newval to the device location.
    int operator = (int newval) {
        cudaMemcpy(memloc, &newval, sizeof(int), cudaMemcpyHostToDevice);
        return newval; // can return self-reference to allow cascaded =.
    }
    // Copy the current value back from the device.
    int getval() {
        int fetched;
        cudaMemcpy(&fetched, memloc, sizeof(int), cudaMemcpyDeviceToHost);
        return fetched;
    }
private:
    int *memloc; // device address this proxy refers to
};
// Host-side handle to an int array allocated in device memory; operator[]
// returns a dreference proxy so elements can be read and assigned with [].
class dvector {
public:
dvector(unsigned size);
~dvector();
dreference operator [](unsigned ii); // proxy for element ii (no bounds check)
void print();
private:
int *arr; // device allocation holding `size` ints
int size; // element count
};
// Allocates `size` ints of device memory for the vector.
dvector::dvector(unsigned size) {
    this->size = size;
    cudaMalloc(&arr, size * sizeof(int));
}
// Releases the device allocation.
dvector::~dvector() {
    cudaFree(arr);
    arr = nullptr;
}
// Returns a proxy bound to element ii (no bounds checking).
dreference dvector::operator [](unsigned ii) {
    return dreference(&arr[ii]);
}
// Copies each element back to the host and prints them comma-separated.
void dvector::print() {
    for (int ii = 0; ii < size; ++ii) {
        int value;
        cudaMemcpy(&value, arr + ii, sizeof(int), cudaMemcpyDeviceToHost);
        std::cout << value << ", ";
    }
    std::cout << std::endl;
}
// Stream insertion for the proxy: fetches the value from the device.
std::ostream & operator <<(std::ostream &os, dreference dd) {
return os << dd.getval();
}
// Demo: write a few elements through the proxy, read two back, print all.
int main() {
    dvector values(10);
    values[0] = 1;
    values[1] = 2;
    values[5] = 2;
    std::cout << values[0] << ", " << values[1] << std::endl;
    values.print();
    return 0;
}
|
12,153 | #include <iostream>
#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <assert.h>
#include <fcntl.h>
#include <errno.h>
#include <vector>
#include <time.h>
#include <sys/time.h>
using std::cout;
using std::cerr;
using std::endl;
using std::cin;
using std::vector;
constexpr size_t IM_X = 1300;
constexpr size_t IM_Y = 600;
constexpr size_t IM_V = sizeof(float2);
constexpr size_t IM_SIZE = IM_X * IM_Y * IM_V;
constexpr size_t XSR = 10;
constexpr size_t YSR = 5;
// Component-wise scale of a float2 by a scalar.
__device__
inline float2 mul(float s, float2 v) {
    float2 scaled;
    scaled.x = s * v.x;
    scaled.y = s * v.y;
    return scaled;
}
// Component-wise sum of two float2 values.
__device__
inline float2 add(float2 v1, float2 v2) {
    float2 summed;
    summed.x = v1.x + v2.x;
    summed.y = v1.y + v2.y;
    return summed;
}
// RK4 streamline integration: each thread traces one particle seeded at its
// (sample-rate-scaled) grid coordinate through the vector field texture,
// writing `steps` positions into its contiguous slice of `out`.
// Fix: the RK4 combination used double literals (0.5, 1.0/6.0), forcing the
// accumulation into fp64; float literals keep the kernel in single precision
// (values may differ in the last ulp).
__global__
void integrate(float2* out, cudaTextureObject_t vecs, float dt, size_t steps) {
    float2 k1, k2, k3, k4, p, q;
    // Initial position = global thread coordinate.
    p.x = blockIdx.x * blockDim.x + threadIdx.x;
    p.y = blockIdx.y * blockDim.y + threadIdx.y;
    // Output location: this particle's run of `steps` slots.
    size_t idx = (blockDim.x * gridDim.x * (int)p.y + (int)p.x) * steps;
    // Apply sample rate.
    p.x *= XSR;
    p.y *= YSR;
    // Initial output.
    out[idx++] = p;
    // Integrate forward with classic RK4.
    for (size_t i = 1; i < steps; i++) {
        k1 = mul(dt, tex2D<float2>(vecs, p.x, p.y));
        q = add(p, mul(0.5f, k1));
        k2 = mul(dt, tex2D<float2>(vecs, q.x, q.y));
        q = add(p, mul(0.5f, k2));
        k3 = mul(dt, tex2D<float2>(vecs, q.x, q.y));
        q = add(p, k3);
        k4 = mul(dt, tex2D<float2>(vecs, q.x, q.y));
        p.x += (1.0f/6.0f) * (k1.x + 2.0f*k2.x + 2.0f*k3.x + k4.x);
        p.y += (1.0f/6.0f) * (k1.y + 2.0f*k2.y + 2.0f*k3.y + k4.y);
        out[idx++] = p;
    }
}
// Aborts with the CUDA error string on failure; passes the result through
// unchanged on success.
__host__
cudaError_t checkCuda(cudaError_t result) {
    if (result != cudaSuccess) {
        cerr << "CUDA Runtime Error: " << cudaGetErrorString(result) << endl;
        abort();
    }
    return result;
}
// Aborts with errno and its message when a Linux call reports -1; otherwise
// passes the result through unchanged.
__host__
int checkLinux(int result) {
    if (result == -1) {
        cerr << "Linux Runtime Error: (" << errno << ") " << strerror(errno) << endl;
        abort();
    }
    return result;
}
// Writes the traced particle positions to a CSV file via a memory mapping,
// then truncates the file to the bytes actually produced.
// Fixes: the original wrote the header with write(2) and then streamed rows
// into the mapping starting at offset 0, clobbering the header -- the header
// now goes through the mapping too; %llu was replaced with %zu to match the
// size_t argument; the size estimate budgets the header line.
__host__
void writeCSV(char* file, float2* output, size_t num_particles, size_t steps) {
    const char* header = "line_id, coordinate_x, coordinate_y\n";
    const size_t header_len = strlen(header);
    // Worst case per row: 20-digit id + two 9-char floats + separators.
    const size_t file_size = header_len + num_particles * steps * (20 + 9 + 9 + 3);
    umask(0111);
    int fd = checkLinux(open(file, O_RDWR | O_CREAT | O_TRUNC, 06666));
    checkLinux(ftruncate(fd, file_size));
    char* map = (char*) mmap(NULL, file_size, PROT_WRITE, MAP_SHARED, fd, 0);
    checkLinux((int)(size_t)map);
    char* cur = map;
    memcpy(cur, header, header_len);
    cur += header_len;
    for (size_t i = 0; i < num_particles; i++)
        for (size_t s = 0; s < steps; s++) {
            float2 p = output[i * steps + s];
            cur += sprintf(cur, "%zu,%.7f,%.7f\n", i, p.x, p.y);
        }
    msync(map, file_size, MS_SYNC);
    munmap(map, file_size);
    // Shrink the file to the bytes actually written.
    checkLinux(ftruncate(fd, cur - map));
    checkLinux(close(fd));
}
vector<const char*> names;
vector<timespec> wall;
vector<timespec> proc;
vector<size_t> levels;
size_t cur_level = 0;
// Opens a named timing scope: snapshots the wall and per-process CPU clocks
// and pushes the scope's name and nesting level onto the parallel global
// vectors.  Must be paired with a later ftime(); ptime() replays the log.
__host__
static inline void stime(const char* name) {
timespec cur_wall, cur_proc;
clock_gettime(CLOCK_REALTIME, &cur_wall);
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &cur_proc);
names.push_back(name);
levels.push_back(cur_level++);
wall.push_back(cur_wall);
proc.push_back(cur_proc);
}
// Closes the innermost open timing scope: records the end timestamps at the
// decremented nesting level.  No name is pushed -- ptime() distinguishes
// start and end markers by their levels when replaying the log.
__host__
static inline void ftime() {
timespec cur_wall, cur_proc;
clock_gettime(CLOCK_REALTIME, &cur_wall);
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &cur_proc);
levels.push_back(--cur_level);
wall.push_back(cur_wall);
proc.push_back(cur_proc);
}
// from https://gist.github.com/diabloneo/9619917
__host__
static inline void timespecDiff(timespec& a, timespec& b, timespec& result) {
    // result = a - b, normalized so tv_nsec ends up in [0, 1e9).
    result.tv_sec  = a.tv_sec  - b.tv_sec;
    result.tv_nsec = a.tv_nsec - b.tv_nsec;
    if (result.tv_nsec < 0) {
        result.tv_sec  -= 1;
        result.tv_nsec += 1000000000L;
    }
}
__host__
static inline double timespecToMs(const timespec& t) {
    // Seconds + nanoseconds folded into fractional milliseconds.
    double ms = (double)t.tv_sec * 1000.0;
    ms += (double)t.tv_nsec / 1000000.0;
    return ms;
}
__host__
// Pretty-print the recorded timing tree as CSV rows:
//   "<run name>", "<section label>", <wall ms>, <proc ms>
// indented one tab per nesting level. Events in wall/proc/levels come in
// matched start/finish pairs (stime/ftime); `n` is the next label to print,
// `i` the next event index, `l` the current nesting level. Returns the label
// index reached so recursive calls can resume where a subtree left off.
static size_t ptime(const char* name, size_t n = 0, size_t i = 0, size_t l = 0) {
    while (n < names.size() and levels[i] == l) {
        size_t j = i + 1;
        auto& sw = wall[i];   // start timestamps of this section
        auto& sp = proc[i];
        int jumped = j;       // first event after the start; children begin here, if any
        while (l < levels[j]) j++;   // skip nested events to reach this section's finish
        auto& fw = wall[j];   // finish timestamps
        auto& fp = proc[j];
        timespec w, p;
        timespecDiff(fw, sw, w);
        timespecDiff(fp, sp, p);
        for (size_t k = 0; k < l; k++)
            printf("\t");
        printf("\"%s\", \"%s\", %.3f, %.3f\n",
            name,
            names[n++],
            timespecToMs(w),
            timespecToMs(p));
        if (jumped < j)   // events existed between start and finish: recurse into children
            n = ptime(name, n, jumped, l + 1);
        i = j + 1;        // resume with the sibling following this section's finish event
    }
    return n;
}
__host__
// Entry point: load a 2D vector field, upload it as a bilinear texture, run
// the RK4 `integrate` kernel for a fixed particle grid, and copy the traced
// paths back. Usage: ./main <image> <output>.
// Fixes vs. original: kernel launch errors are now surfaced via
// cudaGetLastError(); the "Computation" section waits for the asynchronous
// kernel (it previously timed only the launch, shifting kernel time into the
// D2H copy); and the deprecated cudaMemcpyToArray is replaced by
// cudaMemcpy2DToArray.
int main(int argc, char **argv) {
    stime("Program");
    stime("Setup");
    if (argc != 3) {
        ftime();
        ftime();
        printf("Usage: ./main image output\n");
        return 0;
    }
    float dt = 1;
    //cout << "Enter delta time: ";
    //cin >> dt;
    size_t steps = 100;
    //cout << "Enter number of steps: ";
    //cin >> steps;
    // Opening file
    stime("Read input");
    int fd = checkLinux(open(argv[1], O_RDONLY));
    // Allocating + Mapping host memory
    float2 *im;
    cudaArray* im_d;
    float2 *output_d;
    float2 *output;
    // Memory mapping does not provide a performance boost.
    // It trades off between copy time to GPU or copy to RAM.
    checkCuda(cudaMallocHost(&im, IM_SIZE));   // pinned for fast H2D transfer
    checkLinux(read(fd, im, IM_SIZE));
    close(fd);
    ftime();
    // Allocate CUDA array in device memory
    stime("Copy to GPU");
    // Two 32-bit float channels per texel == one float2.
    cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 32, 0, 0,
                                                              cudaChannelFormatKindFloat);
    checkCuda(cudaMallocArray(&im_d, &channelDesc, IM_X, IM_Y));
    // Tightly-packed host rows: source pitch == row width in bytes.
    checkCuda(cudaMemcpy2DToArray(im_d, 0, 0, im,
                                  IM_X * sizeof(float2),
                                  IM_X * sizeof(float2), IM_Y,
                                  cudaMemcpyHostToDevice));
    ftime();
    // Specify texture
    stime("Initialize Texture");
    struct cudaResourceDesc resDesc;
    memset(&resDesc, 0, sizeof(resDesc));
    resDesc.resType = cudaResourceTypeArray;
    resDesc.res.array.array = im_d;
    // Specify texture object parameters
    struct cudaTextureDesc texDesc;
    memset(&texDesc, 0, sizeof(texDesc));
    texDesc.addressMode[0] = cudaAddressModeBorder;  // out-of-range reads -> zero vector
    texDesc.addressMode[1] = cudaAddressModeBorder;
    texDesc.filterMode = cudaFilterModeLinear;       // bilinear interpolation between texels
    texDesc.readMode = cudaReadModeElementType;
    texDesc.maxAnisotropy = 2;
    texDesc.normalizedCoords = false;                // kernel samples in texel units
    // Create texture object
    cudaTextureObject_t imTex = 0;
    checkCuda(cudaCreateTextureObject(&imTex, &resDesc, &texDesc, NULL));
    ftime();
    dim3 block(26, 24, 1);
    dim3 grid(5, 5, 1);
    // One particle per thread across the whole launch.
    const size_t num_particles = block.x * grid.x * block.y * grid.y;
    const size_t out_size = num_particles * sizeof(float2) * steps;
    stime("Allocate Output");
    checkCuda(cudaMalloc(&output_d, out_size));
    ftime();
    ftime();
    stime("Computation");
    integrate<<<grid, block>>>(output_d, imTex, dt, steps);
    checkCuda(cudaGetLastError());        // surface launch-configuration errors
    checkCuda(cudaDeviceSynchronize());   // kernel is async: wait so this section times execution
    ftime();
    // Copying from device to host
    stime("Copy to host");
    checkCuda(cudaMallocHost(&output, out_size));
    checkCuda(cudaMemcpy(output, output_d, out_size, cudaMemcpyDeviceToHost));
    ftime();
    stime("Free device memory");
    checkCuda(cudaFree(output_d));
    checkCuda(cudaDestroyTextureObject(imTex));
    checkCuda(cudaFreeArray(im_d));
    ftime();
    //stime("Write");
    //writeCSV(argv[2], output, num_particles, steps);
    //ftime();
    stime("Free host memory");
    checkCuda(cudaFreeHost(im));
    checkCuda(cudaFreeHost(output));
    ftime();
    ftime();
    ptime("GPU");
    return 0;
}
12,154 | #include <stdio.h>
__global__ void hello_kernel() {
    // Flat global thread id; with a single block this is just threadIdx.x.
    const int gid = threadIdx.x + blockIdx.x * blockDim.x;
    // Device-side printf: output is flushed at the next synchronizing call.
    printf("Hello from thread %d\n", gid);
}
int main(int argc, char* argv[]) {
    // Pin the program to GPU 0.
    cudaSetDevice(0);
    // One block of four threads says hello.
    hello_kernel<<<1, 4>>>();
    // Block until the kernel finishes so its printf output appears.
    cudaDeviceSynchronize();
    return 0;
}
|
12,155 | #include<stdio.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
__global__ void helloFromGPU() {
    // Only lane 5 of the block prints.
    if (threadIdx.x == 5)
        printf("Hello World from GPU thread %d\n", threadIdx.x);
}
int main(int argc, char **argv) {
    printf("Hello from cpu\n");
    // Ten threads in one block; only thread 5 actually prints.
    helloFromGPU<<<1, 10>>>();
    // Tearing the context down also flushes the device-side printf buffer.
    cudaDeviceReset();
    return 0;
}
12,156 | #include <cuda.h>
#include <iostream>
#include <stdio.h>
#define N 1024
int a[N][N],b[N][N],c[N][N];
using namespace std;
// Element-wise n-by-n matrix add: one thread per cell, y selects the row.
__global__ void addMatrix(int a[][N], int b[][N], int c[][N], int n){
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= n || col >= n)   // guard the ragged boundary blocks
        return;
    c[row][col] = a[row][col] + b[row][col];
}
// Fill an n-by-n matrix with pseudo-random values in [0, 100].
void random_int(int a[][N], int n)
{
    for (int i = 0; i < n; ++i)
        for (int j = 0; j < n; ++j)
            a[i][j] = rand() % 101;
}
// Add two N x N matrices on the GPU and copy the result back.
// Fix vs. original: dim3 dimBlock(64, 64) requests 4096 threads per block,
// four times the 1024-thread hardware maximum, so every launch silently
// failed and `c` was never computed. Use 32x32 blocks with a ceil-div grid,
// and report launch errors.
int main(void)
{
    int (*pA)[N], (*pB)[N], (*pC)[N];
    random_int(a, N);
    random_int(b, N);
    cudaMalloc((void**)&pA, (N*N)*sizeof(int));
    cudaMalloc((void**)&pB, (N*N)*sizeof(int));
    cudaMalloc((void**)&pC, (N*N)*sizeof(int));
    cudaMemcpy(pA, a, (N*N)*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(pB, b, (N*N)*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(pC, c, (N*N)*sizeof(int), cudaMemcpyHostToDevice);
    dim3 dimBlock(32, 32);   // 1024 threads: the per-block hardware limit
    dim3 dimGrid((N + dimBlock.x - 1) / dimBlock.x,
                 (N + dimBlock.y - 1) / dimBlock.y);
    addMatrix<<<dimGrid, dimBlock>>>(pA, pB, pC, N);
    cudaError_t err = cudaGetLastError();   // catch bad launch configurations
    if (err != cudaSuccess)
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
    cudaMemcpy(c, pC, (N*N)*sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(pA);
    cudaFree(pB);
    cudaFree(pC);
    printf("\n");
    return 0;
}
|
12,157 | #include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#define array_size 268435456
// out = a + b, element-wise over n floats. Strides by blockDim.x only, i.e.
// written for a single-block launch: that one block walks the whole array.
__global__ void vector_add(float *out, float *a, float *b, int n){
    for (int i = threadIdx.x; i < n; i += blockDim.x) {
        out[i] = a[i] + b[i];
    }
}
extern double mysecond();
// Benchmark: time H2D copy, one-block vector add, and D2H copy over
// array_size (2^28) floats. Fix vs. original: the three 1 GiB host mallocs
// and three device mallocs were completely unchecked — failure surfaced as a
// segfault or silently wrong timings.
int main(){
    float *a, *b, *out;
    float *d_a, *d_b, *d_out;
    double t;
    // Allocate host memory (checked: ~3 GiB total).
    a = (float*)malloc(sizeof(float) * array_size);
    b = (float*)malloc(sizeof(float) * array_size);
    out = (float*)malloc(sizeof(float) * array_size);
    if (a == NULL || b == NULL || out == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    // Initialize array
    for(int i = 0; i < array_size; i++){
        a[i] = 1.0f;
        b[i] = 2.0f;
    }
    // Allocate device memory (checked).
    if (cudaMalloc((void**)&d_a,   sizeof(float)*array_size) != cudaSuccess ||
        cudaMalloc((void**)&d_b,   sizeof(float)*array_size) != cudaSuccess ||
        cudaMalloc((void**)&d_out, sizeof(float)*array_size) != cudaSuccess) {
        fprintf(stderr, "device allocation failed\n");
        return 1;
    }
    t = mysecond();
    // Transfer data from host to device memory
    cudaMemcpy(d_a,a, sizeof(float)*array_size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b,b, sizeof(float)*array_size, cudaMemcpyHostToDevice);
    t = (mysecond() - t);
    printf ("\nElapsed time for copy from host to device = %g\n", t);
    int block_size = 256;
    t = mysecond();
    // Single block on purpose: this experiment measures one-block throughput.
    vector_add<<<1,block_size>>>(d_out, d_a, d_b, array_size);
    cudaDeviceSynchronize();   // kernel is async: wait before stopping the clock
    t = (mysecond() - t);
    printf ("\nElapsed time for vector addition in 1 block = %g\n", t);
    t = mysecond();
    // Transfer data from device to host memory
    cudaMemcpy(out, d_out, sizeof(float)*array_size, cudaMemcpyDeviceToHost);
    t = (mysecond() - t);
    printf ("\nElapsed time for copy from device to host = %g\n", t);
    // Deallocate device memory
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_out);
    // Deallocate host memory
    free(a);
    free(b);
    free(out);
    printf ("\nBLock size (number of threads): %d \n", block_size);
    printf ("\nNumber of blocks : 1 \n");
    return 0;
}
// Wall-clock time in seconds with microsecond resolution.
double mysecond()
{
    struct timeval tp;
    gettimeofday(&tp, NULL);   // NULL timezone: obsolete parameter, unused
    return (double)tp.tv_sec + (double)tp.tv_usec * 1.e-6;
}
|
12,158 | #include "includes.h"
// Element-wise C = A + B with a bounds guard for the ragged final block.
__global__ void cuda_kernel(double *A, double *B, double *C, int arraySize) {
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= arraySize)
        return;
    C[tid] = A[tid] + B[tid];
}
12,159 | #include "includes.h"
// Pack the sign bits of 32 floats into one word: bit i is set when
// array[i] >= 0 (binarization step for XNOR-style networks).
__device__ unsigned int concatenate(float* array)
{
    unsigned int bits = 0;
    for (int i = 0; i < 32; i++) {
        if (array[i] >= 0)
            bits |= (1u << i);
    }
    return bits;
}
// One thread per 32-float row: binarize row i of `a` into b[i].
__global__ void concatenate_rows_kernel(float *a, unsigned int *b, int size)
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < size)
        b[row] = concatenate(&a[row * 32]);
}
// Device-side integer addition helper exposed with C linkage for the runtime ABI.
extern "C"
__device__
int rt_add(int x, int y) {
    return y + x;
}
// Allocate a zeroed device-side array of `length` elements of `elemSize`
// bytes each. Layout: an 8-byte header ({int32 length at +0, int32 elemSize
// at +4}) precedes the data; the returned pointer addresses the first data
// byte. Returns NULL when the device heap is exhausted.
// Fix vs. original: device malloc's return value was never checked, so heap
// exhaustion became a NULL dereference inside memset.
extern "C"
__device__
void *_rt_alloc_array(int elemSize, int length) {
    int buffSz = elemSize * length + 8;
    char *buff = (char *)malloc(buffSz);
    if (buff == NULL)           // device heap exhausted
        return NULL;
    memset(buff, 0, buffSz);
    *((int *)&buff[0]) = length;
    *((int *)&buff[4]) = elemSize;
    return (void *)&buff[8];
}
|
#ifndef PARTICLE_GPU_CU
#define PARTICLE_GPU_CU
// Plain-data particle record, shareable between host and device code.
struct Particle_gpu
{
    int nId;              // particle identifier
    int x, y, z;          // presumably integer cell/grid coordinates — TODO confirm with users
    bool _bCollided[3];   // NOTE(review): looks like per-axis collision flags — verify
    float _currRho;       // presumably current density (rho) — name-based guess
    float _pos[3];        // position components — assumption from name; confirm units
    float _vel[3];        // velocity components — assumption from name
    float _color[3];      // presumably RGB color for rendering
    float x2d;            // NOTE(review): likely projected/screen-space coordinates — confirm
    float y2d;
};
#endif
12,162 | /*Includes*/
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <sys/time.h>
/*Macros*/
#define VERTICES (1024) //number of vertices for graph
#define MIN_EDGES_VERTEX (25) //minimum no. of edges for each vertex
#define MAX_DIST (1000) //maximum possible distance
#define INF_DIST (10000000) //Initial "infinite" distance value for each node
#define BLOCKSIZE (256) //Threads per block
/*Variable to calculate time taken*/
struct timeval initial, final;
/*Function Prototypes*/
/*This function initializes array to a int value*/
void Initialize_Array(int* Input_Array,int Value);
/*This function initializes array to a float value*/
void Initialize_Dist_Array(float* Input_Array,float Value);
/*This function initializes graph*/
void Initialize_Graph(float* Graph,float Value);
/*This function assigns random distance between nodes*/
void Set_Graph_Dist_Random(float* Graph, int* Edges_Per_Vertex);
/*This function finds the next closest node serially*/
int Shortest_Distance_Node(float* Node_Shortest_Dist, int* Completed_Node);
/*This function finds the shortest path from source node to all nodes serially*/
void Shortest_Path_Computation_Serial(float* Graph, float* Node_Shortest_Dist, int* Parent_Node, int* Completed_Node, int Source,int* Edges_Per_Vertex);
/*This function calculates the time difference*/
double timetaken();
/*This function calculates the next closest node parallely*/
//__global__ void Shortest_Distance_Node_CUDA(float* Node_Shortest_Dist, int* Completed_Node,int* closest_node);
//This function was working working for 1 block of 1 thread which had a lot of overhead,
//couldn't make it work with multiple threads. For multiple threads, it gives an incorrect result.
/*This fuction calculates the shortest path from source node to all nodes parallely*/
__global__ void Shortest_Path_Computation_CUDA(float* Graph, float* Node_Shortest_Dist, int* Parent_Node, int* Completed_Node, int* closest_node);
/*Run Dijkstra serially on the CPU, then again with GPU-accelerated edge
 *relaxation, and compare results and timings.
 *Fix vs. original: the kernel grid was written as
 *(VERTICES+BLOCKSIZE-1/BLOCKSIZE) — by operator precedence that is
 *VERTICES+BLOCKSIZE-1 blocks, not the intended ceil(VERTICES/BLOCKSIZE).*/
int main(){
    printf("Running Dijkstra Algorithm\n");
    srand(8421);
    /*Byte sizes for the node arrays and the dense adjacency matrix*/
    int Integer_Array = VERTICES * sizeof(int);
    int Float_Array = VERTICES * sizeof(float);
    int64_t Size_Graph = VERTICES * VERTICES * sizeof(float);
    /*Host Memory Allocation*/
    float* Graph = (float*)malloc(Size_Graph);
    float* Node_Shortest_Dist_1 = (float*)malloc(Float_Array);
    float* Node_Shortest_Dist_2 = (float*)malloc(Float_Array);
    int* Parent_Node = (int*)malloc(Integer_Array);
    int* Edges_Per_Vertex = (int*)malloc(Integer_Array);
    int* Completed_Node = (int*)malloc(Integer_Array);
    int* closest_node = (int*)malloc(sizeof(int));
    /*Device Memory Allocation*/
    float* cuda_Graph;
    float* cuda_Node_Shortest_Dist;
    int* cuda_Parent_Node;
    int* cuda_Completed_Node;
    int* cuda_closest_node;
    cudaMalloc((void**)&cuda_Graph,Size_Graph);
    cudaMalloc((void**)&cuda_Node_Shortest_Dist,Float_Array);
    cudaMalloc((void**)&cuda_Parent_Node,Integer_Array);
    cudaMalloc((void**)&cuda_Completed_Node,Integer_Array);
    cudaMalloc((void**)&cuda_closest_node,sizeof(int));
    printf("\nVertices: %d", VERTICES);
    printf("\nThreads Per Block: %d",BLOCKSIZE);
    /*Random source vertex*/
    int src=(rand()%VERTICES);
    /*CPU reference pass (also builds the random graph); timed.
     *Note: Edges_Per_Vertex is freed inside this routine.*/
    gettimeofday(&initial,NULL);
    printf("\nPerforming CPU compuatation");
    Shortest_Path_Computation_Serial(Graph,Node_Shortest_Dist_1,Parent_Node,Completed_Node,src,Edges_Per_Vertex);
    gettimeofday(&final,NULL);
    double diff=timetaken();
    printf("\nTime taken for logic computation by CPU in seconds is %0.6f",diff);
    /*Reset per-run state before the GPU pass*/
    Initialize_Array(Parent_Node,(int)-1);
    Initialize_Array(Completed_Node,(int)0);
    Initialize_Dist_Array(Node_Shortest_Dist_2,INF_DIST);
    Node_Shortest_Dist_2[src]=0;
    closest_node[0]=-1;
    /*Host-to-device transfer, timed*/
    gettimeofday(&initial,NULL);
    cudaMemcpy(cuda_Graph, Graph, Size_Graph, cudaMemcpyHostToDevice);
    cudaMemcpy(cuda_Node_Shortest_Dist, Node_Shortest_Dist_2, Float_Array, cudaMemcpyHostToDevice);
    cudaMemcpy(cuda_Parent_Node, Parent_Node, Integer_Array, cudaMemcpyHostToDevice);
    cudaMemcpy(cuda_Completed_Node, Completed_Node, Integer_Array, cudaMemcpyHostToDevice);
    cudaMemcpy(cuda_closest_node,closest_node, sizeof(int), cudaMemcpyHostToDevice);
    gettimeofday(&final,NULL);
    double diff2=timetaken();
    printf("\nTime taken for host to device transfer in seconds is %0.6f",diff2);
    /*GPU pass: the next closest node is still picked serially on the host
     *(see the original notes about the failed parallel version); only the
     *edge relaxation runs on the GPU.*/
    gettimeofday(&initial,NULL);
    const int grid_blocks = (VERTICES + BLOCKSIZE - 1) / BLOCKSIZE;  /*ceil-div, properly parenthesized*/
    for(int i=0;i<VERTICES;i++){
        closest_node[0]=Shortest_Distance_Node(Node_Shortest_Dist_2, Completed_Node);
        cudaMemcpy(cuda_closest_node,closest_node, sizeof(int), cudaMemcpyHostToDevice);
        Shortest_Path_Computation_CUDA <<<grid_blocks,BLOCKSIZE>>>(cuda_Graph,cuda_Node_Shortest_Dist,cuda_Parent_Node,cuda_Completed_Node,cuda_closest_node);
        cudaMemcpy(Node_Shortest_Dist_2,cuda_Node_Shortest_Dist, Float_Array, cudaMemcpyDeviceToHost);
    }
    gettimeofday(&final,NULL);
    double diff1=timetaken();
    printf("\nTime taken for logic computation by GPU in seconds is %0.6f",diff1);
    /*Device-to-host transfer, timed*/
    gettimeofday(&initial,NULL);
    cudaMemcpy(Node_Shortest_Dist_2,cuda_Node_Shortest_Dist, Float_Array, cudaMemcpyDeviceToHost);
    cudaMemcpy(Parent_Node, cuda_Parent_Node, Integer_Array, cudaMemcpyDeviceToHost);
    cudaMemcpy(Completed_Node, cuda_Completed_Node, Integer_Array, cudaMemcpyDeviceToHost);
    gettimeofday(&final,NULL);
    double diff3=timetaken();
    printf("\nTime taken for memory transfer from device to host in seconds is %0.6f",diff3);
    printf("\nTotal time taken for memory transfer is %0.6f",(diff2+diff3));
    /*Compare CPU and GPU result*/
    int match=0;
    for(int k=0;k<VERTICES;k++){
        if(Node_Shortest_Dist_1[k]==Node_Shortest_Dist_2[k]){
            match++;
        }
    }
    if(match==VERTICES){
        printf("\nThe cpu and gpu results match\n");
    }
    /*Free host memory (Edges_Per_Vertex was already freed by the serial pass)*/
    free(Graph);
    free(Node_Shortest_Dist_1);
    free(Node_Shortest_Dist_2);
    free(Parent_Node);
    free(Completed_Node);
    free(closest_node);
    /*Free device memory*/
    cudaFree(cuda_Graph);
    cudaFree(cuda_Node_Shortest_Dist);
    cudaFree(cuda_Parent_Node);
    cudaFree(cuda_Completed_Node);
    cudaFree(cuda_closest_node);
}
/*Fill the dense VERTICES x VERTICES adjacency matrix with a single value*/
void Initialize_Graph(float* Graph,float Value){
    for(int i=0;i<VERTICES;i++)
        for(int j=0;j<VERTICES;j++)
            Graph[i*VERTICES + j] = Value;
}
/*Set every entry of a VERTICES-long int array to Value*/
void Initialize_Array(int* Input_Array,int Value){
    for(int i=0;i<VERTICES;i++)
        Input_Array[i]=Value;
}
/*Set every entry of a VERTICES-long float array to Value*/
void Initialize_Dist_Array(float* Input_Array,float Value){
    for(int i=0;i<VERTICES;i++)
        Input_Array[i]=Value;
}
/*Ths function assigns random distance between nodes with a minimum of 25 edges per vertex*/
// Phase 1 guarantees connectivity: each vertex i (1..V-1) links to one random
// earlier vertex, so the graph forms a connected random tree. Phase 2 tops up
// every vertex to MIN_EDGES_VERTEX edges. Distances are integers 1..MAX_DIST
// stored as float, and the matrix is kept symmetric (undirected graph).
void Set_Graph_Dist_Random(float* Graph, int* Edges_Per_Vertex){
    int i,Current_Edges,Random_Vertex;
    float Random_Dist;
    for(i=1;i<VERTICES;i++){
        Random_Vertex = (rand() % i);             // attach to some earlier vertex
        Random_Dist =(rand() % MAX_DIST) + 1;
        Graph[Random_Vertex*VERTICES + i] = Random_Dist;
        Graph[Random_Vertex + i*VERTICES] = Random_Dist;   // mirror for symmetry
        Edges_Per_Vertex[i] += 1;
        Edges_Per_Vertex[Random_Vertex] += 1;
    }
    for(i=0;i<VERTICES;i++){
        Current_Edges = Edges_Per_Vertex[i];
        // Retry until vertex i reaches the minimum degree; a candidate is
        // rejected when it is i itself or the edge already exists.
        while(Current_Edges < MIN_EDGES_VERTEX){
            Random_Vertex = (rand() % VERTICES);
            Random_Dist = (rand() % MAX_DIST) + 1;
            if((Random_Vertex != i)&&(Graph[Random_Vertex + i*VERTICES] == 0)){
                Graph[Random_Vertex + i*VERTICES] = Random_Dist;
                Graph[Random_Vertex*VERTICES + i] = Random_Dist;
                // NOTE(review): only Edges_Per_Vertex[i] is incremented here,
                // not the partner's count — presumably intentional slack; verify.
                Edges_Per_Vertex[i] += 1;
                Current_Edges += 1;
            }
        }
    }
}
/*This function calculates the shortest path serially*/
// Classic O(V^2) Dijkstra from Source over the dense matrix. Also performs
// one-time setup: builds the random graph and initializes all state arrays.
// Side effect: frees Edges_Per_Vertex (the caller must not reuse it).
void Shortest_Path_Computation_Serial(float* Graph, float* Node_Shortest_Dist, int* Parent_Node, int* Completed_Node, int Source,int* Edges_Per_Vertex){
    /*Initialize array and graph*/
    Initialize_Graph(Graph,(float)0);
    Initialize_Array(Edges_Per_Vertex,(int)0);
    Set_Graph_Dist_Random(Graph,Edges_Per_Vertex);
    free(Edges_Per_Vertex);   // degree counts are only needed during graph construction
    Initialize_Array(Parent_Node,(int)-1);
    Initialize_Array(Completed_Node,(int)0);
    Initialize_Dist_Array(Node_Shortest_Dist,INF_DIST);
    Node_Shortest_Dist[Source]=0;
    int i,j;
    for(i=0;i<VERTICES;i++){
        /*This function finds the next closest node and returns it*/
        int current_node=Shortest_Distance_Node(Node_Shortest_Dist,Completed_Node);
        Completed_Node[current_node]=1;
        for(j=0;j<VERTICES;j++){
            // NOTE(review): new_distance is an int while distances are floats;
            // edge weights are integral (1..MAX_DIST) so no precision is lost
            // here, but float would be the safer type — confirm and align with
            // the CUDA kernel.
            int new_distance=Node_Shortest_Dist[current_node] + Graph[current_node*VERTICES + j];
            // Relax only real edges (0 means "no edge") toward unfinished nodes.
            if ((Completed_Node[j] != 1) && (Graph[current_node*VERTICES + j] != (float)(0)) && (new_distance < Node_Shortest_Dist[j])){
                Node_Shortest_Dist[j] = new_distance;
                Parent_Node[j] = current_node;
            }
        }
    }
}
/*This function calculates the shortest path from source node to all other nodes in parallel*/
/*Parallel edge-relaxation step of Dijkstra: one thread per destination vertex
 *relaxes the edge closest_node -> tid.
 *Fix vs. original: the guard was `tid > VERTICES`, so tid == VERTICES slipped
 *through and indexed one element past every per-vertex array (out of bounds).*/
__global__ void Shortest_Path_Computation_CUDA(float* Graph, float* Node_Shortest_Dist, int* Parent_Node, int* Completed_Node, int* closest_node){
    int tid=blockIdx.x*blockDim.x+threadIdx.x;
    if(tid>=VERTICES)
        return;
    int current_node=closest_node[0];
    /*Mark the freshly selected node as finalized (every in-range thread writes
     *the same value, so the redundancy is harmless).*/
    Completed_Node[current_node]=1;
    /*Edge weights are integral-valued floats (1..MAX_DIST sums), so float
     *keeps the arithmetic exact while matching the array element type.*/
    float new_distance = Node_Shortest_Dist[current_node] + Graph[current_node*VERTICES + tid];
    /*Relax only real edges (0 means "no edge") toward unfinished vertices.*/
    if ((Completed_Node[tid] != 1) && (Graph[current_node*VERTICES + tid] != (float)(0)) && (new_distance < Node_Shortest_Dist[tid])){
        Node_Shortest_Dist[tid] = new_distance;
        Parent_Node[tid] = current_node;
    }
}
/*This function calculates the shortest distance node serially*/
int Shortest_Distance_Node(float* Node_Shortest_Dist, int* Completed_Node){
int node_distance=INF_DIST;
int node=-1;
int i;
for(i=0;i<VERTICES;i++){
if((Node_Shortest_Dist[i]<node_distance) && (Completed_Node[i]==0)){
node_distance=Node_Shortest_Dist[i];
node=i;
}
}
Completed_Node[node]=1;
return node;
}
/* We tried different ways to find next closest node in parallel using multiple threads but failed to do that
*There is a race condition between threads to modify same variable node_distance and node, so we tried to find
*smallest distance for each block and then smallest distance among all block, but it gives an incorrect result
*The below function works correctly for 1 block of 1 thread*/
/*__global__ void Shortest_Distance_Node_CUDA(float* Node_Shortest_Dist,int* Completed_Node,int* closest_node){
int node_distance = INF_DIST;
int node = -1;
int tid= threadIdx.x;
int gid=blockIdx.x*blockDim.x+threadIdx.x;
int i;
for (i = 0; i < VERTICES; i++) {
if((Node_Shortest_Dist[gid] < node_distance[tid]) && (Completed_Node[gid] == 0)){
node_distance= Node_Shortest_Dist[gid];
node = gid;
}
}
Completed_Node[node]=1;
closest_node[0]=node;
}
*/
/*Elapsed seconds between the global `initial` and `final` timestamps*/
double timetaken(){
    double start_us = (double)initial.tv_sec*1000000 + (double)initial.tv_usec;
    double stop_us  = (double)final.tv_sec*1000000 + (double)final.tv_usec;
    return (stop_us - start_us)/1000000;
}
|
12,163 | #include <cuda_runtime.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>
// Abort with file/line context whenever a CUDA runtime call reports an error.
// Wrap every cudaXxx() call site: CHECK(cudaMalloc(...)).
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
cudaGetErrorString(error)); \
exit(1); \
} \
}
// Kernel that reports a "sleep" from each thread.
// NOTE(review): the loop `for (i = sleep_time; i < sleep_time; i++)` starts at
// its own bound, so the body never executes and `tmp` stays 0 — the kernel
// does no work besides the printf. Presumably a busy-wait proportional to
// sleep_time was intended; confirm the intended bounds before changing it.
__global__ void gpu_sleep(const int sleep_time)
{
int tmp = 0;
for (int i=sleep_time; i<sleep_time; i++)
tmp += i;
printf("GPU job threadId (%d) done, sleep for %d seconds.\n", threadIdx.x, sleep_time);
}
// Launch the gpu_sleep kernel on device 0 while the host sleeps for the same
// duration. argv[1] = sleep seconds (default 1), argv[2] = block size (default 1).
int main(int argc, char **argv)
{
    // Device discovery and selection.
    int dev_count = 0;
    int dev = 0;
    cudaDeviceProp dprop;
    CHECK(cudaGetDeviceCount(&dev_count));
    CHECK(cudaGetDeviceProperties(&dprop, dev));
    printf("There are %d devices in the system. \n", dev_count);
    printf("%s start at device %d: %s \n", argv[0], dev, dprop.name);
    CHECK(cudaSetDevice(dev));
    // Optional command-line overrides.
    int sleep_time = (argc > 1) ? atoi(argv[1]) : 1;
    int blocksize  = (argc > 2) ? atoi(argv[2]) : 1;
    // Execution configuration: one block of `blocksize` threads.
    dim3 block(blocksize);
    dim3 grid(1);
    // Fire the kernel, then sleep on the host for the same number of seconds.
    gpu_sleep<<<grid, block>>>(sleep_time);
    sleep(sleep_time);
    // Reset tears down the context (and flushes device printf output).
    CHECK(cudaDeviceReset());
    return EXIT_SUCCESS;
}
|
12,164 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <math.h>
typedef long ulint;
typedef long long ulint64;
int banyakdata = 1024;
int dimensigrid = 8;
int dimensiblok = 128;
// Modular exponentiation a^b mod c via square-and-multiply; intermediate
// products are held in 64-bit to avoid overflow.
typedef long ulint;          // identical re-declarations of the file-level aliases
typedef long long ulint64;
ulint modexp(ulint a, ulint b, ulint c) {
    ulint64 base = a;
    ulint64 acc = 1;
    while (b != 0) {
        if (b % 2 == 1) {            // odd exponent: fold one factor into the result
            acc = acc * base % c;
            b -= 1;
        }
        b /= 2;
        if (b != 0)                  // skip the final (unused) squaring
            base = base * base % c;
    }
    return acc;
}
// Rabin encryption of a single value: *res = m^2 mod n.
typedef long ulint;   // identical re-declaration of the file-level alias
void enkripsi(ulint m, ulint n, ulint *res) {
    const ulint square = m * m;
    *res = square % n;
}
// Rabin decryption: recover two candidate square roots of c modulo n = p*q
// via CRT (requires p, q ≡ 3 mod 4 and pi*p + qi*q = 1 from extendedEuclid).
// res[0] and res[1] receive the roots normalized into [0, n).
// Fix vs. original: C++ '%' keeps the dividend's sign, so the second root
// (a difference) could come out negative and be printed as a bogus negative
// "plaintext"; both results are now folded into the canonical residue range.
void dekripsi(ulint c, ulint p, ulint q, ulint pi, ulint qi, ulint n, ulint *res) {
    ulint mp = modexp(c, (p+1)/4, p);   // square root of c modulo p
    ulint mq = modexp(c, (q+1)/4, q);   // square root of c modulo q
    ulint r1 = (pi * p * mq + qi * q * mp) % n;
    ulint r2 = (pi * p * mq - qi * q * mp) % n;
    *res     = (r1 + n) % n;            // fold possible negatives into [0, n)
    *(res+1) = (r2 + n) % n;
}
// Host-side "kernel": encrypt all banyakdata messages sequentially.
void kernelenk(ulint *m, ulint n, ulint *res) {
    for (int i = 0; i < banyakdata; ++i)
        enkripsi(m[i], n, &res[i]);
}
// Host-side "kernel": decrypt all banyakdata ciphertexts sequentially.
// Each decryption yields two candidate roots, hence the 2*i output stride.
void kerneldek(ulint *c, ulint p, ulint q, ulint pi, ulint qi, ulint n, ulint *res) {
    for (int i = 0; i < banyakdata; ++i)
        dekripsi(c[i], p, q, pi, qi, n, &res[2*i]);
}
// Time the bulk encryption and print the duration in milliseconds.
// Fix vs. original: clock() returns ticks of CLOCKS_PER_SEC per second; the
// old code divided raw ticks by 1000, which is only milliseconds when
// CLOCKS_PER_SEC happens to be 1e6. Convert explicitly.
void enkripsiCUDA(ulint *m, ulint n, ulint *res) {
    clock_t begin = clock();
    kernelenk(m,n,res);
    clock_t end = clock();
    double ms = (double)(end - begin) * 1000.0 / CLOCKS_PER_SEC;
    printf("Durasi : %f milliseconds\n", ms);
}
// Time the bulk decryption and print the duration in milliseconds.
// Fix vs. original: same CLOCKS_PER_SEC assumption as enkripsiCUDA — raw
// clock() ticks were divided by 1000 instead of being converted properly.
void dekripsiCUDA(ulint *c, ulint p, ulint q, ulint pi, ulint qi, ulint n, ulint *res2) {
    clock_t begin = clock();
    kerneldek(c,p,q,pi,qi,n,res2);
    clock_t end = clock();
    double ms = (double)(end - begin) * 1000.0 / CLOCKS_PER_SEC;
    printf("Durasi : %f milliseconds\n", ms);
}
/* Recursive extended Euclid: returns a malloc'd int[3] = {d, x, y} with
 * d = gcd(a, b) and a*x + b*y = d. The caller owns (and must free) the array.
 * Fix vs. original: a fresh triple was malloc'd at EVERY recursion level and
 * immediately overwritten by the recursive result, leaking all but the
 * innermost allocation; now only the base case allocates. */
int *extendedEuclid (int a, int b){
    if (b == 0){
        int *dxy = (int *)malloc(sizeof(int) * 3);
        dxy[0] = a; dxy[1] = 1; dxy[2] = 0;
        return dxy;
    }
    /* Reuse the triple returned by the recursive call in place. */
    int *dxy = extendedEuclid(b, a % b);
    int x = dxy[1];
    int y = dxy[2];
    dxy[1] = y;
    dxy[2] = x - (a / b) * y;
    return dxy;
}
// Fill the message buffer with byte-sized values; fixed seed keeps runs reproducible.
void initenkripsi(ulint *m){
    srand(2018);
    for (int i = 0; i < banyakdata; ++i)
        m[i] = rand() % 256;
}
// Demo driver: generate random byte messages, Rabin-encrypt and decrypt them
// with fixed primes p=13, q=23 (n=299), printing samples at each stage.
// Fix vs. original: the triple returned by extendedEuclid was never freed.
int main(){
    ulint *m, p, q, pi, qi, n, *res, *res2;
    m = (ulint*)malloc(banyakdata * sizeof(ulint));
    res = (ulint*)malloc(banyakdata * sizeof(ulint));
    res2 = (ulint*)malloc(banyakdata * 2 * sizeof(ulint));   // two roots per message
    p = 13;
    q = 23;
    n = 299;
    // Bezout coefficients: pi*p + qi*q == 1, needed by CRT decryption.
    int *invers = extendedEuclid(p,q);
    pi = invers[1];
    qi = invers[2];
    free(invers);   // extendedEuclid returns heap memory owned by the caller
    initenkripsi(m);
    printf("<<<<<<<<<<<<<<Pesan Asli>>>>>>>>>>>>>>>\n");
    for (int i = 0; i < 4; i++) {
        printf("m[%d] = %ld\n", i, m[i]);
    }
    printf("m[...]\n");
    printf("m[%d] = %ld\n", banyakdata-1, m[banyakdata-1]);
    enkripsiCUDA(m,n,res);
    printf("<<<<<<<<<<<<<<Hasil Enkripsi>>>>>>>>>>>>>>>\n");
    for (int i = 0; i < 4; i++) {
        printf("c[%d] = %ld\n", i, res[i]);
    }
    printf("c ...\n");
    printf("c[%d] = %ld\n", banyakdata-1, res[banyakdata-1]);
    dekripsiCUDA(res,p,q,pi,qi,n,res2);
    printf("<<<<<<<<<<<<<<Hasil Dekripsi>>>>>>>>>>>>>>>\n");
    for (int i = 0; i < 4; i++) {
        printf("m[%d] = %ld m[%d] = %ld\n", 2*i, res2[2*i], 2*i+1, res2[2*i+1]);
    }
    printf("c ...\n");
    printf("c[%d] = %ld c[%d] = %ld\n", banyakdata * 2-2, res2[banyakdata * 2-2], banyakdata *2-1,res2[banyakdata*2-1]);
    free(m);
    free(res);
    free(res2);
    return 0;
}
12,165 | #include "includes.h"
// Per-pixel color transform over a width x height image with nChannels
// interleaved bytes per pixel (BGR order: +0=B, +1=G, +2=R), one thread per pixel.
// NOTE(review): despite the name, the coefficients are the classic sepia-tone
// matrix, not a luminance conversion — confirm intent with callers.
// Improvement vs. original: each channel expression was evaluated twice (once
// for the clamp test, once for the store); compute once and clamp with fminf.
__global__ void grayscale(unsigned char *src, unsigned char *dest, int width, int height, int nChannels) {
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    if (y >= height || x >= width)
        return;
    const int pos = (y * width + x) * nChannels;
    const float r = src[pos + 2];
    const float g = src[pos + 1];
    const float b = src[pos + 0];
    const float outR = 0.393f * r + 0.769f * g + 0.189f * b;
    const float outG = 0.349f * r + 0.686f * g + 0.168f * b;
    const float outB = 0.272f * r + 0.534f * g + 0.131f * b;
    // Clamp to 255 then truncate to byte — identical to the original ternaries.
    dest[pos + 2] = (unsigned char)fminf(outR, 255.0f);
    dest[pos + 1] = (unsigned char)fminf(outG, 255.0f);
    dest[pos + 0] = (unsigned char)fminf(outB, 255.0f);
}
12,166 | // FPS with local density condition
// Farthest-point sampling with a local-density acceptance test (single batch,
// single block expected: all cooperation is via block-shared memory).
// A candidate (the current farthest point `old`) is only emitted as a sample
// when at least `minnum` points lie within radius r of it; otherwise it is
// skipped and the next farthest point is tried.
// n = input point count, m = samples requested, dataset = (n,3) xyz,
// temp = per-point running min squared distance to the sample set,
// idxs/cores = output indices and coordinates of accepted samples.
// NOTE(review): `j` only advances on acceptance — if fewer than m points ever
// satisfy the density test the while loop never terminates; confirm callers
// guarantee enough dense points. Parameter b (batch) is unused here.
__global__ void farthestpointsamplingKernel (int b,int n,int m, float r, int minnum,const float * dataset,float * temp,int * idxs, float * cores){
    if (m<=0)
        return;
    const int BlockSize=1024;
    __shared__ float dists[BlockSize];       // per-thread best (max-of-min) distance
    __shared__ int dists_i[BlockSize];       // index achieving that distance
    __shared__ int num_neighbor[BlockSize];  // per-thread neighbor counts within r
    const int BufferSize=3072;
    __shared__ float buf[BufferSize*3];      // shared-memory cache of the first 3072 points
    int old=0; // The last sampled point id
    for (int j=threadIdx.x;j<n;j+=blockDim.x){
        temp[j]=1e6;                         // "infinite" initial distance to the sample set
    }
    for (int j=threadIdx.x;j<min(BufferSize,n)*3;j+=blockDim.x){
        buf[j]=dataset[j];
    }
    __syncthreads();
    int j=0;
    while (j<m){
        num_neighbor[threadIdx.x]=0;
        int besti=0;
        float best=-1;
        float x1=dataset[old*3+0];
        float y1=dataset[old*3+1];
        float z1=dataset[old*3+2];
        // Each thread scans a strided slice: update running min distances,
        // count r-neighbors of `old`, and track the farthest point seen.
        for (int k=threadIdx.x;k<n;k+=blockDim.x){
            float td=temp[k];
            float x2,y2,z2;
            if (k<BufferSize){               // cached points come from shared memory
                x2=buf[k*3+0];
                y2=buf[k*3+1];
                z2=buf[k*3+2];
            }else{
                x2=dataset[k*3+0];
                y2=dataset[k*3+1];
                z2=dataset[k*3+2];
            }
            float d=(x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1);
            if (d<=r*r){
                num_neighbor[threadIdx.x]++;
            }
            float d2=min(d,td);
            if (d2!=td)
                temp[k]=d2;
            if (d2>best){
                best=d2;
                besti=k;
            }
        }
        dists[threadIdx.x]=best;
        dists_i[threadIdx.x]=besti;
        // Tree reduction over the block: argmax of the per-thread best distances.
        for (int u=0;(1<<u)<blockDim.x;u++){
            __syncthreads();
            if (threadIdx.x<(blockDim.x>>(u+1))){
                int i1=(threadIdx.x*2)<<u;
                int i2=(threadIdx.x*2+1)<<u;
                if (dists[i1]<dists[i2]){
                    dists[i1]=dists[i2];
                    dists_i[i1]=dists_i[i2];
                }
            }
        }
        // Second tree reduction: total neighbor count of `old` lands in num_neighbor[0].
        for (int u=0;(1<<u)<blockDim.x;u++){
            __syncthreads();
            if (threadIdx.x<(blockDim.x>>(u+1))){
                int i1=(threadIdx.x*2)<<u;
                int i2=(threadIdx.x*2+1)<<u;
                num_neighbor[i1] = num_neighbor[i1] + num_neighbor[i2];
            }
        }
        __syncthreads();
        if (num_neighbor[0]>=minnum){
            // Dense enough: thread 0 records the sample; everyone advances j
            // (uniform branch — num_neighbor[0] is identical block-wide).
            if (threadIdx.x==0){
                idxs[j]=old;
                cores[j*3+0]=dataset[old*3+0];
                cores[j*3+1]=dataset[old*3+1];
                cores[j*3+2]=dataset[old*3+2];
            }
            j++;
        }
        old=dists_i[0];                      // next candidate: current farthest point
        __syncthreads();
    }
}
// Original code of FPS in PointNet++
// Original code of FPS in PointNet++
// Plain farthest-point sampling over b batches of n points, m samples each.
// One block handles one batch at a time (grid-stride over batches); threads
// of the block cooperate through shared memory. dataset = (b,n,3) xyz,
// temp = (gridDim.x, n) scratch of running min squared distances,
// idxs = (b,m) output sample indices. Point 0 seeds each batch.
__global__ void farthestpointsamplingallKernel(int b,int n,int m,const float * __restrict__ dataset,float * __restrict__ temp,int * __restrict__ idxs){
    if (m<=0)
        return;
    const int BlockSize=512;
    __shared__ float dists[BlockSize];   // per-thread best (max-of-min) distance
    __shared__ int dists_i[BlockSize];   // index achieving that distance
    const int BufferSize=3072;
    __shared__ float buf[BufferSize*3];  // shared-memory cache of the batch's first 3072 points
    for (int i=blockIdx.x;i<b;i+=gridDim.x){
        int old=0;                       // seed sample: point 0
        if (threadIdx.x==0)
            idxs[i*m+0]=old;
        for (int j=threadIdx.x;j<n;j+=blockDim.x){
            temp[blockIdx.x*n+j]=1e38;   // "infinite" initial distance to the sample set
        }
        for (int j=threadIdx.x;j<min(BufferSize,n)*3;j+=blockDim.x){
            buf[j]=dataset[i*n*3+j];
        }
        __syncthreads();
        for (int j=1;j<m;j++){
            int besti=0;
            float best=-1;
            float x1=dataset[i*n*3+old*3+0];
            float y1=dataset[i*n*3+old*3+1];
            float z1=dataset[i*n*3+old*3+2];
            // Strided scan: refresh running min distances against the newest
            // sample and track the farthest remaining point.
            for (int k=threadIdx.x;k<n;k+=blockDim.x){
                float td=temp[blockIdx.x*n+k];
                float x2,y2,z2;
                if (k<BufferSize){       // cached points come from shared memory
                    x2=buf[k*3+0];
                    y2=buf[k*3+1];
                    z2=buf[k*3+2];
                }else{
                    x2=dataset[i*n*3+k*3+0];
                    y2=dataset[i*n*3+k*3+1];
                    z2=dataset[i*n*3+k*3+2];
                }
                float d=(x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1);
                float d2=min(d,td);
                if (d2!=td)
                    temp[blockIdx.x*n+k]=d2;
                if (d2>best){
                    best=d2;
                    besti=k;
                }
            }
            dists[threadIdx.x]=best;
            dists_i[threadIdx.x]=besti;
            // Tree reduction: block-wide argmax lands in dists_i[0].
            for (int u=0;(1<<u)<blockDim.x;u++){
                __syncthreads();
                if (threadIdx.x<(blockDim.x>>(u+1))){
                    int i1=(threadIdx.x*2)<<u;
                    int i2=(threadIdx.x*2+1)<<u;
                    if (dists[i1]<dists[i2]){
                        dists[i1]=dists[i2];
                        dists_i[i1]=dists_i[i2];
                    }
                }
            }
            __syncthreads();
            old=dists_i[0];              // farthest point becomes the next sample
            if (threadIdx.x==0)
                idxs[i*m+j]=old;
        }
    }
}
// input: dataset (b,n,3), cores (b,m,3), dist (b,n), flag (b,n)
__global__ void knearkernel (int b,int n,int m,const float * dataset,float * cores,float * dist,int * flag){
for (int k=threadIdx.x;k<n;k+=blockDim.x){
float x1 = dataset[k*3+0];
float y1 = dataset[k*3+1];
float z1 = dataset[k*3+2];
dist[k] = 1e3;
for (int i=0; i<m; i++){
float x2 = cores[i*3+0];
float y2 = cores[i*3+1];
float z2 = cores[i*3+2];
float d = (x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1);
if (d<dist[k]){
dist[k] = d;
flag[k] = i;
}
}
}
__syncthreads();
}
// input: dataset (b,n,3), cores (b,m,3), flag (b,n)
// temp_cluster: (m,1024,3), dist_temp: (m, 1024), dist_temp_id: (m, 1024), temp_x: (m, 1024), output_r: (b,m)
// Per-cluster bounding-sphere kernel: one block per cluster (block 0 is
// deliberately idle -- see the workaround note below). For cluster
// (blockIdx.x-1) it gathers the cluster's points, seeds a sphere from the
// widest axis-extreme pair, then grows/shifts the sphere (Ritter-style)
// until every point is enclosed. Writes the center to cores and the radius
// to output_r.
// Assumes blockDim.x == 1024 and at most 1024 points per cluster.
// NOTE(review): the __shared__ variables cnt and break_flag are never
// initialized; CUDA does not zero shared memory, so this relies on
// unspecified contents -- confirm on the target toolchain.
__global__ void rbskernel (int b,int n,int m,const float * dataset,float * cores,int * flag,float * output_r){
__shared__ float temp_x[1024];
__shared__ float temp_y[1024];
__shared__ float temp_z[1024];
__shared__ int temp_x_id[1024];
__shared__ int temp_y_id[1024];
__shared__ int temp_z_id[1024];
__shared__ float dist_temp[1024];
__shared__ int dist_temp_id[1024];
__shared__ float temp_cluster[1024*3];
//assign points to block
__shared__ int cnt;
// ** On cuda 11.1 and tensorflow 2.4.1: When blockIdx.x=0, block cannot update shared variable **
if (blockIdx.x>0){
// Thread 0 serially gathers this cluster's points into shared memory.
if (threadIdx.x==0){
for (int k=0;k<n;k++){
if (blockIdx.x-1==flag[k]){
temp_cluster[cnt*3+0] = dataset[k*3+0];
temp_cluster[cnt*3+1] = dataset[k*3+1];
temp_cluster[cnt*3+2] = dataset[k*3+2];
cnt+=1;
}
}
}
__syncthreads();
// compute min/max xyz
// Threads beyond cnt are padded with point 0's coordinates so they do not
// perturb the min/max reduction below.
// NOTE(review): the *_id arrays are not initialized in the padding branch;
// the reduction only propagates ids on strict improvement, which appears
// to mask this -- verify.
if (threadIdx.x<cnt){
temp_x[threadIdx.x] = temp_cluster[threadIdx.x*3+0];
temp_y[threadIdx.x] = temp_cluster[threadIdx.x*3+1];
temp_z[threadIdx.x] = temp_cluster[threadIdx.x*3+2];
temp_x_id[threadIdx.x] = threadIdx.x;
temp_y_id[threadIdx.x] = threadIdx.x;
temp_z_id[threadIdx.x] = threadIdx.x;
}
else{
temp_x[threadIdx.x] = temp_cluster[0];
temp_y[threadIdx.x] = temp_cluster[1];
temp_z[threadIdx.x] = temp_cluster[2];
}
__syncthreads();
// Tree reduction: after log2(blockDim.x) rounds, index 0 holds the min and
// index blockDim.x-1 holds the max of each coordinate, with the matching
// point ids in the *_id arrays.
for (int u=0;(1<<u)<blockDim.x;u++){
__syncthreads();
if (threadIdx.x<(blockDim.x>>(u+1))){
int i1=(threadIdx.x*2+0)<<u;
int i2=(threadIdx.x*2+1)<<u;
int i3=((threadIdx.x*2+1)<<u)-1;
int i4=((threadIdx.x*2+2)<<u)-1;
float min_x = min(temp_x[i1], temp_x[i2]);
float max_x = max(temp_x[i4], temp_x[i3]);
// i3's id is saved before it may be overwritten by the min update.
int x_i3_id = temp_x_id[i3];
if (min_x == temp_x[i2]){
temp_x_id[i1] = temp_x_id[i2];
}
if (max_x == temp_x[i3]){
temp_x_id[i4] = x_i3_id;
}
temp_x[i1] = min_x;
temp_x[i4] = max_x;
float min_y = min(temp_y[i1], temp_y[i2]);
float max_y = max(temp_y[i4], temp_y[i3]);
int y_i3_id = temp_y_id[i3];
if (min_y == temp_y[i2]){
temp_y_id[i1] = temp_y_id[i2];
}
if (max_y == temp_y[i3]){
temp_y_id[i4] = y_i3_id;
}
temp_y[i1] = min_y;
temp_y[i4] = max_y;
float min_z = min(temp_z[i1], temp_z[i2]);
float max_z = max(temp_z[i4], temp_z[i3]);
int z_i3_id = temp_z_id[i3];
if (min_z == temp_z[i2]){
temp_z_id[i1] = temp_z_id[i2];
}
if (max_z == temp_z[i3]){
temp_z_id[i4] = z_i3_id;
}
temp_z[i1] = min_z;
temp_z[i4] = max_z;
}
}
__syncthreads();
// Seed the sphere from the widest of the three extreme-point pairs:
// center = midpoint, radius = half the pair distance.
if (threadIdx.x==0){
float min_x_x = temp_cluster[temp_x_id[0]*3+0];
float min_x_y = temp_cluster[temp_x_id[0]*3+1];
float min_x_z = temp_cluster[temp_x_id[0]*3+2];
float max_x_x = temp_cluster[temp_x_id[1023]*3+0];
float max_x_y = temp_cluster[temp_x_id[1023]*3+1];
float max_x_z = temp_cluster[temp_x_id[1023]*3+2];
float min_y_x = temp_cluster[temp_y_id[0]*3+0];
float min_y_y = temp_cluster[temp_y_id[0]*3+1];
float min_y_z = temp_cluster[temp_y_id[0]*3+2];
float max_y_x = temp_cluster[temp_y_id[1023]*3+0];
float max_y_y = temp_cluster[temp_y_id[1023]*3+1];
float max_y_z = temp_cluster[temp_y_id[1023]*3+2];
float min_z_x = temp_cluster[temp_z_id[0]*3+0];
float min_z_y = temp_cluster[temp_z_id[0]*3+1];
float min_z_z = temp_cluster[temp_z_id[0]*3+2];
float max_z_x = temp_cluster[temp_z_id[1023]*3+0];
float max_z_y = temp_cluster[temp_z_id[1023]*3+1];
float max_z_z = temp_cluster[temp_z_id[1023]*3+2];
float d_x = (min_x_x-max_x_x)*(min_x_x-max_x_x)+(min_x_y-max_x_y)*(min_x_y-max_x_y)+(min_x_z-max_x_z)*(min_x_z-max_x_z);
float d_y = (min_y_x-max_y_x)*(min_y_x-max_y_x)+(min_y_y-max_y_y)*(min_y_y-max_y_y)+(min_y_z-max_y_z)*(min_y_z-max_y_z);
float d_z = (min_z_x-max_z_x)*(min_z_x-max_z_x)+(min_z_y-max_z_y)*(min_z_y-max_z_y)+(min_z_z-max_z_z)*(min_z_z-max_z_z);
float max_d = max(max(d_x,d_y),d_z);
output_r[(blockIdx.x-1)] = sqrt(max_d)/2.0;
if (max_d==d_x){
cores[(blockIdx.x-1)*3+0] = 0.5*(min_x_x+max_x_x);
cores[(blockIdx.x-1)*3+1] = 0.5*(min_x_y+max_x_y);
cores[(blockIdx.x-1)*3+2] = 0.5*(min_x_z+max_x_z);
}
if (max_d==d_y){
cores[(blockIdx.x-1)*3+0] = 0.5*(min_y_x+max_y_x);
cores[(blockIdx.x-1)*3+1] = 0.5*(min_y_y+max_y_y);
cores[(blockIdx.x-1)*3+2] = 0.5*(min_y_z+max_y_z);
}
if (max_d==d_z){
cores[(blockIdx.x-1)*3+0] = 0.5*(min_z_x+max_z_x);
cores[(blockIdx.x-1)*3+1] = 0.5*(min_z_y+max_z_y);
cores[(blockIdx.x-1)*3+2] = 0.5*(min_z_z+max_z_z);
}
}
__syncthreads();
// compute rbs
// Iteratively find the farthest point from the current center; if it lies
// outside the sphere, shift the center toward it and enlarge the radius
// (with a 1.05 safety factor), else stop.
// NOTE(review): dist_temp entries for threads >= cnt are never written, so
// the max reduction below may read stale shared values -- verify.
__shared__ int break_flag;
while (break_flag==0) {
float x0 = cores[(blockIdx.x-1)*3+0];
float y0 = cores[(blockIdx.x-1)*3+1];
float z0 = cores[(blockIdx.x-1)*3+2];
if (threadIdx.x<cnt){
float x1 = temp_cluster[threadIdx.x*3+0];
float y1 = temp_cluster[threadIdx.x*3+1];
float z1 = temp_cluster[threadIdx.x*3+2];
dist_temp[threadIdx.x] = (x0-x1)*(x0-x1)+(y0-y1)*(y0-y1)+(z0-z1)*(z0-z1);
dist_temp_id[threadIdx.x] = threadIdx.x;
}
// Max reduction: leaves the largest squared distance (and its id) at 0.
for (int u=0;(1<<u)<blockDim.x;u++){
__syncthreads();
if (threadIdx.x<(blockDim.x>>(u+1))){
int i1=(threadIdx.x*2+0)<<u;
int i2=(threadIdx.x*2+1)<<u;
if (dist_temp[i1]<dist_temp[i2]){
dist_temp[i1]=dist_temp[i2];
dist_temp_id[i1]=dist_temp_id[i2];
}
}
}
__syncthreads();
if (threadIdx.x==0){
float outlier_dist = sqrt(dist_temp[0]);
if (outlier_dist>output_r[blockIdx.x-1]){
int outlier_id = dist_temp_id[0];
float outlier_x = temp_cluster[outlier_id*3+0];
float outlier_y = temp_cluster[outlier_id*3+1];
float outlier_z = temp_cluster[outlier_id*3+2];
// Move the center halfway toward the outlier, scaled by how far it
// protrudes beyond the current radius.
float coef = 0.5/outlier_dist*(outlier_dist-output_r[blockIdx.x-1]);
cores[(blockIdx.x-1)*3+0] = cores[(blockIdx.x-1)*3+0] + (outlier_x-cores[(blockIdx.x-1)*3+0])*coef;
cores[(blockIdx.x-1)*3+1] = cores[(blockIdx.x-1)*3+1] + (outlier_y-cores[(blockIdx.x-1)*3+1])*coef;
cores[(blockIdx.x-1)*3+2] = cores[(blockIdx.x-1)*3+2] + (outlier_z-cores[(blockIdx.x-1)*3+2])*coef;
output_r[blockIdx.x-1] = 1.05*0.5*(outlier_dist+output_r[blockIdx.x-1]);
}
else{
break_flag=1;
}
}
__syncthreads();
}
}
}
// input: dataset (b,n,3), cores (b,m,3), output_r: (b,m), dist2cores: (b,m,10240), max_temp: (b,m,10)
// Recompute each cluster's radius as the distance to its farthest member.
// One block per cluster; block 0 is idle (same blockIdx.x==0 workaround as
// rbskernel). Each thread scans a strided subset of the n points, finds the
// point's nearest core, and tracks the farthest point assigned to THIS
// block's cluster; a shared-memory max reduction combines the per-thread
// results. Assumes blockDim.x == 1024 (dist2core capacity) and a
// power-of-two block size.
__global__ void updateradius(int b,int n,int m,const float * dataset,float * cores,float * output_r){
if (blockIdx.x>0){
__shared__ float dist2core[1024];
// 1e2 truncates to 100; only relevant as a "no cluster" sentinel if m==0.
int cluster_id = 1e2;
float max_dist = 0.0;
for (int k=threadIdx.x;k<n;k+=blockDim.x){
float x1 = dataset[k*3+0];
float y1 = dataset[k*3+1];
float z1 = dataset[k*3+2];
float dist_old = 1e3;
// Nearest-core search for point k.
for (int i=0; i<m; i++){
float x0 = cores[i*3+0];
float y0 = cores[i*3+1];
float z0 = cores[i*3+2];
float dist = sqrt((x0-x1)*(x0-x1)+(y0-y1)*(y0-y1)+(z0-z1)*(z0-z1));
if (dist<dist_old){
cluster_id = i;
dist_old = dist;
}
}
// Only points belonging to this block's cluster contribute.
if ( (cluster_id==(blockIdx.x-1)) && (dist_old>max_dist) ){
max_dist = dist_old;
}
}
dist2core[threadIdx.x] = max_dist;
// Block-wide max reduction; result lands in dist2core[0].
for (int u=0;(1<<u)<blockDim.x;u++){
__syncthreads();
if (threadIdx.x<(blockDim.x>>(u+1))){
int i1=(threadIdx.x*2)<<u;
int i2=(threadIdx.x*2+1)<<u;
if (dist2core[i1]<dist2core[i2]){
dist2core[i1]=dist2core[i2];
}
}
}
__syncthreads();
// Floor the radius at 0.15 to avoid degenerate (empty) clusters.
if (threadIdx.x==0) {
output_r[blockIdx.x-1] = max(0.15,dist2core[0]);
}
}
}
// input: dataset (b,n,3), cores (b,m,3), output_r: (b,m), count: (b,m), local_region(b,m,1024,3)
// Gather, for each cluster, all points within its radius into local_region
// and record how many were found in count. One block per cluster; block 0
// idle (blockIdx.x==0 workaround). Threads compute distances in parallel;
// thread 0 then serially compacts the in-radius points.
// Assumes n <= 10240 (shared dist2cores capacity).
// NOTE(review): local_region holds at most 1024 points per cluster but the
// compaction loop has no overflow guard -- confirm n/radius bounds ensure
// count never exceeds 1024.
__global__ void ballquery (int b,int n,int m,const float * dataset,float * cores,float * output_r,float * local_region,int * count){
__shared__ float dist2cores[10240];
if (blockIdx.x>0){
// Every thread writes the same zero; benign redundant store.
count[blockIdx.x-1] = 0;
float x0 = cores[(blockIdx.x-1)*3+0];
float y0 = cores[(blockIdx.x-1)*3+1];
float z0 = cores[(blockIdx.x-1)*3+2];
// Parallel distance computation, strided over the n points.
for (int k=threadIdx.x;k<n;k+=blockDim.x){
float x1 = dataset[k*3+0];
float y1 = dataset[k*3+1];
float z1 = dataset[k*3+2];
float d = (x0-x1)*(x0-x1)+(y0-y1)*(y0-y1)+(z0-z1)*(z0-z1);
dist2cores[k] = sqrt(d);
}
__syncthreads();
// Serial compaction by thread 0 keeps the output densely packed.
if (threadIdx.x==0){
for (int i=0;i<n;i++){
if (dist2cores[i]<=output_r[blockIdx.x-1]){
local_region[(blockIdx.x-1)*1024*3+count[blockIdx.x-1]*3+0]=dataset[i*3+0];
local_region[(blockIdx.x-1)*1024*3+count[blockIdx.x-1]*3+1]=dataset[i*3+1];
local_region[(blockIdx.x-1)*1024*3+count[blockIdx.x-1]*3+2]=dataset[i*3+2];
count[blockIdx.x-1] += 1;
}
}
}
__syncthreads();
}
}
// Host launcher: farthest point sampling over the whole set
// (32 blocks x 512 threads; kernel defined elsewhere in this file).
void farthestpointsamplingallLauncher(int b,int n,int m,const float * inp,float * temp,int * out){
farthestpointsamplingallKernel<<<32,512>>>(b,n,m,inp,temp,out);
}
// Host launcher: radius/min-count constrained FPS, single block of 1024
// threads (the kernel is defined elsewhere in this file).
void farthestpointsamplingLauncher(int b,int n,int m,float r,int minnum,const float * dataset,float * temp,int * idxs,float * cores){
farthestpointsamplingKernel<<<1,1024>>>(b,n,m,r,minnum,dataset,temp,idxs,cores);
}
// Full sample-and-group pipeline: FPS seeds -> nearest-core assignment ->
// bounding-sphere refinement -> radius update -> ball query.
// The m+1-block launches intentionally leave block 0 idle (see the
// blockIdx.x==0 workaround documented in rbskernel).
void samplegroupLauncher (int b,int n,int m,float r,int minnum,const float * dataset, float * temp, int * idxs,float * cores, float * dist, int * flag,
float * output_r, float * local_region, int * cnt){
farthestpointsamplingKernel<<<1,1024>>>(b,n,m,r,minnum,dataset,temp,idxs,cores);
knearkernel<<<1,1024>>>(b,n,m,dataset,cores,dist,flag);
rbskernel<<<m+1, 1024>>>(b,n,m,dataset,cores,flag,output_r);
updateradius<<<m+1, 1024>>>(b,n,m,dataset,cores,output_r);
ballquery<<<m+1, 1024>>>(b,n,m,dataset,cores,output_r,local_region,cnt);
}
|
12,167 | #include <cuda_runtime.h>
#include <stdio.h>
// Increment x using a single 2-D thread block. With a 1-D launch only row
// j == 0 is touched (threadIdx.y is always 0).
__global__ void add_one_v1(int n, float* x) {
    int i = threadIdx.x;
    int j = threadIdx.y;
    // Guard against launches whose block exceeds the n x n matrix.
    if (i < n && j < n) {
        x[i + j * n] += 1;
    }
}
// Increment every element of the n x n matrix using a 2-D grid of 2-D
// blocks.
__global__ void add_one_v2(int n, float* x) {
    int left_up_of_block_x = blockIdx.x * blockDim.x;
    int left_up_of_block_y = blockIdx.y * blockDim.y;
    int i = left_up_of_block_x + threadIdx.x;
    int j = left_up_of_block_y + threadIdx.y;
    // BUG FIX: the original guarded only i; an over-sized grid in y would
    // write past the end of the buffer.
    if (i < n && j < n) {
        x[i + j * n] += 1;
    }
}
// Fill the n x n host buffer with the sequence 0, 1, 2, ...
void initialize_input(float* h_A, int n) {
    const int total = n * n;
    for (int idx = 0; idx < total; ++idx) {
        h_A[idx] = (float)idx;
    }
}
// Demo driver: runs add_one_v1 (single 1-D block -> only first row
// incremented) and add_one_v2 (full 2-D grid) on a 16x16 matrix and prints
// corner values after each.
int main(void) {
    int N = 16;
    size_t size = N * N * sizeof(float);
    // Allocate and fill host input; malloc can fail.
    float* h_A = (float*)malloc(size);
    if (h_A == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    initialize_input(h_A, N);
    // Device buffer, with the allocation result checked.
    float* d_A;
    cudaError_t err = cudaMalloc(&d_A, size);
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaMalloc: %s\n", cudaGetErrorString(err));
        free(h_A);
        return 1;
    }
    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    add_one_v1<<<1, N>>>(N, d_A);
    err = cudaGetLastError();   // launch-configuration errors surface here
    if (err != cudaSuccess)
        fprintf(stderr, "add_one_v1 launch: %s\n", cudaGetErrorString(err));
    // Blocking copy also synchronizes with the kernel.
    cudaMemcpy(h_A, d_A, size, cudaMemcpyDeviceToHost);
    printf("result: %f,%f,%f,%f\n", h_A[0], h_A[1], h_A[N * N - 2], h_A[N * N - 1]);
    dim3 threadsPerBlock(8, 8);
    dim3 numBlocks(N / threadsPerBlock.x, N / threadsPerBlock.y);
    add_one_v2<<<numBlocks, threadsPerBlock>>>(N, d_A);
    err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "add_one_v2 launch: %s\n", cudaGetErrorString(err));
    cudaMemcpy(h_A, d_A, size, cudaMemcpyDeviceToHost);
    printf("result: %f,%f,%f,%f\n", h_A[0], h_A[1], h_A[N * N - 2], h_A[N * N - 1]);
    // Free device memory
    cudaFree(d_A);
    // Free host memory
    free(h_A);
    return 0;
}
|
12,168 | // Berat Postalcioglu
/* OUTPUT
Enter number of children: 5
15
3
14
13
12
cpu average: 11.4
gpu average: 11.4
Average is calculated correctly for 5 children as 11.4
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cstdlib>
#include <ctime>
#include <iostream>
#include <cmath>
using namespace std;
// Fill *arr with `size` random values in [1, 16], echoing each to stdout.
void init_arr(int** arr, int size)
{
    int* data = *arr;
    for (int i = 0; i < size; ++i)
    {
        data[i] = rand() % 16 + 1;
        cout << data[i] << endl;
    }
}
// Mean of the `size` ints pointed to by *arr, computed on the host.
double calculate_average_oncpu(int** arr, int size)
{
    const int* data = *arr;
    int total = 0;
    for (int i = 0; i < size; ++i)
        total += data[i];
    return static_cast<double>(total) / size;
}
// Single-thread kernel: sums *size ints from arr and stores the mean in
// *res. Intended for a <<<1, 1>>> launch.
__global__ void calculate_average_ongpu(int *arr, int *size, double *res)
{
    int count = *size;
    int sum = 0;
    for (int i = 0; i < count; ++i)
        sum += arr[i];
    *res = (double)sum / count;
}
// Read a count, fill an array with random ages, compute the mean on CPU
// and GPU, and compare.
int main()
{
    srand(time(NULL));
    int *arr;
    int size;
    cout << "Enter number of children: ";
    cin >> size;
    // Validate input: a negative/zero size would make `new int[size]` UB.
    if (!cin || size <= 0)
    {
        cout << "invalid size" << endl;
        return 1;
    }
    arr = new int[size];
    init_arr(&arr, size);
    double cpu_av = calculate_average_oncpu(&arr, size);
    cout << "cpu average: " << cpu_av << endl;
    // cuda: copy data and length to the device, run the 1-thread kernel.
    int *gpu_arr, *gpu_size;
    double *gpu_av;
    cudaMalloc((void**)&gpu_arr, size * sizeof(int));
    cudaMemcpy((void*)gpu_arr, (const void*)arr, size * sizeof(int), cudaMemcpyHostToDevice);
    cudaMalloc((void**)&gpu_size, sizeof(int));
    cudaMemcpy((void*)gpu_size, (const void*)&size, sizeof(int), cudaMemcpyHostToDevice);
    cudaMalloc((void**)&gpu_av, sizeof(double));
    calculate_average_ongpu<<<1, 1>>> (gpu_arr, gpu_size, gpu_av);
    double *_gpu_av = new double;
    // Blocking copy synchronizes with the kernel above.
    cudaMemcpy((void*)_gpu_av, (const void*)gpu_av, sizeof(double), cudaMemcpyDeviceToHost);
    cout << "gpu average: " << *_gpu_av << endl;
    // Exact == is intentional: host and device perform the same
    // double-precision sum and division, so results should match exactly.
    if (*_gpu_av == cpu_av)
    {
        cout << "Average is calculated correctly for " << size << " children as " << cpu_av << endl;
    }
    // BUG FIX: the original leaked every allocation below.
    cudaFree(gpu_arr);
    cudaFree(gpu_size);
    cudaFree(gpu_av);
    delete _gpu_av;
    delete[] arr;
    return 0;
}
|
12,169 | #include <assert.h>
#include <iostream>
#include <cstdlib>
#include <cmath>
#include <cuda.h>
#include <cuda_runtime.h>
#include <sys/time.h>
const double EPS = 1.e-15;
const int CHUNK = 8;
// Chunked vector op: each of the first N/CHUNK threads handles CHUNK
// elements spaced `width` apart. Even-numbered threads compute a+b, odd
// ones compute a-b (deliberate divergence demo).
__global__ void vecAdd(double *d_a, double *d_b, double *d_c, int N) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int width = N / CHUNK;          // stride between a thread's elements
    if (tid >= width) return;
    const bool isEven = (tid % 2 == 0);
    for (int c = 0; c < CHUNK; ++c) {
        int e = tid + c * width;
        d_c[e] = isEven ? d_a[e] + d_b[e] : d_a[e] - d_b[e];
    }
}
// Wall-clock time in seconds (microsecond resolution) via gettimeofday.
double timer() {
    struct timeval now;
    struct timezone tz;
    gettimeofday(&now, &tz);
    return (double)now.tv_sec + 1.e-06 * (double)now.tv_usec;
}
// Benchmark driver: times a CPU add and the chunked GPU vecAdd kernel,
// then verifies the GPU output element-by-element.
int main (int argc, char**argv) {
    assert(argc == 2);
    int n = atoi(argv[1]);
    assert(n % CHUNK == 0);
    // Host vectors: a[i] + b[i] == 1 by construction (sin^2 + cos^2).
    double *h_a, *h_b, *h_c;
    size_t bytes = n * sizeof(double);
    h_a = (double*)malloc(bytes);
    h_b = (double*)malloc(bytes);
    h_c = (double*)malloc(bytes);
    for (int i = 0; i < n; i++) {
        h_a[i] = sin(i) * sin(i);
        h_b[i] = cos(i) * cos(i);
    }
    double t1 = timer();
    for (int i = 0; i < n; i++) {
        h_c[i] = h_a[i] + h_b[i];
    }
    double t2 = timer();
    std::cout << "cpu time is: " << t2-t1 << std::endl;
    // Device input/output vectors.
    double *d_a, *d_b, *d_c;
    cudaMalloc(&d_a, bytes);
    cudaMalloc(&d_b, bytes);
    cudaMalloc(&d_c, bytes);
    cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);
    int blockSize = 1024;
    int gridSize = ((n-1)/CHUNK)/blockSize + 1;   // ceil((n/CHUNK)/blockSize)
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    vecAdd<<<gridSize, blockSize>>>(d_a, d_b, d_c, n);
    cudaDeviceSynchronize();
    cudaEventRecord(stop);
    cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    float ms = 0;
    cudaEventElapsedTime(&ms, start, stop);
    std::cout << "Gpu time is: " << ms << std::endl;
    // BUG FIX: the original compared every element against a+b, but the
    // kernel computes a-b for elements owned by odd thread indices, so the
    // check reported spurious failures. The owning thread index of element
    // i is i % (n/CHUNK); rebuild the expected value from its parity.
    int width = n / CHUNK;
    for (int i = 0; i < n; i++) {
        double expected = ((i % width) % 2 == 0) ? h_a[i] + h_b[i]
                                                 : h_a[i] - h_b[i];
        if (fabs(h_c[i] - expected) >= EPS) {
            std::cout << "CHECK FAILED!\t";
            std::cout << "DIFFER: " << fabs(h_c[i] - expected) << std::endl;
            break;
        }
    }
    std::cout << "Check completed!" << std::endl;
    // BUG FIX: the original never destroyed the timing events.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    free(h_a);
    free(h_b);
    free(h_c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
|
12,170 | #include <stdlib.h>
#include "cuda_runtime.h"
#include "GPUCompute.cuh"
#define THREADS_IN_BLOCK (1024)
#define NUM_BLOCKS (512)
#define UNUSED(EXPR) do { (void)(EXPR); } while (0)
//#define _BEST_BLOCK_NUMBER
/**********************************************************************/
/**********************************************************************/
/* Grid-stride SAXPY: p_input2[i] += p_input1[i] * value for all i < length,
 * valid for any launch configuration. */
__global__ static void KernelSAXPY(float *p_input1, float *p_input2,
    float value, int length)
{
    int stride = gridDim.x * blockDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x;
         idx < length; idx += stride)
        p_input2[idx] += p_input1[idx] * value;
}
/**********************************************************************/
/**********************************************************************/
/* Per-handle GPU state: a stream, two device buffers, and two event pairs
 * used to time the operation with and without the host<->device copies. */
typedef struct
{
cudaStream_t stream;              /* stream all work is issued on */
float *p_dev_input1;              /* device copy of input 1 */
float *p_dev_input2;              /* device copy of input 2 (also output) */
cudaEvent_t start_including_copy; /* before H2D copies */
cudaEvent_t stop_including_copy;  /* after D2H copy */
cudaEvent_t start_excluding_copy; /* just before kernel launch */
cudaEvent_t stop_excluding_copy;  /* just after kernel launch */
} GPUComputeHandle;
/**********************************************************************/
/* Synchronous SAXPY with explicit host<->device copies. Records timing
 * events around the whole operation and around just the kernel.
 * Returns 0 on success, -1 on a NULL handle. */
int GPUSAXPYSynchronousDeepCopy(CUDAHandle handle, int length, float A,
    float *p_input1, float *p_input2)
{
    GPUComputeHandle *p_compute_handle = (GPUComputeHandle*)handle;
    if (NULL == p_compute_handle)
        return -1;
    float *p_host_input1 = p_input1;
    float *p_host_input2 = p_input2;
    cudaStream_t stream = p_compute_handle->stream;
    float *p_dev_input1 = p_compute_handle->p_dev_input1;
    float *p_dev_input2 = p_compute_handle->p_dev_input2;
    cudaEventRecord(p_compute_handle->start_including_copy, stream);
    /* NOTE(review): these synchronous copies run on the default stream
     * while the events are recorded on `stream`; the including-copy timing
     * is only meaningful if `stream` serializes with them -- confirm. */
    cudaMemcpy(p_dev_input1, p_host_input1, length * sizeof(float),
        cudaMemcpyHostToDevice);
    cudaMemcpy(p_dev_input2, p_host_input2, length * sizeof(float),
        cudaMemcpyHostToDevice);
    cudaEventRecord(p_compute_handle->start_excluding_copy, stream);
#ifdef _BEST_BLOCK_NUMBER
    /* BUG FIX: the original multiplied the ceil-div result back by
     * THREADS_IN_BLOCK, producing a thread count rather than a block
     * count. */
    int block_num = (length + (THREADS_IN_BLOCK - 1)) / THREADS_IN_BLOCK;
#else
    int block_num = NUM_BLOCKS;
#endif
    /* BUG FIX: grid size comes first in <<<grid, block>>>; the original
     * passed the arguments transposed. The grid-stride kernel masked it
     * for the default config, but _BEST_BLOCK_NUMBER would have requested
     * an illegal blockDim > 1024. */
    KernelSAXPY << < block_num, THREADS_IN_BLOCK, 0, stream >> > (
        p_dev_input1, p_dev_input2, A, length);
    cudaEventRecord(p_compute_handle->stop_excluding_copy, stream);
    cudaMemcpy(p_host_input2, p_dev_input2, length * sizeof(float),
        cudaMemcpyDeviceToHost);
    cudaEventRecord(p_compute_handle->stop_including_copy, stream);
    cudaEventSynchronize(p_compute_handle->stop_including_copy);
    return 0;
}
/**********************************************************************/
/* Synchronous SAXPY using zero-copy (mapped pinned host memory): no
 * explicit copies; the kernel reads/writes host memory directly.
 * Returns 0 on success, -1 on a NULL handle. */
int GPUSAXPYSynchronousZeroCopy(CUDAHandle handle, int length, float A,
    float *p_input1, float *p_input2)
{
    GPUComputeHandle *p_compute_handle = (GPUComputeHandle*)handle;
    if (NULL == p_compute_handle)
        return -1;
    cudaStream_t stream = p_compute_handle->stream;
    float *p_dev_input1 = NULL;
    float *p_dev_input2 = NULL;
    cudaEventRecord(p_compute_handle->start_including_copy, stream);
    /* Map the page-locked host buffers into the device address space. */
    cudaHostGetDevicePointer(&p_dev_input1, p_input1, 0);
    cudaHostGetDevicePointer(&p_dev_input2, p_input2, 0);
    cudaEventRecord(p_compute_handle->start_excluding_copy, stream);
#ifdef _BEST_BLOCK_NUMBER
    /* BUG FIX: ceil-div yields the block count; the original multiplied it
     * back by THREADS_IN_BLOCK. */
    int block_num = (length + (THREADS_IN_BLOCK - 1)) / THREADS_IN_BLOCK;
#else
    int block_num = NUM_BLOCKS;
#endif
    /* BUG FIX: <<<grid, block>>> -- the original transposed the launch
     * arguments (see GPUSAXPYSynchronousDeepCopy). */
    KernelSAXPY << < block_num, THREADS_IN_BLOCK, 0, stream >> > (
        p_dev_input1, p_dev_input2, A, length);
    cudaEventRecord(p_compute_handle->stop_excluding_copy, stream);
    cudaEventRecord(p_compute_handle->stop_including_copy, stream);
    cudaEventSynchronize(p_compute_handle->stop_including_copy);
    return 0;
}
/**********************************************************************/
/* Stage 1 of the async pipeline: record the start event and queue both
 * host->device copies on the handle's stream. Returns 0, or -1 on a NULL
 * handle. */
int GPUSAXPYAsynchronousCopyHostToDevice(CUDAHandle handle,
    int length, float A, float *p_input1, float *p_input2)
{
    UNUSED(A);
    GPUComputeHandle *hdl = (GPUComputeHandle*)handle;
    if (hdl == NULL)
        return -1;
    size_t bytes = length * sizeof(float);
    cudaEventRecord(hdl->start_including_copy, hdl->stream);
    cudaMemcpyAsync(hdl->p_dev_input1, p_input1, bytes,
        cudaMemcpyHostToDevice, hdl->stream);
    cudaMemcpyAsync(hdl->p_dev_input2, p_input2, bytes,
        cudaMemcpyHostToDevice, hdl->stream);
    return 0;
}
/**********************************************************************/
/* Stage 2 of the async pipeline: launch the SAXPY kernel on the handle's
 * stream, bracketed by the excluding-copy timing events. The host buffers
 * are unused here -- data already resides on the device. */
int GPUSAXPYAsynchronousCompute(CUDAHandle handle,
    int length, float A, float *p_input1, float *p_input2)
{
    UNUSED(p_input1);
    UNUSED(p_input2);
    GPUComputeHandle *p_compute_handle = (GPUComputeHandle*)handle;
    if (NULL == p_compute_handle)
        return -1;
    cudaStream_t stream = p_compute_handle->stream;
    float *p_dev_input1 = p_compute_handle->p_dev_input1;
    float *p_dev_input2 = p_compute_handle->p_dev_input2;
    cudaEventRecord(p_compute_handle->start_excluding_copy, stream);
#ifdef _BEST_BLOCK_NUMBER
    /* BUG FIX: ceil-div gives the block count; the original multiplied it
     * back by the block size. */
    int block_num = (length + (THREADS_IN_BLOCK - 1)) / THREADS_IN_BLOCK;
#else
    int block_num = NUM_BLOCKS;
#endif
    /* BUG FIX: <<<grid, block>>> -- the original transposed the launch
     * arguments. */
    KernelSAXPY << < block_num, THREADS_IN_BLOCK, 0, stream >> > (
        p_dev_input1, p_dev_input2, A, length);
    cudaEventRecord(p_compute_handle->stop_excluding_copy, stream);
    return 0;
}
/**********************************************************************/
/* Stage 3 of the async pipeline: queue the device->host copy of the
 * result buffer and record the final timing event. Only input2 carries the
 * result; input1 is unused here. */
int GPUSAXPYAsynchronousCopyDeviceToHost(CUDAHandle handle,
    int length, float A, float *p_input1, float *p_input2)
{
    UNUSED(A);
    UNUSED(p_input1);
    GPUComputeHandle *p_compute_handle = (GPUComputeHandle*)handle;
    if (NULL == p_compute_handle)
        return -1;
    cudaStream_t stream = p_compute_handle->stream;
    cudaMemcpyAsync(p_input2, p_compute_handle->p_dev_input2,
        length * sizeof(float), cudaMemcpyDeviceToHost, stream);
    cudaEventRecord(p_compute_handle->stop_including_copy, stream);
    return 0;
}
/**********************************************************************/
/* Full async SAXPY: H2D copy, compute, D2H copy, all queued on the
 * handle's stream. Returns 0 on success or the first failing stage's
 * error code (the original ignored the sub-call results). */
int GPUSAXPYAsynchronous(CUDAHandle handle,
    int length, float A, float *p_input1, float *p_input2)
{
    if (NULL == (GPUComputeHandle*)handle)
        return -1;
    int ret = GPUSAXPYAsynchronousCopyHostToDevice(handle,
        length, A, p_input1, p_input2);
    if (ret != 0)
        return ret;
    ret = GPUSAXPYAsynchronousCompute(handle,
        length, A, p_input1, p_input2);
    if (ret != 0)
        return ret;
    return GPUSAXPYAsynchronousCopyDeviceToHost(handle,
        length, A, p_input1, p_input2);
}
/**********************************************************************/
/* Block the host until the async pipeline's final event has completed.
 * Returns 0, or -1 on a NULL handle. */
int WaitComputingDone(CUDAHandle handle)
{
    GPUComputeHandle *hdl = (GPUComputeHandle*)handle;
    if (hdl == NULL)
        return -1;
    cudaEventSynchronize(hdl->stop_including_copy);
    return 0;
}
/**********************************************************************/
/* Non-blocking poll: true iff the async pipeline's final event has
 * completed; false on a NULL handle or while work is still pending. */
bool IsComputeDone(CUDAHandle handle)
{
    GPUComputeHandle *hdl = (GPUComputeHandle*)handle;
    if (hdl == NULL)
        return false;
    return cudaSuccess == cudaEventQuery(hdl->stop_including_copy);
}
/**********************************************************************/
/* No-op kernel launched once at init to absorb first-launch overhead. */
static __global__ void KernelWarmUp(void)
{
    unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    float acc = 0.0f;
    acc += 0.0f + tid;   /* touch tid; result intentionally discarded */
}
/**********************************************************************/
/* Allocate a compute handle: stream, two device buffers of `length`
 * floats, and four timing events; runs a warm-up kernel.
 * Returns NULL if the host allocation or stream creation fails. */
CUDAHandle InitGPUCompute(int length)
{
    GPUComputeHandle *p_compute_handle;
    p_compute_handle = (GPUComputeHandle*)malloc(sizeof(GPUComputeHandle));
    /* BUG FIX: the malloc result and stream-creation status were never
     * checked (err was set but unused). */
    if (NULL == p_compute_handle)
        return NULL;
    cudaError_t err = cudaStreamCreate(&p_compute_handle->stream);
    if (cudaSuccess != err) {
        free(p_compute_handle);
        return NULL;
    }
    cudaMalloc(&p_compute_handle->p_dev_input1, length * sizeof(float));
    cudaMalloc(&p_compute_handle->p_dev_input2, length * sizeof(float));
    cudaEventCreate(&p_compute_handle->start_including_copy);
    cudaEventCreate(&p_compute_handle->stop_including_copy);
    cudaEventCreate(&p_compute_handle->start_excluding_copy);
    cudaEventCreate(&p_compute_handle->stop_excluding_copy);
    /* Warm-up launch: grid size first, block size second. */
    KernelWarmUp << < NUM_BLOCKS, THREADS_IN_BLOCK,
        0, p_compute_handle->stream >> >();
    cudaDeviceSynchronize();
    return p_compute_handle;
}
/**********************************************************************/
/* Destroy the handle's events and stream, free its device buffers, and
 * free the handle itself. Safe on a NULL handle. */
void CloseGPUCompute(CUDAHandle handle)
{
    GPUComputeHandle *p_compute_handle;
    p_compute_handle = (GPUComputeHandle*)handle;
    if (NULL == p_compute_handle)
        return;
    cudaEventDestroy(p_compute_handle->start_including_copy);
    cudaEventDestroy(p_compute_handle->stop_including_copy);
    cudaEventDestroy(p_compute_handle->start_excluding_copy);
    cudaEventDestroy(p_compute_handle->stop_excluding_copy);
    cudaStreamDestroy(p_compute_handle->stream);
    /* BUG FIX: the original passed &p_compute_handle->p_dev_inputN -- the
     * address of the pointer member, not the device pointer -- so the
     * device buffers were never actually released. */
    cudaFree(p_compute_handle->p_dev_input1);
    cudaFree(p_compute_handle->p_dev_input2);
    free(p_compute_handle);
}
/**********************************************************************/
/* Report, in milliseconds, the elapsed time of the last operation both
 * including and excluding the host<->device copies. No-op on a NULL
 * handle. (Name typo "Elasped" kept -- it is the public interface.) */
void GetElaspedTime(CUDAHandle handle, float *p_elasped_time_including_copy_in_ms,
    float *p_elasped_time_excluding_copy_in_ms)
{
    GPUComputeHandle *hdl = (GPUComputeHandle*)handle;
    if (hdl == NULL)
        return;
    cudaEventElapsedTime(p_elasped_time_including_copy_in_ms,
        hdl->start_including_copy, hdl->stop_including_copy);
    cudaEventElapsedTime(p_elasped_time_excluding_copy_in_ms,
        hdl->start_excluding_copy, hdl->stop_excluding_copy);
}
/**********************************************************************/
|
12,171 | #include "includes.h"
//
// Assignment 1: ParallelSine
// CSCI 415: Networking and Parallel Computation
// Spring 2017
// Name(s): Jaron Pollman
//
// Sine implementation derived from slides here: http://15418.courses.cs.cmu.edu/spring2016/lecture/basicarch
// standard imports
// problem size (vector length) N
static const int N = 12345678; //#of threads?
// Number of terms to use when approximating sine
static const int TERMS = 6; //# of blocks
// kernel function (CPU - Do not modify)
// Taylor-series sine approximation, one element per thread.
// NOTE(review): there is no bounds guard and no length parameter, so the
// launch configuration must cover exactly the array length -- confirm at
// the call site.
__global__ void paralellSine(float *input, float *output)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x; //Proper indexing of elements.
    float value = input[idx];
    float numer = input[idx] * input[idx] * input[idx];
    // BUG FIX: denom was an int; its running product exceeds INT_MAX after
    // the j=5 update (signed overflow = UB), corrupting the final term.
    // float holds the magnitude and every intermediate value here is still
    // exactly representable.
    float denom = 6.0f;   // 3!
    int sign = -1;
    for (int j=1; j<=TERMS; j++)
    {
        value += sign * numer/denom;
        numer *= input[idx] * input[idx];
        denom *= (2 * j + 2) * (2 * j + 3);
        sign *= -1;
    }
    output[idx] = value;
}
12,172 | #include <cuda_profiler_api.h>
#include <stdio.h>
#include <cuda.h>
#include <time.h>
#define BLOCK_SIZE 256
//#define Ni 4096 //input layers
//#define Nn 1024 //output layers
// Matrix-vector product: out[tid] = sum_i vec[i] * mat[i][tid].
// The input vector is staged in shared memory, one copy per block.
// Requires Ni <= 12000 (capacity of the shared staging buffer).
__global__
void kernel(float *vec, float *mat, float *out, const int Ni, const int Nn){
    const unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
    __shared__ float shared_vec[12000];
    // BUG FIX: the original did shared_vec[tid]=vec[tid] -- a *global*
    // thread index into a *per-block* shared array -- so each block read
    // mostly uninitialized shared memory (only blockDim entries were ever
    // written, and only for tid < Nn). Load the full vector cooperatively
    // with block-local indices instead.
    for (int i = threadIdx.x; i < Ni; i += blockDim.x)
        shared_vec[i] = vec[i];
    // BUG FIX: __syncthreads() was inside the divergent tid<Nn branch;
    // a barrier must be reached by every thread in the block.
    __syncthreads();
    if (tid < Nn) {
        float sum = 0.0f;
        #pragma unroll
        for (int i = 0; i < Ni; i++)
            sum += shared_vec[i] * mat[(i*Nn)+tid];
        out[tid] = sum;
    }
}
// helper functions
void init_array(float *a, const int Ni, const int val);
void init_mat(float *a, const int Ni, const int Nn, const int val);
void print_array(float *a, const int Ni, char *d);
void print_mat(float *a, const int Ni, const int Nn, char *d);
// Driver: builds a constant vector/matrix, runs the shared-memory
// matrix-vector kernel, and copies the result back.
int main (void) {
    srand( time(NULL) );
    int Ni = 4096;
    int Nn = 1024;
    float *a, *b, *c;
    float *dev_a, *dev_b, *dev_c;
    a=(float*)malloc(sizeof(float)*Ni);
    b=(float*)malloc(sizeof(float)*Ni*Nn);
    c=(float*)malloc(sizeof(float)*Nn);
    init_array(a, Ni, 1.0f);
    init_mat(b, Ni, Nn, 2.0f);
    init_array(c, Nn, 0.0f);
    // Allocate device memory.
    cudaMalloc((void**)&dev_a, sizeof(float)*Ni);
    cudaMalloc((void**)&dev_b, sizeof(float)*Ni*Nn);
    cudaMalloc((void**)&dev_c, sizeof(float)*Nn);
    // Transfer host inputs to the device.
    cudaMemcpy(dev_a, a, sizeof(float)*Ni, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, sizeof(float)*Ni*Nn, cudaMemcpyHostToDevice);
    // One thread per output element, 256 threads per block.
    kernel<<<Nn/256+1, 256>>>(dev_a, dev_b, dev_c, Ni, Nn);
    // BUG FIX: check for launch errors immediately after the launch; the
    // original queried cudaGetLastError() only after the cudaFree calls,
    // which could misattribute a failure.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("CUDA Error: %s;", cudaGetErrorString(err));
    // Blocking copy synchronizes with the kernel before reading results.
    cudaMemcpy(c, dev_c, sizeof(float)*Nn, cudaMemcpyDeviceToHost);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    free(a);
    free(b);
    free(c);
    cudaProfilerStop();
    return 0;
}
// Set every element of the length-Ni float array to val.
// NOTE(review): val is declared int although callers pass float literals
// (1.0f truncates to 1); signature kept to match the forward declaration.
void init_array(float *a, const int Ni, const int val) {
    for (int idx = 0; idx < Ni; ++idx)
        a[idx] = val;
}
// Set every element of the Ni x Nn row-major matrix to val.
// NOTE(review): val is an int like init_array's -- float arguments are
// truncated; signature kept to match the forward declaration.
void init_mat(float *a, const int Ni, const int Nn, const int val) {
    for (int r = 0; r < Ni; ++r)
        for (int col = 0; col < Nn; ++col)
            a[r*Nn + col] = val;
}
// Print each element as "\n<d>[i]: value", then a trailing newline.
void print_array(float *a, const int Ni, char *d) {
    for (int idx = 0; idx < Ni; ++idx)
        printf("\n%s[%d]: %f", d, idx, a[idx]);
    printf("\n");
}
// Print the Ni x Nn matrix: one "\n<d>[row]:" header per row followed by
// tab-separated values, then a trailing newline.
void print_mat(float *a, const int Ni, const int Nn, char *d) {
    for (int r = 0; r < Ni; ++r) {
        printf("\n%s[%d]:", d, r);
        for (int col = 0; col < Nn; ++col)
            printf("\t%6.4f", a[r*Nn + col]);
    }
    printf("\n");
}
|
12,173 | /*
Our source have format ARGB per pixel, so we can test two approches here.
1. Kernel calculates all values per pixel
2. Kernel calculates one value (A/R/G/B) separately
*/
// In-place ascending bubble sort. With the fixed 9-element kernels used
// here, a fancier algorithm would buy nothing.
extern "C" __device__
void
sort(unsigned char src[], int size = 9)
{
    for (int pass = 0; pass < size - 1; ++pass)
    {
        for (int k = 0; k + 1 < size - pass; ++k)
        {
            if (src[k] > src[k + 1])
            {
                unsigned char t = src[k];
                src[k] = src[k + 1];
                src[k + 1] = t;
            }
        }
    }
}
// We want to have one kernel per pixel. Not for per color. So we need move every i * 4;
// 3x3 median filter over BGRA pixels: one thread per pixel. Each thread
// gathers the 3x3 neighborhood per channel, sorts, and writes the median.
// Border pixels (and any pixel whose 4 bytes would run past totalSize)
// are left untouched.
extern "C" __global__ void
CalculatePerPixel(const unsigned char *source, unsigned char *destination, int totalSize, int height, int width, int stride)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    i *= 4;   // 4 bytes (BGRA) per pixel
    // BUG FIX: the original guard was `i > totalSize`, which let i ==
    // totalSize (and any partial trailing pixel) read/write out of bounds;
    // the thread touches bytes i..i+3.
    if (i + 4 > totalSize) return;
    // Our (X,Y) in pixels.
    int h = i / stride;
    int w = (i - h * stride) / 4;
    // Skip the one-pixel border -- the 3x3 window would fall outside.
    if (h+1 >= height || h - 1 < 0) return;
    if (w+1 >= width || w - 1 < 0) return;
    unsigned char A[9];
    unsigned char R[9];
    unsigned char G[9];
    unsigned char B[9];
    // Gather the 3x3 neighborhood; byte order per pixel is B, G, R, A.
    int pos = 0;
    int index = 0;
    for (int y = h - 1; y <= h + 1; ++y)
    {
        for (int x = w - 1; x <= w + 1; ++x)
        {
            pos = y * stride + 4 * x;
            B[index] = source[pos];
            G[index] = source[pos+1];
            R[index] = source[pos+2];
            A[index] = source[pos+3];
            index++;
        }
    }
    // Median of 9 = middle element (index 4) after sorting.
    sort(A);
    sort(R);
    sort(B);
    sort(G);
    *(destination + i++) = B[4];
    *(destination + i++) = G[4];
    *(destination + i++) = R[4];
    *(destination + i++) = A[4];
}
|
12,174 | /*
Copyright (c) 2015-present Advanced Micro Devices, Inc. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
//#include<iostream>
#include <stdint.h>
#include <stdio.h>
#include <iostream>
#include <cuda_runtime.h>
#include <cooperative_groups.h>
// Abort the process with file/line context when a CUDA runtime call fails.
#define CHECK(cmd) \
{\
cudaError_t error = cmd;\
if (error != cudaSuccess) { \
fprintf(stderr, "error: '%s'(%d) at %s:%d\n", cudaGetErrorString(error), error,__FILE__, __LINE__); \
exit(EXIT_FAILURE);\
}\
}
// Debug/experimental helper: splits the thread group g into 16-wide tiles
// and prints which global-offset range each thread falls in.
// NOTE(review): tile8.sync() is invoked inside a divergent branch
// (offset<8); a partitioned-group sync reached by only some members of a
// tile is undefined behavior and can hang -- confirm this is intentional
// experimentation rather than production code.
__device__ void tile_partition(cooperative_groups::thread_group g)
{
cooperative_groups::thread_group tile8 = cooperative_groups::tiled_partition(g,16);
//printf("tile8 size is: %d\n",tile8.size());
size_t offset = (blockIdx.x * blockDim.x + threadIdx.x);
//printf("offset: %d\n",offset);
printf("thread rank is: %d\n",tile8.thread_rank());
if(offset<8)
//if(tile8.thread_rank() <8)
{
//__syncthreads();
tile8.sync();
printf("I am after tile8.sync()\n");
printf("I am in offset<8\n");
}
else if((offset>7) && (offset<16))
{
printf("I am in offset<16\n");
}
else if((offset>15) && (offset<24))
{
printf("I am in offset<24\n");
}
tile8.sync();
//__syncthreads();
}
// Cooperative-launch kernel entry: hands the thread block's group to the
// tile-partition experiment.
__global__ void
thread_partition()
{
    cooperative_groups::thread_group g = cooperative_groups::this_thread_block();
    tile_partition(g);
    // (removed: an unused `offset` local computed after the call)
}
// Driver: cooperative launch of thread_partition over N threads.
int main(int argc, char *argv[])
{
    CHECK(cudaSetDevice(2));
    size_t N = 32;
    cudaDeviceProp props;
    // NOTE(review): properties are queried for device 0 although device 2
    // was selected above -- confirm which device is intended.
    CHECK(cudaGetDeviceProperties(&props, 0/*deviceID*/));
    printf ("info: running on device %s\n", props.name);
    const unsigned threadsPerBlock = 64;
    // BUG FIX: integer division (32/64) yielded 0 blocks, which makes the
    // cooperative launch fail; use ceil-div so at least one block runs.
    const unsigned blocks = (N + threadsPerBlock - 1) / threadsPerBlock;
    // BUG FIX: the message named 'vector_square', a kernel that is never
    // launched here.
    printf ("info: launch 'thread_partition' kernel\n");
    void *coop_params = NULL;
    cudaError_t errval = cudaLaunchCooperativeKernel(
        (void*)thread_partition, blocks, threadsPerBlock, &coop_params, 0, 0);
    // BUG FIX: check the launch result *before* synchronizing -- the
    // original's CHECK(cudaDeviceSynchronize()) would exit on a launch
    // failure before the diagnostic below could ever run.
    if (errval != cudaSuccess)
    {
        std::cout << "CUDA error: " << cudaGetErrorString(errval);
        std::cout << std::endl;
        std::cout << " Location: " << __FILE__ << ":" << __LINE__ << std::endl;
        exit(errval);
    }
    CHECK(cudaDeviceSynchronize());
    std::cout<<"errval: "<<cudaGetErrorString(errval)<<std::endl;
    printf ("DONE!\n");
    return 0;
}
|
12,175 | #include "device_launch_parameters.h"
__global__
void doubleToFloatMemCpyKernel(int n, int coord, double* source, float* target) {
    // One thread per element: narrow source[idx] to float and scatter it
    // into component 'coord' of the interleaved 3-component 'target' layout.
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return; // grid tail guard
    target[idx * 3 + coord] = source[idx];
}
__global__
void elementWiseMultiplyKernel(int n, double* a, double* b) {
    // In-place element-wise (Hadamard) product: a[idx] *= b[idx].
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return; // grid tail guard
    a[idx] *= b[idx];
}
void doubleToFloatDeviceCpy(int n, int coord, double* source, float* target) {
    // Host launcher: 256 threads per block, ceil-divided grid so every
    // element is covered.
    const int threads = 256;
    const int blocks = (n + threads - 1) / threads;
    doubleToFloatMemCpyKernel<<< blocks, threads >>>(n, coord, source, target);
}
void elementWiseMultiply(int n, double * a, double * b)
{
    // Host launcher for the in-place element-wise product kernel.
    const int threads = 256;
    const int blocks = (n + threads - 1) / threads;
    elementWiseMultiplyKernel<<< blocks, threads >>>(n, a, b);
}
|
12,176 | #include "includes.h"
__global__ void ApplyBrainsMovement( float *CCXY, int dim_XY, float *movement, int dim_movement, int max_clusters ){
    // Flat thread id over a 2-D grid of 1-D blocks (same formula as original).
    const int id = blockDim.x*blockIdx.y*gridDim.x + blockDim.x*blockIdx.x + threadIdx.x;
    if (id >= max_clusters) return;
    // Translate cluster centre in X/Y when a 2-D movement vector is supplied.
    if (dim_movement >= 2){
        CCXY[id*dim_XY]     -= movement[0];
        CCXY[id*dim_XY + 1] -= movement[1];
    }
    // Rotation branch intentionally empty in the original -- not implemented yet.
    if (dim_movement >= 3){
    }
}
12,177 | #include "includes.h"
__global__ void add(int *a, int *b, int *c) {
    // Element-wise sum: c = a + b, one element per thread.
    // NOTE(review): there is no bounds guard -- the caller must launch exactly
    // as many threads as there are elements, or this writes out of bounds.
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    c[idx] = a[idx] + b[idx];
}
12,178 | #include <cstdio>
#include <cstdlib>
#include <vector>
__global__ void init(int *a, int n){
    // Zero-fill a[0..n-1], one element per thread.
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx < n) a[idx] = 0;
}
__global__ void packing(int *key, int *bucket, int n){
    // Histogram step: count each key value with an atomic increment.
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx < n) atomicAdd(&bucket[key[idx]], 1);
}
__global__ void scan(int *a, int *b, int n){
    // Hillis-Steele inclusive prefix sum over a[0..n-1], using b as scratch.
    // BUG FIX: the original did `if(i >= n) return;` and then called
    // __syncthreads() inside the loop.  A barrier inside divergent control
    // flow is undefined behavior whenever n is not a multiple of blockDim.x,
    // because the returned threads never reach it.  Keep every thread in the
    // loop and guard the memory accesses instead.
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    for(int j = 1; j < n; j <<= 1){
        if(i < n) b[i] = a[i];
        __syncthreads();
        if(i < n && i - j >= 0) a[i] += b[i - j];
        __syncthreads();
    }
    // NOTE(review): correct only for a single-block launch -- __syncthreads()
    // cannot synchronize across blocks.  The caller launches it with one block.
}
__global__ void unpacking(int *key, int *bucket, int n, int range){
    // Counting-sort output step: thread i binary-searches the prefix-summed
    // bucket array for the smallest bucket whose cumulative count exceeds i,
    // and writes that bucket index as the sorted key.
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i >= n) return;
    int hi = range - 1;
    int lo = -1;
    while(hi - lo > 1){
        const int mid = (hi + lo) / 2;
        if(i >= bucket[mid]) lo = mid;
        else hi = mid;
    }
    key[i] = hi;
}
int main() {
  int n = 50;
  const int m = 1024;
  int range = 5;
  // Keys in managed memory so both host (fill/print) and device can access them.
  int *key;
  cudaMallocManaged(&key, n*sizeof(int));
  for (int i=0; i<n; i++) {
    key[i] = rand() % range;
    printf("%d ",key[i]);
  }
  printf("\n");
  int *bucket;
  cudaMallocManaged(&bucket, range*sizeof(int));
  // BUG FIX: the original zeroed n (=50) entries of 'bucket', which holds
  // only 'range' (=5) ints -- an out-of-bounds write. Zero 'range' entries.
  init<<<(range+m-1)/m, m>>>(bucket, range);
  packing<<<(n+m-1)/m, m>>>(key, bucket, n);
  int *scan_mem;
  cudaMallocManaged(&scan_mem, range*sizeof(int));
  // Single-block launch: 'scan' relies on __syncthreads(), which only works
  // within one block (range << m, so one block is used).
  scan<<<(range+m-1)/m, m>>>(bucket, scan_mem, range);
  unpacking<<<(n+m-1)/m, m>>>(key, bucket, n, range);
  cudaDeviceSynchronize();
  for (int i=0; i<n; i++) {
    printf("%d ",key[i]);
  }
  printf("\n");
  // BUG FIX: release the managed allocations (original leaked all three).
  cudaFree(key);
  cudaFree(bucket);
  cudaFree(scan_mem);
  return 0;
}
|
12,179 | //pass
//--warp-sync=32 --blockDim=32 --gridDim=1 --equality-abstraction --no-inline
#include <stdio.h>
#include <assert.h>
#include <cuda.h>
#define N 2//32
__global__ void foo(int * A, int * B) {
    // Race-freedom probe: A is written, read, then B is written, then A is
    // re-read; the two reads of A must agree.  volatile forces real loads.
    const int t = threadIdx.x;
    A[t] = 1;
    volatile int firstRead = A[t];
    B[t] = 1;
    volatile int secondRead = A[t];
    assert(firstRead == secondRead);
}
int main(){
    const int size = N * sizeof(int);
    // Host buffers, initialized before transfer.
    int *a = (int*)malloc(size);
    int *b = (int*)malloc(size);
    for (int i = 0; i < N; i++) {
        a[i] = 0;
    }
    for (int i = 0; i < N; i++) {
        b[i] = 2;
    }
    // Device buffers.
    int *dev_a, *dev_b;
    cudaMalloc((void**)&dev_a, size);
    cudaMalloc((void**)&dev_b, size);
    cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice);
    // One block of N threads, matching the array length.
    foo<<<1, N>>>(dev_a, dev_b);
    // Blocking copies also synchronize with the kernel.
    cudaMemcpy(a, dev_a, size, cudaMemcpyDeviceToHost);
    cudaMemcpy(b, dev_b, size, cudaMemcpyDeviceToHost);
    free(a); free(b);
    cudaFree(dev_a); cudaFree(dev_b);
    return 0;
}
|
12,180 | #include<stdio.h>
#include<cuda.h>
#include<iostream>
#include<fstream>
#include<chrono>
using namespace std;
__global__ void parallelReduction(int *d_array , int numberOfElements, int elementsPerThread,int numberOfThreadsPerBlock,int numberOfBlocks,int *d_global)
{
    // Each thread accumulates a strided chain of elements:
    // index, index + totalThreads, index + 2*totalThreads, ...
    // The stride is taken from the ARGUMENTS, not from gridDim/blockDim,
    // so the caller can describe a logical grid smaller than the launch.
    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    const int stride = numberOfBlocks * numberOfThreadsPerBlock;
    int partial = 0;
    int visited = 0; // kept for parity with the original loop counter (unused)
    for (int i = index; i < numberOfElements; i += stride)
    {
        partial += d_array[i];
        visited++;
    }
    d_global[index] = partial;
}
void parallelReduceHost(int *h_array ,int *d_array ,int numberOfElements,int elementsPerThread , int numberOfThreadsPerBlock , int numberOfBlocks)
{
    // Stage 1: every launched thread writes one strided partial sum.
    int *d_global;
    cudaMalloc(&d_global, sizeof(int)*numberOfBlocks*numberOfThreadsPerBlock);
    parallelReduction<<<numberOfBlocks,numberOfThreadsPerBlock>>>(d_array,numberOfElements,elementsPerThread,numberOfThreadsPerBlock,numberOfBlocks,d_global);

    // Stage 2: reduce the stage-1 partials with a smaller LOGICAL grid
    // (blocks2 x threads2); only threads whose index is below
    // blocks2*threads2 produce complete chain sums, so only those are read.
    const int blocks2 = 80;
    const int threads2 = 64;
    int *d_global1;
    cudaMalloc(&d_global1,sizeof(int)*numberOfThreadsPerBlock*numberOfBlocks);
    // GENERALIZED: the element count was hard-coded as 2560*64; it is the
    // stage-1 output size, which is numberOfBlocks*numberOfThreadsPerBlock.
    parallelReduction<<<numberOfBlocks,numberOfThreadsPerBlock>>>(d_global,numberOfBlocks*numberOfThreadsPerBlock,elementsPerThread,threads2,blocks2,d_global1);

    // Final host-side sum of the blocks2*threads2 complete partials.
    int *h_global = new int[blocks2*threads2];
    cudaMemcpy(h_global,d_global1,sizeof(int)*blocks2*threads2,cudaMemcpyDeviceToHost);
    int sum=0;
    for(int i=0;i<blocks2*threads2;i++)
    {
        sum = sum + h_global[i];
    }
    cout<<sum;
    // BUG FIX: the original leaked both device buffers and h_global.
    delete[] h_global;
    cudaFree(d_global);
    cudaFree(d_global1);
}
int main()
{
    // Read the element count followed by the elements from file "random".
    ifstream inFile;
    inFile.open("random");
    // BUG FIX: the original never checked that the file opened; on failure it
    // read garbage and allocated an undefined-sized array.
    if (!inFile)
    {
        cout << "cannot open input file 'random'" << endl;
        return 1;
    }
    int x;
    int i = 0;
    inFile >> x;
    int numberOfElements = x;
    int *h_array = new int[numberOfElements];
    while (inFile >> x)
    {
        h_array[i] = x;
        i++;
    }
    int *d_array;
    cudaMalloc(&d_array, sizeof(int)*numberOfElements);
    cudaMemcpy(d_array, h_array, sizeof(int)*numberOfElements, cudaMemcpyHostToDevice);
    // Fixed launch configuration used by the two-stage reduction.
    int elementsPerThread = 0;
    int numberOfThreadsPerBlock = 64;
    int numberOfBlocks = 2560;
    parallelReduceHost(h_array, d_array, numberOfElements, elementsPerThread, numberOfThreadsPerBlock, numberOfBlocks);
    // BUG FIX: release host and device buffers (original leaked both).
    delete[] h_array;
    cudaFree(d_array);
    return 0;
}
|
12,181 | #include <stdio.h>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include <cstdlib>
#include <ctime>
#include <iostream>
#define TILE_WIDTH 4
__global__ void matmulNBlok(float* matA, float* matB, float* matC, int width){
    // Naive square matrix multiply: one thread computes one element of matC.
    int row = blockIdx.y * TILE_WIDTH + threadIdx.y;
    int col = blockIdx.x * TILE_WIDTH + threadIdx.x;
    // Guard the grid tail when width is not a multiple of TILE_WIDTH.
    if (row >= width || col >= width) return;
    float pVal = 0.0f;
    for(int i=0; i<width; ++i){
        pVal += matA[row*width + i] * matB[i*width + col];
    }
    // BUG FIX: the original wrote matC[threadIdx.y*width+threadIdx.x], which
    // is only correct for a single-block launch -- every block overwrote the
    // same top-left tile.  Index with the global row/col instead.
    matC[row*width + col] = pVal;
}
void matriksMulHost(float* M, float* N, float* P, int Width){
    // CPU reference implementation: P = M * N for Width x Width row-major
    // matrices, classic triple loop.
    for(int row = 0; row < Width; row++){
        for(int col = 0; col < Width; col++){
            float acc = 0;
            for(int k = 0; k < Width; k++){
                acc += M[row*Width + k] * N[k*Width + col];
            }
            P[row*Width + col] = acc;
        }
    }
}
void matriksMulNBlok(float* mA, float* mB, float* mC, int width){
    // Device buffers for A, B and the result C.
    float *a_d, *b_d, *c_d;
    // Byte size of one width x width float matrix.
    int size = width * width * sizeof(float);
    // Allocate and copy matrix A.
    cudaError_t err = cudaMalloc((void**)&a_d, size);
    if (err != cudaSuccess)
    {
        // BUG FIX: the original message blamed cudaMemcpy for a cudaMalloc failure.
        fprintf(stderr, "Error invoking cudaMalloc (ERRCODE %d)\n", err);
    }
    cudaMemcpy(a_d, mA, size, cudaMemcpyHostToDevice);
    // Allocate and copy matrix B.
    cudaMalloc((void**)&b_d, size);
    cudaMemcpy(b_d, mB, size, cudaMemcpyHostToDevice);
    // Allocate result matrix C.
    cudaMalloc((void**)&c_d, size);
    // BUG FIX: the grid was computed as TILE_WIDTH/width (inverted); that
    // happens to be 1 when width == TILE_WIDTH but is wrong (0 blocks) for
    // any larger matrix.  Use ceil(width / TILE_WIDTH) in each dimension.
    dim3 dimGrid((width + TILE_WIDTH - 1) / TILE_WIDTH,
                 (width + TILE_WIDTH - 1) / TILE_WIDTH);
    dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
    matmulNBlok<<<dimGrid,dimBlock>>>(a_d, b_d, c_d, width);
    // Blocking copy-back also synchronizes with the kernel.
    cudaMemcpy(mC, c_d, size, cudaMemcpyDeviceToHost);
    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);
}
int main(void){
    const int width = 4;
    size_t size = width * width * sizeof(float);
    // Host matrices: M and N are inputs, P receives the product.
    float *M = (float *) malloc(size);
    float *N = (float *) malloc(size);
    float *P = (float *) malloc(size);
    // Deterministic test pattern: M counts up, N counts down, P zeroed.
    for(int i = 0; i < width*width; i++) {
        M[i] = i;
        N[i] = width*width - i;
        P[i] = 0.f;
        printf("%.3f %.3f %.3f\n", M[i], N[i], P[i]);
    }
    matriksMulNBlok(M, N, P, width);
    // Print the result, one matrix row per output line.
    for(int i = 0; i < width*width; i++) {
        if(i % width == 0){
            printf("\n");
        }
        printf("%.3f ", P[i]);
    }
    free(M);
    free(N);
    free(P);
    return 0;
}
|
12,182 | #include <stdio.h>
#include <errno.h>
#include "cs_header.h"
#include "cs_dbg.h"
#define CUDA_DBG
#define DBG_BUF_SIZE (1024 * 1024)
int *dbg_bufp, dbg_size ;
static char dbg_msg[ 200 ] ;
void dbg_pdata_ll( const char *s, long long *dp, int size ) ;
int
dbg_init( int size )
{
	// Allocate the host-side staging buffer shared by all dbg_* helpers.
	// Returns 1 on success, 0 on allocation failure.
	dbg_bufp = ( int * ) malloc ( size ) ;
	if ( dbg_bufp == NULL )
	{
		fprintf( stderr, "dbg_init: malloc failed(%d): %s \n", errno, strerror(errno) ) ;
		return ( 0 ) ;
	}
	dbg_size = size ;
	return ( 1 ) ;
}
void
dbg_clear_buf( int *cp, int size )
{
// Zero 'size' BYTES (not ints) starting at cp.
memset ( cp, 0, size ) ;
}
void
dbg_set_buf( int *cp, int size, int set )
{
	// Fill cp[0..size-1] with consecutive values starting at 'set'.
	for ( int i = 0 ; i < size ; i++ )
		cp[ i ] = set + i ;
}
// Copy 'size' bytes of long-long data from device pointer dp into the host
// staging buffer and dump it.  The header is printed twice: once with the
// requested size and once after clamping to the staging-buffer capacity.
void
dbg_p_d_data_ll ( const char *s, long long *dp, int size )
{
fprintf( stderr, "%s: %s size %d dp %p\n",
__func__, s, size, dp ) ;
if ( size > dbg_size )
size = dbg_size ;
fprintf( stderr, "%s: %s size %d dp %p\n",
__func__, s, size, dp ) ;
dbg_get_d_data (( char *)dp, ( char *)dbg_bufp, size ) ;
// convert byte count to element count before printing
size /= sizeof ( long long ) ;
dbg_pdata_ll ( s, ( long long *)dbg_bufp, size ) ;
}
// Host-side variant: print 'size' ints already in host memory as a sequence
// of m x n matrices, showing only the first 'doprint' columns of each row.
// 'size' here is an element count; size/(m*n) gives the matrix count.
void
dbg_p_data_i_mn ( const char *s, int *dp, int size, int m, int n, int doprint )
{
int *otp, *tp, i, j ;
fprintf( stderr, "%s: %s size %d m %d n %d doprint %d dp %p\n",
__func__, s, size, m, n, doprint, dp ) ;
size /= ( m * n ) ;
otp = dp ;
while ( size-- )
{
for ( i = 0 ; i < n ; i++ )
{
// tp walks one row; otp advances by the row stride m
tp = otp ;
for ( j = 0 ; j < doprint ; j++ )
printf("%d ", *tp++ ) ;
printf("\n") ;
otp += m ;
}
printf("\n") ;
}
}
// Print host-resident m x n matrices of mixed-format records: every 4th
// printed column ((j+1) % 4 == 0) is reinterpreted as a float, the rest
// are printed as ints.  'size' is an element count; size/(m*n) matrices.
void
dbg_p_data_md_f_mn ( const char *s, int *dp, int size, int m, int n, int doprint )
{
int *otp, *tp, i, j ;
float *fp ;
fprintf( stderr, "%s: %s size %d m %d n %d doprint %d dp %p\n",
__func__, s, size, m, n, doprint, dp ) ;
size /= ( m * n ) ;
otp = dp ;
while ( size-- )
{
for ( i = 0 ; i < n ; i++ )
{
tp = otp ;
for ( j = 0 ; j < doprint ; j++ )
{
// every 4th column holds a float bit-pattern
if (!(( j + 1 ) % 4 ))
{
fp = ( float *)tp++ ;
printf("%f ", *fp ) ;
} else
printf("%d ", *tp++ ) ;
}
printf("\n") ;
otp += m ;
}
printf("\n") ;
}
}
// Copy 'size' bytes from device pointer dp into the staging buffer and print
// them as m x n byte matrices, 'doprint' columns per row.
void
dbg_p_d_data_c_mn ( const char *s, char *dp, int size, int m, int n, int doprint )
{
char *otp, *tp ;
int i, j ;
if ( size > dbg_size )
size = dbg_size ;
fprintf( stderr, "%s: %s size %d m %d n %d dp %p\n",
__func__, s, size, m, n, dp ) ;
dbg_get_d_data (( char *)dp, ( char *)dbg_bufp, size ) ;
size /= ( m * n ) ;
otp = ( char *)dbg_bufp ;
while ( size-- )
{
for ( i = 0 ; i < n ; i++ )
{
tp = otp ;
for ( j = 0 ; j < doprint ; j++ )
printf("%d ", *tp++ ) ;
printf("\n") ;
otp += m ;
}
printf("\n") ;
}
}
// Copy permutation blocks of ints from the device and print them as z layers
// of m x n matrices, one group per 'perm_size' elements.  'size' is an
// element count; it is scaled to bytes (<<2) for the copy and clamped to the
// staging capacity, then converted back.
void
dbg_p_d_data_i_mn_skip ( const char *s, int *dp, int size, int m, int n, int z, int doprint, int perm_size )
{
int ii, *otp, *fp, *tp, i, j, k ;
size <<= 2 ;
if ( size > dbg_size )
size = dbg_size ;
fprintf( stderr, "%s: %s m %d n %d size %d dp %p perm_size %d\n",
__func__, s, m, n, size, dp, perm_size ) ;
// each printed group must fit inside one permutation record
if (( m * n * z ) > perm_size )
{
fprintf( stderr, "%s: err m %d n %d z %d > perm %d \n",
__func__, m, n, z, perm_size ) ;
return ;
}
dbg_get_d_data (( char *)dp, ( char *)dbg_bufp, size ) ;
size >>= 2 ;
size /= perm_size ;
fp = dbg_bufp ;
for ( ii = 0 ; ii < size ; ii++ )
{
printf("perm === %d\n", ii ) ;
otp = fp ;
for ( k = 0 ; k < z ; k++ )
{
printf("perm %d z %d \n", ii, k ) ;
for ( i = 0 ; i < n ; i++ )
{
printf("z %d y %d\n", k, i ) ;
tp = otp ;
for ( j = 0 ; j < doprint ; j++ )
printf("%d ", *tp++ ) ;
printf("\n") ;
otp += m ;
}
printf("\n") ;
}
// jump to the next permutation record, skipping unprinted padding
fp += perm_size ;
}
}
// Float twin of dbg_p_d_data_i_mn_skip: copy permutation records from the
// device and print each as z layers of m x n float matrices.
void
dbg_p_d_data_f_mn_skip ( const char *s, float *dp, int size, int m, int n, int z, int doprint, int perm_size )
{
int ii, i, j, k ;
float *fp, *tp, *otp ;
size <<= 2 ;
if ( size > dbg_size )
size = dbg_size ;
fprintf( stderr, "%s: %s m %d n %d size %d dp %p perm_size %d\n",
__func__, s, m, n, size, dp, perm_size ) ;
if (( m * n * z ) > perm_size )
{
fprintf( stderr, "%s: err m %d n %d z %d > perm %d \n",
__func__, m, n, z, perm_size ) ;
return ;
}
dbg_get_d_data (( char *)dp, ( char *)dbg_bufp, size ) ;
size >>= 2 ;
size /= perm_size ;
fp = ( float *)dbg_bufp ;
for ( ii = 0 ; ii < size ; ii++ )
{
printf("perm === %d\n", ii ) ;
otp = fp ;
for ( k = 0 ; k < z ; k++ )
{
printf("perm %d z %d \n", ii, k ) ;
for ( i = 0 ; i < n ; i++ )
{
printf("z %d y %d\n", k, i ) ;
tp = otp ;
for ( j = 0 ; j < doprint ; j++ )
printf("%.4f ", *tp++ ) ;
printf("\n") ;
otp += m ;
}
printf("\n") ;
}
fp += perm_size ;
}
}
// Copy 'size' floats from the device and print them as m x n matrices,
// 'doprint' columns per row.  size is scaled to bytes for the copy and
// clamped to the staging capacity.
void
dbg_p_d_data_f_mn ( const char *s, float *dp, int size, int m, int n, int doprint )
{
float *otp, *tp ;
int i, j ;
fprintf( stderr, "%s: %s m %d n %d size %d dp %p dbgsize %d\n",
__func__, s, m, n, size, dp, dbg_size ) ;
size <<= 2 ;
if ( size > dbg_size )
size = dbg_size ;
fprintf( stderr, "%s: %s m %d n %d size %d dp %p dbgsize %d\n",
__func__, s, m, n, size, dp, dbg_size ) ;
dbg_get_d_data (( char *)dp, ( char *)dbg_bufp, size ) ;
size >>= 2 ;
size /= ( m * n ) ;
otp = ( float *)dbg_bufp ;
while ( size-- )
{
for ( i = 0 ; i < n ; i++ )
{
tp = otp ;
for ( j = 0 ; j < doprint ; j++ )
printf("%.2f ", *tp++ ) ;
printf("\n") ;
otp += m ;
}
printf("\n") ;
}
}
// Overload with 'printrow': like dbg_p_d_data_f_mn but prints only the first
// 'printrow' rows of each m x n block, then skips the remaining rows.
void
dbg_p_d_data_f_mn ( const char *s, float *dp, int size, int m, int n, int doprint, int printrow )
{
float *otp, *tp ;
int i, j, k ;
fprintf( stderr, "%s: %s m %d n %d size %d dp %p dbgsize %d printrow %d\n",
__func__, s, m, n, size, dp, dbg_size, printrow ) ;
if ( printrow > n )
printrow = n ;
size <<= 2 ;
if ( size > dbg_size )
size = dbg_size ;
fprintf( stderr, "%s: %s m %d n %d size %d dp %p dbgsize %d\n",
__func__, s, m, n, size, dp, dbg_size ) ;
dbg_get_d_data (( char *)dp, ( char *)dbg_bufp, size ) ;
size >>= 2 ;
size /= ( m * n ) ;
fprintf( stderr, "%s: size %d \n", __func__, size ) ;
otp = ( float *)dbg_bufp ;
for ( k = 0 ; k < size ; k++ )
{
printf("\n%s BLK ::: %d \n", __func__, k ) ;
for ( i = 0 ; i < printrow ; i++ )
{
tp = otp ;
for ( j = 0 ; j < doprint ; j++ )
printf("%.2f ", *tp++ ) ;
printf("\n") ;
otp += m ;
}
// advance past the rows that were not printed
otp += ( n - printrow ) * m ;
printf("\n") ;
}
}
// Copy 'size' ints from the device and print them as m x n matrices,
// 'doprint' columns per row (int twin of dbg_p_d_data_f_mn).
void
dbg_p_d_data_i_mn ( const char *s, int *dp, int size, int m, int n, int doprint )
{
int *otp, *tp, i, j ;
fprintf( stderr, "%s: %s m %d n %d size %d dp %p dbgsize %d\n",
__func__, s, m, n, size, dp, dbg_size ) ;
size <<= 2 ;
if ( size > dbg_size )
size = dbg_size ;
fprintf( stderr, "%s: %s m %d n %d size %d dp %p dbgsize %d\n",
__func__, s, m, n, size, dp, dbg_size ) ;
dbg_get_d_data (( char *)dp, ( char *)dbg_bufp, size ) ;
size >>= 2 ;
size /= ( m * n ) ;
otp = dbg_bufp ;
while ( size-- )
{
for ( i = 0 ; i < n ; i++ )
{
tp = otp ;
for ( j = 0 ; j < doprint ; j++ )
printf("%d ", *tp++ ) ;
printf("\n") ;
otp += m ;
}
printf("\n") ;
}
}
// Print int cubes copied from the device, where the cube dimensions vary by
// grid position: dp[] holds three cube shapes and 'idx' selects one based on
// whether the (i,j) block sits on a corner (idx 2), an edge (idx 1) or the
// interior (idx 0) of the blk_in_x x blk_in_y layout.  All records are
// assumed to occupy dp[0] capacity (xyz_size) in memory.
void
dbg_p_data_i_mn_v2 ( const char *s, int *hp, int size, int doprint,
struct cube *dp, int blk_in_x, int blk_in_y )
{
int ddoprint, tt, t, ii, k, xyz_size, idx, m, n, *btp, *otp, *tp, i, j ;
size <<= 2 ;
if ( size > dbg_size )
size = dbg_size ;
fprintf( stderr, "%s: %s size %d dp %p blk x/y %d %d\n",
__func__, s, size, hp, blk_in_x, blk_in_y ) ;
dbg_get_d_data (( char *)hp, ( char *)dbg_bufp, size ) ;
size >>= 2 ;
xyz_size = dp[0].x * dp[0].y * dp[0].z ;
size /= xyz_size ;
btp = dbg_bufp ;
printf("%s: size %d xyz %d \n", __func__, size, xyz_size ) ;
while ( 1 )
{
for ( j = 0 ; j < blk_in_y ; j++ )
{
for ( i = 0 ; i < blk_in_x ; i++ )
{
otp = btp ;
// pick the cube shape for this grid position: corner/edge/interior
if (( i == 0 ) || ( i == ( blk_in_x - 1 )))
{
if (( j == 0 ) || ( j == ( blk_in_y - 1 )))
idx = 2 ;
else
idx = 1 ;
} else
{
if (( j == 0 ) || ( j == ( blk_in_y - 1 )))
idx = 1 ;
else
idx = 0 ;
}
m = dp[idx].x ;
n = dp[idx].y ;
t = dp[idx].z ;
printf("%s: i %d j %d m/n/t %d %d %d \n",
__func__, i, j, m, n, t ) ;
// never print more columns than the row holds
ddoprint = ( doprint > m ) ? m : doprint ;
for ( tt = 0 ; tt < t ; tt++ )
{
for ( ii = 0 ; ii < n ; ii++ )
{
tp = otp ;
for ( k = 0 ; k < ddoprint ; k++ )
printf("%d ", *tp++ ) ;
printf("\n") ;
otp += m ;
}
printf("\n") ;
}
printf("\n") ;
// records are spaced by the FULL dp[0] capacity regardless of shape
btp += xyz_size ;
if ( --size == 0 )
return ;
}
}
}
}
// this is exactly the same as dbg_p_d_data_i_mn_v2 ... other than the printf(). template?
// Float variant of the position-dependent cube printer; see the int version
// above for the corner/edge/interior shape-selection logic.
void
dbg_p_d_data_f_mn_v2 ( const char *s, float *devp, int size, int doprint,
struct cube *dp, int blk_in_x, int blk_in_y )
{
int ddoprint, tt, t, ii, k, xyz_size, idx, m, n, i, j ;
float *btp, *otp, *tp ;
size <<= 2 ;
if ( size > dbg_size )
size = dbg_size ;
fprintf( stderr, "%s: %s size %d dp %p blk x/y %d %d\n",
__func__, s, size, devp, blk_in_x, blk_in_y ) ;
dbg_get_d_data (( char *)devp, ( char *)dbg_bufp, size ) ;
size >>= 2 ;
xyz_size = dp[0].x * dp[0].y * dp[0].z ;
size /= xyz_size ;
btp = ( float *)dbg_bufp ;
printf("%s: size %d xyz %d \n", __func__, size, xyz_size ) ;
while ( 1 )
{
for ( j = 0 ; j < blk_in_y ; j++ )
{
for ( i = 0 ; i < blk_in_x ; i++ )
{
otp = btp ;
// shape index: 2 = corner block, 1 = edge block, 0 = interior block
if (( i == 0 ) || ( i == ( blk_in_x - 1 )))
{
if (( j == 0 ) || ( j == ( blk_in_y - 1 )))
idx = 2 ;
else
idx = 1 ;
} else
{
if (( j == 0 ) || ( j == ( blk_in_y - 1 )))
idx = 1 ;
else
idx = 0 ;
}
m = dp[idx].x ;
n = dp[idx].y ;
t = dp[idx].z ;
printf("%s: x %d y %d m/n/t %d %d %d otp %p\n",
__func__, i, j, m, n, t, otp ) ;
ddoprint = ( doprint > m ) ? m : doprint ;
for ( tt = 0 ; tt < t ; tt++ )
{
for ( ii = 0 ; ii < n ; ii++ )
{
tp = otp ;
for ( k = 0 ; k < ddoprint ; k++ )
printf("%.4f ", *tp++ ) ;
printf("\n") ;
otp += m ;
}
printf("\n") ;
}
printf("\n") ;
btp += xyz_size ;
if ( --size == 0 )
return ;
}
}
}
}
// Device-data int variant of the position-dependent cube printer; identical
// to dbg_p_data_i_mn_v2 above except this one also logs 'otp' per block.
void
dbg_p_d_data_i_mn_v2 ( const char *s, int *devp, int size, int doprint,
struct cube *dp, int blk_in_x, int blk_in_y )
{
int ddoprint, tt, t, ii, k, xyz_size, idx, m, n, *btp, *otp, *tp, i, j ;
size <<= 2 ;
if ( size > dbg_size )
size = dbg_size ;
fprintf( stderr, "%s: %s size %d dp %p blk x/y %d %d\n",
__func__, s, size, devp, blk_in_x, blk_in_y ) ;
dbg_get_d_data (( char *)devp, ( char *)dbg_bufp, size ) ;
size >>= 2 ;
xyz_size = dp[0].x * dp[0].y * dp[0].z ;
size /= xyz_size ;
btp = dbg_bufp ;
printf("%s: size %d xyz %d \n", __func__, size, xyz_size ) ;
while ( 1 )
{
for ( j = 0 ; j < blk_in_y ; j++ )
{
for ( i = 0 ; i < blk_in_x ; i++ )
{
otp = btp ;
// shape index: 2 = corner block, 1 = edge block, 0 = interior block
if (( i == 0 ) || ( i == ( blk_in_x - 1 )))
{
if (( j == 0 ) || ( j == ( blk_in_y - 1 )))
idx = 2 ;
else
idx = 1 ;
} else
{
if (( j == 0 ) || ( j == ( blk_in_y - 1 )))
idx = 1 ;
else
idx = 0 ;
}
m = dp[idx].x ;
n = dp[idx].y ;
t = dp[idx].z ;
printf("%s: i %d j %d m/n/t %d %d %d otp %p\n",
__func__, i, j, m, n, t, otp ) ;
ddoprint = ( doprint > m ) ? m : doprint ;
for ( tt = 0 ; tt < t ; tt++ )
{
for ( ii = 0 ; ii < n ; ii++ )
{
tp = otp ;
for ( k = 0 ; k < ddoprint ; k++ )
printf("%d ", *tp++ ) ;
printf("\n") ;
otp += m ;
}
printf("\n") ;
}
printf("\n") ;
btp += xyz_size ;
if ( --size == 0 )
return ;
}
}
}
}
/* print double content from device ... size is in char */
// NOTE(review): despite the name/comment saying "double", the parameter and
// the size>>2 scaling treat the data as 4-byte floats -- confirm which type
// the callers actually store here.
void
dbg_p_d_data_d ( const char *s, float *dp, int size )
{
if ( size > dbg_size )
size = dbg_size ;
fprintf( stderr, "%s: %s size %d dp %p\n",
__func__, s, size, dp ) ;
dbg_get_d_data (( char *)dp, ( char *)dbg_bufp, size ) ;
// bytes -> 4-byte element count
size >>= 2 ;
dbg_pdata_d ( s, ( float *)dbg_bufp, size ) ;
}
void
dbg_p_d_data_i ( const char *s, int *dp, int size )
{
	// Copy 'size' ints from the device (clamped to the staging capacity,
	// measured in bytes) and dump them with dbg_pdata_i.
	size <<= 2 ;
	if ( size > dbg_size )
		size = dbg_size ;
	fprintf( stderr, "%s: %s size %d dp %p\n",
		__func__, s, size, dp ) ;
	dbg_get_d_data (( char *)dp, ( char *)dbg_bufp, size ) ;
	size >>= 2 ;
	dbg_pdata_i ( s, ( int *)dbg_bufp, size ) ;
}
void
dbg_p_d_data_f ( const char *s, float *dp, int size )
{
	// Copy 'size' floats from the device (clamped to the staging capacity,
	// measured in bytes) and dump them with dbg_pdata_f.
	size <<= 2 ; // element count -> bytes (float and int share a width here)
	if ( size > dbg_size )
		size = dbg_size ;
	fprintf( stderr, "%s: %s size %d dp %p\n",
		__func__, s, size, dp ) ;
	dbg_get_d_data (( char *)dp, ( char *)dbg_bufp, size ) ;
	size >>= 2 ;
	dbg_pdata_f ( s, ( float *)dbg_bufp, size ) ;
}
void
dbg_p_d_data_c ( const char *s, char *dp, int size )
{
	// Byte data: no element scaling -- clamp, copy from device, and dump.
	if ( size > dbg_size )
		size = dbg_size ;
	fprintf( stderr, "%s: %s size %d dp %p\n",
		__func__, s, size, dp ) ;
	dbg_get_d_data ( dp, ( char *)dbg_bufp, size ) ;
	dbg_pdata_c ( s, ( char *)dbg_bufp, size ) ;
}
int
dbg_copy_d_data ( char *dtp, char *dfp, int size )
{
	// Device-to-device copy of 'size' bytes; 1 on success, 0 on failure.
	cudaError_t rc = cudaMemcpy( dtp, dfp, size, cudaMemcpyDeviceToDevice ) ;
	if ( rc != cudaSuccess )
	{
		fprintf( stderr, "dbg_copy_d_data: failed %d\n", rc ) ;
		return ( 0 ) ;
	}
	return ( 1 ) ;
}
int
dbg_put_d_data ( char *dp, char *hp, int size )
{
	// Host-to-device copy of 'size' bytes; 1 on success, 0 on failure.
	cudaError_t rc = cudaMemcpy( dp, hp, size, cudaMemcpyHostToDevice ) ;
	if ( rc != cudaSuccess )
	{
		fprintf( stderr, "dbg_put_d_data: failed %d\n", rc ) ;
		return ( 0 ) ;
	}
	return ( 1 ) ;
}
int
dbg_get_d_data ( char *dp, char *hp, int size )
{
	// Device-to-host copy of 'size' bytes; 1 on success, 0 on failure.
	cudaError_t rc = cudaMemcpy( hp, dp, size, cudaMemcpyDeviceToHost ) ;
	if ( rc != cudaSuccess )
	{
		fprintf(stderr, "dbg_get_d_data: hp %p dp %p size %d failed %d\n", hp, dp, size, rc ) ;
		return ( 0 ) ;
	}
	return ( 1 ) ;
}
void
dbg_pdata_ll( const char *s, long long *dp, int size )
{
	// Dump 'size' long longs: index, address, decimal and hex value.
	fprintf( stderr, "dbg_pdata_ll: %s\n", s ) ;
	for ( int i = 0 ; i < size ; i++ )
		fprintf( stderr, "%d -- %p -- %lld 0x%llx\n", i, dp + i, dp[ i ], dp[ i ] ) ;
}
void
dbg_pdata_d( const char *s, const float *dp, int size )
{
	// Dump 'size' floats: index, address, value -- one per line.
	fprintf( stderr, "%s: %s\n", __func__, s ) ;
	for ( int i = 0 ; i < size ; i++ )
		fprintf( stderr, "%d -- %p == %f\n", i, dp + i, dp[ i ] ) ;
}
void
dbg_pdata_i( const char *s, const int *dp, int size )
{
	// Dump 'size' ints as zero-padded hex plus decimal, one per line.
	fprintf( stderr, "%s: %s\n", __func__, s ) ;
	for ( int i = 0 ; i < size ; i++ )
		fprintf( stderr, "%d -- %8.8x %d\n", i, dp[ i ], dp[ i ] ) ;
}
void
dbg_pdata_f( const char *s, const float *dp, int size )
{
	// Dump 'size' floats, one value per line.
	fprintf( stderr, "%s: %s dp %p size %d\n", __func__, s, dp, size ) ;
	for ( int i = 0 ; i < size ; i++ )
		fprintf( stderr, "%d -- %f\n", i, dp[ i ] ) ;
}
void
dbg_pdata_c( const char *s, const char *dp, int size )
{
	// Dump 'size' bytes as unsigned hex plus decimal, one per line.
	const unsigned char *cp = ( const unsigned char *)dp ;
	fprintf( stderr, "%s: %s\n", __func__, s ) ;
	for ( int i = 0 ; i < size ; i++ )
		fprintf( stderr, "%d -- %2.2x %d\n", i, cp[ i ], cp[ i ] ) ;
}
// Fill dp[0..size-1] with ints read from stdin; once scanf hits EOF the
// remaining slots are zero-filled.  'cnt' tracks how many real values were
// read (only used by the commented-out diagnostic).
void
dbg_mdata( int *dp, int size )
{
int cnt, k, i ;
cnt = 0 ;
k = 0 ;
while ( size-- )
{
// stop reading after the first EOF; keep zero-filling
if ( k != EOF )
k = scanf("%d", &i ) ;
if ( k == EOF )
*dp++ = 0 ;
else
{
cnt++ ;
*dp++ = i ;
}
}
// printf("makedata: data cnt %d\n", cnt ) ;
}
float *
dbg_d_malloc_f ( int size )
{
	// Allocate 'size' floats on the device; returns NULL on failure.
	float *dp = NULL ;
	cudaError_t rc = cudaMalloc( &dp, size * sizeof( float )) ;
	if ( rc != cudaSuccess )
	{
		printf("%s: 2 cudaMalloc failed %d\n", __func__, rc ) ;
		return ( NULL ) ;
	}
	return ( dp ) ;
}
int *
dbg_d_malloc_i ( int size )
{
	// Allocate 'size' ints on the device; returns NULL on failure.
	int *dp = NULL ;
	cudaError_t rc = cudaMalloc( &dp, size * sizeof( int )) ;
	if ( rc != cudaSuccess )
	{
		printf("%s: 2 cudaMalloc failed %d\n", __func__, rc ) ;
		return ( NULL ) ;
	}
	return ( dp ) ;
}
char *
dbg_d_malloc_c ( int size )
{
	// Allocate 'size' bytes on the device; returns NULL on failure.
	char *dp = NULL ;
	cudaError_t rc = cudaMalloc( &dp, size ) ;
	if ( rc != cudaSuccess )
	{
		printf("%s: 2 cudaMalloc failed %d\n", __func__, rc ) ;
		return ( NULL ) ;
	}
	return ( dp ) ;
}
// Print a host-resident vx*hy*tz int cube as hex, 8 values per output line.
// The inner l-loop advances k as well (both bounded by hy), and the k--
// after the loop compensates for the extra k++ when the row ends early.
void
dbg_p_data_i_cube ( const char *s, int *dp, int vx, int hy, int tz )
{
int i, j, k, l ;
fprintf( stderr, "%s: %s x/y/t %d %d %d\n", __func__, s, vx, hy, tz ) ;
for ( i = 0 ; i < tz ; i++ )
{
printf("T = %d \n", i ) ;
for ( j = 0 ; j < vx ; j++ )
{
printf("T %d, X %d \n", i, j ) ;
for ( k = 0 ; k < hy ; k++ )
{
// emit up to 8 values on this line, stopping at the row end
for ( l = 0 ; (( l < 8 ) && ( k < hy )) ; l++ )
{
printf("%8.8x ", *dp++ ) ;
k++ ;
}
if ( k < hy )
k-- ;
printf("\n") ;
// fprintf( stderr, "%d -- %p == %f\n", i, dp, *dp ) ;
}
printf("\n") ;
}
printf("\n") ;
}
}
// size in sizeof( int )
void
dbg_p_d_data_i_cube ( const char *s, int *devp, int vx, int hy, int tz )
{
	// Pull a vx*hy*tz int cube off the device into the staging buffer and
	// print it via the host-side cube printer.
	int nbytes = vx * hy * tz * ( int ) sizeof( int ) ;
	dbg_get_d_data (( char *)devp, ( char *)dbg_bufp, nbytes ) ;
	dbg_p_data_i_cube ( s, dbg_bufp, vx, hy, tz ) ;
}
// Float overload of the host-side cube printer: vx*hy*tz floats, 8 per line.
// Same row/line bookkeeping as the int version (k advances inside the l-loop,
// k-- compensates when the row ends mid-line).
void
dbg_p_data_i_cube ( const char *s, float *dp, int vx, int hy, int tz )
{
int i, j, k, l ;
fprintf( stderr, "%s float: %s x/y/t %d %d %d\n", __func__, s, vx, hy, tz ) ;
for ( i = 0 ; i < tz ; i++ )
{
printf("T = %d \n", i ) ;
for ( j = 0 ; j < vx ; j++ )
{
printf("T %d, X %d \n", i, j ) ;
for ( k = 0 ; k < hy ; k++ )
{
for ( l = 0 ; (( l < 8 ) && ( k < hy )) ; l++ )
{
printf("%8.4f ", *dp++ ) ;
k++ ;
}
if ( k < hy )
k-- ;
printf("\n") ;
// fprintf( stderr, "%d -- %p == %f\n", i, dp, *dp ) ;
}
printf("\n") ;
}
printf("\n") ;
}
}
// size in sizeof( float )
void
dbg_p_d_data_i_cube ( const char *s, float *devp, int vx, int hy, int tz )
{
	// Pull a vx*hy*tz float cube off the device into the staging buffer and
	// print it via the host-side float cube printer.
	int nbytes = vx * hy * tz * ( int ) sizeof( float ) ;
	dbg_get_d_data (( char *)devp, ( char *)dbg_bufp, nbytes ) ;
	dbg_p_data_i_cube ( s, ( float *)dbg_bufp, vx, hy, tz ) ;
}
// test of this func is done in test/cs_random_test
// Verify that the 'size' ints at device pointer dp are pairwise unique.
// Staging layout: first 'size' ints = occurrence counters, next 'size' ints
// = the copied device data.  Returns 1 when every value occurs exactly once.
// NOTE(review): the counting loop indexes ip[*dip] without validating the
// value range -- device data outside [0, size) writes out of bounds; compare
// dbg_perm_ck below, which does validate.  Confirm callers guarantee range.
int
dbg_ck_unique ( char *s, int *dp, int size )
{
int err, k, i, *oip, *ip, *dip ;
i = ( size << 2 ) ;
i <<= 1 ; // split the dbg_size
if ( i > dbg_size )
{
fprintf( stderr, "%s: err size %d > dbg_size %d \n", s, i, dbg_size ) ;
return ( 0 ) ;
}
oip = ip = dbg_bufp ;
dip = ip + size ;
dbg_get_d_data (( char * )dp, ( char * ) dip, size * sizeof ( int )) ;
// zero the counter half
i = size ;
while ( i-- )
*ip++ = 0 ;
err = 0 ;
ip = oip ;
// count occurrences of each value
i = size ;
while ( i-- )
ip[ *dip++ ]++ ;
k = 0 ;
for ( i = 0 ; i < size ; i++ )
{
if ( *ip != 1 )
{
err++ ;
fprintf( stderr, "%s : %s : idx %d val %d \n", __func__, s, i, *ip ) ;
}
k += *ip++ ;
}
// sanity: total count must equal the element count
if ( k != size )
fprintf( stderr, "%s : %s k %d size %d \n", __func__, s, k, size ) ;
if ( err )
fprintf( stderr, "%s : %s err %d\n", __func__, s, err ) ;
else
fprintf( stderr, "%s : %s good\n", __func__, s ) ;
return ( !err ) ;
}
void
dbg_pr_h_first_last ( char *s, char *h_p, int len, int pr_size )
{
	// Print up to pr_size bytes from the start, middle and end of the
	// host buffer h_p (pr_size is clamped to half the length).
	int half = len >> 1 ;
	if ( pr_size > half )
		pr_size = half ;
	sprintf( dbg_msg, "%s : FIRST %d OF %d", s, pr_size, len ) ;
	dbg_pdata_c ( dbg_msg, h_p, pr_size ) ;
	sprintf( dbg_msg, "%s : CENTER %d AFTER %d", s, pr_size, half ) ;
	dbg_pdata_c ( dbg_msg, h_p + half, pr_size ) ;
	sprintf( dbg_msg, "%s : LAST %d AFTER %d", s, pr_size, len - pr_size ) ;
	dbg_pdata_c ( dbg_msg, h_p + ( len - pr_size ), pr_size ) ;
}
void
dbg_pr_h_first_last ( char *s, int *h_p, int len, int pr_size )
{
	// Int overload: print up to pr_size ints from the start, middle and
	// end of the host buffer (pr_size clamped to half the length).
	int half = len >> 1 ;
	if ( pr_size > half )
		pr_size = half ;
	sprintf( dbg_msg, "%s : FIRST %d OF %d", s, pr_size, len ) ;
	dbg_pdata_i ( dbg_msg, h_p, pr_size ) ;
	sprintf( dbg_msg, "%s : CENTER %d AFTER %d", s, pr_size, half ) ;
	dbg_pdata_i ( dbg_msg, h_p + half, pr_size ) ;
	sprintf( dbg_msg, "%s : LAST %d AFTER %d", s, pr_size, len - pr_size ) ;
	dbg_pdata_i ( dbg_msg, h_p + ( len - pr_size ), pr_size ) ;
}
void
dbg_pr_first_last ( char *s, int *d_p, int len, int pr_size )
{
	// Device-int variant: dump up to pr_size ints from the start, middle
	// and end of the DEVICE buffer d_p (pr_size clamped to half the length).
	int half = len >> 1 ;
	if ( pr_size > half )
		pr_size = half ;
	sprintf( dbg_msg, "%s : FIRST %d OF %d", s, pr_size, len ) ;
	dbg_p_d_data_i ( dbg_msg, d_p, pr_size ) ;
	sprintf( dbg_msg, "%s : CENTER %d AFTER %d", s, pr_size, half ) ;
	dbg_p_d_data_i ( dbg_msg, d_p + half, pr_size ) ;
	sprintf( dbg_msg, "%s : LAST %d AFTER %d", s, pr_size, len - pr_size ) ;
	dbg_p_d_data_i ( dbg_msg, d_p + ( len - pr_size ), pr_size ) ;
}
void
dbg_pr_first_last ( char *s, float *d_p, int len, int pr_size )
{
	// Device-float variant: dump up to pr_size floats from the start,
	// middle and end of the DEVICE buffer d_p.
	int half = len >> 1 ;
	if ( pr_size > half )
		pr_size = half ;
	sprintf( dbg_msg, "%s : FIRST %d OF %d", s, pr_size, len ) ;
	dbg_p_d_data_f ( dbg_msg, d_p, pr_size ) ;
	sprintf( dbg_msg, "%s : CENTER %d AFTER %d", s, pr_size, half ) ;
	dbg_p_d_data_f ( dbg_msg, d_p + half, pr_size ) ;
	sprintf( dbg_msg, "%s : LAST %d AFTER %d", s, pr_size, len - pr_size ) ;
	dbg_p_d_data_f ( dbg_msg, d_p + ( len - pr_size ), pr_size ) ;
}
// size in number of int
// Verify that the 'size' ints at device pointer d_bp form a permutation of
// 0..size-1.  Staging layout: first 'size' ints = copied data, next 'size'
// ints = seen-flags.  Returns 1 on success, 0 on any violation.
int
dbg_perm_ck ( int callid, int callid2, int zero_in_zero, int *d_bp, int size )
{
	int bsize, *oip, i, j, *fip ;

	fprintf( stderr, "%s: call %d id %d zero %d bp %p size %d\n",
		__func__, callid, callid2, zero_in_zero, d_bp, size ) ;
	bsize = size << 2 ;
	// BUG FIX: both halves of the staging buffer are used (data + flags),
	// so 2*bsize bytes must fit, not just bsize as the original checked.
	if (( bsize << 1 ) > dbg_size )
	{
		fprintf( stderr, "dbg_perm_ck: err size %d too big dbg_size %d \n", size, dbg_size ) ;
		return ( 0 ) ;
	}
	dbg_get_d_data (( char *)d_bp, ( char *)dbg_bufp, bsize ) ;
	oip = dbg_bufp + size ;	// seen-flag half
	memset ( oip, 0, bsize ) ;
	fip = dbg_bufp ;
	if ( zero_in_zero )
	{
		// caller asserts entry 0 must hold value 0
		if ( *fip )
		{
			fprintf( stderr, "%s: err zero : %d \n",
				__func__, *fip ) ;
			dbg_pdata_i ( "dbg_perm_ck", dbg_bufp, size ) ;
			return ( 0 ) ;
		}
		fip++ ;
		oip[0] = 1 ;
		i = 1 ;
	} else
		i = 0 ;
	for ( ; i < size ; i++ )
	{
		j = *fip++ ;
		// BUG FIX: the original accepted j == size ( `j > size` ), which
		// both is an invalid permutation value and indexes one past the
		// end of the flag array.  Valid values are 0..size-1.
		if (( j < 0 ) || ( j >= size ))
		{
			fprintf( stderr, "%s: err too big : idx %d entry %d double \n",
				__func__, i, j ) ;
			dbg_pdata_i ( "dbg_perm_ck", dbg_bufp, size ) ;
			return ( 0 ) ;
		}
		if ( oip[j] )
		{
			fprintf( stderr, "%s: err idx %d entry %d double \n",
				__func__, i, j ) ;
			dbg_pdata_i ( "dbg_perm_ck", dbg_bufp, size ) ;
			return ( 0 ) ;
		}
		oip[j]++ ;
	}
	// every value must have been seen exactly once
	for ( i = 0 ; i < size ; i++ )
	{
		if ( !oip[i] )
		{
			fprintf( stderr, "%s: err missing idx %d\n",
				__func__, i ) ;
			dbg_pdata_i ( "dbg_perm_ck", dbg_bufp, size ) ;
			return ( 0 ) ;
		}
	}
	return ( 1 ) ;
}
// record_size does not include the NUM_OF_HVT_INDEX
// Copy num_rec records from device memory and print each: the 3 leading int
// indices (t, v, h -- the literal 3 is NUM_OF_HVT_INDEX) followed by up to
// do_print of the record's float payload values; optionally re-print
// do_print2 values starting 'do_skip' into the payload.
void
cs_p_d_tvh ( const char *s, int *dmem, int record_size, int num_rec, int do_print, int do_skip )
{
int do_print2, i, j, *ip, *oip ;
float *fp ;
printf("%s ::: dmem %p record_size %d num_rec %d doprint %d do_skip %d\n",
s, dmem, record_size, num_rec, do_print, do_skip ) ;
dbg_get_d_data (( char *)dmem, ( char *)dbg_bufp,
sizeof( int ) * ( record_size + 3 ) * num_rec ) ;
if ( do_print > record_size )
do_print = record_size ;
if ( do_skip > record_size )
do_skip = 0 ;
if ( do_skip )
{
// clamp the skipped print length to what remains after do_skip
do_print2 = (( record_size - do_skip ) > do_print ) ? do_print : ( record_size - do_skip ) ;
}
oip = dbg_bufp ;
for ( i = 0 ; i < num_rec ; i++ )
{
ip = oip ;
printf("IDX record %d t %d v %d h %d ", i, ip[0], ip[1], ip[2] ) ;
fp = ( float *)oip + 3 ; // is NUM_OF_HVT_INDEX
printf(" --- %f \n", *fp++ ) ;
for ( j = 1 ; j < do_print ; j++ )
{
printf("%.2f ", *fp++ ) ;
// line break every 8 values
if (!(( j + 1 ) % 8 ))
printf("\n") ;
}
printf("\n") ;
if ( do_skip )
{
printf("after skip --- %d print %d\n ", do_skip, do_print2 ) ;
fp = ( float *)oip + 3 + do_skip ; // is NUM_OF_HVT_INDEX
for ( j = 0 ; j < do_print2 ; j++ )
{
printf("%.2f ", *fp++ ) ;
if (!(( j + 1 ) % 8 ))
printf("\n ") ;
}
printf("\n") ;
}
oip += record_size + 3 ; // is NUM_OF_HVT_INDEX
}
}
// start is to skip a number of rows
void
cs_p_d_tvh ( const char *s, int *dmem, int record_size, int num_rec, int do_print, int do_skip,
int start )
{
	// Overload that skips the first 'start' records (each record is
	// record_size payload ints plus 3 index ints) and delegates.
	printf("%s ::: dmem %p record_size %d num_rec %d doprint %d start %d skip %d\n",
		s, dmem, record_size, num_rec, do_print, start, do_skip ) ;
	dmem += start * ( record_size + 3 ) ;
	cs_p_d_tvh ( s, dmem, record_size, num_rec, do_print, do_skip ) ;
}
|
12,183 | #include "includes.h"
__global__ void KerComputeAceMod(unsigned n,const float3 *ace,float *acemod)
{
    // One thread per particle: store the squared magnitude of its
    // acceleration vector (no sqrt -- callers get |a|^2).
    const unsigned p = blockIdx.x*blockDim.x + threadIdx.x;
    if (p >= n) return; // grid tail guard
    const float3 a = ace[p];
    acemod[p] = a.x*a.x + a.y*a.y + a.z*a.z;
}
12,184 | #include "includes.h"
// Brute-force descriptor matching: each block handles M7W query points from
// d_pts1 and scans all NPTS candidates in d_pts2 in M7H-row tiles, keeping
// the best dot-product score and its index per query point.
// Shared-memory tiling is heavily order-dependent; code left byte-identical.
// NOTE(review): M7W/M7H/M7R/NPTS/NDIM come from includes.h -- layout claims
// below follow the in-code comments; confirm against those definitions.
__global__ void Match8blocked2(float *d_pts1, float *d_pts2, float *d_score, int *d_index)
{
#define NRX 2
#define NUM (NRX*M7R) // 32*8 threads
__shared__ float4 buffer1[M7W*NDIM/4]; // 32*32
__shared__ float4 buffer2[M7H*NUM]; // 32*8
int tx = threadIdx.x;
int ty = threadIdx.y;
int bp1 = M7W*blockIdx.x;
// Stage the block's query descriptors; the (d + j) % (NDIM/4) rotation
// skews the layout (appears intended to spread shared-memory accesses).
for (int d=tx;d<NDIM/4;d+=M7W)
for (int j=ty;j<M7W;j+=M7H/M7R)
buffer1[j*NDIM/4 + (d + j)%(NDIM/4)] = ((float4*)d_pts1)[(bp1 + j)*(NDIM/4) + d];
float max_score[NRX];
int index[NRX];
for (int i=0;i<NRX;i++) {
max_score[i] = 0.0f;
index[i] = -1;
}
int idx = ty*M7W + tx;
int ix = idx%(M7W/NRX);
int iy = idx/(M7W/NRX);
// Sweep the candidate set in tiles of M7H points.
for (int bp2=0;bp2<NPTS;bp2+=M7H) {
float score[M7R][NRX];
for (int dy=0;dy<M7R;dy++)
for (int i=0;i<NRX;i++)
score[dy][i] = 0.0f;
// Accumulate dot products over the descriptor dimension in chunks of NUM.
for (int dp=0;dp<NDIM/4;dp+=NUM) {
int d = (idx%NUM);
int j = (idx/NUM);
buffer2[j*NUM + d] = ((float4*)d_pts2)[(bp2 + j)*(NDIM/4) + dp + d];
__syncthreads();
// only the first M7W*M7H/M7R/NRX threads compute; barrier below is
// still reached by all threads of the block
if (idx<M7W*M7H/M7R/NRX) {
for (int d=0;d<NUM;d++) {
float4 v1[NRX];
for (int i=0;i<NRX;i++)
v1[i] = buffer1[((M7W/NRX)*i + ix)*NDIM/4 + (dp + d + (M7W/NRX)*i + ix)%(NDIM/4)];
for (int dy=0;dy<M7R;dy++) {
float4 v2 = buffer2[(M7R*iy + dy)*NUM + d];
for (int i=0;i<NRX;i++) {
score[dy][i] += v1[i].x*v2.x;
score[dy][i] += v1[i].y*v2.y;
score[dy][i] += v1[i].z*v2.z;
score[dy][i] += v1[i].w*v2.w;
}
}
}
}
__syncthreads();
}
// Fold this tile's scores into the per-thread running best.
for (int dy=0;dy<M7R;dy++) {
for (int i=0;i<NRX;i++) {
if (score[dy][i]>max_score[i]) {
max_score[i] = score[dy][i];
index[i] = bp2 + M7R*iy + dy;
}
}
}
__syncthreads();
}
// Reuse buffer1's shared memory for the cross-thread reduction.
float *scores = (float*)buffer1;
int *indices = (int*)&scores[M7W*M7H/M7R];
if (idx<M7W*M7H/M7R/NRX) {
for (int i=0;i<NRX;i++) {
scores[iy*M7W + (M7W/NRX)*i + ix] = max_score[i];
indices[iy*M7W + (M7W/NRX)*i + ix] = index[i];
}
}
__syncthreads();
// Row 0 reduces over the M7H/M7R partial winners per query point.
if (ty==0) {
float max_score = scores[tx];
int index = indices[tx];
for (int y=0;y<M7H/M7R;y++)
if (scores[y*M7W + tx]>max_score) {
max_score = scores[y*M7W + tx];
index = indices[y*M7W + tx];
}
d_score[bp1 + tx] = max_score;
d_index[bp1 + tx] = index;
}
}
12,185 | #include <stdio.h> // For use of the printf function
#include <sys/time.h> // For use of gettimeofday function
#define A 77.0f // Constant A to be used in SAXPY computations
#define ARRAY_SIZE 10000 // Size of arrays to be used in SAXPY computations
#define TPB 256 // Threads PER block
// SAXPY means "Single-Precision A*X PLUS Y" where A is a constant and X, Y are
// arrays
/**
 * Implementation of SAXPY to be performed on device: y[i] = a*x[i] + y[i],
 * one element per thread.
 *
 * @param x Array X of SAXPY
 * @param y Array Y of SAXPY (updated in place)
 * @param a Constant A of SAXPY
 * @param n Number of elements in each array X and Y
 */
__global__ void saxpyKernel(float *x, float *y, float a, int n) {
// Unique ID of the current thread to determine what work to compute
int threadId = blockIdx.x * blockDim.x + threadIdx.x;
// BUG FIX: the original guard was `threadId > n`, which let thread n fall
// through and read/write x[n] and y[n] one element past the end of the
// arrays. `>=` excludes it.
if (threadId >= n) return;
// Compute SAXPY on one element of x and y based on the id of this thread
float saxpyResult = (a * x[threadId]) + y[threadId];
// Save the result of single element SAXPY
y[threadId] = saxpyResult;
}
/**
 * Host reference implementation of SAXPY: overwrites each y[i] with
 * a*x[i] + y[i].
 *
 * @param x Array X of SAXPY
 * @param y Array Y of SAXPY (updated in place)
 * @param a Constant A of SAXPY
 * @param n Number of elements in each array X and Y
 */
__host__ void saxpyHost(float *x, float *y, float a, int n) {
// Sequential element-wise update.
for (int i = 0; i < n; ++i) {
float scaled = a * x[i];
y[i] = scaled + y[i];
}
}
/**
 * Compare the SAXPY results of the device and host implementations and
 * report the verdict on stdout. Elements are considered equal when they
 * differ by no more than 0.1 (rounding-error tolerance).
 *
 * @param deviceOut Outcome of SAXPY from the device implementation
 * @param hostOut Outcome of SAXPY from the host implementation
 * @param n The size of deviceOut and hostOut
 */
void compareSaxpyResults(float *deviceOut, float *hostOut, int n) {
printf("Comparing the output for each implementation... ");
bool allMatch = true;
for (int i = 0; i < n; ++i) {
float delta = deviceOut[i] - hostOut[i];
// Outside tolerance -> outcomes are too different; stop early.
if (delta > .1 || delta < -.1) {
allMatch = false;
break;
}
}
if (allMatch) {
printf("Correct!\n");
} else {
printf("INCORRECT!!!\n");
}
}
/**
 * Fill the given array with n random floats.
 *
 * @param array Array to populate with floats.
 * @param n Number of floats to populate the array with.
 */
void populateArrayWithFloats(float *array, int n) {
for (int index = 0; index < n; index++) {
// rand()/RAND_MAX is in [0, 1], so this produces floats in [0, 10000].
// NOTE(review): the original comment claimed a range of -10,000..10,000,
// but this expression can never be negative — confirm which was intended.
array[index] = 10000.0 * ((float) rand() / (float) RAND_MAX);
}
}
/**
 * Return the current wall-clock time in seconds with microsecond
 * resolution (double precision), suitable for interval timing.
 */
double cpuSecond() {
struct timeval now;
gettimeofday(&now, NULL);
double seconds = (double)now.tv_sec;
return seconds + (double)now.tv_usec * 1.e-6;
}
// Entry point: run SAXPY on both CPU and GPU, time each, and compare the
// results. The device result is copied back into hostX (overwriting the
// original X inputs), while hostY holds the CPU result.
// NOTE(review): no CUDA API call here is error-checked; a failed
// cudaMalloc/cudaMemcpy/launch would go unnoticed until the comparison.
int main() {
// Allocate memory on the host
float *hostX = (float*) malloc(ARRAY_SIZE * sizeof(float));
float *hostY = (float*) malloc(ARRAY_SIZE * sizeof(float));
// Allocate memory on the device
float *devX, *devY;
cudaMalloc(&devX, ARRAY_SIZE * sizeof(float));
cudaMalloc(&devY, ARRAY_SIZE * sizeof(float));
// Fill hostX and hostY arrays with random floats
populateArrayWithFloats(hostX, ARRAY_SIZE);
populateArrayWithFloats(hostY, ARRAY_SIZE);
// Copy hostX and hostY onto the GPU
cudaMemcpy(devX, hostX, ARRAY_SIZE * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(devY, hostY, ARRAY_SIZE * sizeof(float), cudaMemcpyHostToDevice);
printf("Computing SAXPY on the CPU... ");
double startTime = cpuSecond();
saxpyHost(hostX, hostY, A, ARRAY_SIZE);
printf("%f seconds\n", cpuSecond() - startTime);
printf("Computing SAXPY on the GPU... ");
startTime = cpuSecond();
// Round-up to the nearest multiple of TPB that can hold at least ARRAY_SIZE
// threads
saxpyKernel <<<(ARRAY_SIZE + TPB - 1) / TPB, TPB>>> (
devX, devY, A, ARRAY_SIZE);
// Wait until all the threads on the GPU have finished before continuing!!!
cudaDeviceSynchronize();
printf("%f seconds\n", cpuSecond() - startTime);
// Copy the result of SAXPY on the device back to the host into hostX
cudaMemcpy(hostX, devY, ARRAY_SIZE * sizeof(float), cudaMemcpyDeviceToHost);
// Compare the results of SAXPY on device and host
compareSaxpyResults(hostX, hostY, ARRAY_SIZE);
// Free the allocated memory!!!
free(hostX);
free(hostY);
cudaFree(devX);
cudaFree(devY);
return 0;
}
|
12,186 | //pass
//--gridDim=64 --blockDim=256
// Forward declaration plus an explicit instantiation for int payloads.
template<class TData> __global__ void testKernel(TData *d_odata, TData *d_idata, int numElements);
template __global__ void testKernel<int>(int *d_odata, int *d_idata, int numElements);
// Grid-stride element copy: each thread copies every numThreads-th element
// of d_idata into d_odata, so any grid/block configuration covers the range.
template<class TData> __global__ void testKernel(
TData *d_odata,
TData *d_idata,
int numElements
)
{
const int firstIndex = blockDim.x * blockIdx.x + threadIdx.x;
const int step = blockDim.x * gridDim.x;
for (int i = firstIndex; i < numElements; i += step)
d_odata[i] = d_idata[i];
}
|
12,187 | #include <cstdio>
#include <cstdlib>
#include <fstream>
#include <iterator>
#include <iostream>
using namespace std;
const int INF = 1000000000;
const int V = 20000;
int n;
unsigned int m;
static int Dist[V * V];
int* result;
// Phase 1 of blocked Floyd-Warshall: relax the round-r pivot block entirely
// in shared memory. Launched with a single BxB thread block and B*B ints of
// dynamic shared memory; cells outside the n x n matrix are padded with INF.
__global__ void phase_one(int r, int n, int B, int* Dist)
{
extern __shared__ int shared_Dist[];
int x = threadIdx.x;
int y = threadIdx.y;
int pivot_i = r*B + y;
int pivot_j = r*B + x;
// copy to shared memory (INF padding for out-of-range cells)
shared_Dist[y*B + x] = (pivot_i<n && pivot_j<n) ? Dist[pivot_i*n + pivot_j] : INF;
__syncthreads();
// Floyd-Warshall relaxation over the B intermediate vertices of this block;
// the barrier each iteration keeps reads of row k / column k consistent.
#pragma unroll
for(int k=0; k<B; ++k){
if(shared_Dist[y * B + x] > shared_Dist[y * B + k] + shared_Dist[k * B + x]){
shared_Dist[y * B + x] = shared_Dist[y * B + k] + shared_Dist[k * B + x];
}
__syncthreads();
}
// update global memory (real cells only)
if(pivot_i<n && pivot_j<n){
Dist[pivot_i * n + pivot_j] = shared_Dist[y * B + x];
}
}
// Phase 2 of blocked Floyd-Warshall: relax the blocks sharing a row
// (blockIdx.y == 0) or a column (blockIdx.y == 1) with round r's pivot
// block. The pivot block itself is skipped (handled by phase_one).
// Requires 2*B*B ints of dynamic shared memory.
//
// BUG FIX: the original returned early for out-of-range (block_i, block_j)
// threads BEFORE the __syncthreads() barriers below, so threads of the same
// block diverged on whether they reached the barriers — undefined behavior
// when a tile straddles the matrix edge. Out-of-range threads now stay in
// lockstep (their cells are INF-padded, so they can never win a relaxation)
// and only the final global write is guarded.
__global__ void phase_two(int r, int n, int B, int* Dist)
{
// Skipping the pivot block is uniform across the whole thread block, so
// this return is safe before any barrier.
if(blockIdx.x == r) return;
extern __shared__ int shared_mem[];
int* shared_pivot = shared_mem;
int* shared_Dist = shared_mem + B * B;
int x = threadIdx.x;
int y = threadIdx.y;
int pivot_i = r*B + y;
int pivot_j = r*B + x;
// Stage the pivot block; INF padding for cells outside the matrix.
shared_pivot[y*B + x] = (pivot_i < n && pivot_j < n)? Dist[pivot_i*n + pivot_j] : INF;
__syncthreads();
int block_i, block_j;
if(blockIdx.y == 0){
// Target block shares the pivot's row.
block_i = pivot_i;
block_j = blockIdx.x*B + x;
}else{
// Target block shares the pivot's column.
block_i = blockIdx.x*B + y;
block_j = pivot_j;
}
bool inside = (block_i < n) && (block_j < n);
shared_Dist[y*B + x] = inside ? Dist[block_i*n + block_j] : INF;
__syncthreads();
// blockIdx.y is uniform per block, so all threads of a block take the same
// branch and every thread reaches every __syncthreads().
if(blockIdx.y == 0){
// Row blocks: paths go pivot column k -> target column x.
#pragma unroll
for(int k=0; k<B; ++k){
if(shared_Dist[y*B + x] > shared_pivot[y*B + k] + shared_Dist[k*B + x]){
shared_Dist[y*B + x] = shared_pivot[y*B + k] + shared_Dist[k*B + x];
}
__syncthreads();
}
}else{
// Column blocks: paths go target row y -> pivot row k.
#pragma unroll
for(int k=0; k<B; ++k){
if(shared_Dist[y*B + x] > shared_Dist[y*B + k] + shared_pivot[k*B + x]){
shared_Dist[y*B + x] = shared_Dist[y*B + k] + shared_pivot[k*B + x];
}
__syncthreads();
}
}
// Write back real cells only.
if(inside){
Dist[block_i*n + block_j] = shared_Dist[y*B + x];
}
}
// Phase 3 of blocked Floyd-Warshall: relax every block that shares neither a
// row nor a column with the round-r pivot, using the already-updated pivot
// row and pivot column staged in shared memory (2*B*B ints).
__global__ void phase_three(int r, int n, int B, int* Dist)
{
// Pivot block and pivot row/column blocks were handled by phases 1 and 2;
// this condition is uniform per thread block, so returning here is safe.
if(blockIdx.x == r || blockIdx.y == r) return;
extern __shared__ int shared_mem[];
int* shared_row = shared_mem;
int* shared_col = shared_mem + B*B;
int x = threadIdx.x;
int y = threadIdx.y;
int block_i = blockIdx.y*B + y;
int block_j = blockIdx.x*B + x;
int row_i = r*B + y;
int row_j = block_j;
int col_i = block_i;
int col_j = r*B + x;
// Stage the pivot-row and pivot-column tiles (INF padding out of range).
shared_row[y*B + x] = (row_i<n && row_j<n)? Dist[row_i*n + row_j] : INF;
shared_col[y*B + x] = (col_i<n && col_j<n)? Dist[col_i*n + col_j] : INF;
__syncthreads();
// Safe divergent return: this is after the last barrier in the kernel, and
// the shared tiles this thread was responsible for are already written.
if(block_i >= n || block_j >= n) return;
// Relax through the B pivot vertices using a register accumulator.
int target = Dist[block_i*n + block_j];
#pragma unroll
for(int k=0; k<B; ++k){
if(target > shared_col[y*B + k] + shared_row[k*B + x])
target = shared_col[y*B + k] + shared_row[k*B + x];
}
// update global
Dist[block_i*n + block_j] = target;
}
// Read the binary graph file into the global Dist matrix: a header of two
// ints (n = #vertices, m = #edges) followed by m (src, dst, weight) int
// triples. The matrix is first initialized to 0 on the diagonal and INF
// elsewhere. Exits with status 1 on any I/O failure.
void input(char *inFileName)
{
FILE* file = fopen(inFileName, "rb");
// FIX: the original never checked fopen/fread, so a missing or truncated
// file silently produced garbage n/m and crashed later.
if (file == NULL) {
fprintf(stderr, "cannot open input file %s\n", inFileName);
exit(1);
}
if (fread(&n, sizeof(int), 1, file) != 1 || fread(&m, sizeof(int), 1, file) != 1) {
fprintf(stderr, "failed to read header from %s\n", inFileName);
exit(1);
}
for (int i = 0; i < n; ++ i) {
for (int j = 0; j < n; ++ j) {
if (i == j) {
Dist[i*n + j] = 0;
} else {
Dist[i*n + j] = INF;
}
}
}
int pair[3];
// m is unsigned; use an unsigned counter to avoid a signed/unsigned
// comparison (the original used int i < m).
for (unsigned int i = 0; i < m; ++ i) {
if (fread(pair, sizeof(int), 3, file) != 3) {
fprintf(stderr, "truncated edge list in %s\n", inFileName);
exit(1);
}
Dist[pair[0]*n + pair[1]] = pair[2];
}
fclose(file);
}
// Write the n x n result matrix (global `result`, filled by block_FW) to the
// given file as raw ints, row by row.
// NOTE(review): no error checking on the stream; a failed open/write is
// silent.
void output(char *outFileName)
{
ofstream ofile;
ofile.open(outFileName, ios::binary | ios::out);
for(int i=0; i<n ;i++){
for(int j=0 ; j<n ; j++){
// One int per cell; row-major order matches the in-memory layout.
ofile.write((char*) &result[i*n+j], sizeof(int));
}
}
ofile.close();
}
// Integer ceiling division for non-negative a and positive b: the smallest
// integer q such that q*b >= a.
int ceil(int a, int b)
{
int biased = a + b - 1; // bias so truncating division rounds upward
return biased / b;
}
// Run blocked Floyd-Warshall on the GPU with blocking factor B: copy the
// global Dist matrix to the device, run num_blocks rounds of the three
// phases, and copy the result into the global `result` buffer.
// NOTE(review): `shared_mem_size` is misleadingly named — it is the byte
// size of the FULL n x n matrix (used for the device allocation and
// copies); the per-kernel dynamic shared memory is passed separately as
// sizeof(int)*B*B (or 2*B*B). No CUDA call here is error-checked, and B
// must be <= 32 so a BxB block stays within the 1024-thread limit.
void block_FW(int B)
{
int num_blocks = ceil(n, B);
int* device_Dist;
int shared_mem_size = sizeof(int)*n*n;
// setup host to device
cudaSetDevice(0);
cudaMalloc(&device_Dist, shared_mem_size);
cudaMemcpy(device_Dist, Dist, shared_mem_size, cudaMemcpyHostToDevice);
result = (int*) malloc(shared_mem_size);
dim3 block(B, B);
dim3 grid_phase_one(1, 1), grid_phase_two(num_blocks, 2), grid_phase_three(num_blocks, num_blocks);
// One round per pivot block: pivot, then pivot row/column, then the rest.
for(int i = 0; i < num_blocks; i++){
phase_one<<< grid_phase_one, block, sizeof(int) * B*B >>>(i, n, B, device_Dist);
phase_two<<< grid_phase_two, block, sizeof(int) * 2*B*B >>>(i, n, B, device_Dist);
phase_three<<< grid_phase_three, block, sizeof(int) * 2*B*B >>>(i, n, B, device_Dist);
}
//device to host (cudaMemcpy synchronizes with the queued kernels)
cudaMemcpy(result, device_Dist, shared_mem_size, cudaMemcpyDeviceToHost);
cudaFree(device_Dist);
}
// Usage: <input file> <output file> [blocking factor].
// NOTE(review): argc is never validated — missing arguments dereference
// argv out of range.
int main(int argc, char* argv[])
{
input(argv[1]);
// set block factor for experiment: argv[3] if given, else min(n, 32)
int B = argc>3? atoi(argv[3]) : n>32? 32 : n;
block_FW(B);
output(argv[2]);
return 0;
}
|
12,188 | #include <stdio.h>
// Each launched thread prints a greeting with its thread index within the
// block (device-side printf output order is unspecified).
__global__ void helloKernel()
{
printf("Hello World! My threadId is %d\n", threadIdx.x);
}
// Launch one block of 256 threads, then synchronize so the device printf
// buffer is flushed before the process exits.
int main()
{
helloKernel<<<1, 256>>>();
cudaDeviceSynchronize();
return 0;
} |
12,189 | #include <stdio.h>
#define W 32
#define H 32
#define D 32
#define TX 8
#define TY 8
#define TZ 8
// Ceiling division: number of length-b chunks needed to cover a elements.
int divUp(int a, int b){ int biased = a + b - 1; return biased / b; }
// Euclidean distance from grid point (column c, row r, stack s) to pos.
__device__
float distance(int c, int r, int s, float3 pos){
return sqrtf((c - pos.x)*(c - pos.x) + (r - pos.y)*(r - pos.y) +
(s - pos.z)*(s - pos.z));
}
// For each (c, r, s) cell of a w x h x d volume, store its distance to pos.
// The flattened index i is computed before the bounds check, but it is only
// used after the guard, so no out-of-range access occurs.
// NOTE(review): the printf logs every cell (32768 lines with the launch in
// main) — debugging output only; remove for any real run.
__global__
void distanceKernel(float *d_out, int w,int h, int d,float3 pos){
const int c = blockIdx.x*blockDim.x + threadIdx.x;// column
const int r = blockIdx.y*blockDim.y + threadIdx.y;// row
const int s = blockIdx.z*blockDim.z + threadIdx.z;// stack
const int i = c + r*w + s*w*h;
if ((c >= w)||(r >= h) || (s >= d)) return;
d_out[i] = distance(c, r, s, pos);
printf("d_out[%d]:%f\n",i,d_out[i]);
}
// Fill a W x H x D distance field on the device and copy it back.
// NOTE(review): the host copy `out` is never inspected and no CUDA call is
// error-checked; cudaMemcpy implicitly synchronizes with the kernel.
int main(){
float *out = (float *)calloc(W*H*D, sizeof(float));
float *d_out=0;
cudaMalloc(&d_out, W*H*D*sizeof(float));
const float3 pos = {0.0f, 0.0f, 0.0f};
const dim3 blockSize(TX, TY, TZ);
const dim3 gridSize(divUp(W, TX), divUp(H, TY), divUp(D, TZ));
distanceKernel<<<gridSize, blockSize>>>(d_out, W, H, D, pos);
cudaMemcpy(out, d_out, W*H*D*sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_out);
free(out);
return 0;
}
|
12,190 | #include "Vector.cuh"
#include "Normal.cuh"
// Constructors: zero vector, uniform fill, copy, and component-wise.
__host__ __device__ Vector::Vector() :
x(0.0), y(0.0), z(0.0) { }
__host__ __device__ Vector::Vector(float v) :
x(v), y(v), z(v) {}
__host__ __device__ Vector::Vector(const Vector &w) :
x(w.x), y(w.y), z(w.z) { }
__host__ __device__ Vector::Vector(float xx, float yy, float zz) :
x(xx), y(yy), z(zz) { }
// Dot products against another Vector or a Normal.
__host__ __device__ float Vector::dot(const Vector &w) const {
return this->x * w.x + this->y * w.y + this->z * w.z;
}
__host__ __device__ float Vector::dot(const Normal &n) const {
return this->x * n.x + this->y * n.y + this->z * n.z;
}
// Right-handed cross products (this x n / this x w).
__host__ __device__ Vector Vector::cross(const Normal &n) const {
return Vector(this->y * n.z - this->z * n.y, this->z * n.x - this->x * n.z, this->x * n.y - this->y * n.x);
}
__host__ __device__ Vector Vector::cross(const Vector &w) const {
return Vector(this->y * w.z - this->z * w.y, this->z * w.x - this->x * w.z, this->x * w.y - this->y * w.x);
}
// Arithmetic operators: scalar variants apply the scalar to every component;
// vector variants are component-wise. Compound forms mutate in place and
// return *this.
// NOTE(review): the operator set is asymmetric — e.g. there is -=(float) but
// no -=(Vector), and /(Vector) but no /=(Vector); presumably only what the
// rest of the project needs.
__host__ __device__ Vector Vector::operator+(float s) const {
return Vector(this->x + s, this->y + s, this->z + s);
}
__host__ __device__ Vector Vector::operator+(const Vector &w) const {
return Vector(this->x + w.x, this->y + w.y, this->z + w.z);
}
__host__ __device__ Vector& Vector::operator+=(float s) {
x += s; y += s; z += s;
return *this;
}
__host__ __device__ Vector& Vector::operator+=(const Vector &v) {
x += v.x; y += v.y; z += v.z;
return *this;
}
__host__ __device__ Vector Vector::operator-(float s) const {
return Vector(this->x - s, this->y - s, this->z - s);
}
__host__ __device__ Vector Vector::operator-(const Vector &w) const {
return Vector(this->x - w.x, this->y - w.y, this->z - w.z);
}
__host__ __device__ Vector& Vector::operator-=(float s) {
x -= s; y -= s; z -= s;
return *this;
}
__host__ __device__ Vector Vector::operator*(float s) const {
return Vector(s * this->x, s * this->y, s * this->z);
}
__host__ __device__ Vector Vector::operator*(const Vector &w) const {
return Vector(this->x * w.x, this->y * w.y, this->z * w.z);
}
__host__ __device__ Vector& Vector::operator*=(float s) {
x *= s; y *= s; z *= s;
return *this;
}
// Division performs no zero checks; dividing by 0 follows IEEE float rules.
__host__ __device__ Vector Vector::operator/(float s) const {
return Vector(this->x / s, this->y / s, this->z / s);
}
__host__ __device__ Vector Vector::operator/(const Vector &w) const {
return Vector(this->x / w.x, this->y / w.y, this->z / w.z);
}
__host__ __device__ Vector& Vector::operator/=(float s) {
x /= s; y /= s; z /= s;
return *this;
}
// Euclidean length; normalize() mutates (undefined for zero-length vectors),
// normalized() returns a unit-length copy.
__host__ __device__ float Vector::norm() const {
return sqrtf(this->x*this->x + this->y*this->y + this->z*this->z);
}
__host__ __device__ Vector& Vector::normalize() {
*this /= norm();
return *this;
}
__host__ __device__ Vector Vector::normalized() const {
return *this / norm();
}
// Free operators: unary negation and left scalar multiplication
// (s * v delegates to v * s).
__host__ __device__ Vector operator-(const Vector &v) {
return Vector(-v.x, -v.y, -v.z);
}
__host__ __device__ Vector operator*(float s, const Vector &v) {
return v*s;
}
// Build an orthogonal basis around v1: choose *v2 perpendicular to v1, then
// *v3 = v1 x *v2. The outputs are NOT normalized here.
//
// BUG FIX: the original condition was `v1.x == 0 && v1.x == 0` (x tested
// twice), so e.g. v1 = (0, 1, 0) took the z-axis special case and produced
// the degenerate v2 = (0, 0, 0). The intended test is that BOTH x and y are
// zero, i.e. v1 lies on the z-axis.
__host__ __device__ void coordinateSystem(const Vector &v1, Vector *v2, Vector *v3) {
*v2 = Vector(0);
if (v1.x == 0 && v1.y == 0)
{
// v1 is along the z-axis: (-z, 0, 0) is perpendicular to it.
v2->x = -v1.z;
}
else {
// General case: (-y, x, 0) is perpendicular to (x, y, z).
v2->x = -v1.y;
v2->y = v1.x;
}
*v3 = v1.cross(*v2);
}
12,191 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a failed CUDA call with its call site; exits with the error code
// unless abort == false. Used through the gpuErrchk macro, which supplies
// __FILE__ and __LINE__.
// FIX: `file` is now const char* — __FILE__ expands to a string literal, and
// binding a string literal to a non-const char* is ill-formed in ISO C++11
// and later. Callers are unaffected (const char* accepts the literal).
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// Busy-work kernel: every launched thread serially overwrites each of the
// num_elems array entries with sin() of its value truncated back to int64.
// NOTE(review): despite the name and the original "pause for num_cycle
// cycles" comment, this does not sleep for a cycle count — it just burns
// time proportional to num_elems, and because sin() of an integer is in
// [-1, 1], the truncated results are only ever -1, 0, or 1. Confirm the
// intended semantics before reusing.
__global__ void sleep(int64_t *array, int64_t num_elems)
{
int i;
for(i=0; i<num_elems; i++)
array[i] = sin((double)array[i]);
}
// Allocate num_elems int64 slots on the device, storing the device pointer
// through *device_value. Aborts via gpuErrchk on failure.
extern "C" void allocate_mem(int64_t **device_value, int64_t num_elems)
{
gpuErrchk( cudaMalloc((void**)device_value, num_elems*sizeof(int64_t)) );
}
// Synchronously copy num_elems int64 values host -> device.
extern "C" void copy_to_device(int64_t *host_array, int64_t *device_array, int64_t num_elems)
{
gpuErrchk( cudaMemcpy(device_array, host_array, num_elems*sizeof(int64_t), cudaMemcpyHostToDevice) );
}
// Synchronously copy device -> host.
// NOTE(review): despite taking num_elems, this copies exactly ONE int64
// (1*sizeof(int64_t)) — asymmetric with copy_to_device above. That may be
// deliberate (e.g. fetching just a completion counter), but confirm with the
// callers before relying on more than the first element.
extern "C" void copy_from_device(int64_t *host_array, int64_t *device_array, int64_t num_elems)
{
gpuErrchk( cudaMemcpy(host_array, device_array, 1*sizeof(int64_t), cudaMemcpyDeviceToHost) );
}
// Launch the busy-work `sleep` kernel asynchronously on the default stream
// with a single thread (1 block x 1 thread); returns without waiting —
// pair with wait_for_gpu() to block until completion.
// NOTE(review): the parameter names suggest cycle counts, but the kernel
// actually treats them as (device array, element count) — see the kernel's
// note above.
extern "C" void sleep_kernel(int64_t *completed_cycles, int64_t requested_cycles)
{
// Our kernel will launch a single thread to sleep the kernel
int blockSize, gridSize;
blockSize = 1;
gridSize = 1;
// Execute the kernel in default stream
sleep<<< gridSize, blockSize >>>(completed_cycles, requested_cycles);
}
// Block the host until all previously launched device work has finished.
extern "C" void wait_for_gpu()
{
cudaDeviceSynchronize();
}
|
12,192 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define NUM_ELEMENT 100000
#define BLOCK_SIZE 32
#define GRID_SIZE ((NUM_ELEMENT + BLOCK_SIZE - 1) / BLOCK_SIZE)
#define WARP_SIZE 32
#define SHARED_SIZE (BLOCK_SIZE * 4)
// Per-block max reduction: each block loads blockDim.x ints of Arr into
// dynamic shared memory (blockDim.x ints required) and tree-reduces them to
// sharedmem[0]. Assumes blockDim.x is a power of two and the grid exactly
// covers Arr (as in main: 100000 elements / 32-thread blocks).
//
// BUG FIX: the original selected active threads with
// `(tid / WARP_SIZE) % (2*i) == 0`, which is true for EVERY thread of a
// 32-wide block, so all threads read sharedmem[tid + i] — up to 15 slots
// past the end of the shared array — and the surviving value was not a
// correct reduction. The standard `tid < i` halving below keeps all reads
// in bounds and reduces correctly within the block.
//
// NOTE(review): unchanged from the original, every block still writes its
// result to Max[0], so with multiple blocks the final value is whichever
// block wrote last, not the global maximum; a true multi-block reduction
// needs atomicMax on an initialized Max or a second reduction pass.
__global__ void reduction_max(int *Arr, int *Max){
extern __shared__ int sharedmem[];
int tid = threadIdx.x;
int id = blockIdx.x * blockDim.x + threadIdx.x;
sharedmem[tid] = Arr[id];
__syncthreads();
// Classic halving tree reduction: active threads keep the max of their
// slot and the slot i positions above.
for (int i = blockDim.x / 2; i > 0; i >>= 1){
if (tid < i && sharedmem[tid] < sharedmem[tid + i])
sharedmem[tid] = sharedmem[tid + i];
__syncthreads();
}
if (tid == 0)
Max[0] = sharedmem[0];
}
// Fill an array with random ints, reduce it on the GPU, and report the max
// plus the kernel time measured with CUDA events.
// NOTE(review): SHARED_SIZE (BLOCK_SIZE*4 bytes = one int per thread) is
// passed as the dynamic shared-memory size, matching the kernel's needs.
// Because every block writes Max[0] (see the kernel note), the printed
// "MAX NUM" reflects one block's slice, not necessarily the global maximum.
// No CUDA call is error-checked.
int main(){
int* arr;
int* d_arr, *d_max;
int max = 0;
cudaEvent_t start, end;
float etime;
dim3 block(BLOCK_SIZE);
dim3 grid(GRID_SIZE);
cudaEventCreate(&start);
cudaEventCreate(&end);
srand(time(NULL));
// random number creation
arr = (int*)malloc(sizeof(int) * NUM_ELEMENT);
for (int i = 0; i < NUM_ELEMENT; i++)
arr[i] = rand() % (NUM_ELEMENT * 10);
// tmp print
//for (int i = 0; i < NUM_ELEMENT; i++)
// printf("%d\n", arr[i]);
// cuda var initialization
cudaMalloc((void**)&d_arr, sizeof(int)*NUM_ELEMENT);
cudaMalloc((void**)&d_max, sizeof(int));
cudaMemcpy(d_arr, arr, sizeof(int)*NUM_ELEMENT, cudaMemcpyHostToDevice);
// kernel call & exec time check
cudaEventRecord(start, 0);
reduction_max<<<grid, block, SHARED_SIZE>>>(d_arr, d_max);
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
cudaEventElapsedTime(&etime, start, end);
cudaMemcpy(&max, d_max, sizeof(int), cudaMemcpyDeviceToHost);
printf("MAX NUM : %d\n", max);
printf("EXEC TIME : %f ms\n", etime);
cudaEventDestroy(start);
cudaEventDestroy(end);
cudaFree(d_arr);
cudaFree(d_max);
return 0;
}
12,193 | # include <stdlib.h>
# include <stdio.h>
# include <time.h>
# include <math.h>
# define BSIZE 32
// Forward declaration of the device multiplication function
__global__ void blockMxM(double*, double*, double*, int);
// Host multiplication function
// Host wrapper: multiply the NxN matrices A and B on the GPU and return a
// freshly malloc'ed NxN result (caller frees). Assumes N is a multiple of
// BSIZE (the grid is N/BSIZE blocks per dimension).
double* cuda_MxM(double* A, double* B, int N) {
double* AA;
double* BB;
double* CC;
int size = N * N;
// Copy the operands to the device and allocate the result buffer.
cudaMalloc((void **)&AA, size * sizeof(double));
cudaMemcpy(AA, A, size * sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc((void **)&BB, size * sizeof(double));
cudaMemcpy(BB, B, size * sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc((void **)&CC, size * sizeof(double));
// One BSIZE x BSIZE thread block per output tile.
dim3 dimBlock(BSIZE, BSIZE);
dim3 dimGrid(N / dimBlock.x, N / dimBlock.y);
blockMxM<<<dimGrid, dimBlock>>>(AA, BB, CC, N);
// Read result from the device (this cudaMemcpy synchronizes with the kernel)
double* C = (double*)malloc(size * sizeof(double));
cudaMemcpy(C, CC, size * sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(AA);
cudaFree(BB);
// BUG FIX: CC was never freed (its cudaFree was commented out), leaking
// N*N doubles of device memory on every call.
cudaFree(CC);
return C;
}
// Device code for multiplication on each block: classic shared-memory tiled
// matrix multiply. Each BSIZE x BSIZE thread block computes one output tile
// of C = A*B by marching matching tiles of A and B through shared memory.
// Assumes N is a multiple of BSIZE (no edge guards on the loads).
__global__ void blockMxM(double* A, double* B, double* C, int N) {
int stripeA = (blockIdx.y * BSIZE) * N; // first element of this tile-row of A
int stripeB = blockIdx.x * BSIZE; // first column of this tile-column of B
int blockxy = threadIdx.y * N + threadIdx.x; // this thread's offset in a tile
// printf("Hello from block %d, %d, thread %d, %d, sA: %d, sB: %d \n", blockIdx.x, blockIdx.y, threadIdx.x, threadIdx.y, stripeA, stripeB);
double Csub = 0;
for (int i = 0; i < (N-1) / BSIZE + 1; i++) {
// Stage one tile of A and one tile of B in shared memory.
__shared__ double AA[BSIZE][BSIZE];
__shared__ double BB[BSIZE][BSIZE];
int j = stripeA + i * BSIZE + blockxy;
int k = stripeB + i * (BSIZE * N) + blockxy;
AA[threadIdx.y][threadIdx.x] = A[j];
BB[threadIdx.y][threadIdx.x] = B[k];
__syncthreads();
// Accumulate the partial dot product for this thread's output element.
for (int ii = 0; ii < BSIZE; ii++) {
Csub += AA[threadIdx.y][ii] * BB[ii][threadIdx.x];
}
// Barrier before the next iteration overwrites the shared tiles.
__syncthreads();
}
// Write the block sub-matrix to global memory;
// each thread writes one element
int l = stripeA + stripeB + blockxy;
C[l] = Csub;
}
// Multiply two 1024x1024 all-ones matrices on the GPU and print the elapsed
// wall time as measured by clock().
int main() {
int N = pow(2, 10);
int size = N * N;
double* A = (double*)malloc(sizeof(double) * size);
double* B = (double*)malloc(sizeof(double) * size);
for (int i = 0; i < size; i++) {
A[i] = 1.0;
B[i] = 1.0;
}
clock_t start = clock();
double* C = cuda_MxM(A, B, N);
clock_t end = clock();
int msec = (end - start) * 1000 / CLOCKS_PER_SEC;
printf("%d milliseconds \n", msec);
// for (int i = 0; i < size; i++) {
// printf("%lf\n", C[i]);
// }
// FIX: A, B and the result C returned by cuda_MxM were previously leaked;
// free them and return an explicit status.
free(A);
free(B);
free(C);
return 0;
}
12,194 | #include"Device.cuh"
namespace Device {
// Print a human-readable summary of the currently selected CUDA device's
// properties to stdout; prints an error and returns early if no device can
// be queried.
void dumpCurrentDevice() {
int device;
if (cudaGetDevice(&device) != cudaSuccess) {
std::cout << "ERROR: Unable to detect CUDA device" << std::endl;
return;
}
cudaDeviceProp properties;
if (cudaGetDeviceProperties(&properties, device) != cudaSuccess) {
std::cout << "ERROR: Unable to retrieve CUDA device properties (id = " << device << ")" << std::endl;
return;
}
std::cout << "####################################################################" << std::endl;
std::cout << "------------ Device: " << device << std::endl;
std::cout << "------------ Name: " << properties.name << std::endl;
std::cout << "------------ Total Global Memory: " << properties.totalGlobalMem << std::endl;
std::cout << "------------ Shared Memory per block: " << properties.sharedMemPerBlock << std::endl;
std::cout << "------------ Registers per block: " << properties.regsPerBlock << std::endl;
std::cout << "------------ Warp Size: " << properties.warpSize << std::endl;
std::cout << "------------ Memory Pitch: " << properties.memPitch << std::endl;
std::cout << "------------ Max Threads Per Block: " << properties.maxThreadsPerBlock << std::endl;
std::cout << "------------ Max Threads dim: [" << properties.maxThreadsDim[0] << ", " << properties.maxThreadsDim[1] << ", " << properties.maxThreadsDim[2] << "]" << std::endl;
std::cout << "------------ Max Grid Size: [" << properties.maxGridSize[0] << ", " << properties.maxGridSize[1] << ", " << properties.maxGridSize[2] << "]" << std::endl;
std::cout << "------------ Total Constant Memory: " << properties.totalConstMem << std::endl;
std::cout << "------------ Compute Capability: " << properties.major << "." << properties.minor << std::endl;
std::cout << "------------ Clock Rate: " << properties.clockRate << std::endl;
std::cout << "------------ Texture Alignment: " << properties.textureAlignment << std::endl;
std::cout << "------------ Device Overlap: " << properties.deviceOverlap << std::endl;
std::cout << "------------ Multi Processor Count: " << properties.multiProcessorCount << std::endl;
std::cout << "------------ Kernel Exec Timeout Enabled: " << properties.kernelExecTimeoutEnabled << std::endl;
std::cout << "------------ Integrated: " << properties.integrated << std::endl;
std::cout << "------------ Can Map Host Memory: " << properties.canMapHostMemory << std::endl;
std::cout << "------------ Compute Mode: " << properties.computeMode << std::endl;
std::cout << "------------ Concurrent Kernels: " << properties.concurrentKernels << std::endl;
std::cout << "------------ ECC Enabled: " << properties.ECCEnabled << std::endl;
std::cout << "------------ PCI Bus ID: " << properties.pciBusID << std::endl;
std::cout << "------------ PCI Device ID: " << properties.pciDeviceID << std::endl;
std::cout << "------------ TCC Driver: " << properties.tccDriver << std::endl;
std::cout << "####################################################################" << std::endl;
}
// Number of SMs on the current device, or -1 if it cannot be queried.
int multiprocessorCount() {
int device;
if (cudaGetDevice(&device) != cudaSuccess) return -1;
cudaDeviceProp properties;
if (cudaGetDeviceProperties(&properties, device) != cudaSuccess) return -1;
return (properties.multiProcessorCount);
}
// Name of the given device, or "" if its properties cannot be queried.
std::string getDeviceName(int device) {
cudaDeviceProp properties;
if (cudaGetDeviceProperties(&properties, device) != cudaSuccess) return "";
return properties.name;
}
}
|
12,195 | #include <fstream>
#include<iostream>
#include<assert.h>
#include<vector>
using namespace std;
// Read a blocknum x blockdim float matrix from the binary file `name`
// (blockdim consecutive floats per row) into a freshly new[]-allocated
// jagged array, which the caller owns. Prints an error and exits with
// status 1 if the file cannot be opened.
// NOTE(review): SizeRow/SizeCol are unused; kept for interface
// compatibility with existing callers. Reads are not length-checked, so a
// short file leaves trailing rows partially filled.
// (The original comments were mojibake-garbled; replaced with English.)
float** DataRead2(int SizeRow, int SizeCol, char*name, int blocknum, int blockdim)
{
// FIX: the original memcpy'd a fixed 40 bytes out of `name`, reading past
// the end of shorter strings and potentially leaving the local buffer
// unterminated; a bounded copy with explicit termination is safe. An
// unused FILE* local was also removed.
char matrixname[40];
std::strncpy(matrixname, name, sizeof(matrixname) - 1);
matrixname[sizeof(matrixname) - 1] = '\0';
// Allocate the row-pointer table and each row.
float **a = new float*[blocknum];
for (int i = 0; i < blocknum; i++){
a[i] = new float[blockdim];
}
assert(a != NULL);
std::ifstream infile(matrixname, std::ios::binary | std::ios::in);
if (!infile)
{
std::cerr << "open error!" << std::endl;
exit(1);
}
// Read one row of blockdim floats at a time.
for (int j = 0; j < blocknum; j++){
infile.read((char *)a[j], sizeof(float)*blockdim);
}
infile.close();
return a;
}
12,196 | #include "includes.h"
// Element-wise logistic sigmoid: output[i] = 1 / (1 + e^(-input[i])).
// KERNEL_POSITION (from includes.h) is assumed to define `position` and
// handle the bounds check against `edge` — confirm in the header.
// FIX: use the single-precision math path (expf and float literals); the
// original called exp() on a negated float, promoting every element to a
// double computation in a float kernel.
__global__ void sigmoidKernel(float* input, float* output, int edge) {
KERNEL_POSITION;
output[position] = 1.0f / (1.0f + expf(-input[position]));
}
12,197 | //
// Created by fiberhome on 2021/4/7.
//
#include "cuda_runtime.h"
#include "iostream"
// __device__ __constant__ __shared__
extern __shared__ float shared[];
// dynamically
extern __shared__ float array[];
// Demo of carving multiple typed arrays out of one dynamically sized
// extern __shared__ allocation: 128 shorts, then 64 floats, then ints.
// &array0[128] is 256 bytes in, so the float pointer stays 4-byte aligned.
__device__ void func() // __device__ or __global__ function
{
short *array0 = (short *) array;
float *array1 = (float *) &array0[128];
int *array2 = (int *) &array1[64];
}
// Alignment requirement counter-example (intentionally wrong): &array0[127]
// is an odd multiple of 2 bytes from the base, so casting it to float*
// violates the 4-byte alignment float loads require.
__device__ void func2() // __device__ or __global__ function
{
short *array0 = (short *) array;
float *array1 = (float *) &array0[127]; // not aligned to 4 bytes, this doesn't work
}
// Placeholder host entry point; the local arrays are unused — the file only
// exists to illustrate the shared-memory partitioning patterns above.
int main() {
short array0[128];
float array1[64];
int array2[256];
}
|
12,198 | #include <iostream>
#include <math.h>
using namespace std;
// Element-wise vector add: y[i] = x[i] + y[i] for i in [0, n).
// GENERALIZATION: converted the original single-thread serial loop into a
// grid-stride loop. With the <<<1, 1>>> launch used in main (one thread,
// stride 1) this executes the exact same iterations in the same order, but
// the kernel now also produces correct results for any grid/block
// configuration instead of having every thread redundantly process the
// whole array.
__global__ void add(int n, float* x, float* y){
int first = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = first; i < n; i += stride) {
y[i] = x[i] + y[i];
}
}
// Allocate two 1M-element unified-memory vectors, add them on the GPU, and
// verify every element of the result equals 3.0f.
int main()
{
int N = 1 << 20;
float*x, *y;
cudaMallocManaged(&x, N * sizeof(float)); //allocate unified memory visible to host and GPU
cudaMallocManaged(&y, N * sizeof(float));
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
add<<<1, 1>>>(N, x, y);
// Wait for the kernel before the host touches y again.
cudaDeviceSynchronize();
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i] - 3.0f));
cout << "Max error:" << maxError << endl;
// BUG FIX: the original released these buffers with delete[], but memory
// from cudaMallocManaged must be released with cudaFree — mixing the two
// allocators is undefined behavior.
cudaFree(x);
cudaFree(y);
return 0;
}
|
12,199 | #include "includes.h"
// Dot product a . b over N elements (N and ThreadsPerBlock come from
// includes.h): each thread accumulates a grid-stride partial sum, the block
// tree-reduces the partials in shared memory, and thread 0 writes one
// partial result per block into c (the host must sum c afterwards).
// Requires blockDim.x == ThreadsPerBlock and a power-of-two block size for
// the halving loop to cover all elements.
__global__ void Zero_Dot(float *a, float *b, float *c)
{
__shared__ float cache[ThreadsPerBlock];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int cacheIndex = threadIdx.x;
float temp = 0;
// Grid-stride accumulation of this thread's share of the products.
while (tid < N) {
temp += a[tid] * b[tid];
tid += blockDim.x * gridDim.x;
}
cache[cacheIndex] = temp;
__syncthreads();
// Halving tree reduction within the block.
int i = blockDim.x / 2;
while (i != 0)
{
if (cacheIndex < i)
cache[cacheIndex] += cache[cacheIndex + i];
__syncthreads();
i /= 2;
}
if (cacheIndex == 0)
c[blockIdx.x] = cache[0];
} |
12,200 | /////////////////////////////////////////////////
// nvcc BatAlgorithm.cu
//
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_profiler_api.h>
#include "cuda.h"
#include <curand.h>
#include <curand_kernel.h>
#include <ctime>
#define PI 3.14159265
// Ackley benchmark function, evaluated for candidate i of the population:
// reads D coordinates at Sol[i*D .. i*D+D-1] and writes the objective value
// into Fitness[i]. Global minimum 0 at the origin.
__device__ void Ackley(int D, int i, double* Sol, double* Fitness){
double a = 20;
double b = 0.2;
double c = 2*PI;
double val_1 = 0.0;
double val_2 = 0.0;
// val_1 accumulates mean squared coordinates; val_2 the mean cos term.
for (int j = 0; j < D; j++) {
val_1 = val_1 + (Sol[(i*D) + j]*Sol[(i*D) + j])/D;
val_2 = val_2 + cos(c*Sol[(i*D) + j])/D;
}
val_1 = sqrt(val_1);
Fitness[i] = a + exp(1.0) - (a*exp(-b*val_1)) - exp(val_2);
}
// Schwefel benchmark function for candidate i: 418.9829*D - sum of
// x_j*sin(sqrt(|x_j|)); minimum near x_j = 420.9687.
__device__ void Schwefel(int D, int i, double* Sol, double* Fitness){
double val = 0.0;
for (int j = 0; j < D; j++) {
val = val + Sol[(i*D) + j]*sin(sqrt(abs(Sol[(i*D) + j])));
}
Fitness[i] = (418.9829*D) - val;
}
// Schaffer-style benchmark for candidate i, negated so that MINIMIZING
// Fitness maximizes the underlying function (the drivers print -Fitness).
__device__ void Fun3(int D, int i, double* Sol, double* Fitness){
double val = 0.0;
for (int j = 0; j < D; j++) {
val = val + (Sol[(i*D) + j]*Sol[(i*D) + j]);
}
// r = sin^2(sqrt(||x||^2)), b = (1 + 0.001*||x||^2)^2
double r = sin(sqrt(val));
r *= r;
r = r - 0.5;
double b = 1.0 + 0.001*val;
b *= b;
Fitness[i] = (0.5 - (r/b))*(-1);
}
// Dispatch to the selected benchmark (1 = Ackley, 2 = Schwefel, 3 = Fun3)
// for candidate i; any other value leaves Fitness[i] untouched.
__device__ void funFitness (int D, int i, double* Sol, double* Fitness, int fun){
switch (fun) {
case 1:{
Ackley(D, i, Sol, Fitness);
break;}
case 2:{
Schwefel(D, i, Sol, Fitness);
break;}
case 3:{
Fun3(D, i, Sol, Fitness);
break;}
}
}
// Clamp val to [lower, upper]. The lower bound is applied first, matching
// the original's behavior when the bounds are inconsistent (lower > upper
// yields upper).
__device__ double simplebounds(double val, double lower, double upper){
double clamped = val;
if (clamped < lower) clamped = lower;
if (clamped > upper) clamped = upper;
return clamped;
}
// Find the minimum Fitness among the first NP-b candidates, record its
// coordinates in best[b*D .. b*D+D-1], and swap that candidate (fitness and
// solution) to slot NP-b-1 so repeated calls with b = 0..19 extract the 20
// best bats. Reduction runs in three stages: grid-stride into F/J, then
// block 0 reduces across the grid's partials, then thread 0 finishes.
// NOTE(review): stages 2 and 3 read F/J values produced by OTHER blocks but
// are separated only by __syncthreads(), which does not synchronize across
// blocks — this relies on scheduling/visibility that CUDA does not
// guarantee; confirm results or restructure as two launches.
__global__ void best_bat(int D, double* Fitness, double* F, int NP, double* best, double* Sol, int* J,int b){
int i = threadIdx.x + (blockIdx.x * blockDim.x);
int NumHilos = blockDim.x * gridDim.x;
// Stage 1: each thread grid-strides over candidates, keeping its running
// minimum in F[i] and the winner's index in J[i].
F[i] = Fitness[i];
J[i] = i;
int ii = i + NumHilos;
while (ii < (NP - b)){
if (F[i] > Fitness[ii]){
F[i] = Fitness[ii];
J[i] = ii;
}
ii += NumHilos;
}
__syncthreads();
// Stage 2: block 0 folds all per-thread partials into its own slots.
if (blockIdx.x == 0) {
i = threadIdx.x;
ii = i + blockDim.x ;
while (ii < NumHilos){
if (F[i] > F[ii]){
F[i] = F[ii];
J[i] = J[ii];
}
ii += blockDim.x;
}
__syncthreads();
// Stage 3: thread 0 serially finds the final minimum, then swaps the
// winning candidate into slot NP-b-1 and stores its coordinates.
if (threadIdx.x == 0) {
i = 0;
ii = i + 1 ;
while (ii < blockDim.x){
if (F[i] > F[ii]){
F[i] = F[ii];
J[i] = J[ii];
}
ii ++;
}
double td = Fitness[J[i]];
Fitness[J[i]] = Fitness[NP - b - 1];
Fitness[NP - b - 1] = td;
//Fitness[J[i]] = 100000;
for (size_t j = 0; j < D; j++) {
best[j + (D*b)] = Sol[(J[i]*D) + j];
Sol[(J[i]*D) + j] = Sol[((NP - b - 1)*D) + j];
Sol[((NP - b - 1)*D) + j] = best[j + (D*b)];
}
}
}
}
// Initialize bat i (one thread per bat): zero frequency and velocity, draw
// a uniform random position inside [Lb, Ub] per dimension, and evaluate its
// fitness. The clock()-seeded curand_init makes runs non-reproducible.
__global__ void init_bat(int D, double* Lb, double* Ub, double *v, double * Sol, double* Fitness, double* Q, int function){
int i = threadIdx.x + (blockIdx.x * blockDim.x);
curandState_t state;
Q[i] = 0;
curand_init((unsigned long long)clock(), i, 0, &state);
double rnd;
for (int j = 0; j < D; j++) {
rnd = curand_uniform_double(&state);
v[(i*D) + j] = 0.0;
Sol[(i*D) + j] = Lb[j] + (Ub[j] - Lb[j])*rnd;
// The clamp is redundant for rnd in (0,1] but kept as a safety net.
Sol[(i*D) + j] = simplebounds(Sol[(i*D) + j], Lb[j], Ub[j]);
}
funFitness(D, i, Sol, Fitness, function);
// NOTE(review): this trailing barrier has no effect — nothing follows it.
__syncthreads();
}
// One bat-algorithm step for bat i: update frequency Q[i], move with
// velocity toward one of the 20 stored best solutions (random index k),
// optionally do a local random walk around that best (when rnd > r), then
// greedily accept the new position if it improves fitness and rnd < A.
// NOTE(review): Q[i] = Qmin + (Qmin - Qmax)*rnd looks sign-flipped, but it
// matches Yang's original bat-algorithm reference code — confirm before
// "fixing".
__global__ void move_bat(int D, double* Lb, double* Ub, double *v, double * Sol,
double* Fitness, double* Q, double Qmin, double Qmax, double A,
double* best, double* S, double r, double* Fnew, int function){
int i = threadIdx.x + (blockIdx.x * blockDim.x);
curandState_t state;
//a[i] = i;
curand_init((unsigned long long)clock(), i, 0, &state);
double rnd;
// Pick one of the 20 elite solutions to steer toward.
int k = curand(&state) % 20;
rnd = curand_uniform_double(&state);
Q[i] = Qmin + (Qmin - Qmax)*rnd;
for (int j = 0; j < D; j++) {
v[(i*D) + j] = v[(i*D) + j] + ((Sol[(i*D) + j] - best[j + (k*D)])*Q[i]);
S[(i*D) + j] = Sol[(i*D) + j] + v[(i*D) + j];
S[(i*D) + j] = simplebounds(S[(i*D) + j], Lb[j], Ub[j]);
}
rnd = curand_uniform_double(&state);
// Local search: random walk around the chosen elite, scaled by loudness A
// and 1/20 of the domain width per dimension.
if (rnd > r) {
for (int j = 0; j < D; j++) {
rnd = curand_uniform_double(&state);
//rnd = curand_normal_double(&state);
S[(i*D) + j] = best[j + (k*D)] + (((Ub[j] - Lb[j])/20)* A * ((rnd*2)-1));
S[(i*D) + j] = simplebounds(S[(i*D) + j], Lb[j], Ub[j]);
}
}
funFitness(D, i, S, Fnew, function);
rnd = curand_uniform_double(&state);
// Greedy, loudness-gated acceptance of the candidate position.
if (Fnew[i] <= Fitness[i] && rnd < A) {
for (int j = 0; j < D; j++) {
Sol[(i*D) + j] = S[(i*D) + j];
}
Fitness[i] = Fnew[i];
}
// NOTE(review): this trailing barrier has no effect — nothing follows it.
__syncthreads();
}
// Run the bat algorithm: NP bats of dimension D for N_Gen generations on
// benchmark `function` over [Lower, Upper]^D. Stores the best fitness after
// initialization and after each generation into vecF[0..N_Gen] and returns
// the final best. Uses unified memory; each best_bat call with index i
// extracts the i-th best bat, and the copy after i==0 reads the overall best.
// NOTE(review): `fnew` is read at the `if(fnew < f)` comparison below but is
// NEVER assigned anywhere in this function — that branch compares an
// uninitialized value, so the loudness/pulse-rate (A, r) update triggers
// unpredictably. Needs a real "new best" value before this can work as the
// bat algorithm intends.
double run_bat (int D, int NP,int N_Gen, double A, double r, double Qmin, double Qmax, double Lower, double Upper, int function, double* vecF){
unsigned long long int D_size = D*sizeof(double);
unsigned long long int NP_size = NP*sizeof(double);
unsigned long long int DxNP_size = D*NP*sizeof(double);
double *Lb, *Ub, *best; //size D
double *Q, *Fnew, *Fitness, *F; // size NP
double *v, *Sol, *S;//size D*NP
int *J;
double *_Lb = (double*)malloc(D_size);
double *_Ub = (double*)malloc(D_size);
double f, fnew, ff;
// Uniform box bounds replicated per dimension.
for (int i = 0; i < D; i++) {
_Lb[i] = Lower;
_Ub[i] = Upper;
}
cudaMallocManaged(&Lb, D_size);
cudaMallocManaged(&Ub, D_size);
cudaMallocManaged(&best, D_size*20);
cudaMallocManaged(&Q, NP_size);
cudaMallocManaged(&Fnew, NP_size);
cudaMallocManaged(&Fitness, NP_size);
cudaMallocManaged(&F, NP_size);
cudaMallocManaged(&v, DxNP_size);
cudaMallocManaged(&Sol, DxNP_size);
cudaMallocManaged(&S, DxNP_size);
cudaMallocManaged(&J, NP*sizeof(int));
cudaMemcpy(Lb, _Lb, D_size, cudaMemcpyHostToDevice );
cudaMemcpy(Ub, _Ub, D_size, cudaMemcpyHostToDevice );
// Population init, then extract the 20 elite bats; F[0] holds the best
// fitness after the i==0 call (the blocking cudaMemcpy also synchronizes).
init_bat<<< NP/100, 100>>>(D, Lb, Ub, v, Sol, Fitness, Q, function);
for (size_t i = 0; i < 20; i++) {
best_bat<<< 10, 100>>>(D, Fitness, F, NP, best, Sol, J, i);
cudaMemcpy(&f, F, sizeof(double), cudaMemcpyDeviceToHost);
if(i == 0) ff = f;
}
vecF[0] = ff;
//printf("-------------\n");
//printf("%5.10f\n", ff);
for (size_t i = 0; i < N_Gen; i++) {
move_bat<<< NP/100, 100>>>(D, Lb, Ub, v, Sol, Fitness, Q, Qmin, Qmax, A, best, S, r, Fnew, function);
for (size_t j = 0; j < 20; j++) {
best_bat<<< 10, 100>>>(D, Fitness, F, NP, best, Sol, J, j);
cudaMemcpy(&f, F, sizeof(double), cudaMemcpyDeviceToHost);
if(j == 0) ff = f;
}
//printf("-------------\n");
//printf("%5.10f\n", ff);
vecF[i + 1] = ff;
// See NOTE above: fnew is uninitialized here.
if(fnew < f){
A *= 0.8;
r *= (1 - exp(-1.0));
f = fnew;
}
}
//printf("-------------\n");
printf("%5.10f\n", -ff);
cudaFree(Lb); cudaFree(Ub); cudaFree(best);
cudaFree(Q); cudaFree(Fnew); cudaFree(Fitness);
cudaFree(F); cudaFree(v); cudaFree(Sol);
cudaFree(S); cudaFree(J);
return ff;
}
// Driver: run the bat algorithm 100 times on benchmark 3, keep the trace of
// the best run, and print it negated (Fun3 stores the negated objective).
int main() {
double *v = (double*)malloc(51*sizeof(double));
double *vbest = (double*)malloc(51*sizeof(double));
double f, fbest = 100000;
for (size_t i = 0; i < 100; i++) {
//f = run_bat(8, 20000, 50, 0.5, 0.5, 0.0, 2.0, -32.7680, 32.7680, 1, v);
//f = run_bat(2, 20000, 50, 0.5, 0.5, 0.0, 2.0, -500.0, 500.0, 2, v);
f = run_bat(8, 20000, 50, 0.5, 0.5, 0.0, 2.0, -100.0, 100.0, 3, v);
if (fbest > f) {
fbest = f;
for (size_t j = 0; j < 51; j++) {
vbest[j] = v[j];
}
}
}
printf("-------------\n");
for (size_t j = 0; j < 51; j++) {
printf("%5.10f\n", -vbest[j]);
}
// FIX: the trace buffers were previously leaked.
free(v);
free(vbest);
return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.