hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
c967745c4bca2ec629a85bb184f5357774f66c90.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <fstream>
#include <sstream>
#include <stdio.h>
#include <stdlib.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "npp.h"
#include <math.h>
#include <windows.h>
// CUDA error checking Macro.
#define CUDA_CALL(x,y) {if((x) != hipSuccess){ \
printf("CUDA error at %s:%d\n",__FILE__,__LINE__); \
printf(" %s\n", hipGetErrorString(hipGetLastError())); \
exit(EXIT_FAILURE);}\
else{printf("CUDA Success at %d. (%s)\n",__LINE__,y); }}
//Global declaration
#define DIM 512
// Function Prototypes.
Npp8u *
LoadPGM(char * sFileName, int & nWidth, int & nHeight, int & nMaxGray);
void
WritePGM(char * sFileName, Npp8u * pDst_Host, int nWidth, int nHeight, int nMaxGray);
__global__ void
MinimumKernel(Npp8u * pSrc_Dev, Npp8u * pMin_Dev);
__global__ void
MaximumKernel(Npp8u * pSrc_Dev, Npp8u * pMax_Dev);
__global__ void
SubtractKernel(Npp8u * pDst_Dev, Npp8u * pSrc_Dev, Npp8u nMin_Dev);
__global__ void
MultiplyKernel(Npp8u * pDst_Dev, Npp8u nConstant, int nNormalizer);
// Main function.
int
main(int argc, char ** argv)
{
// Host parameter declarations.
// pSrc_Host: input pixels loaded from disk; pDst_Host: stretched result.
Npp8u * pSrc_Host, *pDst_Host;
int nWidth, nHeight, nMaxGray, nNormalizer;
std::cout << "GPU VERSION" << std::endl;
// Load image to the host.
std::cout << "Load PGM file." << std::endl;
pSrc_Host = LoadPGM("lena_before.pgm", nWidth, nHeight, nMaxGray);
pDst_Host = new Npp8u[nWidth * nHeight];
// Device parameter declarations.
Npp8u * pSrc_Dev, *pDst_Dev;
Npp8u * pMin_Dev, *pMax_Dev;
// Per-block partial minima/maxima copied back from the device.
// NOTE(review): fixed size DIM (512) -- correct only while the reduction
// grid has at most 512 blocks, i.e. nWidth <= 512; confirm for other sizes.
Npp8u nMin_Host[DIM], nMax_Host[DIM];
int nSrcStep_Dev, nDstStep_Dev;
//Start Counter.
hipEvent_t start, stop;
float elapsed_time_ms;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
// Allocate Device variables and copy the image from the host to GPU
// NOTE(review): nppiMalloc_8u_C1 returns a pitched allocation, but the
// copies/kernels below index it densely as nWidth*nHeight bytes; this only
// works while the returned pitch equals nWidth -- confirm.
pSrc_Dev = nppiMalloc_8u_C1(nWidth, nHeight, &nSrcStep_Dev);
pDst_Dev = nppiMalloc_8u_C1(nWidth, nHeight, &nDstStep_Dev);
CUDA_CALL(hipMalloc(&pMin_Dev, sizeof(Npp8u) * DIM), "Memory allocated.");
CUDA_CALL(hipMalloc(&pMax_Dev, sizeof(Npp8u) * DIM), "Memory allocated.");
std::cout << "Copy image from host to device." << std::endl;
CUDA_CALL(hipMemcpy(pSrc_Dev, pSrc_Host, nWidth * nHeight * sizeof(Npp8u), hipMemcpyHostToDevice), "Memory copied.(HostToDevice)");
std::cout << "Process the image on GPU." << std::endl;
/*
Defining Kernel Execution Paramaters.
I defined two different block size to be able to find global minimum. During the First Max and Min kernels execution, they are only
be able to find local minimum. Credits: http://www.dmi.unict.it/~bilotta/gpgpu/notes/07-some-vector-examples.html
"The most efficient approach uses two kernel launches, one with the amount of blocks necessary to saturate the hardware,
the other with a single block to finish up the reduction."
*/
// NOTE(review): the launch geometry below (nWidth blocks x nWidth threads)
// covers nWidth*nWidth pixels, i.e. it assumes a square image -- confirm.
dim3 dimGrid(nWidth);
dim3 dimBlockMinMax(nWidth / 2);
dim3 dimBlock(nWidth);
//Min and max kernel are independent. So they will run async.
size_t sharedMemSize = nWidth * sizeof(Npp8u);
hipStream_t stream1, stream2;
hipStreamCreate(&stream1);
hipStreamCreate(&stream2);
// Compute the min and the max.
// Two-pass reduction: pass 1 produces one partial per block, pass 2 runs a
// single block over the partials to get the global extremum in slot 0.
MinimumKernel << <dimGrid, dimBlockMinMax, sharedMemSize, stream1 >> > (pSrc_Dev, pMin_Dev);
MaximumKernel << <dimGrid, dimBlockMinMax, sharedMemSize, stream2 >> > (pSrc_Dev, pMax_Dev);
MinimumKernel << <1, dimBlockMinMax, sharedMemSize, stream1 >> > (pMin_Dev, pMin_Dev);
MaximumKernel << <1, dimBlockMinMax, sharedMemSize, stream2 >> > (pMax_Dev, pMax_Dev);
// get max and min to host
// These blocking copies run on the legacy default stream; presumably they
// rely on the default stream synchronizing with the blocking streams
// created above before the copy starts -- TODO confirm.
// NOTE(review): hard-coded 512 should be DIM.
CUDA_CALL(hipMemcpy(&nMin_Host, pMin_Dev, sizeof(Npp8u) * 512, hipMemcpyDeviceToHost), "Memory copied.(DeviceToHost)");
CUDA_CALL(hipMemcpy(&nMax_Host, pMax_Dev, sizeof(Npp8u) * 512, hipMemcpyDeviceToHost), "Memory copied.(DeviceToHost)");
// Subtract Min
SubtractKernel << <dimGrid, dimBlock, 0, stream1 >> > (pDst_Dev, pSrc_Dev, nMin_Host[0]);
// Compute the optimal nConstant and nScaleFactor for integer operation see GTC 2013 Lab NPP.pptx for explanation
// I will prefer integer arithmetic, Instead of using 255.0f / (nMax_Host - nMin_Host) directly
// NOTE(review): a constant image (nMax == nMin) divides by zero below.
int nScaleFactor = 0;
int nPower = 1;
while (nPower * 255.0f / (nMax_Host[0] - nMin_Host[0]) < 255.0f)
{
nScaleFactor++;
nPower *= 2;
}
Npp8u nConstant = static_cast<Npp8u>(255.0f / (nMax_Host[0] - nMin_Host[0]) * (nPower / 2));
// CUDA Kernel doesn't support these calculation. So that I calculated it outside the kernel.
// nNormalizer = 2^(nScaleFactor-1); pow() returns double, implicitly narrowed.
nNormalizer = pow(2, (nScaleFactor - 1));
// Multiply Kernel
MultiplyKernel << <dimGrid, dimBlock, 0, stream1 >> > (pDst_Dev, nConstant, nNormalizer);
// Copy result back to the host.
std::cout << "Work done! Copy the result back to host." << std::endl;
CUDA_CALL(hipMemcpy(pDst_Host, pDst_Dev, nWidth * nHeight * sizeof(Npp8u), hipMemcpyDeviceToHost), "Memory copied.(DeviceToHost)");
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed_time_ms, start, stop);
printf("Time to calculate results(GPU Time): %f ms.\n", elapsed_time_ms);
// Output the result image.
std::cout << "Output the PGM file." << std::endl;
WritePGM("lena_after_GPUs.pgm", pDst_Host, nWidth, nHeight, nMaxGray);
// Clean up.
// NOTE(review): stream1/stream2 and the two events are never destroyed.
std::cout << "Clean up." << std::endl;
delete[] pSrc_Host;
delete[] pDst_Host;
nppiFree(pSrc_Dev);
nppiFree(pDst_Dev);
CUDA_CALL(hipFree(pMin_Dev), "Memory Freed.");
CUDA_CALL(hipFree(pMax_Dev), "Memory Freed.");
printf("All done. Press Any Key to Continue...");
getchar();
return 0;
}
// Disable reporting warnings on functions that were marked with deprecated.
#pragma warning( disable : 4996 )
// Load PGM file.
// Load a binary (P5) PGM file.
// Returns a newly allocated nWidth*nHeight pixel buffer (caller owns it via
// delete[]) and fills in the image dimensions and maximum gray value.
// Exits the process on any I/O or parse failure.
// NOTE: this loader assumes the header layout "P5 / one '#' comment line /
// width height / maxval", matching the files this program writes.
Npp8u *
LoadPGM(char * sFileName, int & nWidth, int & nHeight, int & nMaxGray)
{
	char aLine[256];
	// "rb": the P5 pixel payload is raw bytes. Text mode (the program is
	// built for Windows, see <windows.h>) would translate CR/LF inside the
	// pixel stream and corrupt the image.
	FILE * fInput = fopen(sFileName, "rb");
	if (fInput == 0)
	{
		perror("Cannot open file to read");
		exit(EXIT_FAILURE);
	}
	// First line: version (expected "P5").
	if (fgets(aLine, 256, fInput) == NULL)
	{
		fprintf(stderr, "Malformed PGM header in %s\n", sFileName);
		exit(EXIT_FAILURE);
	}
	std::cout << "\tVersion: " << aLine;
	// Second line: comment (assumed present).
	if (fgets(aLine, 256, fInput) == NULL)
	{
		fprintf(stderr, "Malformed PGM header in %s\n", sFileName);
		exit(EXIT_FAILURE);
	}
	std::cout << "\tComment: " << aLine;
	fseek(fInput, -1, SEEK_CUR);
	// Third line: size.
	if (fscanf(fInput, "%d", &nWidth) != 1 || fscanf(fInput, "%d", &nHeight) != 1)
	{
		fprintf(stderr, "Cannot parse PGM dimensions in %s\n", sFileName);
		exit(EXIT_FAILURE);
	}
	std::cout << "\tWidth: " << nWidth;
	std::cout << " Height: " << nHeight << std::endl;
	// Fourth line: max value.
	if (fscanf(fInput, "%d", &nMaxGray) != 1)
	{
		fprintf(stderr, "Cannot parse PGM max gray value in %s\n", sFileName);
		exit(EXIT_FAILURE);
	}
	std::cout << "\tMax value: " << nMaxGray << std::endl;
	// Skip the single whitespace/newline separating the header from the data.
	while (getc(fInput) != '\n');
	// Following lines: raw pixel data, row-major.
	Npp8u * pSrc_Host = new Npp8u[nWidth * nHeight];
	for (int i = 0; i < nHeight; ++i)
		for (int j = 0; j < nWidth; ++j)
			pSrc_Host[i*nWidth + j] = (Npp8u)fgetc(fInput);
	fclose(fInput);
	return pSrc_Host;
}
// Write PGM image.
// Write a binary (P5) PGM image.
// Emits the textual header followed by nWidth*nHeight raw pixel bytes.
// Exits the process if the output file cannot be opened.
void
WritePGM(char * sFileName, Npp8u * pDst_Host, int nWidth, int nHeight, int nMaxGray)
{
	// "wb": P5 pixel data is binary. The original "w+" text mode would, on
	// Windows, expand every 0x0A pixel byte to CR/LF and corrupt the image
	// (the unused "+" read capability is dropped as well).
	FILE * fOutput = fopen(sFileName, "wb");
	if (fOutput == 0)
	{
		perror("Cannot open file to write");
		exit(EXIT_FAILURE);
	}
	// String literals are const in C++; bind accordingly.
	const char * aComment = "# Created by NPP";
	fprintf(fOutput, "P5\n%s\n%d %d\n%d\n", aComment, nWidth, nHeight, nMaxGray);
	for (int i = 0; i < nHeight; ++i)
		for (int j = 0; j < nWidth; ++j)
			fputc(pDst_Host[i*nWidth + j], fOutput);
	fclose(fOutput);
}
// Block-level minimum reduction.
// Launch contract (see main): blockDim.x threads reduce 2*blockDim.x
// consecutive bytes of pSrc_Dev starting at blockIdx.x*blockDim.x and write
// the per-block minimum to pMin_Dev[blockIdx.x].
// Requires blockDim.x >= 64 (the unrolled tail reads tid+32) and dynamic
// shared memory of at least blockDim.x bytes.
__global__ void
MinimumKernel(Npp8u * pSrc_Dev, Npp8u * pMin_Dev)
{
	extern __shared__ Npp8u sMin[];
	unsigned int tid = threadIdx.x;
	unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x;
	// First reduction step is folded into the load: each thread keeps the
	// smaller of two elements a block-width apart.
	if (pSrc_Dev[gid] > pSrc_Dev[gid + blockDim.x])
	{
		sMin[tid] = pSrc_Dev[gid + blockDim.x];
	}
	else
	{
		sMin[tid] = pSrc_Dev[gid];
	}
	__syncthreads();
	// Tree reduction in shared memory down to warp size.
	for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1)
	{
		if (tid < s)
			if (sMin[tid] > sMin[tid + s]) sMin[tid] = sMin[tid + s];
		__syncthreads();
	}
	// Final warp-synchronous stage. The shared-memory pointer must be
	// volatile here: without it the compiler may cache sMin[] values in
	// registers and lanes would not observe each other's updates (there is
	// no __syncthreads() between these steps).
	if (tid < 32)
	{
		volatile Npp8u * vsMin = sMin;
		if (vsMin[tid] > vsMin[tid + 32]) vsMin[tid] = vsMin[tid + 32];
		if (vsMin[tid] > vsMin[tid + 16]) vsMin[tid] = vsMin[tid + 16];
		if (vsMin[tid] > vsMin[tid + 8]) vsMin[tid] = vsMin[tid + 8];
		if (vsMin[tid] > vsMin[tid + 4]) vsMin[tid] = vsMin[tid + 4];
		if (vsMin[tid] > vsMin[tid + 2]) vsMin[tid] = vsMin[tid + 2];
		if (vsMin[tid] > vsMin[tid + 1]) vsMin[tid] = vsMin[tid + 1];
	}
	// Thread 0 publishes the block result.
	if (tid == 0) pMin_Dev[blockIdx.x] = sMin[0];
}
// Block-level maximum reduction (mirror of MinimumKernel).
// Launch contract: blockDim.x threads reduce 2*blockDim.x consecutive bytes
// of pSrc_Dev starting at blockIdx.x*blockDim.x and write the per-block
// maximum to pMax_Dev[blockIdx.x].
// Requires blockDim.x >= 64 and dynamic shared memory >= blockDim.x bytes.
__global__ void
MaximumKernel(Npp8u * pSrc_Dev, Npp8u * pMax_Dev)
{
	extern __shared__ Npp8u sMax[];
	unsigned int tid = threadIdx.x;
	unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
	// First reduction step is folded into the load.
	if (pSrc_Dev[i] < pSrc_Dev[i + blockDim.x])
	{
		sMax[tid] = pSrc_Dev[i + blockDim.x];
	}
	else
	{
		sMax[tid] = pSrc_Dev[i];
	}
	__syncthreads();
	// Tree reduction in shared memory down to warp size.
	for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1)
	{
		if (tid < s)
			if (sMax[tid] < sMax[tid + s]) sMax[tid] = sMax[tid + s];
		__syncthreads();
	}
	// Final warp-synchronous stage; volatile prevents the compiler from
	// caching shared values in registers between the unrolled steps.
	if (tid < 32)
	{
		volatile Npp8u * vsMax = sMax;
		if (vsMax[tid] < vsMax[tid + 32]) vsMax[tid] = vsMax[tid + 32];
		if (vsMax[tid] < vsMax[tid + 16]) vsMax[tid] = vsMax[tid + 16];
		if (vsMax[tid] < vsMax[tid + 8]) vsMax[tid] = vsMax[tid + 8];
		if (vsMax[tid] < vsMax[tid + 4]) vsMax[tid] = vsMax[tid + 4];
		if (vsMax[tid] < vsMax[tid + 2]) vsMax[tid] = vsMax[tid + 2];
		if (vsMax[tid] < vsMax[tid + 1]) vsMax[tid] = vsMax[tid + 1];
	}
	// Thread 0 publishes the block result.
	if (tid == 0) pMax_Dev[blockIdx.x] = sMax[0];
}
// Shift every pixel down by the global minimum: pDst[i] = pSrc[i] - min.
// One thread per pixel; the host launch geometry covers the whole image, so
// no bounds check is performed here.
__global__ void
SubtractKernel(Npp8u * pDst_Dev, Npp8u * pSrc_Dev, Npp8u nMin_Dev)
{
	const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
	pDst_Dev[idx] = pSrc_Dev[idx] - nMin_Dev;
}
__global__ void
MultiplyKernel(Npp8u * pDst_Dev, Npp8u nConstant, int nNormalizer)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
pDst_Dev[i] = static_cast<Npp8u>(pDst_Dev[i] * nConstant / nNormalizer);
} | c967745c4bca2ec629a85bb184f5357774f66c90.cu | #include <iostream>
#include <fstream>
#include <sstream>
#include <stdio.h>
#include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "npp.h"
#include <math.h>
#include <windows.h>
// CUDA error checking Macro.
#define CUDA_CALL(x,y) {if((x) != cudaSuccess){ \
printf("CUDA error at %s:%d\n",__FILE__,__LINE__); \
printf(" %s\n", cudaGetErrorString(cudaGetLastError())); \
exit(EXIT_FAILURE);}\
else{printf("CUDA Success at %d. (%s)\n",__LINE__,y); }}
//Global declaration
#define DIM 512
// Function Prototypes.
Npp8u *
LoadPGM(char * sFileName, int & nWidth, int & nHeight, int & nMaxGray);
void
WritePGM(char * sFileName, Npp8u * pDst_Host, int nWidth, int nHeight, int nMaxGray);
__global__ void
MinimumKernel(Npp8u * pSrc_Dev, Npp8u * pMin_Dev);
__global__ void
MaximumKernel(Npp8u * pSrc_Dev, Npp8u * pMax_Dev);
__global__ void
SubtractKernel(Npp8u * pDst_Dev, Npp8u * pSrc_Dev, Npp8u nMin_Dev);
__global__ void
MultiplyKernel(Npp8u * pDst_Dev, Npp8u nConstant, int nNormalizer);
// Main function.
int
main(int argc, char ** argv)
{
// Host parameter declarations.
// pSrc_Host: input pixels loaded from disk; pDst_Host: stretched result.
Npp8u * pSrc_Host, *pDst_Host;
int nWidth, nHeight, nMaxGray, nNormalizer;
std::cout << "GPU VERSION" << std::endl;
// Load image to the host.
std::cout << "Load PGM file." << std::endl;
pSrc_Host = LoadPGM("lena_before.pgm", nWidth, nHeight, nMaxGray);
pDst_Host = new Npp8u[nWidth * nHeight];
// Device parameter declarations.
Npp8u * pSrc_Dev, *pDst_Dev;
Npp8u * pMin_Dev, *pMax_Dev;
// Per-block partial minima/maxima copied back from the device.
// NOTE(review): fixed size DIM (512) -- correct only while the reduction
// grid has at most 512 blocks, i.e. nWidth <= 512; confirm for other sizes.
Npp8u nMin_Host[DIM], nMax_Host[DIM];
int nSrcStep_Dev, nDstStep_Dev;
//Start Counter.
cudaEvent_t start, stop;
float elapsed_time_ms;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
// Allocate Device variables and copy the image from the host to GPU
// NOTE(review): nppiMalloc_8u_C1 returns a pitched allocation, but the
// copies/kernels below index it densely as nWidth*nHeight bytes; this only
// works while the returned pitch equals nWidth -- confirm.
pSrc_Dev = nppiMalloc_8u_C1(nWidth, nHeight, &nSrcStep_Dev);
pDst_Dev = nppiMalloc_8u_C1(nWidth, nHeight, &nDstStep_Dev);
CUDA_CALL(cudaMalloc(&pMin_Dev, sizeof(Npp8u) * DIM), "Memory allocated.");
CUDA_CALL(cudaMalloc(&pMax_Dev, sizeof(Npp8u) * DIM), "Memory allocated.");
std::cout << "Copy image from host to device." << std::endl;
CUDA_CALL(cudaMemcpy(pSrc_Dev, pSrc_Host, nWidth * nHeight * sizeof(Npp8u), cudaMemcpyHostToDevice), "Memory copied.(HostToDevice)");
std::cout << "Process the image on GPU." << std::endl;
/*
Defining Kernel Execution Paramaters.
I defined two different block size to be able to find global minimum. During the First Max and Min kernels execution, they are only
be able to find local minimum. Credits: http://www.dmi.unict.it/~bilotta/gpgpu/notes/07-some-vector-examples.html
"The most efficient approach uses two kernel launches, one with the amount of blocks necessary to saturate the hardware,
the other with a single block to ‘finish up’ the reduction."
*/
// NOTE(review): the launch geometry below (nWidth blocks x nWidth threads)
// covers nWidth*nWidth pixels, i.e. it assumes a square image -- confirm.
dim3 dimGrid(nWidth);
dim3 dimBlockMinMax(nWidth / 2);
dim3 dimBlock(nWidth);
//Min and max kernel are independent. So they will run async.
size_t sharedMemSize = nWidth * sizeof(Npp8u);
cudaStream_t stream1, stream2;
cudaStreamCreate(&stream1);
cudaStreamCreate(&stream2);
// Compute the min and the max.
// Two-pass reduction: pass 1 produces one partial per block, pass 2 runs a
// single block over the partials to get the global extremum in slot 0.
MinimumKernel << <dimGrid, dimBlockMinMax, sharedMemSize, stream1 >> > (pSrc_Dev, pMin_Dev);
MaximumKernel << <dimGrid, dimBlockMinMax, sharedMemSize, stream2 >> > (pSrc_Dev, pMax_Dev);
MinimumKernel << <1, dimBlockMinMax, sharedMemSize, stream1 >> > (pMin_Dev, pMin_Dev);
MaximumKernel << <1, dimBlockMinMax, sharedMemSize, stream2 >> > (pMax_Dev, pMax_Dev);
// get max and min to host
// These blocking copies run on the legacy default stream; presumably they
// rely on the default stream synchronizing with the blocking streams
// created above before the copy starts -- TODO confirm.
// NOTE(review): hard-coded 512 should be DIM.
CUDA_CALL(cudaMemcpy(&nMin_Host, pMin_Dev, sizeof(Npp8u) * 512, cudaMemcpyDeviceToHost), "Memory copied.(DeviceToHost)");
CUDA_CALL(cudaMemcpy(&nMax_Host, pMax_Dev, sizeof(Npp8u) * 512, cudaMemcpyDeviceToHost), "Memory copied.(DeviceToHost)");
// Subtract Min
SubtractKernel << <dimGrid, dimBlock, 0, stream1 >> > (pDst_Dev, pSrc_Dev, nMin_Host[0]);
// Compute the optimal nConstant and nScaleFactor for integer operation see GTC 2013 Lab NPP.pptx for explanation
// I will prefer integer arithmetic, Instead of using 255.0f / (nMax_Host - nMin_Host) directly
// NOTE(review): a constant image (nMax == nMin) divides by zero below.
int nScaleFactor = 0;
int nPower = 1;
while (nPower * 255.0f / (nMax_Host[0] - nMin_Host[0]) < 255.0f)
{
nScaleFactor++;
nPower *= 2;
}
Npp8u nConstant = static_cast<Npp8u>(255.0f / (nMax_Host[0] - nMin_Host[0]) * (nPower / 2));
// CUDA Kernel doesn't support these calculation. So that I calculated it outside the kernel.
// nNormalizer = 2^(nScaleFactor-1); pow() returns double, implicitly narrowed.
nNormalizer = pow(2, (nScaleFactor - 1));
// Multiply Kernel
MultiplyKernel << <dimGrid, dimBlock, 0, stream1 >> > (pDst_Dev, nConstant, nNormalizer);
// Copy result back to the host.
std::cout << "Work done! Copy the result back to host." << std::endl;
CUDA_CALL(cudaMemcpy(pDst_Host, pDst_Dev, nWidth * nHeight * sizeof(Npp8u), cudaMemcpyDeviceToHost), "Memory copied.(DeviceToHost)");
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_time_ms, start, stop);
printf("Time to calculate results(GPU Time): %f ms.\n", elapsed_time_ms);
// Output the result image.
std::cout << "Output the PGM file." << std::endl;
WritePGM("lena_after_GPUs.pgm", pDst_Host, nWidth, nHeight, nMaxGray);
// Clean up.
// NOTE(review): stream1/stream2 and the two events are never destroyed.
std::cout << "Clean up." << std::endl;
delete[] pSrc_Host;
delete[] pDst_Host;
nppiFree(pSrc_Dev);
nppiFree(pDst_Dev);
CUDA_CALL(cudaFree(pMin_Dev), "Memory Freed.");
CUDA_CALL(cudaFree(pMax_Dev), "Memory Freed.");
printf("All done. Press Any Key to Continue...");
getchar();
return 0;
}
// Disable reporting warnings on functions that were marked with deprecated.
#pragma warning( disable : 4996 )
// Load PGM file.
// Load a binary (P5) PGM file.
// Returns a newly allocated nWidth*nHeight pixel buffer (caller owns it via
// delete[]) and fills in the image dimensions and maximum gray value.
// Exits the process on any I/O or parse failure.
// NOTE: this loader assumes the header layout "P5 / one '#' comment line /
// width height / maxval", matching the files this program writes.
Npp8u *
LoadPGM(char * sFileName, int & nWidth, int & nHeight, int & nMaxGray)
{
	char aLine[256];
	// "rb": the P5 pixel payload is raw bytes. Text mode (the program is
	// built for Windows, see <windows.h>) would translate CR/LF inside the
	// pixel stream and corrupt the image.
	FILE * fInput = fopen(sFileName, "rb");
	if (fInput == 0)
	{
		perror("Cannot open file to read");
		exit(EXIT_FAILURE);
	}
	// First line: version (expected "P5").
	if (fgets(aLine, 256, fInput) == NULL)
	{
		fprintf(stderr, "Malformed PGM header in %s\n", sFileName);
		exit(EXIT_FAILURE);
	}
	std::cout << "\tVersion: " << aLine;
	// Second line: comment (assumed present).
	if (fgets(aLine, 256, fInput) == NULL)
	{
		fprintf(stderr, "Malformed PGM header in %s\n", sFileName);
		exit(EXIT_FAILURE);
	}
	std::cout << "\tComment: " << aLine;
	fseek(fInput, -1, SEEK_CUR);
	// Third line: size.
	if (fscanf(fInput, "%d", &nWidth) != 1 || fscanf(fInput, "%d", &nHeight) != 1)
	{
		fprintf(stderr, "Cannot parse PGM dimensions in %s\n", sFileName);
		exit(EXIT_FAILURE);
	}
	std::cout << "\tWidth: " << nWidth;
	std::cout << " Height: " << nHeight << std::endl;
	// Fourth line: max value.
	if (fscanf(fInput, "%d", &nMaxGray) != 1)
	{
		fprintf(stderr, "Cannot parse PGM max gray value in %s\n", sFileName);
		exit(EXIT_FAILURE);
	}
	std::cout << "\tMax value: " << nMaxGray << std::endl;
	// Skip the single whitespace/newline separating the header from the data.
	while (getc(fInput) != '\n');
	// Following lines: raw pixel data, row-major.
	Npp8u * pSrc_Host = new Npp8u[nWidth * nHeight];
	for (int i = 0; i < nHeight; ++i)
		for (int j = 0; j < nWidth; ++j)
			pSrc_Host[i*nWidth + j] = (Npp8u)fgetc(fInput);
	fclose(fInput);
	return pSrc_Host;
}
// Write PGM image.
// Write a binary (P5) PGM image.
// Emits the textual header followed by nWidth*nHeight raw pixel bytes.
// Exits the process if the output file cannot be opened.
void
WritePGM(char * sFileName, Npp8u * pDst_Host, int nWidth, int nHeight, int nMaxGray)
{
	// "wb": P5 pixel data is binary. The original "w+" text mode would, on
	// Windows, expand every 0x0A pixel byte to CR/LF and corrupt the image
	// (the unused "+" read capability is dropped as well).
	FILE * fOutput = fopen(sFileName, "wb");
	if (fOutput == 0)
	{
		perror("Cannot open file to write");
		exit(EXIT_FAILURE);
	}
	// String literals are const in C++; bind accordingly.
	const char * aComment = "# Created by NPP";
	fprintf(fOutput, "P5\n%s\n%d %d\n%d\n", aComment, nWidth, nHeight, nMaxGray);
	for (int i = 0; i < nHeight; ++i)
		for (int j = 0; j < nWidth; ++j)
			fputc(pDst_Host[i*nWidth + j], fOutput);
	fclose(fOutput);
}
// Block-level minimum reduction.
// Launch contract (see main): blockDim.x threads reduce 2*blockDim.x
// consecutive bytes of pSrc_Dev starting at blockIdx.x*blockDim.x and write
// the per-block minimum to pMin_Dev[blockIdx.x].
// Requires blockDim.x >= 64 (the unrolled tail reads tid+32) and dynamic
// shared memory of at least blockDim.x bytes.
__global__ void
MinimumKernel(Npp8u * pSrc_Dev, Npp8u * pMin_Dev)
{
	extern __shared__ Npp8u sMin[];
	unsigned int tid = threadIdx.x;
	unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x;
	// First reduction step is folded into the load: each thread keeps the
	// smaller of two elements a block-width apart.
	if (pSrc_Dev[gid] > pSrc_Dev[gid + blockDim.x])
	{
		sMin[tid] = pSrc_Dev[gid + blockDim.x];
	}
	else
	{
		sMin[tid] = pSrc_Dev[gid];
	}
	__syncthreads();
	// Tree reduction in shared memory down to warp size.
	for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1)
	{
		if (tid < s)
			if (sMin[tid] > sMin[tid + s]) sMin[tid] = sMin[tid + s];
		__syncthreads();
	}
	// Final warp-synchronous stage. The shared-memory pointer must be
	// volatile here: without it the compiler may cache sMin[] values in
	// registers and lanes would not observe each other's updates (there is
	// no __syncthreads() between these steps).
	if (tid < 32)
	{
		volatile Npp8u * vsMin = sMin;
		if (vsMin[tid] > vsMin[tid + 32]) vsMin[tid] = vsMin[tid + 32];
		if (vsMin[tid] > vsMin[tid + 16]) vsMin[tid] = vsMin[tid + 16];
		if (vsMin[tid] > vsMin[tid + 8]) vsMin[tid] = vsMin[tid + 8];
		if (vsMin[tid] > vsMin[tid + 4]) vsMin[tid] = vsMin[tid + 4];
		if (vsMin[tid] > vsMin[tid + 2]) vsMin[tid] = vsMin[tid + 2];
		if (vsMin[tid] > vsMin[tid + 1]) vsMin[tid] = vsMin[tid + 1];
	}
	// Thread 0 publishes the block result.
	if (tid == 0) pMin_Dev[blockIdx.x] = sMin[0];
}
// Block-level maximum reduction (mirror of MinimumKernel).
// Launch contract: blockDim.x threads reduce 2*blockDim.x consecutive bytes
// of pSrc_Dev starting at blockIdx.x*blockDim.x and write the per-block
// maximum to pMax_Dev[blockIdx.x].
// Requires blockDim.x >= 64 and dynamic shared memory >= blockDim.x bytes.
__global__ void
MaximumKernel(Npp8u * pSrc_Dev, Npp8u * pMax_Dev)
{
	extern __shared__ Npp8u sMax[];
	unsigned int tid = threadIdx.x;
	unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
	// First reduction step is folded into the load.
	if (pSrc_Dev[i] < pSrc_Dev[i + blockDim.x])
	{
		sMax[tid] = pSrc_Dev[i + blockDim.x];
	}
	else
	{
		sMax[tid] = pSrc_Dev[i];
	}
	__syncthreads();
	// Tree reduction in shared memory down to warp size.
	for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1)
	{
		if (tid < s)
			if (sMax[tid] < sMax[tid + s]) sMax[tid] = sMax[tid + s];
		__syncthreads();
	}
	// Final warp-synchronous stage; volatile prevents the compiler from
	// caching shared values in registers between the unrolled steps.
	if (tid < 32)
	{
		volatile Npp8u * vsMax = sMax;
		if (vsMax[tid] < vsMax[tid + 32]) vsMax[tid] = vsMax[tid + 32];
		if (vsMax[tid] < vsMax[tid + 16]) vsMax[tid] = vsMax[tid + 16];
		if (vsMax[tid] < vsMax[tid + 8]) vsMax[tid] = vsMax[tid + 8];
		if (vsMax[tid] < vsMax[tid + 4]) vsMax[tid] = vsMax[tid + 4];
		if (vsMax[tid] < vsMax[tid + 2]) vsMax[tid] = vsMax[tid + 2];
		if (vsMax[tid] < vsMax[tid + 1]) vsMax[tid] = vsMax[tid + 1];
	}
	// Thread 0 publishes the block result.
	if (tid == 0) pMax_Dev[blockIdx.x] = sMax[0];
}
// Shift every pixel down by the global minimum: pDst[i] = pSrc[i] - min.
// One thread per pixel; the host launch geometry covers the whole image, so
// no bounds check is performed here.
__global__ void
SubtractKernel(Npp8u * pDst_Dev, Npp8u * pSrc_Dev, Npp8u nMin_Dev)
{
	const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
	pDst_Dev[idx] = pSrc_Dev[idx] - nMin_Dev;
}
__global__ void
MultiplyKernel(Npp8u * pDst_Dev, Npp8u nConstant, int nNormalizer)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
pDst_Dev[i] = static_cast<Npp8u>(pDst_Dev[i] * nConstant / nNormalizer);
} |
b8290f8eb98a28c40230d98ede07b759d02e516f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef USE_LEGACY_DSLASH
#include <dslash.h>
#include <worker.h>
#include <dslash_helper.cuh>
#include <color_spinor_field_order.h>
#include <gauge_field_order.h>
#include <color_spinor.h>
#include <dslash_helper.cuh>
#include <index_helper.cuh>
#include <gauge_field.h>
#include <uint_to_char.h>
#include <dslash_policy.cuh>
#include <kernels/laplace.cuh>
/**
This is the laplacian derivative based on the basic gauged differential operator
*/
namespace quda
{
/**
@brief This is a helper class that is used to instantiate the
correct templated kernel for the dslash.
*/
// Helper functor used by Dslash::instantiate to launch the laplaceGPU kernel
// with the fully resolved set of template parameters.
template <typename Float, int nDim, int nColor, int nParity, bool dagger, bool xpay, KernelType kernel_type, typename Arg>
struct LaplaceLaunch {
// kernel name for jit compilation
static constexpr const char *kernel = "quda::laplaceGPU";
template <typename Dslash>
inline static void launch(Dslash &dslash, TuneParam &tp, Arg &arg, const hipStream_t &stream)
{
// Forward to the Dslash framework's launcher with the tuned parameters.
dslash.launch(laplaceGPU<Float, nDim, nColor, nParity, dagger, xpay, kernel_type, Arg>, tp, arg, stream);
}
};
template <typename Float, int nDim, int nColor, typename Arg> class Laplace : public Dslash<Float>
{
protected:
Arg &arg;
const ColorSpinorField &in;
public:
Laplace(Arg &arg, const ColorSpinorField &out, const ColorSpinorField &in) :
Dslash<Float>(arg, out, in, "kernels/laplace.cuh"),
arg(arg),
in(in)
{
}
virtual ~Laplace() {}
void apply(const hipStream_t &stream)
{
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
Dslash<Float>::setParam(arg);
Dslash<Float>::template instantiate<LaplaceLaunch, nDim, nColor>(tp, arg, stream);
}
long long flops() const
{
int mv_flops = (8 * in.Ncolor() - 2) * in.Ncolor(); // SU(3) matrix-vector flops
int num_mv_multiply = in.Nspin() == 4 ? 2 : 1;
int ghost_flops = (num_mv_multiply * mv_flops + 2 * in.Ncolor() * in.Nspin());
int xpay_flops = 2 * 2 * in.Ncolor() * in.Nspin(); // multiply and add per real component
int num_dir = (arg.dir == 4 ? 2 * 4 : 2 * 3); // 3D or 4D operator
long long flops_ = 0;
// FIXME - should we count the xpay flops in the derived kernels
// since some kernels require the xpay in the exterior (preconditiond clover)
switch (arg.kernel_type) {
case EXTERIOR_KERNEL_X:
case EXTERIOR_KERNEL_Y:
case EXTERIOR_KERNEL_Z:
case EXTERIOR_KERNEL_T:
flops_ = (ghost_flops + (arg.xpay ? xpay_flops : xpay_flops / 2)) * 2 * in.GhostFace()[arg.kernel_type];
break;
case EXTERIOR_KERNEL_ALL: {
long long ghost_sites = 2 * (in.GhostFace()[0] + in.GhostFace()[1] + in.GhostFace()[2] + in.GhostFace()[3]);
flops_ = (ghost_flops + (arg.xpay ? xpay_flops : xpay_flops / 2)) * ghost_sites;
break;
}
case INTERIOR_KERNEL:
case KERNEL_POLICY: {
long long sites = in.Volume();
flops_ = (num_dir * (in.Nspin() / 4) * in.Ncolor() * in.Nspin() + // spin project (=0 for staggered)
num_dir * num_mv_multiply * mv_flops + // SU(3) matrix-vector multiplies
((num_dir - 1) * 2 * in.Ncolor() * in.Nspin()))
* sites; // accumulation
if (arg.xpay) flops_ += xpay_flops * sites;
if (arg.kernel_type == KERNEL_POLICY) break;
// now correct for flops done by exterior kernel
long long ghost_sites = 0;
for (int d = 0; d < 4; d++)
if (arg.commDim[d]) ghost_sites += 2 * in.GhostFace()[d];
flops_ -= ghost_flops * ghost_sites;
break;
}
}
return flops_;
}
virtual long long bytes() const
{
int gauge_bytes = arg.reconstruct * in.Precision();
bool isFixed = (in.Precision() == sizeof(short) || in.Precision() == sizeof(char)) ? true : false;
int spinor_bytes = 2 * in.Ncolor() * in.Nspin() * in.Precision() + (isFixed ? sizeof(float) : 0);
int proj_spinor_bytes = in.Nspin() == 4 ? spinor_bytes / 2 : spinor_bytes;
int ghost_bytes = (proj_spinor_bytes + gauge_bytes) + 2 * spinor_bytes; // 2 since we have to load the partial
int num_dir = (arg.dir == 4 ? 2 * 4 : 2 * 3); // 3D or 4D operator
long long bytes_ = 0;
switch (arg.kernel_type) {
case EXTERIOR_KERNEL_X:
case EXTERIOR_KERNEL_Y:
case EXTERIOR_KERNEL_Z:
case EXTERIOR_KERNEL_T: bytes_ = ghost_bytes * 2 * in.GhostFace()[arg.kernel_type]; break;
case EXTERIOR_KERNEL_ALL: {
long long ghost_sites = 2 * (in.GhostFace()[0] + in.GhostFace()[1] + in.GhostFace()[2] + in.GhostFace()[3]);
bytes_ = ghost_bytes * ghost_sites;
break;
}
case INTERIOR_KERNEL:
case KERNEL_POLICY: {
long long sites = in.Volume();
bytes_ = (num_dir * gauge_bytes + ((num_dir - 2) * spinor_bytes + 2 * proj_spinor_bytes) + spinor_bytes) * sites;
if (arg.xpay) bytes_ += spinor_bytes;
if (arg.kernel_type == KERNEL_POLICY) break;
// now correct for bytes done by exterior kernel
long long ghost_sites = 0;
for (int d = 0; d < 4; d++)
if (arg.commDim[d]) ghost_sites += 2 * in.GhostFace()[d];
bytes_ -= ghost_bytes * ghost_sites;
break;
}
}
return bytes_;
}
TuneKey tuneKey() const
{
// add laplace transverse dir to the key
char aux[TuneKey::aux_n];
strcpy(aux, Dslash<Float>::aux[arg.kernel_type]);
strcat(aux, ",laplace=");
char laplace[32];
u32toa(laplace, arg.dir);
strcat(aux, laplace);
return TuneKey(in.VolString(), typeid(*this).name(), aux);
}
};
// Instantiation functor: builds the kernel argument struct and the Laplace
// dslash object, then runs it through the communication-policy autotuner.
template <typename Float, int nColor, QudaReconstructType recon> struct LaplaceApply {
inline LaplaceApply(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U, int dir, double a,
const ColorSpinorField &x, int parity, bool dagger, const int *comm_override,
TimeProfile &profile)
{
constexpr int nDim = 4;
LaplaceArg<Float, nColor, recon> arg(out, in, U, dir, a, x, parity, dagger, comm_override);
Laplace<Float, nDim, nColor, LaplaceArg<Float, nColor, recon>> laplace(arg, out, in);
// The policy tuner needs a mutable cudaColorSpinorField*; the const_cast is
// a framework requirement (the field is not semantically modified here).
dslash::DslashPolicyTune<decltype(laplace)> policy(
laplace, const_cast<cudaColorSpinorField *>(static_cast<const cudaColorSpinorField *>(&in)), in.VolumeCB(),
in.GhostFaceCB(), profile);
// Run on the default stream.
policy.apply(0);
checkCudaError();
}
};
// Apply the Laplace operator
// out(x) = M*in = - kappa*\sum_mu U_{-\mu}(x)in(x+mu) + U^\dagger_mu(x-mu)in(x-mu)
// Uses the kappa normalization for the Wilson operator.
// Omits direction 'dir' from the operator.
// Public entry point: validates the fields, then dispatches to LaplaceApply
// via the precision/color/reconstruct instantiation machinery.
void ApplyLaplace(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U, int dir, double kappa,
const ColorSpinorField &x, int parity, bool dagger, const int *comm_override, TimeProfile &profile)
{
// The operator cannot be applied in place.
if (in.V() == out.V()) errorQuda("Aliasing pointers");
if (in.FieldOrder() != out.FieldOrder())
errorQuda("Field order mismatch in = %d, out = %d", in.FieldOrder(), out.FieldOrder());
// check all precisions match
checkPrecision(out, in, U);
// check all locations match
checkLocation(out, in, U);
instantiate<LaplaceApply>(out, in, U, dir, kappa, x, parity, dagger, comm_override, profile);
}
} // namespace quda
#else
#include <gauge_field.h>
#include <gauge_field_order.h>
#include <color_spinor_field.h>
#include <color_spinor_field_order.h>
#include <index_helper.cuh>
#include <dslash_quda.h>
#include <color_spinor.h>
#include <worker.h>
#include <tune_quda.h>
/**
This is a basic gauged Laplace operator
*/
namespace quda {
/**
@brief Parameter structure for driving the Laplace operator
*/
// Argument struct passed (by value) to the CPU/GPU Laplace kernels.
// Wraps accessors for the spinor/gauge fields plus the launch geometry.
template <typename Float, int nColor, QudaReconstructType reconstruct, bool xpay>
struct LaplaceArg {
typedef typename colorspinor_mapper<Float,1,nColor>::type F;
typedef typename gauge_mapper<Float,reconstruct>::type G;
F out; // output vector field
const F in; // input vector field
const F x; // input vector when doing xpay
const G U; // the gauge field
const Float kappa; // kappa parameter = 1/(8+m)
const int parity; // only use this for single parity fields
const int nParity; // number of parities we're working on
const int nFace; // hard code to 1 for now
const int dim[5]; // full lattice dimensions
const int commDim[4]; // whether a given dimension is partitioned or not
const int volumeCB; // checkerboarded volume
__host__ __device__ static constexpr bool isXpay() { return xpay; }
// When xpay is disabled, x aliases in (it is never read in that case).
// The first dimension is scaled by (3 - nParity) so that a single-parity
// field reports its full (non-checkerboarded) extent.
LaplaceArg(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U,
Float kappa, const ColorSpinorField *x, int parity)
: out(out), in(in), U(U), kappa(kappa), x(xpay ? *x : in), parity(parity), nParity(in.SiteSubset()), nFace(1),
dim{ (3-nParity) * in.X(0), in.X(1), in.X(2), in.X(3), 1 },
commDim{comm_dim_partitioned(0), comm_dim_partitioned(1), comm_dim_partitioned(2), comm_dim_partitioned(3)},
volumeCB(in.VolumeCB())
{
if (in.FieldOrder() != QUDA_FLOAT2_FIELD_ORDER || !U.isNative())
errorQuda("Unsupported field order colorspinor=%d gauge=%d combination\n", in.FieldOrder(), U.FieldOrder());
}
};
/**
Applies the off-diagonal part of the Laplace operator
@param[out] out The out result field
@param[in] U The gauge field
@param[in] kappa Kappa value
@param[in] in The input field
@param[in] parity The site parity
@param[in] x_cb The checkerboarded site index
*/
// Accumulates into `out` the gauged hopping terms of the Laplace stencil for
// site x_cb/parity: forward neighbours via U, backward neighbours via U^dag.
// Boundary sites of partitioned dimensions fetch from the ghost zones.
template <typename Float, int nDim, int nColor, typename Vector, typename Arg>
__device__ __host__ inline void applyLaplace(Vector &out, Arg &arg, int x_cb, int parity) {
typedef Matrix<complex<Float>,nColor> Link;
// With both parities resident, neighbours live on the opposite parity.
const int their_spinor_parity = (arg.nParity == 2) ? 1-parity : 0;
int coord[5];
getCoords(coord, x_cb, arg.dim, parity);
coord[4] = 0;
#pragma unroll
for (int d = 0; d<nDim; d++) // loop over dimension
{
//Forward gather - compute fwd offset for vector fetch
const int fwd_idx = linkIndexP1(coord, arg.dim, d);
// On a partitioned boundary, read the forward neighbour from the ghost
// buffer instead of the local field.
if ( arg.commDim[d] && (coord[d] + arg.nFace >= arg.dim[d]) ) {
const int ghost_idx = ghostFaceIndex<1>(coord, arg.dim, d, arg.nFace);
const Link U = arg.U(d, x_cb, parity);
const Vector in = arg.in.Ghost(d, 1, ghost_idx, their_spinor_parity);
out += U * in;
} else {
const Link U = arg.U(d, x_cb, parity);
const Vector in = arg.in(fwd_idx, their_spinor_parity);
out += U * in;
}
//Backward gather - compute back offset for spinor and gauge fetch
const int back_idx = linkIndexM1(coord, arg.dim, d);
const int gauge_idx = back_idx;
// The backward link is owned by the neighbouring (opposite-parity) site,
// hence the 1-parity index and the conjugated link below.
if ( arg.commDim[d] && (coord[d] - arg.nFace < 0) ) {
const int ghost_idx = ghostFaceIndex<0>(coord, arg.dim, d, arg.nFace);
const Link U = arg.U.Ghost(d, ghost_idx, 1-parity);
const Vector in = arg.in.Ghost(d, 0, ghost_idx, their_spinor_parity);
out += conj(U) * in;
} else {
const Link U = arg.U(d, gauge_idx, 1-parity);
const Vector in = arg.in(back_idx, their_spinor_parity);
out += conj(U) * in;
}
} //nDim
}
//out(x) = M*in = (-D + m) * in(x-mu)
// Full per-site Laplace application: gathers the hopping terms, optionally
// applies the xpay step (out = x + kappa*D*in), and stores the result.
template <typename Float, int nDim, int nColor, typename Arg>
__device__ __host__ inline void laplace(Arg &arg, int x_cb, int parity)
{
typedef ColorSpinor<Float,nColor,1> Vector;
Vector out;
applyLaplace<Float,nDim,nColor>(out, arg, x_cb, parity);
if (arg.isXpay()) {
Vector x = arg.x(x_cb, parity);
out = x + arg.kappa * out;
}
// Single-parity fields are stored at parity index 0.
arg.out(x_cb, arg.nParity == 2 ? parity : 0) = out;
}
// CPU kernel for applying the Laplace operator to a vector
// CPU fallback: applies the Laplace operator to every checkerboard site of
// every resident parity.
template <typename Float, int nDim, int nColor, typename Arg>
void laplaceCPU(Arg arg)
{
for (int parity= 0; parity < arg.nParity; parity++) {
// for full fields then set parity from loop else use arg setting
// (for single-parity fields this overwrites the loop variable with
// arg.parity; the loop still terminates since nParity == 1)
parity = (arg.nParity == 2) ? parity : arg.parity;
for (int x_cb = 0; x_cb < arg.volumeCB; x_cb++) { // 4-d volume
laplace<Float,nDim,nColor>(arg, x_cb, parity);
} // 4-d volumeCB
} // parity
}
// GPU Kernel for applying the Laplace operator to a vector
// GPU kernel: x maps to the checkerboard site index, y to the parity.
// Both dimensions are tail-guarded, so any launch geometry that covers
// volumeCB x nParity is valid.
template <typename Float, int nDim, int nColor, typename Arg>
__global__ void laplaceGPU(Arg arg)
{
int x_cb = blockIdx.x*blockDim.x + threadIdx.x;
// for full fields set parity from y thread index else use arg setting
int parity = (arg.nParity == 2) ? blockDim.y*blockIdx.y + threadIdx.y : arg.parity;
if (x_cb >= arg.volumeCB) return;
if (parity >= arg.nParity) return;
laplace<Float,nDim,nColor>(arg, x_cb, parity);
}
// Tunable wrapper around the legacy Laplace kernels: provides flop/byte
// accounting for the autotuner and dispatches to the CPU or GPU path.
template <typename Float, int nDim, int nColor, typename Arg>
class Laplace : public TunableVectorY {
protected:
Arg &arg;
const ColorSpinorField &meta; // field used for metadata (location, volume, aux string)
// Flop count: 2*nDim matrix-vector products per site plus accumulation,
// with the optional xpay multiply-add.
long long flops() const
{
return (2*nDim*(8*nColor*nColor)-2*nColor + (arg.isXpay() ? 2*2*nColor : 0) )*arg.nParity*(long long)meta.VolumeCB();
}
long long bytes() const
{
return arg.out.Bytes() + 2*nDim*arg.in.Bytes() + arg.nParity*2*nDim*arg.U.Bytes()*meta.VolumeCB() +
(arg.isXpay() ? arg.x.Bytes() : 0);
}
bool tuneGridDim() const { return false; }
unsigned int minThreads() const { return arg.volumeCB; }
public:
// Builds the tuning aux string from the field metadata, the partitioning
// pattern and the xpay flag so distinct configurations tune separately.
Laplace(Arg &arg, const ColorSpinorField &meta) : TunableVectorY(arg.nParity), arg(arg), meta(meta)
{
strcpy(aux, meta.AuxString());
#ifdef MULTI_GPU
char comm[5];
comm[0] = (arg.commDim[0] ? '1' : '0');
comm[1] = (arg.commDim[1] ? '1' : '0');
comm[2] = (arg.commDim[2] ? '1' : '0');
comm[3] = (arg.commDim[3] ? '1' : '0');
comm[4] = '\0';
strcat(aux,",comm=");
strcat(aux,comm);
#endif
if (arg.isXpay()) strcat(aux,",xpay");
}
virtual ~Laplace() { }
void apply(const hipStream_t &stream) {
if (meta.Location() == QUDA_CPU_FIELD_LOCATION) {
laplaceCPU<Float,nDim,nColor>(arg);
} else {
// Launch with autotuned geometry.
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
hipLaunchKernelGGL(( laplaceGPU<Float,nDim,nColor>) , dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg);
}
}
TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); }
};
// Fully-typed instantiation: selects the xpay/no-xpay specialization based
// on whether an accumulation field x was supplied, then runs on the default
// stream via apply(0).
template <typename Float, int nColor, QudaReconstructType recon>
void ApplyLaplace(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U,
double kappa, const ColorSpinorField *x, int parity)
{
constexpr int nDim = 4;
if (x) {
LaplaceArg<Float,nColor,recon,true> arg(out, in, U, kappa, x, parity);
Laplace<Float,nDim,nColor,LaplaceArg<Float,nColor,recon,true> > laplace(arg, in);
laplace.apply(0);
} else {
LaplaceArg<Float,nColor,recon,false> arg(out, in, U, kappa, x, parity);
Laplace<Float,nDim,nColor,LaplaceArg<Float,nColor,recon,false> > laplace(arg, in);
laplace.apply(0);
}
}
// template on the gauge reconstruction
// Dispatch on the gauge-field reconstruction type; only NO/12/8 are
// instantiated, anything else is a hard error.
template <typename Float, int nColor>
void ApplyLaplace(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U,
                  double kappa, const ColorSpinorField *x, int parity)
{
  switch (U.Reconstruct()) {
  case QUDA_RECONSTRUCT_NO:
    ApplyLaplace<Float,nColor,QUDA_RECONSTRUCT_NO>(out, in, U, kappa, x, parity);
    break;
  case QUDA_RECONSTRUCT_12:
    ApplyLaplace<Float,nColor,QUDA_RECONSTRUCT_12>(out, in, U, kappa, x, parity);
    break;
  case QUDA_RECONSTRUCT_8:
    ApplyLaplace<Float,nColor,QUDA_RECONSTRUCT_8>(out, in, U, kappa, x, parity);
    break;
  default:
    errorQuda("Unsupported reconstruct type %d\n", U.Reconstruct());
  }
}
// template on the number of colors
// Dispatch on the number of colors; only nColor = 3 is instantiated.
template <typename Float>
void ApplyLaplace(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U,
                  double kappa, const ColorSpinorField *x, int parity)
{
  if (in.Ncolor() == 3) {
    ApplyLaplace<Float,3>(out, in, U, kappa, x, parity);
  } else {
    // report the value that was actually tested — the original printed
    // U.Ncolor() while branching on in.Ncolor()
    errorQuda("Unsupported number of colors %d\n", in.Ncolor());
  }
}
// this is the Worker pointer that may have issue additional work
// while we're waiting on communication to finish
namespace dslash {
extern Worker* aux_worker;
}
//Apply the Laplace operator
//out(x) = M*in = - kappa*\sum_mu U_{-\mu}(x)in(x+mu) + U^\dagger_mu(x-mu)in(x-mu)
//Uses the kappa normalization for the Wilson operator.
// Front-end entry point: validates the fields, exchanges the ghost zones
// needed by the one-hop stencil, then dispatches on gauge-field precision.
void ApplyLaplace(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U,
double kappa, const ColorSpinorField *x, int parity)
{
if (in.V() == out.V()) errorQuda("Aliasing pointers");
if (in.FieldOrder() != out.FieldOrder())
errorQuda("Field order mismatch in = %d, out = %d", in.FieldOrder(), out.FieldOrder());
// check all precisions match
checkPrecision(out, in, U);
// check all locations match
checkLocation(out, in, U);
const int nFace = 1;
// populate the nFace-deep ghost zones of the opposite parity
in.exchangeGhost((QudaParity)(1-parity), nFace, 0); // last parameter is dummy
// give any registered auxiliary worker a chance to run while we wait
if (dslash::aux_worker) dslash::aux_worker->apply(0);
if (U.Precision() == QUDA_DOUBLE_PRECISION) {
ApplyLaplace<double>(out, in, U, kappa, x, parity);
} else if (U.Precision() == QUDA_SINGLE_PRECISION) {
ApplyLaplace<float>(out, in, U, kappa, x, parity);
} else {
errorQuda("Unsupported precision %d\n", U.Precision());
}
// flip the ghost double-buffer index — NOTE(review): presumably required
// by exchangeGhost's buffering; confirm against ColorSpinorField
in.bufferIndex = (1 - in.bufferIndex);
}
} // namespace quda
#endif
| b8290f8eb98a28c40230d98ede07b759d02e516f.cu | #ifndef USE_LEGACY_DSLASH
#include <dslash.h>
#include <worker.h>
#include <dslash_helper.cuh>
#include <color_spinor_field_order.h>
#include <gauge_field_order.h>
#include <color_spinor.h>
#include <dslash_helper.cuh>
#include <index_helper.cuh>
#include <gauge_field.h>
#include <uint_to_char.h>
#include <dslash_policy.cuh>
#include <kernels/laplace.cuh>
/**
This is the laplacian derivative based on the basic gauged differential operator
*/
namespace quda
{
/**
@brief This is a helper class that is used to instantiate the
correct templated kernel for the dslash.
*/
// Maps the compile-time configuration (parity/dagger/xpay/kernel-type)
// onto the concrete laplaceGPU instantiation for the Dslash launcher.
template <typename Float, int nDim, int nColor, int nParity, bool dagger, bool xpay, KernelType kernel_type, typename Arg>
struct LaplaceLaunch {
// kernel name for jit compilation
static constexpr const char *kernel = "quda::laplaceGPU";
template <typename Dslash>
inline static void launch(Dslash &dslash, TuneParam &tp, Arg &arg, const cudaStream_t &stream)
{
dslash.launch(laplaceGPU<Float, nDim, nColor, nParity, dagger, xpay, kernel_type, Arg>, tp, arg, stream);
}
};
// Dslash-framework driver for the laplace kernel: instantiates the
// templated GPU kernel via LaplaceLaunch and provides the flop/byte
// counts used by the autotuner.
template <typename Float, int nDim, int nColor, typename Arg> class Laplace : public Dslash<Float>
{
protected:
  Arg &arg;
  const ColorSpinorField &in; // fixed: was HTML-entity mojibake "∈" ("&in;"), which did not compile

public:
  Laplace(Arg &arg, const ColorSpinorField &out, const ColorSpinorField &in) :
    Dslash<Float>(arg, out, in, "kernels/laplace.cuh"),
    arg(arg),
    in(in)
  {
  }

  virtual ~Laplace() {}

  // Tune and launch for the current nParity/dagger/xpay/kernel-type settings.
  void apply(const cudaStream_t &stream)
  {
    TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
    Dslash<Float>::setParam(arg);
    Dslash<Float>::template instantiate<LaplaceLaunch, nDim, nColor>(tp, arg, stream);
  }

  // Flop count broken out per kernel type (interior vs exterior halo work).
  long long flops() const
  {
    int mv_flops = (8 * in.Ncolor() - 2) * in.Ncolor(); // SU(3) matrix-vector flops
    int num_mv_multiply = in.Nspin() == 4 ? 2 : 1;
    int ghost_flops = (num_mv_multiply * mv_flops + 2 * in.Ncolor() * in.Nspin());
    int xpay_flops = 2 * 2 * in.Ncolor() * in.Nspin(); // multiply and add per real component
    int num_dir = (arg.dir == 4 ? 2 * 4 : 2 * 3);      // 3D or 4D operator
    long long flops_ = 0;
    // FIXME - should we count the xpay flops in the derived kernels
    // since some kernels require the xpay in the exterior (preconditiond clover)
    switch (arg.kernel_type) {
    case EXTERIOR_KERNEL_X:
    case EXTERIOR_KERNEL_Y:
    case EXTERIOR_KERNEL_Z:
    case EXTERIOR_KERNEL_T:
      flops_ = (ghost_flops + (arg.xpay ? xpay_flops : xpay_flops / 2)) * 2 * in.GhostFace()[arg.kernel_type];
      break;
    case EXTERIOR_KERNEL_ALL: {
      long long ghost_sites = 2 * (in.GhostFace()[0] + in.GhostFace()[1] + in.GhostFace()[2] + in.GhostFace()[3]);
      flops_ = (ghost_flops + (arg.xpay ? xpay_flops : xpay_flops / 2)) * ghost_sites;
      break;
    }
    case INTERIOR_KERNEL:
    case KERNEL_POLICY: {
      long long sites = in.Volume();
      flops_ = (num_dir * (in.Nspin() / 4) * in.Ncolor() * in.Nspin() + // spin project (=0 for staggered)
                num_dir * num_mv_multiply * mv_flops +                  // SU(3) matrix-vector multiplies
                ((num_dir - 1) * 2 * in.Ncolor() * in.Nspin()))
          * sites; // accumulation
      if (arg.xpay) flops_ += xpay_flops * sites;
      if (arg.kernel_type == KERNEL_POLICY) break;
      // now correct for flops done by exterior kernel
      long long ghost_sites = 0;
      for (int d = 0; d < 4; d++)
        if (arg.commDim[d]) ghost_sites += 2 * in.GhostFace()[d];
      flops_ -= ghost_flops * ghost_sites;
      break;
    }
    }
    return flops_;
  }

  // Byte count broken out per kernel type, mirroring flops() above.
  virtual long long bytes() const
  {
    int gauge_bytes = arg.reconstruct * in.Precision();
    bool isFixed = (in.Precision() == sizeof(short) || in.Precision() == sizeof(char)) ? true : false;
    int spinor_bytes = 2 * in.Ncolor() * in.Nspin() * in.Precision() + (isFixed ? sizeof(float) : 0);
    int proj_spinor_bytes = in.Nspin() == 4 ? spinor_bytes / 2 : spinor_bytes;
    int ghost_bytes = (proj_spinor_bytes + gauge_bytes) + 2 * spinor_bytes; // 2 since we have to load the partial
    int num_dir = (arg.dir == 4 ? 2 * 4 : 2 * 3);                           // 3D or 4D operator
    long long bytes_ = 0;
    switch (arg.kernel_type) {
    case EXTERIOR_KERNEL_X:
    case EXTERIOR_KERNEL_Y:
    case EXTERIOR_KERNEL_Z:
    case EXTERIOR_KERNEL_T: bytes_ = ghost_bytes * 2 * in.GhostFace()[arg.kernel_type]; break;
    case EXTERIOR_KERNEL_ALL: {
      long long ghost_sites = 2 * (in.GhostFace()[0] + in.GhostFace()[1] + in.GhostFace()[2] + in.GhostFace()[3]);
      bytes_ = ghost_bytes * ghost_sites;
      break;
    }
    case INTERIOR_KERNEL:
    case KERNEL_POLICY: {
      long long sites = in.Volume();
      bytes_ = (num_dir * gauge_bytes + ((num_dir - 2) * spinor_bytes + 2 * proj_spinor_bytes) + spinor_bytes) * sites;
      if (arg.xpay) bytes_ += spinor_bytes;
      if (arg.kernel_type == KERNEL_POLICY) break;
      // now correct for bytes done by exterior kernel
      long long ghost_sites = 0;
      for (int d = 0; d < 4; d++)
        if (arg.commDim[d]) ghost_sites += 2 * in.GhostFace()[d];
      bytes_ -= ghost_bytes * ghost_sites;
      break;
    }
    }
    return bytes_;
  }

  TuneKey tuneKey() const
  {
    // add laplace transverse dir to the key
    char aux[TuneKey::aux_n];
    strcpy(aux, Dslash<Float>::aux[arg.kernel_type]);
    strcat(aux, ",laplace=");
    char laplace[32];
    u32toa(laplace, arg.dir);
    strcat(aux, laplace);
    return TuneKey(in.VolString(), typeid(*this).name(), aux);
  }
};
// Functor invoked by quda's instantiate<> machinery: builds the kernel
// argument struct and runs the dslash communication-policy tuner.
template <typename Float, int nColor, QudaReconstructType recon> struct LaplaceApply {
inline LaplaceApply(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U, int dir, double a,
const ColorSpinorField &x, int parity, bool dagger, const int *comm_override,
TimeProfile &profile)
{
constexpr int nDim = 4;
LaplaceArg<Float, nColor, recon> arg(out, in, U, dir, a, x, parity, dagger, comm_override);
Laplace<Float, nDim, nColor, LaplaceArg<Float, nColor, recon>> laplace(arg, out, in);
// tunes over comms/compute overlap policies in addition to block shape
dslash::DslashPolicyTune<decltype(laplace)> policy(
laplace, const_cast<cudaColorSpinorField *>(static_cast<const cudaColorSpinorField *>(&in)), in.VolumeCB(),
in.GhostFaceCB(), profile);
policy.apply(0);
checkCudaError();
}
};
// Apply the Laplace operator
// out(x) = M*in = - kappa*\sum_mu U_{-\mu}(x)in(x+mu) + U^\dagger_mu(x-mu)in(x-mu)
// Uses the kappa normalization for the Wilson operator.
// Omits direction 'dir' from the operator.
// Front-end entry point (new dslash framework): validates the fields and
// dispatches on precision/color/reconstruct via instantiate<>.
void ApplyLaplace(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U, int dir, double kappa,
const ColorSpinorField &x, int parity, bool dagger, const int *comm_override, TimeProfile &profile)
{
if (in.V() == out.V()) errorQuda("Aliasing pointers");
if (in.FieldOrder() != out.FieldOrder())
errorQuda("Field order mismatch in = %d, out = %d", in.FieldOrder(), out.FieldOrder());
// check all precisions match
checkPrecision(out, in, U);
// check all locations match
checkLocation(out, in, U);
instantiate<LaplaceApply>(out, in, U, dir, kappa, x, parity, dagger, comm_override, profile);
}
} // namespace quda
#else
#include <gauge_field.h>
#include <gauge_field_order.h>
#include <color_spinor_field.h>
#include <color_spinor_field_order.h>
#include <index_helper.cuh>
#include <dslash_quda.h>
#include <color_spinor.h>
#include <worker.h>
#include <tune_quda.h>
/**
This is a basic gauged Laplace operator
*/
namespace quda {
/**
@brief Parameter structure for driving the Laplace operator
*/
// Kernel arguments for the legacy (USE_LEGACY_DSLASH) laplace path.
template <typename Float, int nColor, QudaReconstructType reconstruct, bool xpay>
struct LaplaceArg {
typedef typename colorspinor_mapper<Float,1,nColor>::type F;
typedef typename gauge_mapper<Float,reconstruct>::type G;
F out; // output vector field
const F in; // input vector field
const F x; // input vector when doing xpay
const G U; // the gauge field
const Float kappa; // kappa parameter = 1/(8+m)
const int parity; // only use this for single parity fields
const int nParity; // number of parities we're working on
const int nFace; // hard code to 1 for now
const int dim[5]; // full lattice dimensions
const int commDim[4]; // whether a given dimension is partitioned or not
const int volumeCB; // checkerboarded volume
__host__ __device__ static constexpr bool isXpay() { return xpay; }
LaplaceArg(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U,
Float kappa, const ColorSpinorField *x, int parity)
// when xpay is disabled, x aliases in (never read in that case)
: out(out), in(in), U(U), kappa(kappa), x(xpay ? *x : in), parity(parity), nParity(in.SiteSubset()), nFace(1),
// (3-nParity)*X(0) — presumably restores the full x-extent for
// single-parity fields (nParity==1 doubles it); TODO confirm
dim{ (3-nParity) * in.X(0), in.X(1), in.X(2), in.X(3), 1 },
commDim{comm_dim_partitioned(0), comm_dim_partitioned(1), comm_dim_partitioned(2), comm_dim_partitioned(3)},
volumeCB(in.VolumeCB())
{
// only native float2 spinor order with a native gauge order is supported
if (in.FieldOrder() != QUDA_FLOAT2_FIELD_ORDER || !U.isNative())
errorQuda("Unsupported field order colorspinor=%d gauge=%d combination\n", in.FieldOrder(), U.FieldOrder());
}
};
/**
Applies the off-diagonal part of the Laplace operator
@param[out] out The out result field
@param[in] U The gauge field
@param[in] kappa Kappa value
@param[in] in The input field
@param[in] parity The site parity
@param[in] x_cb The checkerboarded site index
*/
template <typename Float, int nDim, int nColor, typename Vector, typename Arg>
__device__ __host__ inline void applyLaplace(Vector &out, Arg &arg, int x_cb, int parity) {
typedef Matrix<complex<Float>,nColor> Link;
// parity of the neighbouring sites we gather from
const int their_spinor_parity = (arg.nParity == 2) ? 1-parity : 0;
int coord[5];
getCoords(coord, x_cb, arg.dim, parity);
coord[4] = 0;
#pragma unroll
for (int d = 0; d<nDim; d++) // loop over dimension
{
//Forward gather - compute fwd offset for vector fetch
const int fwd_idx = linkIndexP1(coord, arg.dim, d);
// on the forward boundary of a partitioned dimension the neighbour
// lives in the ghost buffer rather than the local field
if ( arg.commDim[d] && (coord[d] + arg.nFace >= arg.dim[d]) ) {
const int ghost_idx = ghostFaceIndex<1>(coord, arg.dim, d, arg.nFace);
const Link U = arg.U(d, x_cb, parity);
const Vector in = arg.in.Ghost(d, 1, ghost_idx, their_spinor_parity);
out += U * in;
} else {
const Link U = arg.U(d, x_cb, parity);
const Vector in = arg.in(fwd_idx, their_spinor_parity);
out += U * in;
}
//Backward gather - compute back offset for spinor and gauge fetch
const int back_idx = linkIndexM1(coord, arg.dim, d);
const int gauge_idx = back_idx;
if ( arg.commDim[d] && (coord[d] - arg.nFace < 0) ) {
const int ghost_idx = ghostFaceIndex<0>(coord, arg.dim, d, arg.nFace);
// backward hop: conjugated link fetched from the gauge ghost face
const Link U = arg.U.Ghost(d, ghost_idx, 1-parity);
const Vector in = arg.in.Ghost(d, 0, ghost_idx, their_spinor_parity);
out += conj(U) * in;
} else {
// backward hop: link is stored at the neighbouring (opposite-parity) site
const Link U = arg.U(d, gauge_idx, 1-parity);
const Vector in = arg.in(back_idx, their_spinor_parity);
out += conj(U) * in;
}
} //nDim
}
//out(x) = M*in = (-D + m) * in(x-mu)
template <typename Float, int nDim, int nColor, typename Arg>
__device__ __host__ inline void laplace(Arg &arg, int x_cb, int parity)
{
typedef ColorSpinor<Float,nColor,1> Vector;
Vector out;
// accumulate all 2*nDim neighbour hops into out
applyLaplace<Float,nDim,nColor>(out, arg, x_cb, parity);
if (arg.isXpay()) {
// xpay: out = x + kappa * laplace(in)
Vector x = arg.x(x_cb, parity);
out = x + arg.kappa * out;
}
// single-parity fields store the result at parity index 0
arg.out(x_cb, arg.nParity == 2 ? parity : 0) = out;
}
// CPU kernel for applying the Laplace operator to a vector
// Host fallback: applies the laplace operator site by site over the
// requested parities.
template <typename Float, int nDim, int nColor, typename Arg>
void laplaceCPU(Arg arg)
{
  for (int p = 0; p < arg.nParity; p++) {
    // For full fields the loop index is the parity; for single-parity
    // fields use the parity stored in arg.  The original overwrote the
    // loop counter itself, which only terminated by accident — keep the
    // counter and the working parity separate.
    const int parity = (arg.nParity == 2) ? p : arg.parity;
    for (int x_cb = 0; x_cb < arg.volumeCB; x_cb++) { // 4-d volume
      laplace<Float,nDim,nColor>(arg, x_cb, parity);
    } // 4-d volumeCB
  } // parity
}
// GPU Kernel for applying the Laplace operator to a vector
template <typename Float, int nDim, int nColor, typename Arg>
__global__ void laplaceGPU(Arg arg)
{
  // One thread per checkerboard site; the y thread dimension supplies the
  // parity index for full (nParity == 2) fields, otherwise the parity
  // stored in the argument struct is used.
  const int x_cb   = blockIdx.x*blockDim.x + threadIdx.x;
  const int parity = (arg.nParity == 2) ? blockDim.y*blockIdx.y + threadIdx.y : arg.parity;

  // guard the grid tail in both dimensions before touching any data
  if (x_cb < arg.volumeCB && parity < arg.nParity)
    laplace<Float,nDim,nColor>(arg, x_cb, parity);
}
// Autotuned driver (legacy path) for the laplace operator.  Derives from
// TunableVectorY so the tuner can sweep the y block dimension, which the
// GPU kernel uses as the parity index for full (nParity == 2) fields.
template <typename Float, int nDim, int nColor, typename Arg>
class Laplace : public TunableVectorY {
protected:
Arg &arg;
const ColorSpinorField &meta;
// flop count reported to the tuner: 2*nDim link matrix-vector products
// plus accumulation per site, and an optional axpy when xpay is enabled
long long flops() const
{
return (2*nDim*(8*nColor*nColor)-2*nColor + (arg.isXpay() ? 2*2*nColor : 0) )*arg.nParity*(long long)meta.VolumeCB();
}
// byte count reported to the tuner: one spinor store, 2*nDim spinor loads
// and 2*nDim gauge-link loads per site, plus the optional xpay input
long long bytes() const
{
return arg.out.Bytes() + 2*nDim*arg.in.Bytes() + arg.nParity*2*nDim*arg.U.Bytes()*meta.VolumeCB() +
(arg.isXpay() ? arg.x.Bytes() : 0);
}
bool tuneGridDim() const { return false; } // grid size fixed by minThreads()
unsigned int minThreads() const { return arg.volumeCB; }
public:
Laplace(Arg &arg, const ColorSpinorField &meta) : TunableVectorY(arg.nParity), arg(arg), meta(meta)
{
// seed the tuning key with the field's aux string, then append the comm
// partitioning pattern and xpay flag so distinct configs tune separately
strcpy(aux, meta.AuxString());
#ifdef MULTI_GPU
char comm[5];
comm[0] = (arg.commDim[0] ? '1' : '0');
comm[1] = (arg.commDim[1] ? '1' : '0');
comm[2] = (arg.commDim[2] ? '1' : '0');
comm[3] = (arg.commDim[3] ? '1' : '0');
comm[4] = '\0';
strcat(aux,",comm=");
strcat(aux,comm);
#endif
if (arg.isXpay()) strcat(aux,",xpay");
}
virtual ~Laplace() { }
// dispatch to the host routine, or tune and launch the GPU kernel
void apply(const cudaStream_t &stream) {
if (meta.Location() == QUDA_CPU_FIELD_LOCATION) {
laplaceCPU<Float,nDim,nColor>(arg);
} else {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
laplaceGPU<Float,nDim,nColor> <<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg);
}
}
TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); }
};
// Instantiate the laplace kernel for fixed precision/color/reconstruct,
// selecting the xpay variant (out = x + kappa*laplace(in)) when an
// accumulate field x is supplied.
template <typename Float, int nColor, QudaReconstructType recon>
void ApplyLaplace(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U,
double kappa, const ColorSpinorField *x, int parity)
{
constexpr int nDim = 4;
if (x) {
// xpay = true: result accumulated onto the x field
LaplaceArg<Float,nColor,recon,true> arg(out, in, U, kappa, x, parity);
Laplace<Float,nDim,nColor,LaplaceArg<Float,nColor,recon,true> > laplace(arg, in);
laplace.apply(0);
} else {
LaplaceArg<Float,nColor,recon,false> arg(out, in, U, kappa, x, parity);
Laplace<Float,nDim,nColor,LaplaceArg<Float,nColor,recon,false> > laplace(arg, in);
laplace.apply(0);
}
}
// template on the gauge reconstruction
// Dispatch on the gauge-field reconstruction type; only NO/12/8 are
// instantiated, anything else is a hard error.
template <typename Float, int nColor>
void ApplyLaplace(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U,
                  double kappa, const ColorSpinorField *x, int parity)
{
  switch (U.Reconstruct()) {
  case QUDA_RECONSTRUCT_NO:
    ApplyLaplace<Float,nColor,QUDA_RECONSTRUCT_NO>(out, in, U, kappa, x, parity);
    break;
  case QUDA_RECONSTRUCT_12:
    ApplyLaplace<Float,nColor,QUDA_RECONSTRUCT_12>(out, in, U, kappa, x, parity);
    break;
  case QUDA_RECONSTRUCT_8:
    ApplyLaplace<Float,nColor,QUDA_RECONSTRUCT_8>(out, in, U, kappa, x, parity);
    break;
  default:
    errorQuda("Unsupported reconstruct type %d\n", U.Reconstruct());
  }
}
// template on the number of colors
// Dispatch on the number of colors; only nColor = 3 is instantiated.
template <typename Float>
void ApplyLaplace(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U,
                  double kappa, const ColorSpinorField *x, int parity)
{
  if (in.Ncolor() == 3) {
    ApplyLaplace<Float,3>(out, in, U, kappa, x, parity);
  } else {
    // report the value that was actually tested — the original printed
    // U.Ncolor() while branching on in.Ncolor()
    errorQuda("Unsupported number of colors %d\n", in.Ncolor());
  }
}
// this is the Worker pointer that may have issue additional work
// while we're waiting on communication to finish
namespace dslash {
extern Worker* aux_worker;
}
//Apply the Laplace operator
//out(x) = M*in = - kappa*\sum_mu U_{-\mu}(x)in(x+mu) + U^\dagger_mu(x-mu)in(x-mu)
//Uses the kappa normalization for the Wilson operator.
// Front-end entry point: validates the fields, exchanges the ghost zones
// needed by the one-hop stencil, then dispatches on gauge-field precision.
void ApplyLaplace(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U,
double kappa, const ColorSpinorField *x, int parity)
{
if (in.V() == out.V()) errorQuda("Aliasing pointers");
if (in.FieldOrder() != out.FieldOrder())
errorQuda("Field order mismatch in = %d, out = %d", in.FieldOrder(), out.FieldOrder());
// check all precisions match
checkPrecision(out, in, U);
// check all locations match
checkLocation(out, in, U);
const int nFace = 1;
// populate the nFace-deep ghost zones of the opposite parity
in.exchangeGhost((QudaParity)(1-parity), nFace, 0); // last parameter is dummy
// give any registered auxiliary worker a chance to run while we wait
if (dslash::aux_worker) dslash::aux_worker->apply(0);
if (U.Precision() == QUDA_DOUBLE_PRECISION) {
ApplyLaplace<double>(out, in, U, kappa, x, parity);
} else if (U.Precision() == QUDA_SINGLE_PRECISION) {
ApplyLaplace<float>(out, in, U, kappa, x, parity);
} else {
errorQuda("Unsupported precision %d\n", U.Precision());
}
// flip the ghost double-buffer index — NOTE(review): presumably required
// by exchangeGhost's buffering; confirm against ColorSpinorField
in.bufferIndex = (1 - in.bufferIndex);
}
} // namespace quda
#endif
|
6911dbfda46f8aa76787b196f3eaf67045457564.hip | // !!! This is a file automatically generated by hipify!!!
/*************************************************************************
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
************************************************************************/
#include <chrono>
#include <cstdio>
#include <cstdlib>
#include <string>
#include <float.h>
#include "nccl.h"
#include "test_utilities.h"
#include <roctracer/roctx.h>
// Print command-line usage to stdout.  The set of supported element types
// depends on whether the toolkit provides half precision (CUDA_HAS_HALF).
void showUsage(const char* bin) {
printf("\n"
"Usage: %s <type> <op> <n_min> <n_max> [delta] [gpus] [gpu0 [gpu1 [...]]]\n"
"Where:\n"
#ifdef CUDA_HAS_HALF
" type = [char|int|half|float|double|int64|uint64]\n"
#else
" type = [char|int|float|double|int64|uint64]\n"
#endif
" op = [sum|prod|max|min]\n"
" n_min > 0\n"
" n_max >= n_min\n"
" delta > 0\n\n", bin);
return;
}
/**
 * Benchmark/correctness driver for ncclReduce (HIP build).
 * Parses <type> <op> <n_min> <n_max> [delta] [gpus] [gpu list], builds a
 * host-side reference reduction, then times ncclReduce to rank 0 over a
 * range of message sizes and reports the max error and bandwidth per size.
 */
int main(int argc, char* argv[]) {
  int nvis = 0;
  CUDACHECK(hipGetDeviceCount(&nvis));
  if (nvis == 0) {
    printf("No GPUs found\n");
    showUsage(argv[0]);
    exit(EXIT_FAILURE);
  }

  ncclDataType_t type;
  ncclRedOp_t op;
  int n_min;
  int n_max;
  int delta;
  int gpus;
  int* list = nullptr;

  if (argc < 5) {
    showUsage(argv[0]);
    exit(EXIT_FAILURE);
  }

  // --- parse and validate the command line
  type = strToType(argv[1]);
  if (type == nccl_NUM_TYPES) {
    printf("Invalid <type> '%s'\n", argv[1]);
    showUsage(argv[0]);
    exit(EXIT_FAILURE);
  }

  op = strToOp(argv[2]);
  if (op == nccl_NUM_OPS) {
    printf("Invalid <op> '%s'\n", argv[2]);
    showUsage(argv[0]);
    exit(EXIT_FAILURE);
  }

  n_min = strToPosInt(argv[3]);
  if (n_min < 1) {
    printf("Invalid <n_min> '%s'\n", argv[3]);
    showUsage(argv[0]);
    exit(EXIT_FAILURE);
  }

  n_max = strToPosInt(argv[4]);
  if (n_max < n_min) {
    printf("Invalid <n_max> '%s'\n", argv[4]);
    showUsage(argv[0]);
    exit(EXIT_FAILURE);
  }

  if (argc > 5) {
    delta = strToPosInt(argv[5]);
    if (delta < 1) {
      printf("Invalid <delta> '%s'\n", argv[5]);
      showUsage(argv[0]);
      exit(EXIT_FAILURE);
    }
  } else {
    // default: roughly ten measurement points across [n_min, n_max]
    delta = (n_max == n_min) ? 1 : (n_max - n_min+9) / 10;
  }

  if (argc > 6) {
    gpus = strToPosInt(argv[6]);
    if (gpus < 1) {
      printf("Invalid <gpus> '%s'\n", argv[6]);
      showUsage(argv[0]);
      exit(EXIT_FAILURE);
    }
  } else {
    gpus = nvis;
  }

  list = (int*)malloc(gpus*sizeof(int));

  if (argc > 7 && argc != 7+gpus) {
    printf("If given, GPU list must be fully specified.\n");
    showUsage(argv[0]);
    exit(EXIT_FAILURE);
  }

  for(int g=0; g<gpus; ++g) {
    if(argc > 7) {
      list[g] = strToNonNeg(argv[7+g]);
      if (list[g] < 0) {
        printf("Invalid GPU%d '%s'\n", g, argv[7+g]);
        showUsage(argv[0]);
        exit(EXIT_FAILURE);
      } else if (list[g] >= nvis) {
        printf("GPU%d (%d) exceeds visible devices (%d)\n", g, list[g], nvis);
        showUsage(argv[0]);
        exit(EXIT_FAILURE);
      }
    } else {
      list[g] = g % nvis; // round-robin over visible devices
    }
  }

  size_t word = wordSize(type);
  size_t max_size = n_max * word;

  // --- host-side reference result (pinned allocation)
  void* refout;
  CUDACHECK(hipHostMalloc(&refout, max_size));

  void** input;
  void* output; // always goes on rank 0
  double* maxError;
  ncclComm_t* comm;
  hipStream_t* stream;
  input = (void**)malloc(gpus*sizeof(void*));
  comm = (ncclComm_t*)malloc(gpus*sizeof(ncclComm_t));
  stream = (hipStream_t*)malloc(gpus*sizeof(hipStream_t));

  // --- per-rank setup: stream, random input, and reference accumulation
  for(int g=0; g<gpus; ++g) {
    char busid[32] = {0};
    CUDACHECK(hipDeviceGetPCIBusId(busid, 32, list[g]));
    printf("# Rank %d using device %d [%s]\n", g, list[g], busid);
    CUDACHECK(hipSetDevice(list[g]));
    CUDACHECK(hipStreamCreate(&stream[g]));
    CUDACHECK(hipMalloc(&input[g], max_size));
    makeRandom(input[g], n_max, type, 42+g);
    if (g == 0) {
      CUDACHECK(hipMalloc(&output, max_size));
      CUDACHECK(hipHostMalloc(&maxError, sizeof(double)));
      CUDACHECK(hipMemcpy(refout, input[g], max_size, hipMemcpyDeviceToHost));
    } else {
      accVec(refout, input[g], n_max, type, op);
    }
  }

  NCCLCHECK(ncclCommInitAll(comm, gpus, list));

  printf(" BYTES ERROR MSEC BW\n");

  for(int n=n_min; n<=n_max; n+=delta) {
    size_t bytes = word * n;
    CUDACHECK(hipSetDevice(list[0]));
    CUDACHECK(hipMemsetAsync(output, 0, bytes, stream[0]));
    // drain every stream before starting the clock (the original
    // synchronized stream[0] on every iteration instead of stream[g])
    for(int g=0; g<gpus; ++g)
      CUDACHECK(hipStreamSynchronize(stream[g]));

    auto start = std::chrono::high_resolution_clock::now();
    for(int g=0; g<gpus; ++g) {
      CUDACHECK(hipSetDevice(list[g]));
      NCCLCHECK(ncclReduce(input[g], output, n, type, op, 0, comm[g], stream[g]));
    }
    for(int g=0; g<gpus; ++g) {
      CUDACHECK(hipSetDevice(list[g]));
      CUDACHECK(hipStreamSynchronize(stream[g]));
    }
    auto stop = std::chrono::high_resolution_clock::now();
    double ms = std::chrono::duration_cast<std::chrono::duration<double>>
        (stop - start).count() * 1000.0;

    // compare the device result on rank 0 against the host reference
    CUDACHECK(hipSetDevice(list[0]));
    maxDiff(maxError, output, refout, n, type, stream[0]);
    CUDACHECK(hipStreamSynchronize(stream[0]));
    double mb = (double)bytes * 1.e-6;
    double algbw = mb / ms;
    printf("%12lu %5.0le %10.3lf %6.2lf\n",
        n*word, *maxError, ms, algbw);
  }

  // --- teardown
  for(int g=0; g<gpus; ++g) {
    CUDACHECK(hipSetDevice(list[g]));
    CUDACHECK(hipStreamDestroy(stream[g]));
    ncclCommDestroy(comm[g]);
    CUDACHECK(hipFree(input[g]));
    if(g == 0) {
      CUDACHECK(hipFree(output));
      CUDACHECK(hipHostFree(maxError));
    }
  }

  free(input);
  free(comm);
  free(stream);
  free(list); // was leaked in the original
  CUDACHECK(hipHostFree(refout));

  exit(EXIT_SUCCESS);
}
| 6911dbfda46f8aa76787b196f3eaf67045457564.cu | /*************************************************************************
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
************************************************************************/
#include <chrono>
#include <cstdio>
#include <cstdlib>
#include <string>
#include <float.h>
#include "nccl.h"
#include "test_utilities.h"
#include <nvToolsExt.h>
// Print command-line usage to stdout.  The set of supported element types
// depends on whether the toolkit provides half precision (CUDA_HAS_HALF).
void showUsage(const char* bin) {
printf("\n"
"Usage: %s <type> <op> <n_min> <n_max> [delta] [gpus] [gpu0 [gpu1 [...]]]\n"
"Where:\n"
#ifdef CUDA_HAS_HALF
" type = [char|int|half|float|double|int64|uint64]\n"
#else
" type = [char|int|float|double|int64|uint64]\n"
#endif
" op = [sum|prod|max|min]\n"
" n_min > 0\n"
" n_max >= n_min\n"
" delta > 0\n\n", bin);
return;
}
/**
 * Benchmark/correctness driver for ncclReduce (CUDA build).
 * Parses <type> <op> <n_min> <n_max> [delta] [gpus] [gpu list], builds a
 * host-side reference reduction, then times ncclReduce to rank 0 over a
 * range of message sizes and reports the max error and bandwidth per size.
 */
int main(int argc, char* argv[]) {
  int nvis = 0;
  CUDACHECK(cudaGetDeviceCount(&nvis));
  if (nvis == 0) {
    printf("No GPUs found\n");
    showUsage(argv[0]);
    exit(EXIT_FAILURE);
  }

  ncclDataType_t type;
  ncclRedOp_t op;
  int n_min;
  int n_max;
  int delta;
  int gpus;
  int* list = nullptr;

  if (argc < 5) {
    showUsage(argv[0]);
    exit(EXIT_FAILURE);
  }

  // --- parse and validate the command line
  type = strToType(argv[1]);
  if (type == nccl_NUM_TYPES) {
    printf("Invalid <type> '%s'\n", argv[1]);
    showUsage(argv[0]);
    exit(EXIT_FAILURE);
  }

  op = strToOp(argv[2]);
  if (op == nccl_NUM_OPS) {
    printf("Invalid <op> '%s'\n", argv[2]);
    showUsage(argv[0]);
    exit(EXIT_FAILURE);
  }

  n_min = strToPosInt(argv[3]);
  if (n_min < 1) {
    printf("Invalid <n_min> '%s'\n", argv[3]);
    showUsage(argv[0]);
    exit(EXIT_FAILURE);
  }

  n_max = strToPosInt(argv[4]);
  if (n_max < n_min) {
    printf("Invalid <n_max> '%s'\n", argv[4]);
    showUsage(argv[0]);
    exit(EXIT_FAILURE);
  }

  if (argc > 5) {
    delta = strToPosInt(argv[5]);
    if (delta < 1) {
      printf("Invalid <delta> '%s'\n", argv[5]);
      showUsage(argv[0]);
      exit(EXIT_FAILURE);
    }
  } else {
    // default: roughly ten measurement points across [n_min, n_max]
    delta = (n_max == n_min) ? 1 : (n_max - n_min+9) / 10;
  }

  if (argc > 6) {
    gpus = strToPosInt(argv[6]);
    if (gpus < 1) {
      printf("Invalid <gpus> '%s'\n", argv[6]);
      showUsage(argv[0]);
      exit(EXIT_FAILURE);
    }
  } else {
    gpus = nvis;
  }

  list = (int*)malloc(gpus*sizeof(int));

  if (argc > 7 && argc != 7+gpus) {
    printf("If given, GPU list must be fully specified.\n");
    showUsage(argv[0]);
    exit(EXIT_FAILURE);
  }

  for(int g=0; g<gpus; ++g) {
    if(argc > 7) {
      list[g] = strToNonNeg(argv[7+g]);
      if (list[g] < 0) {
        printf("Invalid GPU%d '%s'\n", g, argv[7+g]);
        showUsage(argv[0]);
        exit(EXIT_FAILURE);
      } else if (list[g] >= nvis) {
        printf("GPU%d (%d) exceeds visible devices (%d)\n", g, list[g], nvis);
        showUsage(argv[0]);
        exit(EXIT_FAILURE);
      }
    } else {
      list[g] = g % nvis; // round-robin over visible devices
    }
  }

  size_t word = wordSize(type);
  size_t max_size = n_max * word;

  // --- host-side reference result (pinned allocation)
  void* refout;
  CUDACHECK(cudaMallocHost(&refout, max_size));

  void** input;
  void* output; // always goes on rank 0
  double* maxError;
  ncclComm_t* comm;
  cudaStream_t* stream;
  input = (void**)malloc(gpus*sizeof(void*));
  comm = (ncclComm_t*)malloc(gpus*sizeof(ncclComm_t));
  stream = (cudaStream_t*)malloc(gpus*sizeof(cudaStream_t));

  // --- per-rank setup: stream, random input, and reference accumulation
  for(int g=0; g<gpus; ++g) {
    char busid[32] = {0};
    CUDACHECK(cudaDeviceGetPCIBusId(busid, 32, list[g]));
    printf("# Rank %d using device %d [%s]\n", g, list[g], busid);
    CUDACHECK(cudaSetDevice(list[g]));
    CUDACHECK(cudaStreamCreate(&stream[g]));
    CUDACHECK(cudaMalloc(&input[g], max_size));
    makeRandom(input[g], n_max, type, 42+g);
    if (g == 0) {
      CUDACHECK(cudaMalloc(&output, max_size));
      CUDACHECK(cudaMallocHost(&maxError, sizeof(double)));
      CUDACHECK(cudaMemcpy(refout, input[g], max_size, cudaMemcpyDeviceToHost));
    } else {
      accVec(refout, input[g], n_max, type, op);
    }
  }

  NCCLCHECK(ncclCommInitAll(comm, gpus, list));

  printf(" BYTES ERROR MSEC BW\n");

  for(int n=n_min; n<=n_max; n+=delta) {
    size_t bytes = word * n;
    CUDACHECK(cudaSetDevice(list[0]));
    CUDACHECK(cudaMemsetAsync(output, 0, bytes, stream[0]));
    // drain every stream before starting the clock (the original
    // synchronized stream[0] on every iteration instead of stream[g])
    for(int g=0; g<gpus; ++g)
      CUDACHECK(cudaStreamSynchronize(stream[g]));

    auto start = std::chrono::high_resolution_clock::now();
    for(int g=0; g<gpus; ++g) {
      CUDACHECK(cudaSetDevice(list[g]));
      NCCLCHECK(ncclReduce(input[g], output, n, type, op, 0, comm[g], stream[g]));
    }
    for(int g=0; g<gpus; ++g) {
      CUDACHECK(cudaSetDevice(list[g]));
      CUDACHECK(cudaStreamSynchronize(stream[g]));
    }
    auto stop = std::chrono::high_resolution_clock::now();
    double ms = std::chrono::duration_cast<std::chrono::duration<double>>
        (stop - start).count() * 1000.0;

    // compare the device result on rank 0 against the host reference
    CUDACHECK(cudaSetDevice(list[0]));
    maxDiff(maxError, output, refout, n, type, stream[0]);
    CUDACHECK(cudaStreamSynchronize(stream[0]));
    double mb = (double)bytes * 1.e-6;
    double algbw = mb / ms;
    printf("%12lu %5.0le %10.3lf %6.2lf\n",
        n*word, *maxError, ms, algbw);
  }

  // --- teardown
  for(int g=0; g<gpus; ++g) {
    CUDACHECK(cudaSetDevice(list[g]));
    CUDACHECK(cudaStreamDestroy(stream[g]));
    ncclCommDestroy(comm[g]);
    CUDACHECK(cudaFree(input[g]));
    if(g == 0) {
      CUDACHECK(cudaFree(output));
      CUDACHECK(cudaFreeHost(maxError));
    }
  }

  free(input);
  free(comm);
  free(stream);
  free(list); // was leaked in the original
  CUDACHECK(cudaFreeHost(refout));

  exit(EXIT_SUCCESS);
}
|
d55a8d4cd30d31ec0b4fccf63dc16b1371a956c3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* histogram_compare.cu
*
* \file histogram_compare.cu
* \author typed up by Ernest Yeung
* \brief comparing histogram methods
*
*
* typed up by Ernest Yeung ernestyalumni@gmail.com
* \date 20170110
* cf. http://www.orangeowlsolutions.com/archives/1178
*
* Also in
* cf. Jason Sanders, Edward Kandrot. CUDA by Example: An Introduction to General-Purpose GPU Programming
* Chapter 9 Atomics
* 9.4 Computing Histograms
* 9.4.1 CPU Histogram Computation
* and
* cf. https://github.com/ernestyalumni/CompPhys/blob/master/CUDA-By-Example/histshared.cu
*
* Compilation tip
* nvcc -std=c++11 histogram_compare.cu -o histogram_compare.exe
*
*
*/
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/generate.h>
#include <thrust/adjacent_difference.h>
#include <thrust/binary_search.h>
#include "utils.h" // checkCudaErrors()
#define SIZE (100*1024*1024) // 100 MB
/**********************************************/
/* FUNCTION TO GENERATE RANDOM UNSIGNED CHARS */
/**********************************************/
unsigned char* big_random_block(int size) {
unsigned char *data = (unsigned char*)malloc(size);
for (int i=0; i<size; i++)
data[i] = rand();
return data;
}
// GPU with atomics; i.e.
// GPU with (global) atomics
/****************************************/
/* GPU HISTOGRAM CALCULATION VERSION 1 */
/****************************************/
__global__ void histo_kernel1(unsigned char *buffer, long size, unsigned int *histo) {
// --- The number of threads does not cover all the data size
int i = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while (i < size) {
atomicAdd(&histo[buffer[i]], 1);
i += stride;
}
}
// GPU with atomics in shared memory with final summation of partial histograms
/****************************************/
/* GPU HISTOGRAM CALCULATION VERSION 2 */
/****************************************/
__global__ void histo_kernel2(unsigned char *buffer, long size, unsigned int *histo) {
// --- Allocating and initializing shared memory to store partial histograms
__shared__ unsigned int temp[256];
temp[threadIdx.x] = 0;
__syncthreads();
// --- The number of threads does not cover all the data size
int i = threadIdx.x + blockIdx.x * blockDim.x;
int offset = blockDim.x * gridDim.x;
while (i < size)
{
atomicAdd(&temp[buffer[i]], 1);
i += offset;
}
__syncthreads();
// --- Summing histograms
atomicAdd(&(histo[threadIdx.x]), temp[threadIdx.x]);
}
/********/
/* MAIN */
/********/
int main() {
// --- Generating an array of SIZE unsigned chars
unsigned char *buffer = (unsigned char*)big_random_block(SIZE);
/********************/
/* CPU COMPUTATIONS */
/********************/
// --- Allocating host memory space and initializing the host-side histogram
unsigned int histo[256];
for (int i = 0; i < 256; i++) histo[i] = 0;
clock_t start_CPU, stop_CPU;
// --- Histogram calculation on the host
start_CPU = clock();
for (int i=0; i<SIZE; i++) histo [buffer[i]]++;
stop_CPU = clock();
float elapsedTime = (float)(stop_CPU - start_CPU) / (float) CLOCKS_PER_SEC * 1000.0f ;
printf("Time to generate (CPU): %3.1f ms \n", elapsedTime);
// --- Indirect check of the result
long histoCount = 0;
for (int i=0; i<256; i++) { histoCount += histo[i]; }
printf("Histogram Sum: %ld \n", histoCount);
/********************/
/* GPU COMPUTATIONS */
/********************/
// --- Initializing the device-side data
unsigned char *dev_buffer;
checkCudaErrors(hipMalloc((void**)&dev_buffer,SIZE));
checkCudaErrors(hipMemcpy(dev_buffer, buffer, SIZE, hipMemcpyHostToDevice));
// --- Allocating device memory space for the device-side histogram
unsigned int *dev_histo;
checkCudaErrors(hipMalloc((void**)&dev_histo,256*sizeof(long)));
// --- GPU timing
hipEvent_t start, stop;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
// --- ATOMICS
// --- Histogram calculation on the device - 2x the number of multiprocessors gives best timing
checkCudaErrors(hipEventRecord(start,0));
checkCudaErrors(hipMemset(dev_histo,0,256*sizeof(int)));
hipDeviceProp_t prop;
checkCudaErrors(hipGetDeviceProperties(&prop,0));
int blocks = prop.multiProcessorCount;
hipLaunchKernelGGL(( histo_kernel1), dim3(blocks*2),dim3(256), 0, 0, dev_buffer, SIZE, dev_histo);
checkCudaErrors(hipMemcpy(histo,dev_histo,256*sizeof(int),hipMemcpyDeviceToHost));
checkCudaErrors(hipEventRecord(stop,0));
checkCudaErrors(hipEventSynchronize(stop));
checkCudaErrors(hipEventElapsedTime(&elapsedTime,start,stop));
printf("Time to generate (GPU): %3.1f ms \n", elapsedTime);
histoCount = 0;
for (int i=0; i<256; i++) {
histoCount += histo[i];
}
printf( "Histogram Sum: %ld \n", histoCount );
// --- Check the correctness of the results via the host
for (int i=0; i<SIZE; i++) histo[buffer[i]]--;
for (int i=0; i<256; i++) {
if (histo[i] != 0) printf( "Failure at %d! Off by %d \n", i, histo[i] );
}
// --- ATOMICS IN SHARED MEMORY
// --- Histogram calculation on the device - 2x the number of multiprocessors gives best timing
checkCudaErrors(hipEventRecord(start,0));
checkCudaErrors(hipMemset(dev_histo,0,256*sizeof(int)));
checkCudaErrors(hipGetDeviceProperties(&prop,0));
blocks = prop.multiProcessorCount;
hipLaunchKernelGGL(( histo_kernel2), dim3(blocks*2),dim3(256), 0, 0, dev_buffer, SIZE, dev_histo);
checkCudaErrors(hipMemcpy(histo,dev_histo,256*sizeof(int),hipMemcpyDeviceToHost));
checkCudaErrors(hipEventRecord(stop,0));
checkCudaErrors(hipEventSynchronize(stop));
checkCudaErrors(hipEventElapsedTime(&elapsedTime,start,stop));
printf("Time to generate (GPU): %3.1f \n", elapsedTime);
histoCount = 0;
for (int i=0; i<256; i++) {
histoCount += histo[i];
}
printf( "Histogram Sum: %ld \n", histoCount );
// --- Check the correctness of the results via the host
for (int i=0; i<SIZE; i++) histo[buffer[i]]--;
for (int i=0; i<256; i++) {
if (histo[i] != 0) printf( "Failure at %d! Off by %d \n", i, histo[i] );
}
// --- CUDA THRUST
checkCudaErrors(hipEventRecord(start,0));
// --- Wrapping dev_buffer raw pointer with a device_ptr and initializing a device_vector with it
thrust::device_ptr<unsigned char> dev_ptr(dev_buffer);
thrust::device_vector<unsigned char> dev_buffer_thrust(dev_ptr, dev_ptr + SIZE);
// --- Sorting data to bring equal elements together
thrust::sort(dev_buffer_thrust.begin(), dev_buffer_thrust.end());
// - The number of histogram bins is equal to the maximum value plus one
int num_bins = dev_buffer_thrust.back() + 1;
// --- Resize histogram storage
thrust::device_vector<int> d_histogram;
d_histogram.resize(num_bins);
// --- Find the end of each bin of values
thrust::counting_iterator<int> search_begin(0);
thrust::upper_bound(dev_buffer_thrust.begin(), dev_buffer_thrust.end(),
search_begin, search_begin + num_bins,
d_histogram.begin());
// --- Compute the histogram by taking differences of the cumulative histogram
thrust::adjacent_difference(d_histogram.begin(), d_histogram.end(),
d_histogram.begin());
thrust::host_vector<int> h_histogram(d_histogram);
checkCudaErrors(hipEventRecord(stop,0));
checkCudaErrors(hipEventSynchronize(stop));
checkCudaErrors(hipEventElapsedTime(&elapsedTime,start,stop));
printf("Time to generate (GPU): %3.1f \n", elapsedTime);
histoCount = 0;
for (int i=0; i<256; i++) {
histoCount += h_histogram[i];
}
printf( "Histogram Sum: %ld \n", histoCount );
// --- Check the correctness of the results via the host
for (int i=0; i<SIZE; i++) h_histogram[buffer[i]]--;
for (int i=0; i<256; i++) {
if (h_histogram[i] != 0) printf( "Failure at %d! Off by %d \n", i, h_histogram[i] );
}
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipEventDestroy(stop));
checkCudaErrors(hipFree(dev_histo));
checkCudaErrors(hipFree(dev_buffer));
free(buffer);
getchar();
}
| d55a8d4cd30d31ec0b4fccf63dc16b1371a956c3.cu | /**
* histogram_compare.cu
*
* \file histogram_compare.cu
* \author typed up by Ernest Yeung
* \brief comparing histogram methods
*
*
* typed up by Ernest Yeung ernestyalumni@gmail.com
* \date 20170110
* cf. http://www.orangeowlsolutions.com/archives/1178
*
* Also in
* cf. Jason Sanders, Edward Kandrot. CUDA by Example: An Introduction to General-Purpose GPU Programming
* Chapter 9 Atomics
* 9.4 Computing Histograms
* 9.4.1 CPU Histogram Computation
* and
* cf. https://github.com/ernestyalumni/CompPhys/blob/master/CUDA-By-Example/histshared.cu
*
* Compilation tip
* nvcc -std=c++11 histogram_compare.cu -o histogram_compare.exe
*
*
*/
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/generate.h>
#include <thrust/adjacent_difference.h>
#include <thrust/binary_search.h>
#include "utils.h" // checkCudaErrors()
#define SIZE (100*1024*1024) // 100 MB
/**********************************************/
/* FUNCTION TO GENERATE RANDOM UNSIGNED CHARS */
/**********************************************/
unsigned char* big_random_block(int size) {
unsigned char *data = (unsigned char*)malloc(size);
for (int i=0; i<size; i++)
data[i] = rand();
return data;
}
// GPU with atomics; i.e.
// GPU with (global) atomics
/****************************************/
/* GPU HISTOGRAM CALCULATION VERSION 1 */
/****************************************/
__global__ void histo_kernel1(unsigned char *buffer, long size, unsigned int *histo) {
// --- The number of threads does not cover all the data size
int i = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while (i < size) {
atomicAdd(&histo[buffer[i]], 1);
i += stride;
}
}
// GPU with atomics in shared memory with final summation of partial histograms
/****************************************/
/* GPU HISTOGRAM CALCULATION VERSION 2 */
/****************************************/
__global__ void histo_kernel2(unsigned char *buffer, long size, unsigned int *histo) {
// --- Allocating and initializing shared memory to store partial histograms
__shared__ unsigned int temp[256];
temp[threadIdx.x] = 0;
__syncthreads();
// --- The number of threads does not cover all the data size
int i = threadIdx.x + blockIdx.x * blockDim.x;
int offset = blockDim.x * gridDim.x;
while (i < size)
{
atomicAdd(&temp[buffer[i]], 1);
i += offset;
}
__syncthreads();
// --- Summing histograms
atomicAdd(&(histo[threadIdx.x]), temp[threadIdx.x]);
}
/********/
/* MAIN */
/********/
int main() {
// --- Generating an array of SIZE unsigned chars
unsigned char *buffer = (unsigned char*)big_random_block(SIZE);
/********************/
/* CPU COMPUTATIONS */
/********************/
// --- Allocating host memory space and initializing the host-side histogram
unsigned int histo[256];
for (int i = 0; i < 256; i++) histo[i] = 0;
clock_t start_CPU, stop_CPU;
// --- Histogram calculation on the host
start_CPU = clock();
for (int i=0; i<SIZE; i++) histo [buffer[i]]++;
stop_CPU = clock();
float elapsedTime = (float)(stop_CPU - start_CPU) / (float) CLOCKS_PER_SEC * 1000.0f ;
printf("Time to generate (CPU): %3.1f ms \n", elapsedTime);
// --- Indirect check of the result
long histoCount = 0;
for (int i=0; i<256; i++) { histoCount += histo[i]; }
printf("Histogram Sum: %ld \n", histoCount);
/********************/
/* GPU COMPUTATIONS */
/********************/
// --- Initializing the device-side data
unsigned char *dev_buffer;
checkCudaErrors(cudaMalloc((void**)&dev_buffer,SIZE));
checkCudaErrors(cudaMemcpy(dev_buffer, buffer, SIZE, cudaMemcpyHostToDevice));
// --- Allocating device memory space for the device-side histogram
unsigned int *dev_histo;
checkCudaErrors(cudaMalloc((void**)&dev_histo,256*sizeof(long)));
// --- GPU timing
cudaEvent_t start, stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
// --- ATOMICS
// --- Histogram calculation on the device - 2x the number of multiprocessors gives best timing
checkCudaErrors(cudaEventRecord(start,0));
checkCudaErrors(cudaMemset(dev_histo,0,256*sizeof(int)));
cudaDeviceProp prop;
checkCudaErrors(cudaGetDeviceProperties(&prop,0));
int blocks = prop.multiProcessorCount;
histo_kernel1<<<blocks*2,256>>>(dev_buffer, SIZE, dev_histo);
checkCudaErrors(cudaMemcpy(histo,dev_histo,256*sizeof(int),cudaMemcpyDeviceToHost));
checkCudaErrors(cudaEventRecord(stop,0));
checkCudaErrors(cudaEventSynchronize(stop));
checkCudaErrors(cudaEventElapsedTime(&elapsedTime,start,stop));
printf("Time to generate (GPU): %3.1f ms \n", elapsedTime);
histoCount = 0;
for (int i=0; i<256; i++) {
histoCount += histo[i];
}
printf( "Histogram Sum: %ld \n", histoCount );
// --- Check the correctness of the results via the host
for (int i=0; i<SIZE; i++) histo[buffer[i]]--;
for (int i=0; i<256; i++) {
if (histo[i] != 0) printf( "Failure at %d! Off by %d \n", i, histo[i] );
}
// --- ATOMICS IN SHARED MEMORY
// --- Histogram calculation on the device - 2x the number of multiprocessors gives best timing
checkCudaErrors(cudaEventRecord(start,0));
checkCudaErrors(cudaMemset(dev_histo,0,256*sizeof(int)));
checkCudaErrors(cudaGetDeviceProperties(&prop,0));
blocks = prop.multiProcessorCount;
histo_kernel2<<<blocks*2,256>>>(dev_buffer, SIZE, dev_histo);
checkCudaErrors(cudaMemcpy(histo,dev_histo,256*sizeof(int),cudaMemcpyDeviceToHost));
checkCudaErrors(cudaEventRecord(stop,0));
checkCudaErrors(cudaEventSynchronize(stop));
checkCudaErrors(cudaEventElapsedTime(&elapsedTime,start,stop));
printf("Time to generate (GPU): %3.1f \n", elapsedTime);
histoCount = 0;
for (int i=0; i<256; i++) {
histoCount += histo[i];
}
printf( "Histogram Sum: %ld \n", histoCount );
// --- Check the correctness of the results via the host
for (int i=0; i<SIZE; i++) histo[buffer[i]]--;
for (int i=0; i<256; i++) {
if (histo[i] != 0) printf( "Failure at %d! Off by %d \n", i, histo[i] );
}
// --- CUDA THRUST
checkCudaErrors(cudaEventRecord(start,0));
// --- Wrapping dev_buffer raw pointer with a device_ptr and initializing a device_vector with it
thrust::device_ptr<unsigned char> dev_ptr(dev_buffer);
thrust::device_vector<unsigned char> dev_buffer_thrust(dev_ptr, dev_ptr + SIZE);
// --- Sorting data to bring equal elements together
thrust::sort(dev_buffer_thrust.begin(), dev_buffer_thrust.end());
// - The number of histogram bins is equal to the maximum value plus one
int num_bins = dev_buffer_thrust.back() + 1;
// --- Resize histogram storage
thrust::device_vector<int> d_histogram;
d_histogram.resize(num_bins);
// --- Find the end of each bin of values
thrust::counting_iterator<int> search_begin(0);
thrust::upper_bound(dev_buffer_thrust.begin(), dev_buffer_thrust.end(),
search_begin, search_begin + num_bins,
d_histogram.begin());
// --- Compute the histogram by taking differences of the cumulative histogram
thrust::adjacent_difference(d_histogram.begin(), d_histogram.end(),
d_histogram.begin());
thrust::host_vector<int> h_histogram(d_histogram);
checkCudaErrors(cudaEventRecord(stop,0));
checkCudaErrors(cudaEventSynchronize(stop));
checkCudaErrors(cudaEventElapsedTime(&elapsedTime,start,stop));
printf("Time to generate (GPU): %3.1f \n", elapsedTime);
histoCount = 0;
for (int i=0; i<256; i++) {
histoCount += h_histogram[i];
}
printf( "Histogram Sum: %ld \n", histoCount );
// --- Check the correctness of the results via the host
for (int i=0; i<SIZE; i++) h_histogram[buffer[i]]--;
for (int i=0; i<256; i++) {
if (h_histogram[i] != 0) printf( "Failure at %d! Off by %d \n", i, h_histogram[i] );
}
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
checkCudaErrors(cudaFree(dev_histo));
checkCudaErrors(cudaFree(dev_buffer));
free(buffer);
getchar();
}
|
1b5ec4eb69c5d9c8605f8b8e158ce6df99dd84cb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Udacity HW 4
//Radix Sorting
#include <stdio.h>
#include "reference_calc.cpp"
#include "utils.h"
#include <thrust/device_vector.h>
#include <thrust/scan.h>
/*
Michael Kinsey
Udacity Homework 4
Red Eye Removal
*/
/*
perform histogram of data & mask into bins. Inspired from the reference_function
*/
__global__ void bin_hist(unsigned int* d_out, unsigned int* const d_in,
unsigned int shift, const unsigned int numElems) {
unsigned int mask = 1 << shift;
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i >= numElems) return;
int bin = (d_in[i] & mask) >> shift;
atomicAdd(&d_out[bin], 1);
}
/*
Exclusive scan also inspired from Mark Harris' examples
*/
__global__ void sum_scan(unsigned int* d_in, const size_t numBins,
const unsigned int numElems) {
int tid = threadIdx.x;
if (tid >= numElems) return;
// copy data to shared
extern __shared__ float sdata[];
sdata[tid] = d_in[tid];
__syncthreads();
for (int d=1; d < numBins; d *= 2) {
if (tid >= d) {
sdata[tid] += sdata[tid - d];
}
__syncthreads();
if (tid == 0) {
d_in[0] = 0;
} else {
d_in[tid] = sdata[tid -1];
}
}
}
/*
Get the flipped bit value at shift index and store in a temp array
*/
__global__ void bit_pos(unsigned int* d_in, unsigned int* d_scan,
unsigned int shift, const unsigned int numElems) {
unsigned int mask = 1 << shift;
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i >= numElems) return;
d_scan[i] = ((d_in[i] & mask) >> shift) ? 0:1;
}
/*
Partition s.t. all values with a 0 at the bit index preceed those with a 1
Heavily inspired from Mark Harris' example functions provided in the course
materials
*/
__global__ void partition(
unsigned int* const d_inputVals, unsigned int* const d_inputPos,
unsigned int* const d_outputVals, unsigned int* const d_outputPos,
const unsigned int numElems, unsigned int* const d_histogram,
unsigned int* const d_scanned, unsigned int shift) {
unsigned int mask = 1 << shift;
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i >= numElems) return;
int final_i = 0;
// put value into appropriate location based on current bit
if ((d_inputVals[i] & mask) >> shift) {
final_i = i + d_histogram[1] - d_scanned[i];
} else {
final_i = d_scanned[i];
}
d_outputVals[final_i] = d_inputVals[i];
d_outputPos[final_i] = d_inputPos[i];
}
/*
Radix sort implementation. Inspired from the provided reference function
For each bit position, partition elts so that all elts with a 0 preceed those
with a 1. When all bits have been processed the array is sorted.
*/
void your_sort(unsigned int* const d_inputVals,
unsigned int* const d_inputPos,
unsigned int* const d_outputVals,
unsigned int* const d_outputPos,
const size_t numElems) {
const int numBits = 1;
// numBins will always be 2 because we are counting 1's and 0's
const int numBins = 1 << numBits;
// set up vars
unsigned int *d_binHistogram;
unsigned int BIN_BYTES = numBins * sizeof(unsigned int);
unsigned int V_BYTES = numElems * sizeof(unsigned int);
int threads = 1024;
int blocks = ceil((float)numElems / threads);
// allocate mem
checkCudaErrors(hipMalloc((void **) &d_binHistogram, BIN_BYTES));
hipMemset(d_binHistogram, 0, BIN_BYTES);
// declare container for scanned intermediary vals. using thrust lib
thrust::device_vector<unsigned int> d_scan(numElems);
for (unsigned int i = 0; i < 8 * sizeof(unsigned int); i++){
// zero out bins
hipMemset(d_binHistogram, 0, BIN_BYTES);
//
hipLaunchKernelGGL(( bin_hist) , dim3(blocks), dim3(threads), 0, 0, d_binHistogram, d_inputVals, i, numElems);
// single block scan histogram
hipLaunchKernelGGL(( sum_scan), dim3(1), dim3(numBins), BIN_BYTES, 0, d_binHistogram, numBins, numElems);
hipLaunchKernelGGL(( bit_pos), dim3(blocks), dim3(threads), 0, 0, d_inputVals,
thrust::raw_pointer_cast(&d_scan[0]), i, numElems);
// TODO rewrite exclusive scan
thrust::exclusive_scan(d_scan.begin(), d_scan.end(), d_scan.begin());
hipLaunchKernelGGL(( partition), dim3(blocks), dim3(threads), 0, 0, d_inputVals, d_inputPos, d_outputVals,
d_outputPos, numElems, d_binHistogram, thrust::raw_pointer_cast(&d_scan[0]), i);
// copy output to dest for each index
hipMemcpy(d_inputVals, d_outputVals, V_BYTES, hipMemcpyDeviceToDevice);
hipMemcpy(d_inputPos, d_outputPos, V_BYTES, hipMemcpyDeviceToDevice);
}
// Free allocated memory
hipFree(d_binHistogram);
}
| 1b5ec4eb69c5d9c8605f8b8e158ce6df99dd84cb.cu | //Udacity HW 4
//Radix Sorting
#include <stdio.h>
#include "reference_calc.cpp"
#include "utils.h"
#include <thrust/device_vector.h>
#include <thrust/scan.h>
/*
Michael Kinsey
Udacity Homework 4
Red Eye Removal
*/
/*
perform histogram of data & mask into bins. Inspired from the reference_function
*/
__global__ void bin_hist(unsigned int* d_out, unsigned int* const d_in,
unsigned int shift, const unsigned int numElems) {
unsigned int mask = 1 << shift;
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i >= numElems) return;
int bin = (d_in[i] & mask) >> shift;
atomicAdd(&d_out[bin], 1);
}
/*
Exclusive scan also inspired from Mark Harris' examples
*/
__global__ void sum_scan(unsigned int* d_in, const size_t numBins,
const unsigned int numElems) {
int tid = threadIdx.x;
if (tid >= numElems) return;
// copy data to shared
extern __shared__ float sdata[];
sdata[tid] = d_in[tid];
__syncthreads();
for (int d=1; d < numBins; d *= 2) {
if (tid >= d) {
sdata[tid] += sdata[tid - d];
}
__syncthreads();
if (tid == 0) {
d_in[0] = 0;
} else {
d_in[tid] = sdata[tid -1];
}
}
}
/*
Get the flipped bit value at shift index and store in a temp array
*/
__global__ void bit_pos(unsigned int* d_in, unsigned int* d_scan,
unsigned int shift, const unsigned int numElems) {
unsigned int mask = 1 << shift;
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i >= numElems) return;
d_scan[i] = ((d_in[i] & mask) >> shift) ? 0:1;
}
/*
Partition s.t. all values with a 0 at the bit index preceed those with a 1
Heavily inspired from Mark Harris' example functions provided in the course
materials
*/
__global__ void partition(
unsigned int* const d_inputVals, unsigned int* const d_inputPos,
unsigned int* const d_outputVals, unsigned int* const d_outputPos,
const unsigned int numElems, unsigned int* const d_histogram,
unsigned int* const d_scanned, unsigned int shift) {
unsigned int mask = 1 << shift;
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i >= numElems) return;
int final_i = 0;
// put value into appropriate location based on current bit
if ((d_inputVals[i] & mask) >> shift) {
final_i = i + d_histogram[1] - d_scanned[i];
} else {
final_i = d_scanned[i];
}
d_outputVals[final_i] = d_inputVals[i];
d_outputPos[final_i] = d_inputPos[i];
}
/*
Radix sort implementation. Inspired from the provided reference function
For each bit position, partition elts so that all elts with a 0 preceed those
with a 1. When all bits have been processed the array is sorted.
*/
void your_sort(unsigned int* const d_inputVals,
unsigned int* const d_inputPos,
unsigned int* const d_outputVals,
unsigned int* const d_outputPos,
const size_t numElems) {
const int numBits = 1;
// numBins will always be 2 because we are counting 1's and 0's
const int numBins = 1 << numBits;
// set up vars
unsigned int *d_binHistogram;
unsigned int BIN_BYTES = numBins * sizeof(unsigned int);
unsigned int V_BYTES = numElems * sizeof(unsigned int);
int threads = 1024;
int blocks = ceil((float)numElems / threads);
// allocate mem
checkCudaErrors(cudaMalloc((void **) &d_binHistogram, BIN_BYTES));
cudaMemset(d_binHistogram, 0, BIN_BYTES);
// declare container for scanned intermediary vals. using thrust lib
thrust::device_vector<unsigned int> d_scan(numElems);
for (unsigned int i = 0; i < 8 * sizeof(unsigned int); i++){
// zero out bins
cudaMemset(d_binHistogram, 0, BIN_BYTES);
//
bin_hist <<<blocks, threads>>>(d_binHistogram, d_inputVals, i, numElems);
// single block scan histogram
sum_scan<<<1, numBins, BIN_BYTES>>>(d_binHistogram, numBins, numElems);
bit_pos<<<blocks, threads>>>(d_inputVals,
thrust::raw_pointer_cast(&d_scan[0]), i, numElems);
// TODO rewrite exclusive scan
thrust::exclusive_scan(d_scan.begin(), d_scan.end(), d_scan.begin());
partition<<<blocks, threads>>>(d_inputVals, d_inputPos, d_outputVals,
d_outputPos, numElems, d_binHistogram, thrust::raw_pointer_cast(&d_scan[0]), i);
// copy output to dest for each index
cudaMemcpy(d_inputVals, d_outputVals, V_BYTES, cudaMemcpyDeviceToDevice);
cudaMemcpy(d_inputPos, d_outputPos, V_BYTES, cudaMemcpyDeviceToDevice);
}
// Free allocated memory
cudaFree(d_binHistogram);
}
|
103c84479862cf3d62b6076c17c6fe55a1befdbb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include "yololayer.h"
#include "utils.h"
using namespace Yolo;
namespace nvinfer1
{
YoloLayerPlugin::YoloLayerPlugin()
{
mClassCount = CLASS_NUM;
mYoloKernel.clear();
mYoloKernel.push_back(yolo1);
mYoloKernel.push_back(yolo2);
mYoloKernel.push_back(yolo3);
mKernelCount = mYoloKernel.size();
CUDA_CHECK(hipHostMalloc(&mAnchor, mKernelCount * sizeof(void*)));
size_t AnchorLen = sizeof(float)* CHECK_COUNT*2;
for(int ii = 0; ii < mKernelCount; ii ++)
{
CUDA_CHECK(hipMalloc(&mAnchor[ii],AnchorLen));
const auto& yolo = mYoloKernel[ii];
CUDA_CHECK(hipMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, hipMemcpyHostToDevice));
}
}
YoloLayerPlugin::~YoloLayerPlugin()
{
}
// create the plugin at runtime from a byte stream
YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length)
{
using namespace Tn;
const char *d = reinterpret_cast<const char *>(data), *a = d;
read(d, mClassCount);
read(d, mThreadCount);
read(d, mKernelCount);
mYoloKernel.resize(mKernelCount);
auto kernelSize = mKernelCount*sizeof(YoloKernel);
memcpy(mYoloKernel.data(),d,kernelSize);
d += kernelSize;
CUDA_CHECK(hipHostMalloc(&mAnchor, mKernelCount * sizeof(void*)));
size_t AnchorLen = sizeof(float)* CHECK_COUNT*2;
for(int ii = 0; ii < mKernelCount; ii ++)
{
CUDA_CHECK(hipMalloc(&mAnchor[ii],AnchorLen));
const auto& yolo = mYoloKernel[ii];
CUDA_CHECK(hipMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, hipMemcpyHostToDevice));
}
assert(d == a + length);
}
void YoloLayerPlugin::serialize(void* buffer) const
{
using namespace Tn;
char* d = static_cast<char*>(buffer), *a = d;
write(d, mClassCount);
write(d, mThreadCount);
write(d, mKernelCount);
auto kernelSize = mKernelCount*sizeof(YoloKernel);
memcpy(d,mYoloKernel.data(),kernelSize);
d += kernelSize;
assert(d == a + getSerializationSize());
}
size_t YoloLayerPlugin::getSerializationSize() const
{
return sizeof(mClassCount) + sizeof(mThreadCount) + sizeof(mKernelCount) + sizeof(Yolo::YoloKernel) * mYoloKernel.size();
}
int YoloLayerPlugin::initialize()
{
return 0;
}
Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)
{
//output the result to channel
int totalsize = MAX_OUTPUT_BBOX_COUNT * sizeof(Detection) / sizeof(float);
return Dims3(totalsize + 1, 1, 1);
}
// Set plugin namespace
void YoloLayerPlugin::setPluginNamespace(const char* pluginNamespace)
{
mPluginNamespace = pluginNamespace;
}
const char* YoloLayerPlugin::getPluginNamespace() const
{
return mPluginNamespace;
}
// Return the DataType of the plugin output at the requested index
DataType YoloLayerPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const
{
return DataType::kFLOAT;
}
// Return true if output tensor is broadcast across a batch.
bool YoloLayerPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const
{
return false;
}
// Return true if plugin can use input that is broadcast across batch without replication.
bool YoloLayerPlugin::canBroadcastInputAcrossBatch(int inputIndex) const
{
return false;
}
void YoloLayerPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput)
{
}
// Attach the plugin object to an execution context and grant the plugin the access to some context resource.
void YoloLayerPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator)
{
}
// Detach the plugin object from its execution context.
void YoloLayerPlugin::detachFromContext() {}
const char* YoloLayerPlugin::getPluginType() const
{
return "YoloLayer_TRT";
}
const char* YoloLayerPlugin::getPluginVersion() const
{
return "1";
}
void YoloLayerPlugin::destroy()
{
delete this;
}
// Clone the plugin
IPluginV2IOExt* YoloLayerPlugin::clone() const
{
YoloLayerPlugin *p = new YoloLayerPlugin();
p->setPluginNamespace(mPluginNamespace);
return p;
}
__device__ float Logist(float data){ return 1./(1. + exp(-data)); };
__global__ void CalDetection(const float *input, float *output,int noElements,
int yoloWidth,int yoloHeight,const float anchors[CHECK_COUNT*2],int classes,int outputElem) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= noElements) return;
int total_grid = yoloWidth * yoloHeight;
int bnIdx = idx / total_grid;
idx = idx - total_grid*bnIdx;
int info_len_i = 5 + classes;
const float* curInput = input + bnIdx * (info_len_i * total_grid * CHECK_COUNT);
for (int k = 0; k < 3; ++k) {
int class_id = 0;
float max_cls_prob = 0.0;
for (int i = 5; i < info_len_i; ++i) {
float p = Logist(curInput[idx + k * info_len_i * total_grid + i * total_grid]);
if (p > max_cls_prob) {
max_cls_prob = p;
class_id = i - 5;
}
}
float box_prob = Logist(curInput[idx + k * info_len_i * total_grid + 4 * total_grid]);
if (max_cls_prob < IGNORE_THRESH || box_prob < IGNORE_THRESH) continue;
float *res_count = output + bnIdx*outputElem;
int count = (int)atomicAdd(res_count, 1);
if (count >= MAX_OUTPUT_BBOX_COUNT) return;
char* data = (char * )res_count + sizeof(float) + count*sizeof(Detection);
Detection* det = (Detection*)(data);
int row = idx / yoloWidth;
int col = idx % yoloWidth;
//Location
det->bbox[0] = (col + Logist(curInput[idx + k * info_len_i * total_grid + 0 * total_grid])) * INPUT_W / yoloWidth;
det->bbox[1] = (row + Logist(curInput[idx + k * info_len_i * total_grid + 1 * total_grid])) * INPUT_H / yoloHeight;
det->bbox[2] = exp(curInput[idx + k * info_len_i * total_grid + 2 * total_grid]) * anchors[2*k];
det->bbox[3] = exp(curInput[idx + k * info_len_i * total_grid + 3 * total_grid]) * anchors[2*k + 1];
det->det_confidence = box_prob;
det->class_id = class_id;
det->class_confidence = max_cls_prob;
}
}
void YoloLayerPlugin::forwardGpu(const float *const * inputs, float* output, hipStream_t stream, int batchSize) {
int outputElem = 1 + MAX_OUTPUT_BBOX_COUNT * sizeof(Detection) / sizeof(float);
for(int idx = 0 ; idx < batchSize; ++idx) {
CUDA_CHECK(hipMemset(output + idx*outputElem, 0, sizeof(float)));
}
int numElem = 0;
for (unsigned int i = 0;i< mYoloKernel.size();++i)
{
const auto& yolo = mYoloKernel[i];
numElem = yolo.width*yolo.height*batchSize;
if (numElem < mThreadCount)
mThreadCount = numElem;
hipLaunchKernelGGL(( CalDetection), dim3((yolo.width*yolo.height*batchSize + mThreadCount - 1) / mThreadCount), dim3(mThreadCount), 0, 0,
inputs[i],output, numElem, yolo.width, yolo.height, (float *)mAnchor[i], mClassCount ,outputElem);
}
}
int YoloLayerPlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, hipStream_t stream)
{
//assert(batchSize == 1);
//GPU
//CUDA_CHECK(hipStreamSynchronize(stream));
forwardGpu((const float *const *)inputs, (float*)outputs[0], stream, batchSize);
return 0;
}
PluginFieldCollection YoloPluginCreator::mFC{};
std::vector<PluginField> YoloPluginCreator::mPluginAttributes;
YoloPluginCreator::YoloPluginCreator()
{
mPluginAttributes.clear();
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
const char* YoloPluginCreator::getPluginName() const
{
return "YoloLayer_TRT";
}
const char* YoloPluginCreator::getPluginVersion() const
{
return "1";
}
const PluginFieldCollection* YoloPluginCreator::getFieldNames()
{
return &mFC;
}
IPluginV2IOExt* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc)
{
YoloLayerPlugin* obj = new YoloLayerPlugin();
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
IPluginV2IOExt* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength)
{
// This object will be deleted when the network is destroyed, which will
// call MishPlugin::destroy()
YoloLayerPlugin* obj = new YoloLayerPlugin(serialData, serialLength);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
}
| 103c84479862cf3d62b6076c17c6fe55a1befdbb.cu | #include <assert.h>
#include "yololayer.h"
#include "utils.h"
using namespace Yolo;
namespace nvinfer1
{
YoloLayerPlugin::YoloLayerPlugin()
{
mClassCount = CLASS_NUM;
mYoloKernel.clear();
mYoloKernel.push_back(yolo1);
mYoloKernel.push_back(yolo2);
mYoloKernel.push_back(yolo3);
mKernelCount = mYoloKernel.size();
CUDA_CHECK(cudaMallocHost(&mAnchor, mKernelCount * sizeof(void*)));
size_t AnchorLen = sizeof(float)* CHECK_COUNT*2;
for(int ii = 0; ii < mKernelCount; ii ++)
{
CUDA_CHECK(cudaMalloc(&mAnchor[ii],AnchorLen));
const auto& yolo = mYoloKernel[ii];
CUDA_CHECK(cudaMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, cudaMemcpyHostToDevice));
}
}
YoloLayerPlugin::~YoloLayerPlugin()
{
}
// create the plugin at runtime from a byte stream
    YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length)
    {
        using namespace Tn;
        // Field order must match serialize() exactly:
        // mClassCount, mThreadCount, mKernelCount, raw YoloKernel array.
        const char *d = reinterpret_cast<const char *>(data), *a = d;  // `a` marks the start for the length check below
        read(d, mClassCount);
        read(d, mThreadCount);
        read(d, mKernelCount);
        mYoloKernel.resize(mKernelCount);
        auto kernelSize = mKernelCount*sizeof(YoloKernel);
        // YoloKernel entries were serialized as raw bytes (POD copy).
        memcpy(mYoloKernel.data(),d,kernelSize);
        d += kernelSize;
        // Pinned host array of per-scale device pointers; each scale's
        // anchor table is uploaded to its own device buffer.
        CUDA_CHECK(cudaMallocHost(&mAnchor, mKernelCount * sizeof(void*)));
        size_t AnchorLen = sizeof(float)* CHECK_COUNT*2;
        for(int ii = 0; ii < mKernelCount; ii ++)
        {
            CUDA_CHECK(cudaMalloc(&mAnchor[ii],AnchorLen));
            const auto& yolo = mYoloKernel[ii];
            CUDA_CHECK(cudaMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, cudaMemcpyHostToDevice));
        }
        // Every byte of the serialized stream must have been consumed.
        assert(d == a + length);
    }
    // Flatten plugin state into `buffer`. The layout must mirror the
    // deserializing constructor: mClassCount, mThreadCount, mKernelCount,
    // then the raw YoloKernel array.
    void YoloLayerPlugin::serialize(void* buffer) const
    {
        using namespace Tn;
        char* d = static_cast<char*>(buffer), *a = d;  // `a` marks the start for the size check below
        write(d, mClassCount);
        write(d, mThreadCount);
        write(d, mKernelCount);
        auto kernelSize = mKernelCount*sizeof(YoloKernel);
        // POD byte copy; matches the memcpy in the deserializing constructor.
        memcpy(d,mYoloKernel.data(),kernelSize);
        d += kernelSize;
        // Must write exactly getSerializationSize() bytes.
        assert(d == a + getSerializationSize());
    }
size_t YoloLayerPlugin::getSerializationSize() const
{
return sizeof(mClassCount) + sizeof(mThreadCount) + sizeof(mKernelCount) + sizeof(Yolo::YoloKernel) * mYoloKernel.size();
}
int YoloLayerPlugin::initialize()
{
return 0;
}
Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)
{
//output the result to channel
int totalsize = MAX_OUTPUT_BBOX_COUNT * sizeof(Detection) / sizeof(float);
return Dims3(totalsize + 1, 1, 1);
}
// Set plugin namespace
void YoloLayerPlugin::setPluginNamespace(const char* pluginNamespace)
{
mPluginNamespace = pluginNamespace;
}
const char* YoloLayerPlugin::getPluginNamespace() const
{
return mPluginNamespace;
}
// Return the DataType of the plugin output at the requested index
DataType YoloLayerPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const
{
return DataType::kFLOAT;
}
// Return true if output tensor is broadcast across a batch.
bool YoloLayerPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const
{
return false;
}
// Return true if plugin can use input that is broadcast across batch without replication.
bool YoloLayerPlugin::canBroadcastInputAcrossBatch(int inputIndex) const
{
return false;
}
void YoloLayerPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput)
{
}
// Attach the plugin object to an execution context and grant the plugin the access to some context resource.
void YoloLayerPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator)
{
}
// Detach the plugin object from its execution context.
void YoloLayerPlugin::detachFromContext() {}
const char* YoloLayerPlugin::getPluginType() const
{
return "YoloLayer_TRT";
}
const char* YoloLayerPlugin::getPluginVersion() const
{
return "1";
}
void YoloLayerPlugin::destroy()
{
delete this;
}
// Clone the plugin
    // NOTE(review): clone() goes through the default constructor, which
    // rebuilds mYoloKernel from the compile-time yolo1/yolo2/yolo3 constants;
    // state restored by the deserializing constructor (e.g. mThreadCount) is
    // NOT carried over to the clone — confirm the default member value
    // matches before relying on cloned instances.
    IPluginV2IOExt* YoloLayerPlugin::clone() const
    {
        YoloLayerPlugin *p = new YoloLayerPlugin();
        p->setPluginNamespace(mPluginNamespace);
        return p;
    }
__device__ float Logist(float data){ return 1./(1. + exp(-data)); };
__global__ void CalDetection(const float *input, float *output,int noElements,
int yoloWidth,int yoloHeight,const float anchors[CHECK_COUNT*2],int classes,int outputElem) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= noElements) return;
int total_grid = yoloWidth * yoloHeight;
int bnIdx = idx / total_grid;
idx = idx - total_grid*bnIdx;
int info_len_i = 5 + classes;
const float* curInput = input + bnIdx * (info_len_i * total_grid * CHECK_COUNT);
for (int k = 0; k < 3; ++k) {
int class_id = 0;
float max_cls_prob = 0.0;
for (int i = 5; i < info_len_i; ++i) {
float p = Logist(curInput[idx + k * info_len_i * total_grid + i * total_grid]);
if (p > max_cls_prob) {
max_cls_prob = p;
class_id = i - 5;
}
}
float box_prob = Logist(curInput[idx + k * info_len_i * total_grid + 4 * total_grid]);
if (max_cls_prob < IGNORE_THRESH || box_prob < IGNORE_THRESH) continue;
float *res_count = output + bnIdx*outputElem;
int count = (int)atomicAdd(res_count, 1);
if (count >= MAX_OUTPUT_BBOX_COUNT) return;
char* data = (char * )res_count + sizeof(float) + count*sizeof(Detection);
Detection* det = (Detection*)(data);
int row = idx / yoloWidth;
int col = idx % yoloWidth;
//Location
det->bbox[0] = (col + Logist(curInput[idx + k * info_len_i * total_grid + 0 * total_grid])) * INPUT_W / yoloWidth;
det->bbox[1] = (row + Logist(curInput[idx + k * info_len_i * total_grid + 1 * total_grid])) * INPUT_H / yoloHeight;
det->bbox[2] = exp(curInput[idx + k * info_len_i * total_grid + 2 * total_grid]) * anchors[2*k];
det->bbox[3] = exp(curInput[idx + k * info_len_i * total_grid + 3 * total_grid]) * anchors[2*k + 1];
det->det_confidence = box_prob;
det->class_id = class_id;
det->class_confidence = max_cls_prob;
}
}
void YoloLayerPlugin::forwardGpu(const float *const * inputs, float* output, cudaStream_t stream, int batchSize) {
int outputElem = 1 + MAX_OUTPUT_BBOX_COUNT * sizeof(Detection) / sizeof(float);
for(int idx = 0 ; idx < batchSize; ++idx) {
CUDA_CHECK(cudaMemset(output + idx*outputElem, 0, sizeof(float)));
}
int numElem = 0;
for (unsigned int i = 0;i< mYoloKernel.size();++i)
{
const auto& yolo = mYoloKernel[i];
numElem = yolo.width*yolo.height*batchSize;
if (numElem < mThreadCount)
mThreadCount = numElem;
CalDetection<<< (yolo.width*yolo.height*batchSize + mThreadCount - 1) / mThreadCount, mThreadCount>>>
(inputs[i],output, numElem, yolo.width, yolo.height, (float *)mAnchor[i], mClassCount ,outputElem);
}
}
    // TensorRT inference entry point: forwards the device input/output
    // pointers to forwardGpu on the execution stream supplied by the runtime.
    int YoloLayerPlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, cudaStream_t stream)
    {
        //assert(batchSize == 1);
        //GPU
        //CUDA_CHECK(cudaStreamSynchronize(stream));
        forwardGpu((const float *const *)inputs, (float*)outputs[0], stream, batchSize);
        return 0;  // 0 == success per the IPluginV2 contract
    }
PluginFieldCollection YoloPluginCreator::mFC{};
std::vector<PluginField> YoloPluginCreator::mPluginAttributes;
YoloPluginCreator::YoloPluginCreator()
{
mPluginAttributes.clear();
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
const char* YoloPluginCreator::getPluginName() const
{
return "YoloLayer_TRT";
}
const char* YoloPluginCreator::getPluginVersion() const
{
return "1";
}
const PluginFieldCollection* YoloPluginCreator::getFieldNames()
{
return &mFC;
}
IPluginV2IOExt* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc)
{
YoloLayerPlugin* obj = new YoloLayerPlugin();
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
IPluginV2IOExt* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength)
{
// This object will be deleted when the network is destroyed, which will
// call MishPlugin::destroy()
YoloLayerPlugin* obj = new YoloLayerPlugin(serialData, serialLength);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
}
|
7fd51aa980b4d5d48df51f0336bbb47fb6b9c4ec.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <string.h>
#define MAX 32
__global__ void toggle_case(char *x, char *y)
{
int i=threadIdx.x;
if(x[i] >= 'a' && x[i] <='z')
y[i] = x[i]-32;
else
if(x[i] >= 'A' && x[i] <='Z')
y[i] = x[i]+32;
else
{
y[i] = x[i];
}
}
int main()
{
    int n;
    hipEvent_t start, stop;
    float time;
    char A[MAX], B[MAX], *d, *e;
    printf("Enter String to be toggled: ");
    // Bounded read (MAX-1 chars): the original unbounded %s could overflow A[].
    if (scanf("%31s", A) != 1)
        return 1;
    n = strlen(A) + 1;                 // include the terminating '\0'
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipMalloc((void **)&d, n * sizeof(char));
    hipMalloc((void **)&e, n * sizeof(char));
    hipMemcpy(d, A, n * sizeof(char), hipMemcpyHostToDevice);
    hipEventRecord(start, 0);
    // One thread per byte, terminator included.
    hipLaunchKernelGGL(toggle_case, dim3(1), dim3(n), 0, 0, d, e);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipMemcpy(B, e, n * sizeof(char), hipMemcpyDeviceToHost);
    printf("The toggled case String is %s", B);
    printf("\n");
    hipFree(d);
    hipFree(e);
    hipEventElapsedTime(&time, start, stop);
    printf ("Time for the kernel: %f ms\n", time);
    // Events were previously leaked.
    hipEventDestroy(start);
    hipEventDestroy(stop);
    return 0;
}
| 7fd51aa980b4d5d48df51f0336bbb47fb6b9c4ec.cu |
#include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#include <string.h>
#define MAX 32
// Per-thread case flip: thread i reads x[i] and writes the opposite-case
// letter (non-letter bytes pass through unchanged) to y[i]. The launcher is
// expected to start exactly one thread per byte, so no bounds guard exists.
__global__ void toggle_case(char *x, char *y)
{
    const char c = x[threadIdx.x];
    char out = c;
    if (c >= 'a' && c <= 'z')
        out = c - ('a' - 'A');   // 'a'-'A' == 32, same shift as the original
    else if (c >= 'A' && c <= 'Z')
        out = c + ('a' - 'A');
    y[threadIdx.x] = out;
}
int main()
{
    int n;
    cudaEvent_t start, stop;
    float time;
    char A[MAX], B[MAX], *d, *e;
    printf("Enter String to be toggled: ");
    // Bounded read (MAX-1 chars): the original unbounded %s could overflow A[].
    if (scanf("%31s", A) != 1)
        return 1;
    n = strlen(A) + 1;                 // include the terminating '\0'
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaMalloc((void **)&d, n * sizeof(char));
    cudaMalloc((void **)&e, n * sizeof(char));
    cudaMemcpy(d, A, n * sizeof(char), cudaMemcpyHostToDevice);
    cudaEventRecord(start, 0);
    toggle_case<<<1, n>>>(d, e);       // one thread per byte, terminator included
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaMemcpy(B, e, n * sizeof(char), cudaMemcpyDeviceToHost);
    printf("The toggled case String is %s", B);
    printf("\n");
    cudaFree(d);
    cudaFree(e);
    cudaEventElapsedTime(&time, start, stop);
    printf ("Time for the kernel: %f ms\n", time);
    // Events were previously leaked.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
|
af4354a526915a1c4f9468bb7ba0a06b283517bc.hip | // !!! This is a file automatically generated by hipify!!!
#include "colorspace_converter.h"
#include <hip/hip_runtime.h>
namespace solids
{
namespace lib
{
namespace video
{
namespace nvidia
{
__constant__ float mat_yuv2rgb[3][3];
template<class T>
__device__ static T clamp(T x, T lower, T upper)
{
return x < lower ? lower : (x > upper ? upper : x);
}
template<class rgbs, class yuvs>
__device__ inline rgbs yuv_to_rgb_pixel(yuvs y, yuvs u, yuvs v)
{
const int32_t low = 1 << (sizeof(yuvs) * 8 - 4);
const int32_t mid = 1 << (sizeof(yuvs) * 8 - 1);
float fy = (int32_t)y - low;
float fu = (int32_t)u - mid;
float fv = (int32_t)v - mid;
const float maxf = (1 << sizeof(yuvs) * 8) - 1.0f;
yuvs r = (yuvs)clamp(mat_yuv2rgb[0][0] * fy + mat_yuv2rgb[0][1] * fu + mat_yuv2rgb[0][2] * fv, 0.0f, maxf);
yuvs g = (yuvs)clamp(mat_yuv2rgb[1][0] * fy + mat_yuv2rgb[1][1] * fu + mat_yuv2rgb[1][2] * fv, 0.0f, maxf);
yuvs b = (yuvs)clamp(mat_yuv2rgb[2][0] * fy + mat_yuv2rgb[2][1] * fu + mat_yuv2rgb[2][2] * fv, 0.0f, maxf);
rgbs rgb = {};
const int32_t shift = abs((int)sizeof(yuvs) - (int)sizeof(rgb.c.r)) * 8;
if (sizeof(yuvs) >= sizeof(rgb.c.r))
{
rgb.c.r = r >> shift;
rgb.c.g = g >> shift;
rgb.c.b = b >> shift;
}
else
{
rgb.c.r = r << shift;
rgb.c.g = g << shift;
rgb.c.b = b << shift;
}
return rgb;
}
union bgra32
{
uint32_t d;
uchar4 v;
struct
{
uint8_t b, g, r, a;
} c;
};
////////////////////////device////////////////////
__global__ static void yuv_to_rgb_kernel(uint8_t* pYUV, int32_t yuvPitch, uint8_t* pRGB, int32_t rgbPitch, int32_t width, int32_t height)
{
int32_t x = (threadIdx.x + blockIdx.x * blockDim.x) * 2;
int32_t y = (threadIdx.y + blockIdx.y * blockDim.y) * 2;
if (x + 1 >= width || y + 1 >= height)
return;
uint8_t* pSrc = pYUV + x * sizeof(uchar2) / 2 + y * yuvPitch;
uint8_t* pDst = pRGB + x * sizeof(bgra32) + y * rgbPitch;
uchar2 l0 = *(uchar2*)pSrc;
uchar2 l1 = *(uchar2*)(pSrc + yuvPitch);
uchar2 ch = *(uchar2*)(pSrc + (height - y / 2) * yuvPitch);
uint2 dst;
dst.x = yuv_to_rgb_pixel<bgra32>(l0.x, ch.x, ch.y).d;
dst.y = yuv_to_rgb_pixel<bgra32>(l0.y, ch.x, ch.y).d;
*(uint2*)pDst = dst;
dst.x = yuv_to_rgb_pixel<bgra32>(l1.x, ch.x, ch.y).d;
dst.y = yuv_to_rgb_pixel<bgra32>(l1.y, ch.x, ch.y).d;
*(uint2*)(pDst + rgbPitch) = dst;
}
__global__ static void nv12_to_i420_kernel(uint8_t* pNV12Chroma, int32_t nv12ChromaPitch, uint8_t* pI420Chroma, int32_t i420ChromaPitch, int32_t chromaWidth, int32_t chromaHeight)
{
int32_t x = blockIdx.x * blockDim.x + threadIdx.x;
int32_t y = blockIdx.y * blockDim.y + threadIdx.y;
uint8_t u = *(pNV12Chroma + (2 * x) + y * nv12ChromaPitch);
uint8_t v = *(pNV12Chroma + (2 * x + 1) + y * nv12ChromaPitch);
uint8_t* pI420U = pI420Chroma;
uint8_t* pI420V = pI420U + (i420ChromaPitch * chromaHeight);
*(pI420U + x + y * i420ChromaPitch) = u;
*(pI420V + x + y * i420ChromaPitch) = v;
}
__global__ static void nv12_to_yv12_kernel(uint8_t* pNV12Chroma, int32_t nv12ChromaPitch, uint8_t* pYV12Chroma, int32_t yv12ChromaPitch, int32_t chromaWidth, int32_t chromaHeight)
{
int32_t x = blockIdx.x * blockDim.x + threadIdx.x;
int32_t y = blockIdx.y * blockDim.y + threadIdx.y;
uint8_t u = *(pNV12Chroma + (2 * x) + y * nv12ChromaPitch);
uint8_t v = *(pNV12Chroma + (2 * x + 1) + y * nv12ChromaPitch);
uint8_t* pYV12V = pYV12Chroma;
uint8_t* pYV12U = pYV12V + (yv12ChromaPitch * chromaHeight);
*(pYV12V + x + y * yv12ChromaPitch) = v;
*(pYV12U + x + y * yv12ChromaPitch) = u;
}
void decoder::converter::convert_nv12_to_bgra32(unsigned char* nv12, int nv12Pitch, unsigned char* bgra, int bgraPicth, int width, int height)
{
if ((width * height) >= (RESOLUTION_W4K * RESOLUTION_H4K))
setup_mat_yuv2rgb(solids::lib::video::nvidia::decoder::converter::standard_t::bt2020);
else if ((width * height) >= (RESOLUTION_WFHD * RESOLUTION_HFHD))
setup_mat_yuv2rgb(solids::lib::video::nvidia::decoder::converter::standard_t::bt709);
else
setup_mat_yuv2rgb(solids::lib::video::nvidia::decoder::converter::standard_t::bt601);
yuv_to_rgb_kernel << <dim3((width + 63) / 32 / 2, (height + 3) / 2 / 2), dim3(32, 2) >> > (nv12, nv12Pitch, bgra, bgraPicth, width, height);
}
void decoder::converter::convert_nv12_to_i420(unsigned char* nv12, int nv12Pitch, unsigned char* i420, int i420Pitch, int width, int height)
{
hipError_t cerr = hipMemcpy2D(i420, i420Pitch, nv12, nv12Pitch, width, height, hipMemcpyDeviceToDevice);
int chromaWidth = width >> 1;
int chromaheight = height >> 1;
unsigned char* nv12Chroma = nv12 + nv12Pitch * height;
unsigned char* i420Chroma = i420 + i420Pitch * height;
int nv12ChromaPitch = nv12Pitch;
int i420ChromaPitch = i420Pitch >> 1;
dim3 threadPerBlock(4, 4);
dim3 blocks(chromaWidth / threadPerBlock.x, chromaheight / threadPerBlock.y);
nv12_to_i420_kernel << <blocks, threadPerBlock >> > (nv12Chroma, nv12ChromaPitch, i420Chroma, i420ChromaPitch, chromaWidth, chromaheight);
}
void decoder::converter::convert_nv12_to_yv12(unsigned char* nv12, int nv12Pitch, unsigned char* yv12, int yv12Pitch, int width, int height)
{
hipError_t cerr = hipMemcpy2D(yv12, yv12Pitch, nv12, nv12Pitch, width, height, hipMemcpyDeviceToDevice);
int chromaWidth = width >> 1;
int chromaheight = height >> 1;
unsigned char* nv12Chroma = nv12 + nv12Pitch * height;
unsigned char* yv12Chroma = yv12 + yv12Pitch * height;
int nv12ChromaPitch = nv12Pitch;
int yv12ChromaPitch = yv12Pitch >> 1;
dim3 threadPerBlock(4, 4);
dim3 blocks(chromaWidth / threadPerBlock.x, chromaheight / threadPerBlock.y);
nv12_to_yv12_kernel << <blocks, threadPerBlock >> > (nv12Chroma, nv12ChromaPitch, yv12Chroma, yv12ChromaPitch, chromaWidth, chromaheight);
}
void decoder::converter::setup_mat_yuv2rgb(int imat)
{
float wr;
float wb;
int black;
int white;
int max;
constants(imat, wr, wb, black, white, max);
float mat[3][3] = {
1.0f, 0.0f, (1.0f - wr) / 0.5f,
1.0f, -wb * (1.0f - wb) / 0.5f / (1 - wb - wr), -wr * (1 - wr) / 0.5f / (1 - wb - wr),
1.0f, (1.0f - wb) / 0.5f, 0.0f,
};
for (int i = 0; i < 3; i++)
{
for (int j = 0; j < 3; j++)
mat[i][j] = (float)(1.0 * max / (white - black) * mat[i][j]);
}
::hipMemcpyToSymbol(mat_yuv2rgb, mat, sizeof(mat));
}
void decoder::converter::constants(int mat, float& wr, float& wb, int& black, int& white, int& max)
{
// Default is BT709
wr = 0.2126f;
wb = 0.0722f;
black = 16;
white = 235;
max = 255;
if (mat == solids::lib::video::nvidia::decoder::converter::converter::standard_t::bt601)
{
wr = 0.2990f;
wb = 0.1140f;
}
else if (mat == solids::lib::video::nvidia::decoder::converter::standard_t::bt2020)
{
wr = 0.2627f;
wb = 0.0593f;
// 10-bit only
black = 64 << 6;
white = 940 << 6;
max = (1 << 16) - 1;
}
}
};
};
};
};
| af4354a526915a1c4f9468bb7ba0a06b283517bc.cu | #include "colorspace_converter.h"
#include <cuda_runtime.h>
namespace solids
{
namespace lib
{
namespace video
{
namespace nvidia
{
__constant__ float mat_yuv2rgb[3][3];
template<class T>
__device__ static T clamp(T x, T lower, T upper)
{
return x < lower ? lower : (x > upper ? upper : x);
}
template<class rgbs, class yuvs>
__device__ inline rgbs yuv_to_rgb_pixel(yuvs y, yuvs u, yuvs v)
{
const int32_t low = 1 << (sizeof(yuvs) * 8 - 4);
const int32_t mid = 1 << (sizeof(yuvs) * 8 - 1);
float fy = (int32_t)y - low;
float fu = (int32_t)u - mid;
float fv = (int32_t)v - mid;
const float maxf = (1 << sizeof(yuvs) * 8) - 1.0f;
yuvs r = (yuvs)clamp(mat_yuv2rgb[0][0] * fy + mat_yuv2rgb[0][1] * fu + mat_yuv2rgb[0][2] * fv, 0.0f, maxf);
yuvs g = (yuvs)clamp(mat_yuv2rgb[1][0] * fy + mat_yuv2rgb[1][1] * fu + mat_yuv2rgb[1][2] * fv, 0.0f, maxf);
yuvs b = (yuvs)clamp(mat_yuv2rgb[2][0] * fy + mat_yuv2rgb[2][1] * fu + mat_yuv2rgb[2][2] * fv, 0.0f, maxf);
rgbs rgb = {};
const int32_t shift = abs((int)sizeof(yuvs) - (int)sizeof(rgb.c.r)) * 8;
if (sizeof(yuvs) >= sizeof(rgb.c.r))
{
rgb.c.r = r >> shift;
rgb.c.g = g >> shift;
rgb.c.b = b >> shift;
}
else
{
rgb.c.r = r << shift;
rgb.c.g = g << shift;
rgb.c.b = b << shift;
}
return rgb;
}
union bgra32
{
uint32_t d;
uchar4 v;
struct
{
uint8_t b, g, r, a;
} c;
};
////////////////////////device////////////////////
__global__ static void yuv_to_rgb_kernel(uint8_t* pYUV, int32_t yuvPitch, uint8_t* pRGB, int32_t rgbPitch, int32_t width, int32_t height)
{
int32_t x = (threadIdx.x + blockIdx.x * blockDim.x) * 2;
int32_t y = (threadIdx.y + blockIdx.y * blockDim.y) * 2;
if (x + 1 >= width || y + 1 >= height)
return;
uint8_t* pSrc = pYUV + x * sizeof(uchar2) / 2 + y * yuvPitch;
uint8_t* pDst = pRGB + x * sizeof(bgra32) + y * rgbPitch;
uchar2 l0 = *(uchar2*)pSrc;
uchar2 l1 = *(uchar2*)(pSrc + yuvPitch);
uchar2 ch = *(uchar2*)(pSrc + (height - y / 2) * yuvPitch);
uint2 dst;
dst.x = yuv_to_rgb_pixel<bgra32>(l0.x, ch.x, ch.y).d;
dst.y = yuv_to_rgb_pixel<bgra32>(l0.y, ch.x, ch.y).d;
*(uint2*)pDst = dst;
dst.x = yuv_to_rgb_pixel<bgra32>(l1.x, ch.x, ch.y).d;
dst.y = yuv_to_rgb_pixel<bgra32>(l1.y, ch.x, ch.y).d;
*(uint2*)(pDst + rgbPitch) = dst;
}
__global__ static void nv12_to_i420_kernel(uint8_t* pNV12Chroma, int32_t nv12ChromaPitch, uint8_t* pI420Chroma, int32_t i420ChromaPitch, int32_t chromaWidth, int32_t chromaHeight)
{
int32_t x = blockIdx.x * blockDim.x + threadIdx.x;
int32_t y = blockIdx.y * blockDim.y + threadIdx.y;
uint8_t u = *(pNV12Chroma + (2 * x) + y * nv12ChromaPitch);
uint8_t v = *(pNV12Chroma + (2 * x + 1) + y * nv12ChromaPitch);
uint8_t* pI420U = pI420Chroma;
uint8_t* pI420V = pI420U + (i420ChromaPitch * chromaHeight);
*(pI420U + x + y * i420ChromaPitch) = u;
*(pI420V + x + y * i420ChromaPitch) = v;
}
__global__ static void nv12_to_yv12_kernel(uint8_t* pNV12Chroma, int32_t nv12ChromaPitch, uint8_t* pYV12Chroma, int32_t yv12ChromaPitch, int32_t chromaWidth, int32_t chromaHeight)
{
int32_t x = blockIdx.x * blockDim.x + threadIdx.x;
int32_t y = blockIdx.y * blockDim.y + threadIdx.y;
uint8_t u = *(pNV12Chroma + (2 * x) + y * nv12ChromaPitch);
uint8_t v = *(pNV12Chroma + (2 * x + 1) + y * nv12ChromaPitch);
uint8_t* pYV12V = pYV12Chroma;
uint8_t* pYV12U = pYV12V + (yv12ChromaPitch * chromaHeight);
*(pYV12V + x + y * yv12ChromaPitch) = v;
*(pYV12U + x + y * yv12ChromaPitch) = u;
}
void decoder::converter::convert_nv12_to_bgra32(unsigned char* nv12, int nv12Pitch, unsigned char* bgra, int bgraPicth, int width, int height)
{
if ((width * height) >= (RESOLUTION_W4K * RESOLUTION_H4K))
setup_mat_yuv2rgb(solids::lib::video::nvidia::decoder::converter::standard_t::bt2020);
else if ((width * height) >= (RESOLUTION_WFHD * RESOLUTION_HFHD))
setup_mat_yuv2rgb(solids::lib::video::nvidia::decoder::converter::standard_t::bt709);
else
setup_mat_yuv2rgb(solids::lib::video::nvidia::decoder::converter::standard_t::bt601);
yuv_to_rgb_kernel << <dim3((width + 63) / 32 / 2, (height + 3) / 2 / 2), dim3(32, 2) >> > (nv12, nv12Pitch, bgra, bgraPicth, width, height);
}
    // Convert a device NV12 frame to planar I420: copy the luma plane as-is,
    // then split the interleaved chroma plane into U and V planes on-device.
    void decoder::converter::convert_nv12_to_i420(unsigned char* nv12, int nv12Pitch, unsigned char* i420, int i420Pitch, int width, int height)
    {
        // NOTE(review): cerr is captured but never checked — copy failures are silent.
        cudaError_t cerr = cudaMemcpy2D(i420, i420Pitch, nv12, nv12Pitch, width, height, cudaMemcpyDeviceToDevice);
        int chromaWidth = width >> 1;
        int chromaheight = height >> 1;
        unsigned char* nv12Chroma = nv12 + nv12Pitch * height;   // chroma plane starts after the luma rows
        unsigned char* i420Chroma = i420 + i420Pitch * height;
        int nv12ChromaPitch = nv12Pitch;
        int i420ChromaPitch = i420Pitch >> 1;                    // planar chroma rows are half width
        dim3 threadPerBlock(4, 4);
        // NOTE(review): truncating division — chroma dimensions not divisible
        // by 4 leave the right/bottom tail unconverted; use ceil-div if such
        // frame sizes can occur.
        dim3 blocks(chromaWidth / threadPerBlock.x, chromaheight / threadPerBlock.y);
        nv12_to_i420_kernel << <blocks, threadPerBlock >> > (nv12Chroma, nv12ChromaPitch, i420Chroma, i420ChromaPitch, chromaWidth, chromaheight);
    }
void decoder::converter::convert_nv12_to_yv12(unsigned char* nv12, int nv12Pitch, unsigned char* yv12, int yv12Pitch, int width, int height)
{
cudaError_t cerr = cudaMemcpy2D(yv12, yv12Pitch, nv12, nv12Pitch, width, height, cudaMemcpyDeviceToDevice);
int chromaWidth = width >> 1;
int chromaheight = height >> 1;
unsigned char* nv12Chroma = nv12 + nv12Pitch * height;
unsigned char* yv12Chroma = yv12 + yv12Pitch * height;
int nv12ChromaPitch = nv12Pitch;
int yv12ChromaPitch = yv12Pitch >> 1;
dim3 threadPerBlock(4, 4);
dim3 blocks(chromaWidth / threadPerBlock.x, chromaheight / threadPerBlock.y);
nv12_to_yv12_kernel << <blocks, threadPerBlock >> > (nv12Chroma, nv12ChromaPitch, yv12Chroma, yv12ChromaPitch, chromaWidth, chromaheight);
}
void decoder::converter::setup_mat_yuv2rgb(int imat)
{
float wr;
float wb;
int black;
int white;
int max;
constants(imat, wr, wb, black, white, max);
float mat[3][3] = {
1.0f, 0.0f, (1.0f - wr) / 0.5f,
1.0f, -wb * (1.0f - wb) / 0.5f / (1 - wb - wr), -wr * (1 - wr) / 0.5f / (1 - wb - wr),
1.0f, (1.0f - wb) / 0.5f, 0.0f,
};
for (int i = 0; i < 3; i++)
{
for (int j = 0; j < 3; j++)
mat[i][j] = (float)(1.0 * max / (white - black) * mat[i][j]);
}
::cudaMemcpyToSymbol(mat_yuv2rgb, mat, sizeof(mat));
}
    // Fill YUV->RGB conversion constants for the requested colour standard.
    // wr/wb: red/blue luma weights; black/white: nominal luma code range;
    // max: full code range. Defaults correspond to 8-bit BT.709.
    void decoder::converter::constants(int mat, float& wr, float& wb, int& black, int& white, int& max)
    {
        // Default is BT709
        wr = 0.2126f;
        wb = 0.0722f;
        black = 16;
        white = 235;
        max = 255;
        // NOTE(review): the doubled `converter::converter::` qualifier below is
        // legal C++ (injected-class-name) but looks like a typo — confirm it
        // matches the sibling bt2020 branch intentionally.
        if (mat == solids::lib::video::nvidia::decoder::converter::converter::standard_t::bt601)
        {
            wr = 0.2990f;
            wb = 0.1140f;
        }
        else if (mat == solids::lib::video::nvidia::decoder::converter::standard_t::bt2020)
        {
            wr = 0.2627f;
            wb = 0.0593f;
            // 10-bit only
            black = 64 << 6;
            white = 940 << 6;
            max = (1 << 16) - 1;
        }
    }
};
};
};
};
|
407c151771971837424bdd7634633fb9207a1960.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm> // perf stats
#include <cstring>
#include <iomanip>
#include <iostream>
#include <numeric> // perf stats
#include <unistd.h>
#include <vector>
#include "CPPProcess.h"
#include "HelAmps_sm.h"
#include "rambo.h"
#include "timer.h"
#define gpuErrchk3(ans) \
{ gpuAssert3((ans), __FILE__, __LINE__); }
inline void gpuAssert3(hipError_t code, const char *file, int line,
bool abort = true) {
if (code != hipSuccess) {
printf("GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
}
}
#define TIMERTYPE std::chrono::high_resolution_clock
bool is_number(const char *s) {
const char *t = s;
while (*t != '\0' && isdigit(*t))
++t;
return strlen(s) == t - s;
}
int usage(char* argv0, int ret = 1) {
std::cout << "Usage: " << argv0
<< " [--verbose|-v] [--debug|-d] [--performance|-p]"
<< " [#gpuBlocksPerGrid #gpuThreadsPerBlock] #iterations" << std::endl;
return ret;
}
int main(int argc, char **argv) {
bool verbose = false, debug = false, perf = false;
int numiter = 0, gpublocks = 1, gputhreads = 1;
std::vector<int> numvec;
Timer<TIMERTYPE> timer;
std::vector<float> wavetimes;
for (int argn = 1; argn < argc; ++argn) {
if (strcmp(argv[argn], "--verbose") == 0 || strcmp(argv[argn], "-v") == 0)
verbose = true;
else if (strcmp(argv[argn], "--debug") == 0 ||
strcmp(argv[argn], "-d") == 0)
debug = true;
else if (strcmp(argv[argn], "--performance") == 0 ||
strcmp(argv[argn], "-p") == 0)
perf = true;
else if (is_number(argv[argn]))
numvec.push_back(atoi(argv[argn]));
else
return usage(argv[0]);
}
int veclen = numvec.size();
if (veclen == 3) {
gpublocks = numvec[0];
gputhreads = numvec[1];
numiter = numvec[2];
} else if (veclen == 1) {
numiter = numvec[0];
} else {
return usage(argv[0]);
}
if (numiter == 0)
return usage(argv[0]);
hipFree(0);
if (verbose)
std::cout << "# iterations: " << numiter << std::endl;
// Create a process object
CPPProcess process(numiter, gpublocks, gputhreads, verbose, debug);
// Read param_card and set parameters
process.initProc("../../Cards/param_card.dat");
double energy = 1500;
double weight;
int meGeVexponent = -(2 * process.nexternal - 8);
int dim = gpublocks * gputhreads;
// Local Memory
//typedef double arr_t[4][4];
double* lp = new double[4*3*dim];
double* meHostPtr = new double[dim*1];
double *meDevPtr =0;
int num_bytes_back = 1 * dim * sizeof(double);
hipMalloc((void**)&meDevPtr, num_bytes_back);
std::vector<double> matrixelementvector;
for (int x = 0; x < numiter; ++x) {
// Get phase space point
std::vector<std::vector<double *>> p =
get_momenta(process.ninitial, energy, process.getMasses(), weight, dim);
// Set momenta for this event
for (int d = 0; d < dim; ++d) {
for (int i = 0; i < 4; ++i) {
for (int j = 0; j < 3; ++j) {
lp[i*dim*3+j*dim+d] = p[d][i][1+j];
}
}
}
//new
int num_bytes = 3*4*dim * sizeof(double);
double *allmomenta = 0;
hipMalloc((void**)&allmomenta, num_bytes);
hipMemcpy(allmomenta,lp,num_bytes,hipMemcpyHostToDevice);
//gpuErrchk3(hipMemcpy3D(&tdp));
//process.preSigmaKin();
if (perf) {
timer.Start();
}
// Evaluate matrix element
// later process.sigmaKin(ncomb, goodhel, ntry, sum_hel, ngood, igood,
// jhel);
hipLaunchKernelGGL(( sigmaKin), dim3(gpublocks), dim3(gputhreads), 0, 0, allmomenta, meDevPtr);//, debug, verbose);
gpuErrchk3( hipPeekAtLastError() );
//gpuErrchk3(hipMemcpy2D(meHostPtr, sizeof(double), meDevPtr, mePitch,
// sizeof(double), dim, hipMemcpyDeviceToHost));
hipMemcpy(meHostPtr, meDevPtr, 1 * dim*sizeof(double), hipMemcpyDeviceToHost);
if (verbose)
std::cout << "***********************************" << std::endl
<< "Iteration #" << x+1 << " of " << numiter << std::endl;
if (perf) {
float gputime = timer.GetDuration();
wavetimes.push_back(gputime);
if (verbose)
std::cout << "Wave function time: " << gputime << std::endl;
}
if (verbose || perf) {
for (int d = 0; d < dim; ++d) {
if (verbose) {
std::cout << "Momenta:" << std::endl;
for (int i = 0; i < process.nexternal; i++)
std::cout << std::setw(4) << i + 1
<< setiosflags(std::ios::scientific) << std::setw(14)
<< p[d][i][0] << setiosflags(std::ios::scientific)
<< std::setw(14) << p[d][i][1]
<< setiosflags(std::ios::scientific) << std::setw(14)
<< p[d][i][2] << setiosflags(std::ios::scientific)
<< std::setw(14) << p[d][i][3] << std::endl;
std::cout << std::string(80, '-') << std::endl;
}
// Display matrix elements
for (int i = 0; i < process.nprocesses; i++) {
if (verbose)
std::cout << " Matrix element = "
// << setiosflags(ios::fixed) << setprecision(17)
<< meHostPtr[i*1 + d] << " GeV^" << meGeVexponent << std::endl;
if (perf)
matrixelementvector.push_back(meHostPtr[i*1 + d]);
}
if (verbose)
std::cout << std::string(80, '-') << std::endl;
}
} else if (!debug) {
std::cout << ".";
}
for (std::vector<std::vector<double *>>::iterator it = p.begin();
it != p.end(); ++it) {
for (std::vector<double *>::iterator jt = it->begin(); jt != it->end();
++jt) {
delete[] & (**jt);
}
}
}
if (!(verbose || debug || perf)) {
std::cout << std::endl;
}
if (perf) {
float sum = std::accumulate(wavetimes.begin(), wavetimes.end(), 0.0);
int num_wts = wavetimes.size();
float mean = sum / num_wts;
float sq_sum = std::inner_product(wavetimes.begin(), wavetimes.end(),
wavetimes.begin(), 0.0);
float stdev = std::sqrt(sq_sum / num_wts - mean * mean);
std::vector<float>::iterator mintime =
std::min_element(wavetimes.begin(), wavetimes.end());
std::vector<float>::iterator maxtime =
std::max_element(wavetimes.begin(), wavetimes.end());
int num_mes = matrixelementvector.size();
float sumelem = std::accumulate(matrixelementvector.begin(), matrixelementvector.end(), 0.0);
float meanelem = sumelem / num_mes;
float sqselem = std::inner_product(matrixelementvector.begin(), matrixelementvector.end(),
matrixelementvector.begin(), 0.0);
float stdelem = std::sqrt(sqselem / num_mes - meanelem * meanelem);
std::vector<double>::iterator maxelem = std::max_element(
matrixelementvector.begin(), matrixelementvector.end());
std::vector<double>::iterator minelem = std::min_element(
matrixelementvector.begin(), matrixelementvector.end());
std::cout << "***********************************" << std::endl
<< "NumIterations = " << numiter << std::endl
<< "NumThreadsPerBlock = " << gputhreads << std::endl
<< "NumBlocksPerGrid = " << gpublocks << std::endl
<< "-----------------------------------" << std::endl
<< "NumberOfEntries = " << num_wts << std::endl
<< std::scientific
<< "TotalTimeInWaveFuncs = " << sum << " sec" << std::endl
<< "MeanTimeInWaveFuncs = " << mean << " sec" << std::endl
<< "StdDevTimeInWaveFuncs = " << stdev << " sec" << std::endl
<< "MinTimeInWaveFuncs = " << *mintime << " sec" << std::endl
<< "MaxTimeInWaveFuncs = " << *maxtime << " sec" << std::endl
<< "-----------------------------------" << std::endl
<< "ProcessID: = " << getpid() << std::endl
<< "NProcesses = " << process.nprocesses << std::endl
<< "NumMatrixElements = " << num_mes << std::endl
<< "MatrixElementsPerSec = " << num_mes/sum << " sec^-1" << std::endl;
std::cout << "***********************************" << std::endl
<< "NumMatrixElements = " << num_mes << std::endl
<< std::scientific
<< "MeanMatrixElemValue = " << meanelem << " GeV^" << meGeVexponent << std::endl
<< "StdErrMatrixElemValue = " << stdelem/sqrt(num_mes) << " GeV^" << meGeVexponent << std::endl
<< "StdDevMatrixElemValue = " << stdelem << " GeV^" << meGeVexponent << std::endl
<< "MinMatrixElemValue = " << *minelem << " GeV^" << meGeVexponent << std::endl
<< "MaxMatrixElemValue = " << *maxelem << " GeV^" << meGeVexponent << std::endl;
}
delete[] lp;
}
| 407c151771971837424bdd7634633fb9207a1960.cu | #include <algorithm> // perf stats
#include <cstring>
#include <iomanip>
#include <iostream>
#include <numeric> // perf stats
#include <unistd.h>
#include <vector>
#include "CPPProcess.h"
#include "HelAmps_sm.h"
#include "rambo.h"
#include "timer.h"
#define gpuErrchk3(ans) \
{ gpuAssert3((ans), __FILE__, __LINE__); }
inline void gpuAssert3(cudaError_t code, const char *file, int line,
bool abort = true) {
if (code != cudaSuccess) {
printf("GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
}
}
#define TIMERTYPE std::chrono::high_resolution_clock
bool is_number(const char *s) {
const char *t = s;
while (*t != '\0' && isdigit(*t))
++t;
return strlen(s) == t - s;
}
int usage(char* argv0, int ret = 1) {
std::cout << "Usage: " << argv0
<< " [--verbose|-v] [--debug|-d] [--performance|-p]"
<< " [#gpuBlocksPerGrid #gpuThreadsPerBlock] #iterations" << std::endl;
return ret;
}
int main(int argc, char **argv) {
bool verbose = false, debug = false, perf = false;
int numiter = 0, gpublocks = 1, gputhreads = 1;
std::vector<int> numvec;
Timer<TIMERTYPE> timer;
std::vector<float> wavetimes;
for (int argn = 1; argn < argc; ++argn) {
if (strcmp(argv[argn], "--verbose") == 0 || strcmp(argv[argn], "-v") == 0)
verbose = true;
else if (strcmp(argv[argn], "--debug") == 0 ||
strcmp(argv[argn], "-d") == 0)
debug = true;
else if (strcmp(argv[argn], "--performance") == 0 ||
strcmp(argv[argn], "-p") == 0)
perf = true;
else if (is_number(argv[argn]))
numvec.push_back(atoi(argv[argn]));
else
return usage(argv[0]);
}
int veclen = numvec.size();
if (veclen == 3) {
gpublocks = numvec[0];
gputhreads = numvec[1];
numiter = numvec[2];
} else if (veclen == 1) {
numiter = numvec[0];
} else {
return usage(argv[0]);
}
if (numiter == 0)
return usage(argv[0]);
cudaFree(0);
if (verbose)
std::cout << "# iterations: " << numiter << std::endl;
// Create a process object
CPPProcess process(numiter, gpublocks, gputhreads, verbose, debug);
// Read param_card and set parameters
process.initProc("../../Cards/param_card.dat");
double energy = 1500;
double weight;
int meGeVexponent = -(2 * process.nexternal - 8);
int dim = gpublocks * gputhreads;
// Local Memory
//typedef double arr_t[4][4];
double* lp = new double[4*3*dim];
double* meHostPtr = new double[dim*1];
double *meDevPtr =0;
int num_bytes_back = 1 * dim * sizeof(double);
cudaMalloc((void**)&meDevPtr, num_bytes_back);
std::vector<double> matrixelementvector;
for (int x = 0; x < numiter; ++x) {
// Get phase space point
std::vector<std::vector<double *>> p =
get_momenta(process.ninitial, energy, process.getMasses(), weight, dim);
// Set momenta for this event
for (int d = 0; d < dim; ++d) {
for (int i = 0; i < 4; ++i) {
for (int j = 0; j < 3; ++j) {
lp[i*dim*3+j*dim+d] = p[d][i][1+j];
}
}
}
//new
int num_bytes = 3*4*dim * sizeof(double);
double *allmomenta = 0;
cudaMalloc((void**)&allmomenta, num_bytes);
cudaMemcpy(allmomenta,lp,num_bytes,cudaMemcpyHostToDevice);
//gpuErrchk3(cudaMemcpy3D(&tdp));
//process.preSigmaKin();
if (perf) {
timer.Start();
}
// Evaluate matrix element
// later process.sigmaKin(ncomb, goodhel, ntry, sum_hel, ngood, igood,
// jhel);
sigmaKin<<<gpublocks, gputhreads>>>(allmomenta, meDevPtr);//, debug, verbose);
gpuErrchk3( cudaPeekAtLastError() );
//gpuErrchk3(cudaMemcpy2D(meHostPtr, sizeof(double), meDevPtr, mePitch,
// sizeof(double), dim, cudaMemcpyDeviceToHost));
cudaMemcpy(meHostPtr, meDevPtr, 1 * dim*sizeof(double), cudaMemcpyDeviceToHost);
if (verbose)
std::cout << "***********************************" << std::endl
<< "Iteration #" << x+1 << " of " << numiter << std::endl;
if (perf) {
float gputime = timer.GetDuration();
wavetimes.push_back(gputime);
if (verbose)
std::cout << "Wave function time: " << gputime << std::endl;
}
if (verbose || perf) {
for (int d = 0; d < dim; ++d) {
if (verbose) {
std::cout << "Momenta:" << std::endl;
for (int i = 0; i < process.nexternal; i++)
std::cout << std::setw(4) << i + 1
<< setiosflags(std::ios::scientific) << std::setw(14)
<< p[d][i][0] << setiosflags(std::ios::scientific)
<< std::setw(14) << p[d][i][1]
<< setiosflags(std::ios::scientific) << std::setw(14)
<< p[d][i][2] << setiosflags(std::ios::scientific)
<< std::setw(14) << p[d][i][3] << std::endl;
std::cout << std::string(80, '-') << std::endl;
}
// Display matrix elements
for (int i = 0; i < process.nprocesses; i++) {
if (verbose)
std::cout << " Matrix element = "
// << setiosflags(ios::fixed) << setprecision(17)
<< meHostPtr[i*1 + d] << " GeV^" << meGeVexponent << std::endl;
if (perf)
matrixelementvector.push_back(meHostPtr[i*1 + d]);
}
if (verbose)
std::cout << std::string(80, '-') << std::endl;
}
} else if (!debug) {
std::cout << ".";
}
for (std::vector<std::vector<double *>>::iterator it = p.begin();
it != p.end(); ++it) {
for (std::vector<double *>::iterator jt = it->begin(); jt != it->end();
++jt) {
delete[] & (**jt);
}
}
}
if (!(verbose || debug || perf)) {
std::cout << std::endl;
}
if (perf) {
float sum = std::accumulate(wavetimes.begin(), wavetimes.end(), 0.0);
int num_wts = wavetimes.size();
float mean = sum / num_wts;
float sq_sum = std::inner_product(wavetimes.begin(), wavetimes.end(),
wavetimes.begin(), 0.0);
float stdev = std::sqrt(sq_sum / num_wts - mean * mean);
std::vector<float>::iterator mintime =
std::min_element(wavetimes.begin(), wavetimes.end());
std::vector<float>::iterator maxtime =
std::max_element(wavetimes.begin(), wavetimes.end());
int num_mes = matrixelementvector.size();
float sumelem = std::accumulate(matrixelementvector.begin(), matrixelementvector.end(), 0.0);
float meanelem = sumelem / num_mes;
float sqselem = std::inner_product(matrixelementvector.begin(), matrixelementvector.end(),
matrixelementvector.begin(), 0.0);
float stdelem = std::sqrt(sqselem / num_mes - meanelem * meanelem);
std::vector<double>::iterator maxelem = std::max_element(
matrixelementvector.begin(), matrixelementvector.end());
std::vector<double>::iterator minelem = std::min_element(
matrixelementvector.begin(), matrixelementvector.end());
std::cout << "***********************************" << std::endl
<< "NumIterations = " << numiter << std::endl
<< "NumThreadsPerBlock = " << gputhreads << std::endl
<< "NumBlocksPerGrid = " << gpublocks << std::endl
<< "-----------------------------------" << std::endl
<< "NumberOfEntries = " << num_wts << std::endl
<< std::scientific
<< "TotalTimeInWaveFuncs = " << sum << " sec" << std::endl
<< "MeanTimeInWaveFuncs = " << mean << " sec" << std::endl
<< "StdDevTimeInWaveFuncs = " << stdev << " sec" << std::endl
<< "MinTimeInWaveFuncs = " << *mintime << " sec" << std::endl
<< "MaxTimeInWaveFuncs = " << *maxtime << " sec" << std::endl
<< "-----------------------------------" << std::endl
<< "ProcessID: = " << getpid() << std::endl
<< "NProcesses = " << process.nprocesses << std::endl
<< "NumMatrixElements = " << num_mes << std::endl
<< "MatrixElementsPerSec = " << num_mes/sum << " sec^-1" << std::endl;
std::cout << "***********************************" << std::endl
<< "NumMatrixElements = " << num_mes << std::endl
<< std::scientific
<< "MeanMatrixElemValue = " << meanelem << " GeV^" << meGeVexponent << std::endl
<< "StdErrMatrixElemValue = " << stdelem/sqrt(num_mes) << " GeV^" << meGeVexponent << std::endl
<< "StdDevMatrixElemValue = " << stdelem << " GeV^" << meGeVexponent << std::endl
<< "MinMatrixElemValue = " << *minelem << " GeV^" << meGeVexponent << std::endl
<< "MaxMatrixElemValue = " << *maxelem << " GeV^" << meGeVexponent << std::endl;
}
delete[] lp;
}
|
ae49dbd5d63408bbd19acd7d4812696f4e5faa58.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <vector>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <random>
#define BLOCK_SIZE 500
using namespace std;
__global__ void piCalcGPU(float* d_X, float* d_Y, int* d_countInBlocks, int blocksPerGrid, int N)
{
__shared__ int shared_blocks[500];
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * blocksPerGrid;
int points_in_circle = 0;
for (int i = index; i < N; i+= stride) {
if (d_X[i]*d_X[i] + d_Y[i]*d_Y[i] <= 1.0f) {
points_in_circle++;
}
}
shared_blocks[threadIdx.x] = points_in_circle;
__syncthreads();
if (threadIdx.x == 0)
{
int pointsInCircleBlock = 0;
for (int j = 0; j < blockDim.x; j++)
{
pointsInCircleBlock += shared_blocks[j];
}
d_countInBlocks[blockIdx.x] = pointsInCircleBlock;
}
}
float piCalcCPU(int interval, float * X, float * Y) {
int points_in_circle = 0;
float dist;
for(int i = 0; i < interval; i++) {
dist = X[i]*X[i] + Y[i]*Y[i];
if (dist <= 1.0){
points_in_circle++;
}
}
return 4.0f * points_in_circle / interval;
}
float * generateSequencesRandom(int N) {
float * randArr = new float[N];
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_real_distribution<> dis(0.0, 1.0);
for (int n = 0; n < N; ++n) {
randArr[n] = dis(gen);
}
return randArr;
}
int main(int argc, char *argv[]) {
srand(time(NULL));
int N = atoi(argv[1]);
float * h_X = generateSequencesRandom(N);
float * h_Y = generateSequencesRandom(N);
size_t size = N * sizeof(float);
float* d_X;
float* d_Y;
hipMalloc((void **)&d_X, size);
hipMalloc((void **)&d_Y, size);
hipMemcpy(d_X, h_X, size, hipMemcpyHostToDevice);
hipMemcpy(d_Y, h_Y, size, hipMemcpyHostToDevice);
int threadsPerBlock = BLOCK_SIZE;
int blocks = N / BLOCK_SIZE;
int blocksPerGrid = (N % BLOCK_SIZE > 0) ? blocks + 1 : blocks;
size_t countBlocks = blocksPerGrid * sizeof(int);
int* d_countInBlocks;
hipMalloc((void **)&d_countInBlocks, countBlocks);
clock_t start1 = clock();
hipLaunchKernelGGL(( piCalcGPU), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_X, d_Y, d_countInBlocks, blocksPerGrid, N);
if (hipSuccess != hipGetLastError())
cout << "Error!\n";
int* h_countInBlocks = new int[blocksPerGrid];
hipMemcpy(h_countInBlocks, d_countInBlocks, countBlocks, hipMemcpyDeviceToHost);
int N_in_circle = 0;
for (int i = 0 ; i < blocksPerGrid; i++) {
N_in_circle = N_in_circle + h_countInBlocks[i];
}
float pi_gpu = 4.0 * float(N_in_circle) / N;
clock_t stop1 = clock();
float gpu_time = (stop1-start1)/(float)CLOCKS_PER_SEC;
printf("time Pi GPU: %f s.\n", gpu_time);
printf("value Pi GPU: %f\n", pi_gpu);
clock_t start2 = clock();
float pi_cpu = piCalcCPU(N, h_X, h_Y);
clock_t stop2 = clock();
float cpu_time = (stop2-start2)/(float)CLOCKS_PER_SEC;
printf("time Pi CPU: %f s.\n", cpu_time);
printf("value Pi CPU: %f\n", pi_cpu);
printf("Acceleration: %f\n", cpu_time/gpu_time);
delete[]h_X;
delete[]h_Y;
hipFree(d_X);
hipFree(d_Y);
hipFree(d_countInBlocks);
} | ae49dbd5d63408bbd19acd7d4812696f4e5faa58.cu | #include <iostream>
#include <vector>
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand.h>
#include <random>
#define BLOCK_SIZE 500
using namespace std;
__global__ void piCalcGPU(float* d_X, float* d_Y, int* d_countInBlocks, int blocksPerGrid, int N)
{
__shared__ int shared_blocks[500];
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * blocksPerGrid;
int points_in_circle = 0;
for (int i = index; i < N; i+= stride) {
if (d_X[i]*d_X[i] + d_Y[i]*d_Y[i] <= 1.0f) {
points_in_circle++;
}
}
shared_blocks[threadIdx.x] = points_in_circle;
__syncthreads();
if (threadIdx.x == 0)
{
int pointsInCircleBlock = 0;
for (int j = 0; j < blockDim.x; j++)
{
pointsInCircleBlock += shared_blocks[j];
}
d_countInBlocks[blockIdx.x] = pointsInCircleBlock;
}
}
float piCalcCPU(int interval, float * X, float * Y) {
int points_in_circle = 0;
float dist;
for(int i = 0; i < interval; i++) {
dist = X[i]*X[i] + Y[i]*Y[i];
if (dist <= 1.0){
points_in_circle++;
}
}
return 4.0f * points_in_circle / interval;
}
float * generateSequencesRandom(int N) {
float * randArr = new float[N];
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_real_distribution<> dis(0.0, 1.0);
for (int n = 0; n < N; ++n) {
randArr[n] = dis(gen);
}
return randArr;
}
int main(int argc, char *argv[]) {
srand(time(NULL));
int N = atoi(argv[1]);
float * h_X = generateSequencesRandom(N);
float * h_Y = generateSequencesRandom(N);
size_t size = N * sizeof(float);
float* d_X;
float* d_Y;
cudaMalloc((void **)&d_X, size);
cudaMalloc((void **)&d_Y, size);
cudaMemcpy(d_X, h_X, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_Y, h_Y, size, cudaMemcpyHostToDevice);
int threadsPerBlock = BLOCK_SIZE;
int blocks = N / BLOCK_SIZE;
int blocksPerGrid = (N % BLOCK_SIZE > 0) ? blocks + 1 : blocks;
size_t countBlocks = blocksPerGrid * sizeof(int);
int* d_countInBlocks;
cudaMalloc((void **)&d_countInBlocks, countBlocks);
clock_t start1 = clock();
piCalcGPU<<<blocksPerGrid, threadsPerBlock>>>(d_X, d_Y, d_countInBlocks, blocksPerGrid, N);
if (cudaSuccess != cudaGetLastError())
cout << "Error!\n";
int* h_countInBlocks = new int[blocksPerGrid];
cudaMemcpy(h_countInBlocks, d_countInBlocks, countBlocks, cudaMemcpyDeviceToHost);
int N_in_circle = 0;
for (int i = 0 ; i < blocksPerGrid; i++) {
N_in_circle = N_in_circle + h_countInBlocks[i];
}
float pi_gpu = 4.0 * float(N_in_circle) / N;
clock_t stop1 = clock();
float gpu_time = (stop1-start1)/(float)CLOCKS_PER_SEC;
printf("time Pi GPU: %f s.\n", gpu_time);
printf("value Pi GPU: %f\n", pi_gpu);
clock_t start2 = clock();
float pi_cpu = piCalcCPU(N, h_X, h_Y);
clock_t stop2 = clock();
float cpu_time = (stop2-start2)/(float)CLOCKS_PER_SEC;
printf("time Pi CPU: %f s.\n", cpu_time);
printf("value Pi CPU: %f\n", pi_cpu);
printf("Acceleration: %f\n", cpu_time/gpu_time);
delete[]h_X;
delete[]h_Y;
cudaFree(d_X);
cudaFree(d_Y);
cudaFree(d_countInBlocks);
} |
77ce46010671b38583835a366d80900b17e1ca92.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <unistd.h>
#include <hip/hip_runtime.h>
#include <cv.h>
#include <highgui.h>
#define BLOCKSIZE 4
using namespace cv;
using namespace std;
int get_pixel(Mat &img, int i, int j) {
return *(img.data + img.cols * i + j);
}
void set_pixel(Mat &img, int i, int j, int color) {
*(img.data + img.cols * i + j) = color;
}
__global__ void erosion(char *a, char *b, int rows, int cols) {
int i = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x * blockDim.y
+ threadIdx.y * blockDim.x + threadIdx.x;
if(i < rows){
for (int j = 0; j < cols; j++) {
if((i == 0) || (i == rows - 1) || (j == 0) || (j == cols - 1)){
*(b + i * cols + j) = 0;
continue;
}
int origin = *(a + i * cols + j);
int upper = *(a + i * cols + j - 1);
int left = *(a + (i - 1) * cols + j);
int lower = *(a + i * cols + j + 1);
int right = *(a + (i + 1) * cols + j);
if (upper && origin && left && lower && right) {
*(b + i * cols + j) = 255;
}
else {
*(b + i * cols + j) = 0;
}
}
}
}
int main(int argc, char **argv) {
struct timeval start, end;
double cuda_timeuse, serial_timeuse;
Mat src, erosion_dst;
Mat erosion_dst_cuda, binary_dst;
src = imread("test.png", 0);
binary_dst = src.clone();
threshold(src, binary_dst, 150, 255, CV_THRESH_BINARY);
imwrite("binary.png", binary_dst);
erosion_dst = binary_dst.clone();
erosion_dst_cuda = binary_dst.clone();
memset(erosion_dst_cuda.data, 0, binary_dst.rows * binary_dst.cols);
hipError_t error = hipSuccess;
char *device_a, *device_b;
error = hipMalloc((void **)&device_a, binary_dst.rows * binary_dst.cols);
error = hipMalloc((void **)&device_b, binary_dst.rows * binary_dst.cols);
if (error != hipSuccess) {
printf("Fail to hipMalloc on GPU");
return 1;
}
//GPU parallel start
gettimeofday(&start, NULL);
hipMemcpy(device_a, binary_dst.data, binary_dst.rows * binary_dst.cols, hipMemcpyHostToDevice);
int gridsize = (int)ceil(sqrt(ceil(binary_dst.rows / (BLOCKSIZE * BLOCKSIZE))));
dim3 dimBlock(BLOCKSIZE, BLOCKSIZE, 1);
dim3 dimGrid(gridsize, gridsize, 1);
hipLaunchKernelGGL(( erosion), dim3(dimGrid), dim3(dimBlock), 0, 0, device_a, device_b, binary_dst.rows, binary_dst.cols);
hipDeviceSynchronize();
hipMemcpy(erosion_dst_cuda.data, device_b, binary_dst.rows * binary_dst.cols, hipMemcpyDeviceToHost);
gettimeofday(&end, NULL);
cuda_timeuse = 1000000 * (end.tv_sec - start.tv_sec) + end.tv_usec - start.tv_usec;
printf("The Cuda const time is %lf ms\n", cuda_timeuse / 1000);
//GPU parallel end
//CPU serial start
gettimeofday(&start, NULL);
for (int i = 0; i < binary_dst.rows; i++) {
for (int j = 0; j < binary_dst.cols; j++) {
if((i == 0) || (i == binary_dst.rows - 1) || (j == 0) || (j == binary_dst.cols - 1)){
set_pixel(erosion_dst, i, j, 0);
continue;
}
int origin = get_pixel(binary_dst, i, j);
int upper = get_pixel(binary_dst, i, j - 1);
int left = get_pixel(binary_dst, i - 1, j);
int lower = get_pixel(binary_dst, i, j + 1);
int right = get_pixel(binary_dst, i + 1, j);
if (upper && origin && left && lower && right) {
set_pixel(erosion_dst, i, j, 255);
}
else {
set_pixel(erosion_dst, i, j, 0);
}
}
}
gettimeofday(&end, NULL);
serial_timeuse = 1000000 * (end.tv_sec - start.tv_sec) + end.tv_usec - start.tv_usec;
printf("The Serial const time is %f ms\n", serial_timeuse / 1000);
//CPU serial end
imwrite("serial.png", erosion_dst);
imwrite("cuda.png", erosion_dst_cuda);
//check
int errorNum = 0;
for (int i = 1; i < binary_dst.rows - 1; i++) {
for (int j = 1; j < binary_dst.cols - 1; j++) {
if(get_pixel(erosion_dst, i, j) != get_pixel(erosion_dst_cuda, i, j)) {
errorNum ++;
printf("%d %d\n", get_pixel(erosion_dst, i, j), get_pixel(erosion_dst_cuda, i, j));
}
}
}
if (errorNum == 0) {
printf("Successfully run on GPU and CPU!\n");
} else {
printf("%d error(s) occurs!\n", errorNum);
}
waitKey();
hipFree(device_a);
hipFree(device_b);
return 0;
}
| 77ce46010671b38583835a366d80900b17e1ca92.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <unistd.h>
#include <cuda.h>
#include <cv.h>
#include <highgui.h>
#define BLOCKSIZE 4
using namespace cv;
using namespace std;
int get_pixel(Mat &img, int i, int j) {
return *(img.data + img.cols * i + j);
}
void set_pixel(Mat &img, int i, int j, int color) {
*(img.data + img.cols * i + j) = color;
}
__global__ void erosion(char *a, char *b, int rows, int cols) {
int i = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x * blockDim.y
+ threadIdx.y * blockDim.x + threadIdx.x;
if(i < rows){
for (int j = 0; j < cols; j++) {
if((i == 0) || (i == rows - 1) || (j == 0) || (j == cols - 1)){
*(b + i * cols + j) = 0;
continue;
}
int origin = *(a + i * cols + j);
int upper = *(a + i * cols + j - 1);
int left = *(a + (i - 1) * cols + j);
int lower = *(a + i * cols + j + 1);
int right = *(a + (i + 1) * cols + j);
if (upper && origin && left && lower && right) {
*(b + i * cols + j) = 255;
}
else {
*(b + i * cols + j) = 0;
}
}
}
}
int main(int argc, char **argv) {
struct timeval start, end;
double cuda_timeuse, serial_timeuse;
Mat src, erosion_dst;
Mat erosion_dst_cuda, binary_dst;
src = imread("test.png", 0);
binary_dst = src.clone();
threshold(src, binary_dst, 150, 255, CV_THRESH_BINARY);
imwrite("binary.png", binary_dst);
erosion_dst = binary_dst.clone();
erosion_dst_cuda = binary_dst.clone();
memset(erosion_dst_cuda.data, 0, binary_dst.rows * binary_dst.cols);
cudaError_t error = cudaSuccess;
char *device_a, *device_b;
error = cudaMalloc((void **)&device_a, binary_dst.rows * binary_dst.cols);
error = cudaMalloc((void **)&device_b, binary_dst.rows * binary_dst.cols);
if (error != cudaSuccess) {
printf("Fail to cudaMalloc on GPU");
return 1;
}
//GPU parallel start
gettimeofday(&start, NULL);
cudaMemcpy(device_a, binary_dst.data, binary_dst.rows * binary_dst.cols, cudaMemcpyHostToDevice);
int gridsize = (int)ceil(sqrt(ceil(binary_dst.rows / (BLOCKSIZE * BLOCKSIZE))));
dim3 dimBlock(BLOCKSIZE, BLOCKSIZE, 1);
dim3 dimGrid(gridsize, gridsize, 1);
erosion<<<dimGrid, dimBlock>>>(device_a, device_b, binary_dst.rows, binary_dst.cols);
cudaThreadSynchronize();
cudaMemcpy(erosion_dst_cuda.data, device_b, binary_dst.rows * binary_dst.cols, cudaMemcpyDeviceToHost);
gettimeofday(&end, NULL);
cuda_timeuse = 1000000 * (end.tv_sec - start.tv_sec) + end.tv_usec - start.tv_usec;
printf("The Cuda const time is %lf ms\n", cuda_timeuse / 1000);
//GPU parallel end
//CPU serial start
gettimeofday(&start, NULL);
for (int i = 0; i < binary_dst.rows; i++) {
for (int j = 0; j < binary_dst.cols; j++) {
if((i == 0) || (i == binary_dst.rows - 1) || (j == 0) || (j == binary_dst.cols - 1)){
set_pixel(erosion_dst, i, j, 0);
continue;
}
int origin = get_pixel(binary_dst, i, j);
int upper = get_pixel(binary_dst, i, j - 1);
int left = get_pixel(binary_dst, i - 1, j);
int lower = get_pixel(binary_dst, i, j + 1);
int right = get_pixel(binary_dst, i + 1, j);
if (upper && origin && left && lower && right) {
set_pixel(erosion_dst, i, j, 255);
}
else {
set_pixel(erosion_dst, i, j, 0);
}
}
}
gettimeofday(&end, NULL);
serial_timeuse = 1000000 * (end.tv_sec - start.tv_sec) + end.tv_usec - start.tv_usec;
printf("The Serial const time is %f ms\n", serial_timeuse / 1000);
//CPU serial end
imwrite("serial.png", erosion_dst);
imwrite("cuda.png", erosion_dst_cuda);
//check
int errorNum = 0;
for (int i = 1; i < binary_dst.rows - 1; i++) {
for (int j = 1; j < binary_dst.cols - 1; j++) {
if(get_pixel(erosion_dst, i, j) != get_pixel(erosion_dst_cuda, i, j)) {
errorNum ++;
printf("%d %d\n", get_pixel(erosion_dst, i, j), get_pixel(erosion_dst_cuda, i, j));
}
}
}
if (errorNum == 0) {
printf("Successfully run on GPU and CPU!\n");
} else {
printf("%d error(s) occurs!\n", errorNum);
}
waitKey();
cudaFree(device_a);
cudaFree(device_b);
return 0;
}
|
ae40be50f7868be2091678e74874e39795792521.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cstring>
#include <malloc.h>
#include "config.h"
#include "simpleImp.h"
#include "NaiveGPUTranspose.h"
#include "MatrixTransposeShared.h"
#include "MatrixTransposeSolveBankConflicts.h"
#include "MatrixTransposeUnloop.h"
using namespace std;
const int blockx = BLOCK_X; const int blocky = BLOCK_Y;
const int threadx = BLOCK_SIZE; const int thready = BLOCK_SIZE;
int row = blocky * thready;
int column = blockx * threadx;
typedef void(*transFunc)(const int *, int *, const int, const int);
void transpose0();
void transpose1();
void transpose2();
void transpose3();
void transpose4();
void testFunc(transFunc fun, bool);
int main(int argc, char * argv[]) {
string type;
if (argc > 1) {
type = string(argv[1]);
} else {
cout<<"Usage: Enter parameter which range from 0 to 4."<<endl;
cout<<"0:CPU, 1:NaiveGPU, 2:SharedMemory, 3:SolveConflicts, 4:Unroll"<<endl;
return 0;
}
if (type.length() > 1) {
cout<<"Usage: Enter parameter which range from 0 to 4."<<endl;
cout<<"0:CPU, 1:NaiveGPU, 2:SharedMemory, 3:SolveConflicts, 4:Unroll"<<endl;
return 0;
}
char t = type[0];
switch (t) {
case '0':
transpose0();
break;
case '1':
transpose1();
break;
case '2':
transpose2();
break;
case '3':
transpose3();
break;
case '4':
transpose4();
break;
default:
cout<<"Usage: Enter parameter which range from 0 to 4."<<endl;
cout<<"0:CPU, 1:NaiveGPU, 2:SharedMemory, 3:SolveConflicts, 4:Unroll"<<endl;
return 0;
}
return 0;
}
void transpose0() {
int values = 0;
int **source = new int*[row];
for (int i = 0; i < row; ++i) {
source[i] = new int[column];
for (int j = 0; j < column; ++j) {
source[i][j] = values;
values++;
}
}
int **dest = new int*[column];
for (int i=0; i<column; ++i) {
dest[i] = new int[row];
}
simpleTransposeMatrix(source, dest, row, column);
for (int i=0; i<column; ++i) {
for (int j=0; j<row; ++j) {
cout<<dest[i][j]<<" ";
}
cout<<endl;
}
for (int i=0; i<row; ++i) delete [] source[i];
delete [] source;
for (int i=0; i<column; ++i) delete [] dest[i];
delete [] dest;
}
void transpose1() {
transFunc fun = &naiveGPUTranspose;
testFunc(fun, false);
}
void transpose2() {
transFunc fun = &matrixTransposeShared;
testFunc(fun, false);
}
void transpose3() {
transFunc fun = &matrixTransposeSolveBankConflicts;
testFunc(fun, false);
}
void transpose4() {
transFunc fun = &matrixTransposeUnloop;
testFunc(fun, true);
}
void testFunc(transFunc fun, bool flag) {
int values = 0;
int *source, *dest;
int *d_source, *d_dest;
size_t size = row * column * sizeof(int);
source = (int *)malloc(size);
dest = (int *)malloc(size);
hipMalloc((void **)&d_source, size);
hipMalloc((void **)&d_dest, size);
for (int i = 0; i < row; ++i) {
for (int j = 0; j < column; ++j) {
source[i*column+j] = values;
values++;
}
}
hipMemcpy(d_source, source, size, hipMemcpyHostToDevice);
if (!flag) {
dim3 threadPerBlock(threadx, thready);
dim3 numBlocks(blockx, blocky);
hipLaunchKernelGGL(( fun), dim3(numBlocks), dim3(threadPerBlock), 0, 0, d_source, d_dest, row, column);
}
else {
dim3 threadPerBlock(TILE, SIDE);
dim3 numBlocks(blockx, blocky);
hipLaunchKernelGGL(( fun), dim3(numBlocks), dim3(threadPerBlock), 0, 0, d_source, d_dest, row, column);
}
hipMemcpy(dest, d_dest, size, hipMemcpyDeviceToHost);
for (int i=0; i < column; ++i) {
for (int j = 0; j < row; ++j) {
cout<<dest[i*row+j]<<' ';
}
cout<<endl;
}
hipFree(d_source);
hipFree(d_dest);
free(source);
free(dest);
}
| ae40be50f7868be2091678e74874e39795792521.cu | #include <iostream>
#include <cstring>
#include <malloc.h>
#include "config.h"
#include "simpleImp.h"
#include "NaiveGPUTranspose.h"
#include "MatrixTransposeShared.h"
#include "MatrixTransposeSolveBankConflicts.h"
#include "MatrixTransposeUnloop.h"
using namespace std;
const int blockx = BLOCK_X; const int blocky = BLOCK_Y;
const int threadx = BLOCK_SIZE; const int thready = BLOCK_SIZE;
int row = blocky * thready;
int column = blockx * threadx;
typedef void(*transFunc)(const int *, int *, const int, const int);
void transpose0();
void transpose1();
void transpose2();
void transpose3();
void transpose4();
void testFunc(transFunc fun, bool);
int main(int argc, char * argv[]) {
string type;
if (argc > 1) {
type = string(argv[1]);
} else {
cout<<"Usage: Enter parameter which range from 0 to 4."<<endl;
cout<<"0:CPU, 1:NaiveGPU, 2:SharedMemory, 3:SolveConflicts, 4:Unroll"<<endl;
return 0;
}
if (type.length() > 1) {
cout<<"Usage: Enter parameter which range from 0 to 4."<<endl;
cout<<"0:CPU, 1:NaiveGPU, 2:SharedMemory, 3:SolveConflicts, 4:Unroll"<<endl;
return 0;
}
char t = type[0];
switch (t) {
case '0':
transpose0();
break;
case '1':
transpose1();
break;
case '2':
transpose2();
break;
case '3':
transpose3();
break;
case '4':
transpose4();
break;
default:
cout<<"Usage: Enter parameter which range from 0 to 4."<<endl;
cout<<"0:CPU, 1:NaiveGPU, 2:SharedMemory, 3:SolveConflicts, 4:Unroll"<<endl;
return 0;
}
return 0;
}
void transpose0() {
int values = 0;
int **source = new int*[row];
for (int i = 0; i < row; ++i) {
source[i] = new int[column];
for (int j = 0; j < column; ++j) {
source[i][j] = values;
values++;
}
}
int **dest = new int*[column];
for (int i=0; i<column; ++i) {
dest[i] = new int[row];
}
simpleTransposeMatrix(source, dest, row, column);
for (int i=0; i<column; ++i) {
for (int j=0; j<row; ++j) {
cout<<dest[i][j]<<" ";
}
cout<<endl;
}
for (int i=0; i<row; ++i) delete [] source[i];
delete [] source;
for (int i=0; i<column; ++i) delete [] dest[i];
delete [] dest;
}
void transpose1() {
transFunc fun = &naiveGPUTranspose;
testFunc(fun, false);
}
void transpose2() {
transFunc fun = &matrixTransposeShared;
testFunc(fun, false);
}
void transpose3() {
transFunc fun = &matrixTransposeSolveBankConflicts;
testFunc(fun, false);
}
void transpose4() {
transFunc fun = &matrixTransposeUnloop;
testFunc(fun, true);
}
void testFunc(transFunc fun, bool flag) {
int values = 0;
int *source, *dest;
int *d_source, *d_dest;
size_t size = row * column * sizeof(int);
source = (int *)malloc(size);
dest = (int *)malloc(size);
cudaMalloc((void **)&d_source, size);
cudaMalloc((void **)&d_dest, size);
for (int i = 0; i < row; ++i) {
for (int j = 0; j < column; ++j) {
source[i*column+j] = values;
values++;
}
}
cudaMemcpy(d_source, source, size, cudaMemcpyHostToDevice);
if (!flag) {
dim3 threadPerBlock(threadx, thready);
dim3 numBlocks(blockx, blocky);
fun<<<numBlocks, threadPerBlock>>>(d_source, d_dest, row, column);
}
else {
dim3 threadPerBlock(TILE, SIDE);
dim3 numBlocks(blockx, blocky);
fun<<<numBlocks, threadPerBlock>>>(d_source, d_dest, row, column);
}
cudaMemcpy(dest, d_dest, size, cudaMemcpyDeviceToHost);
for (int i=0; i < column; ++i) {
for (int j = 0; j < row; ++j) {
cout<<dest[i*row+j]<<' ';
}
cout<<endl;
}
cudaFree(d_source);
cudaFree(d_dest);
free(source);
free(dest);
}
|
a94a763d6b75efd1fd8e7179b98239ae6517abd1.hip | // !!! This is a file automatically generated by hipify!!!
/*
* EDDL Library - European Distributed Deep Learning Library.
* Version: 0.6
* copyright (c) 2020, Universidad Politcnica de Valencia (UPV), PRHLT Research Centre
* Date: April 2020
* Author: PRHLT Research Centre, UPV, (rparedes@prhlt.upv.es), (jon@prhlt.upv.es)
* All rights reserved
*/
#include <string.h>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <hip/hip_runtime.h>
#include "eddl/hardware/gpu/gpu_kernels.h"
__global__ void abs_(float* a, long int size){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size)
a[thread_id_x]=fabsf(a[thread_id_x]);
}
__global__ void acos_(float* a, long int size){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size)
a[thread_id_x]=acosf(a[thread_id_x]);
}
__global__ void add_(float* a, long int size, float v){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size)
a[thread_id_x]+=v;
}
__global__ void asin_(float* a, long int size){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size)
a[thread_id_x]=asinf(a[thread_id_x]);
}
__global__ void atan_(float* a, long int size){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size)
a[thread_id_x]=atanf(a[thread_id_x]);
}
__global__ void ceil_(float* a, long int size){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size)
a[thread_id_x]=ceilf(a[thread_id_x]);
}
__global__ void clamp_(float* a, long int size, float min, float max){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size)
if (a[thread_id_x] < min){
a[thread_id_x] = min;
} else if(a[thread_id_x] > max){
a[thread_id_x] = max;
}
}
__global__ void cos_(float* a, long int size){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size)
a[thread_id_x]=cosf(a[thread_id_x]);
}
__global__ void cosh_(float* a, long int size){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size)
a[thread_id_x]=coshf(a[thread_id_x]);
}
__global__ void exp_(float* a, long int size){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size)
a[thread_id_x]=expf(a[thread_id_x]);
}
__global__ void floor_(float* a, long int size){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size)
a[thread_id_x]=floorf(a[thread_id_x]);
}
__global__ void log_(float* a, long int size){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size)
a[thread_id_x]=logf(a[thread_id_x]);
}
__global__ void log2_(float* a, long int size){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size)
a[thread_id_x]=log2f(a[thread_id_x]);
}
__global__ void log10_(float* a, long int size){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size)
a[thread_id_x]=log10f(a[thread_id_x]);
}
__global__ void logn_(float* a, long int size, float n){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size)
a[thread_id_x]=logf(a[thread_id_x])/logf(n);
}
__global__ void mod_(float* a, long int size, float v){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size)
a[thread_id_x]=fmodf(a[thread_id_x], v);
}
__global__ void inv_(float* a, float v, long int size){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size)
a[thread_id_x] = v/a[thread_id_x];
}
__global__ void mult_(float* a, long int size, float v){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size)
a[thread_id_x] *= v;
}
__global__ void normalize_(float* a, long int size, float min_ori, float max_ori, float min, float max){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size)
a[thread_id_x]=(max-min)/(max_ori-min_ori) * (a[thread_id_x]-min_ori) + min;
}
__global__ void pow_(float* a, long int size, float exp){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size)
a[thread_id_x]=powf(a[thread_id_x], exp);
}
__global__ void powb_(float* a, long int size, float base){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size)
a[thread_id_x]=powf(base, a[thread_id_x]);
}
__global__ void reciprocal_(float* a, long int size){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size)
a[thread_id_x]=1.0f/a[thread_id_x];
}
__global__ void remainder_(float* a, long int size, float v){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size)
a[thread_id_x] = (int)(a[thread_id_x]/v);
}
__global__ void round_(float* a, long int size){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size)
a[thread_id_x]=roundf(a[thread_id_x]);
}
__global__ void rsqrt_(float* a, long int size){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size)
a[thread_id_x]=1.0f/sqrtf(a[thread_id_x]);
}
__global__ void sigmoid_(float* a, long int size){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size)
a[thread_id_x] = expf(a[thread_id_x])/(expf(a[thread_id_x])+1.0f);
}
__global__ void sign_(float* a, long int size){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size){
if(a[thread_id_x] > 0.0f){
a[thread_id_x] = 1.0f;
}else if(a[thread_id_x] < 0.0f){
a[thread_id_x] = -1.0f;
}else{
a[thread_id_x] = 0.0f;
}
}
}
__global__ void sin_(float* a, long int size){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size)
a[thread_id_x]=sinf(a[thread_id_x]);
}
__global__ void sinh_(float* a, long int size){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size)
a[thread_id_x]=sinhf(a[thread_id_x]);
}
__global__ void sqr_(float* a, long int size){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size)
a[thread_id_x]*=a[thread_id_x];
}
__global__ void sqrt_(float* a, long int size){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size)
a[thread_id_x]=sqrtf(a[thread_id_x]);
}
__global__ void tan_(float* a, long int size){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size)
a[thread_id_x]=tanf(a[thread_id_x]);
}
__global__ void tanh_(float* a, long int size){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size)
a[thread_id_x]=tanhf(a[thread_id_x]);
}
__global__ void trunc_(float* a, long int size){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size)
a[thread_id_x]= truncf(a[thread_id_x]);
}
///////////////////////////////////////////
// Scaled elementwise sum: c = scA*a + scB*b, or c += scA*a + scB*b when
// incC is nonzero. One thread per element, guarded against the grid tail.
__global__ void addc(float scA,float* a,float scB,float *b, float *c,long int incC, long int size)
{
    const long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    const float v = scA * a[i] + scB * b[i];
    if (incC) c[i] += v;
    else      c[i]  = v;
}
// Elementwise product: c = a*b, or c += a*b when incC is nonzero.
__global__ void el_mult(float* a, float *b, float *c, long int incC, long int size)
{
    const long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    const float p = a[i] * b[i];
    if (incC) c[i] += p;
    else      c[i]  = p;
}
// Elementwise quotient: c = a/b, or c += a/b when incC is nonzero.
// No zero-guard: b[i]==0 yields inf/NaN, exactly as before.
__global__ void el_div(float* a, float *b, float *c, long int incC, long int size)
{
    const long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    const float q = a[i] / b[i];
    if (incC) c[i] += q;
    else      c[i]  = q;
}
// Row-broadcast add on a rows x cols matrix: c[i] = a[i] + b[i % cols],
// i.e. the cols-length vector b is added to every row of a.
__global__ void sum_mat_row(float* a, float* b, float* c, long int rows,long int cols)
{
    const long int total = rows * cols;
    const long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= total) return;
    c[i] = a[i] + b[i % cols];
}
// Column-broadcast add on a rows x cols matrix: c[i] = a[i] + b[i / cols],
// i.e. the rows-length vector b is added to every column of a.
__global__ void sum_mat_col(float* a, float* b, float* c, long int rows,long int cols)
{
    const long int total = rows * cols;
    const long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= total) return;
    c[i] = a[i] + b[i / cols];
}
// Sum-reduce a rows x cols matrix `a` into vector `b` along `axis`:
//   axis==0 : b[j] accumulates column j (b must hold `cols` entries);
//   else    : b[i] accumulates row i   (b must hold `rows` entries).
// The element at flat index t belongs to column t%cols / row t/cols.
// `b` is accumulated into with atomicAdd, so the caller must zero it first;
// float atomics also make the summation order (and therefore the rounding)
// non-deterministic between launches.
__global__ void reduce_sum2D(float *a,float *b,long int rows,long int cols,long int axis)
{
long int ops=rows*cols;
long int thread_id_x = threadIdx.x+(blockDim.x*blockIdx.x);
if (thread_id_x < ops){
if (axis==0)
atomicAdd(&(b[thread_id_x%cols]),a[thread_id_x]);
else
atomicAdd(&(b[thread_id_x/cols]),a[thread_id_x]);
}
}
| a94a763d6b75efd1fd8e7179b98239ae6517abd1.cu | /*
* EDDL Library - European Distributed Deep Learning Library.
* Version: 0.6
* copyright (c) 2020, Universidad Politécnica de Valencia (UPV), PRHLT Research Centre
* Date: April 2020
* Author: PRHLT Research Centre, UPV, (rparedes@prhlt.upv.es), (jon@prhlt.upv.es)
* All rights reserved
*/
#include <string.h>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <cuda.h>
#include "eddl/hardware/gpu/gpu_kernels.h"
// In-place elementwise absolute value: a[i] <- fabsf(a[i]).
__global__ void abs_(float* a, long int size){
    const long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    a[i] = fabsf(a[i]);
}
// In-place elementwise arc-cosine: a[i] <- acosf(a[i]).
// Inputs outside [-1, 1] produce NaN, as with the original.
__global__ void acos_(float* a, long int size){
    const long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    a[i] = acosf(a[i]);
}
// In-place scalar addition: a[i] <- a[i] + v.
__global__ void add_(float* a, long int size, float v){
    const long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    a[i] += v;
}
// In-place elementwise arc-sine: a[i] <- asinf(a[i]).
__global__ void asin_(float* a, long int size){
    const long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    a[i] = asinf(a[i]);
}
// In-place elementwise arc-tangent: a[i] <- atanf(a[i]).
__global__ void atan_(float* a, long int size){
    const long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    a[i] = atanf(a[i]);
}
// In-place elementwise ceiling: a[i] <- ceilf(a[i]).
__global__ void ceil_(float* a, long int size){
    const long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    a[i] = ceilf(a[i]);
}
// In-place elementwise clamp of a[i] to [min, max].
// Deliberately keeps the explicit comparisons (rather than fminf/fmaxf)
// so NaN inputs pass through unchanged, matching the original behavior.
__global__ void clamp_(float* a, long int size, float min, float max){
    const long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    const float v = a[i];
    if (v < min)      a[i] = min;
    else if (v > max) a[i] = max;
}
// In-place elementwise cosine: a[i] <- cosf(a[i]).
__global__ void cos_(float* a, long int size){
    const long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    a[i] = cosf(a[i]);
}
// In-place elementwise hyperbolic cosine: a[i] <- coshf(a[i]).
__global__ void cosh_(float* a, long int size){
    const long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    a[i] = coshf(a[i]);
}
// In-place elementwise exponential: a[i] <- expf(a[i]).
__global__ void exp_(float* a, long int size){
    const long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    a[i] = expf(a[i]);
}
// In-place elementwise floor: a[i] <- floorf(a[i]).
__global__ void floor_(float* a, long int size){
    const long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    a[i] = floorf(a[i]);
}
// In-place elementwise natural logarithm: a[i] <- logf(a[i]).
// Non-positive inputs yield -inf/NaN, as with the original.
__global__ void log_(float* a, long int size){
    const long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    a[i] = logf(a[i]);
}
// In-place elementwise base-2 logarithm: a[i] <- log2f(a[i]).
__global__ void log2_(float* a, long int size){
    const long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    a[i] = log2f(a[i]);
}
// In-place elementwise base-10 logarithm: a[i] <- log10f(a[i]).
__global__ void log10_(float* a, long int size){
    const long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    a[i] = log10f(a[i]);
}
// In-place elementwise base-n logarithm via the change-of-base identity:
// a[i] <- logf(a[i]) / logf(n). The per-element division is kept (rather
// than premultiplying by 1/logf(n)) to preserve the original rounding.
__global__ void logn_(float* a, long int size, float n){
    const long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    a[i] = logf(a[i]) / logf(n);
}
// In-place elementwise floating-point remainder: a[i] <- fmodf(a[i], v)
// (result has the sign of a[i]; v==0 yields NaN, as before).
__global__ void mod_(float* a, long int size, float v){
    const long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    a[i] = fmodf(a[i], v);
}
// In-place scaled inverse: a[i] <- v / a[i]. Zero entries yield +/-inf.
__global__ void inv_(float* a, float v, long int size){
    const long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    a[i] = v / a[i];
}
// In-place scalar multiplication: a[i] <- a[i] * v.
__global__ void mult_(float* a, long int size, float v){
    const long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    a[i] *= v;
}
// In-place affine rescale from the range [min_ori, max_ori] to [min, max]:
// a[i] <- (max-min)/(max_ori-min_ori) * (a[i]-min_ori) + min.
// Values outside the source range extrapolate linearly (no clamping).
__global__ void normalize_(float* a, long int size, float min_ori, float max_ori, float min, float max){
    const long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    const float scale = (max - min) / (max_ori - min_ori);
    a[i] = scale * (a[i] - min_ori) + min;
}
// In-place elementwise power with a fixed exponent: a[i] <- a[i]^exp.
__global__ void pow_(float* a, long int size, float exp){
    const long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    a[i] = powf(a[i], exp);
}
// In-place elementwise exponentiation with a fixed base: a[i] <- base^a[i].
__global__ void powb_(float* a, long int size, float base){
    const long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    a[i] = powf(base, a[i]);
}
// In-place elementwise reciprocal: a[i] <- 1/a[i]. Zero entries yield +/-inf.
__global__ void reciprocal_(float* a, long int size){
    const long int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    a[i] = 1.0f / a[i];
}
// NOTE(review): despite its name this stores the *truncated integer
// quotient* (int)(a[i]/v), not a remainder -- the elementwise remainder
// is mod_() above (fmodf). Presumably intentional upstream, but worth
// confirming against the callers before relying on the name. Behavior
// left unchanged here.
__global__ void remainder_(float* a, long int size, float v){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size)
a[thread_id_x] = (int)(a[thread_id_x]/v);
}
__global__ void round_(float* a, long int size){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size)
a[thread_id_x]=roundf(a[thread_id_x]);
}
__global__ void rsqrt_(float* a, long int size){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size)
a[thread_id_x]=1.0f/sqrtf(a[thread_id_x]);
}
// In-place elementwise logistic sigmoid: a[i] <- 1/(1+exp(-a[i])).
// Bug fix: the original form expf(x)/(expf(x)+1.0f) overflows once
// x > ~88 (expf -> inf), producing inf/inf = NaN. The algebraically
// equivalent 1/(1+expf(-x)) saturates cleanly to 1.0f for large x and
// underflows gracefully to 0.0f for large negative x.
__global__ void sigmoid_(float* a, long int size){
    long int thread_id_x = threadIdx.x + blockIdx.x * blockDim.x;
    if (thread_id_x < size)
        a[thread_id_x] = 1.0f / (1.0f + expf(-a[thread_id_x]));
}
__global__ void sign_(float* a, long int size){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size){
if(a[thread_id_x] > 0.0f){
a[thread_id_x] = 1.0f;
}else if(a[thread_id_x] < 0.0f){
a[thread_id_x] = -1.0f;
}else{
a[thread_id_x] = 0.0f;
}
}
}
__global__ void sin_(float* a, long int size){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size)
a[thread_id_x]=sinf(a[thread_id_x]);
}
__global__ void sinh_(float* a, long int size){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size)
a[thread_id_x]=sinhf(a[thread_id_x]);
}
__global__ void sqr_(float* a, long int size){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size)
a[thread_id_x]*=a[thread_id_x];
}
__global__ void sqrt_(float* a, long int size){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size)
a[thread_id_x]=sqrtf(a[thread_id_x]);
}
__global__ void tan_(float* a, long int size){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size)
a[thread_id_x]=tanf(a[thread_id_x]);
}
__global__ void tanh_(float* a, long int size){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size)
a[thread_id_x]=tanhf(a[thread_id_x]);
}
__global__ void trunc_(float* a, long int size){
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size)
a[thread_id_x]= truncf(a[thread_id_x]);
}
///////////////////////////////////////////
__global__ void addc(float scA,float* a,float scB,float *b, float *c,long int incC, long int size)
{
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size) {
if (incC) c[thread_id_x]+=scA*a[thread_id_x]+scB*b[thread_id_x];
else c[thread_id_x]=scA*a[thread_id_x]+scB*b[thread_id_x];
}
}
__global__ void el_mult(float* a, float *b, float *c, long int incC, long int size)
{
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size)
if (incC) c[thread_id_x]+=a[thread_id_x]*b[thread_id_x];
else c[thread_id_x]=a[thread_id_x]*b[thread_id_x];
}
__global__ void el_div(float* a, float *b, float *c, long int incC, long int size)
{
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < size)
if (incC) c[thread_id_x]+=a[thread_id_x]/(b[thread_id_x]);
else c[thread_id_x]=a[thread_id_x]/(b[thread_id_x]);
}
__global__ void sum_mat_row(float* a, float* b, float* c, long int rows,long int cols)
{
long int ops=rows*cols;
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < ops)
c[thread_id_x]=a[thread_id_x]+b[thread_id_x%cols];
}
__global__ void sum_mat_col(float* a, float* b, float* c, long int rows,long int cols)
{
long int ops=rows*cols;
long int thread_id_x = threadIdx.x+blockIdx.x*blockDim.x;
if (thread_id_x < ops)
c[thread_id_x]=a[thread_id_x]+b[thread_id_x/cols];
}
__global__ void reduce_sum2D(float *a,float *b,long int rows,long int cols,long int axis)
{
long int ops=rows*cols;
long int thread_id_x = threadIdx.x+(blockDim.x*blockIdx.x);
if (thread_id_x < ops){
if (axis==0)
atomicAdd(&(b[thread_id_x%cols]),a[thread_id_x]);
else
atomicAdd(&(b[thread_id_x/cols]),a[thread_id_x]);
}
}
|
f62811c78829fdc8fc96050c30b6eeccf12c6c82.hip | // !!! This is a file automatically generated by hipify!!!
/* Before you use this version, double check the GPU memory capacity,
* Typically, we need GPU to able to take the size of proj_data_size*2 + volume_size*8.
* Otherwise you have to choose old versions, or try to modify this version to use CPU computing/storage as much as possible
**/
#include <cmath>
#include <fstream>
#include <time.h>
#include <iostream>
#include <stddef.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <assert.h>
#define Default_GPU 0
#define Number_of_Devices 2 // if it is an odd, please modify your volume and projection size
#define FBCT 0 // 0: CBCT; 1: multiple layer FBCT
#define DEBUG 1
using namespace std;
// Mode selection
const int FISTA = 1;
// -2: execution configuration test
// -1: adjoint operator check
// 0: SART + TV
// 1: SART + TV + FISTA
const float lambda_TV = 0.00f; //regularization parameter for the tv norm
const float lambda_L1 = 0.00f; //regularization parameter for the l1 norm
const int Iter_num = 1;
const float Lip_con = 32.0f;
// Distances
const float DSO = 1.0f;
const float DOD = -1.0f;
// Reconstructed volume properties
const int M = 512; // reconstruction volume x range
const int N = 512; // reconstruction volume y range
const int ZETA = 512; // reconstruction volume z range
const int NO_X = M;
const int NO_Y = N;
const int NO_Z = ZETA;
const int NO_VOXEL = M*N*ZETA;
const float volumn_x = 1e-4 ; // (in meter)
const float inv_volumn_x = 1.0/volumn_x;
const int M_Offset = 0; // for half detector use
const float boundary_voxel_x = -volumn_x*(float(M)/2.0f+M_Offset);
const float volumn_y = volumn_x ;
const float inv_volumn_y = 1.0/volumn_y;
const float boundary_voxel_y = -volumn_y*(float(N)/2.0f);
const float volumn_z = volumn_x ;
const float inv_volumn_z = 1.0/volumn_z;
const float boundary_voxel_z = -volumn_z*(float(ZETA)/2.0f);
// parameters for half detector offset
const float Offset = 0;
// Source properties
const float Source_x = DSO;
const float Source_y = Offset;
const float Source_z = 0;
// Projection properties
const int R = 1024; // detector width
const int Z_prj = 1024; // detector height
// Note: for FBCT, Z_prj = ZETA
const float Detector_pixel_x = 1.2e-4;
const float Detector_Ymin = -Detector_pixel_x*(float(R)/2.0f - 0.5f) + Offset;
const float Detector_Zmin = -Detector_pixel_x*(float(Z_prj)/2.0f - 0.5f);
const float PI = 3.141592653589793f;
// acquisition parameters
const int Nviews = 220;
const float us_rate = 1.00f;
const float initialAngle= 0.00f ;
const float shiftAngle= 0.0f;
const float MAX_infi = 1e16;
const int DenoiseOption = 4;
#include "InitGPU.h"
#include "kernel_tool_functions.cu"
#include "host_tool_functions.cu"
/* If you want to use the code in which the backprojection is implemented in pixel-driven,
* please uncomment the follwing two files and comment out the counterparts
*/
// #include "pixel_driven_backprj/kernel_IterativeRecon_CBCT.cu"
// #include "pixel_driven_backprj/host_IterativeRecon_CBCT.c"
// #if FBCT==1
// #include "kernel_IterativeRecon_FBCT.cu"
// #else
// #include "kernel_IterativeRecon_CBCT.cu"
// #endif
#include "kernel_IterativeRecon_universal.cu" //This version intergrate both CBCT and FBCT;
#include "kernel_IterativeRecon_universal_multiGPU_v2.cu" // Always be inlcuded
// #include "host_IterativeRecon_CBCT.c"
#include "host_IterativeRecon_CBCT_multiGPU_v2.c"
#include "host_FGP_Denoise_CPU.h"
#include "kernel_FGP_Denoise_GPUx4.cu"
#include "host_FPG_Denoise_GPUx4.c"
#include "kernel_FGP_Denoise_GPUx7.cu"
#include "host_FGP_Denoise_GPUx7.cu"
// ---------------------------------------------------------------------------
// Driver for iterative CBCT reconstruction: SART updates, optional TV
// regularization (several FGP/GP denoisers) and optional FISTA acceleration,
// multi-GPU HIP build. Relies on the project headers included above for
// InitCUDA, Forward_3D_ray_driven_siddon, Reconstruction_3D_ray_driven_CBCT,
// the denoisers, L2_norm_gpu and TV_norm_gpu.
// Bug fix: the original declared `main` without a return type (implicit
// int), which is ill-formed in standard C++; the explicit `int` is the only
// code change -- everything else is preserved.
// ---------------------------------------------------------------------------
int main(int argc, char ** argv)
{
    // print CUDA information
    if (!InitCUDA())
    {
        return 0;
    }

    /* ************* User defined parameters ************/
    char directory[]="/home/huifeng/CUDA_multiGPU/CBCT/";
    char objectName[]="SLPhantom2";
    char outputFolder[]="/Recon_Phantom_512/";
    int Niter_denoise = 20; //iter number for denoising problem

    /*********** other declared variables ************/
    float step_size = 2.0f/Lip_con;
    float lambda_denoise_TV = 2.0f*lambda_TV/Lip_con;
    double data_fidelity = 0.0f;
    double tv_value = 0.0f;
    double object_function_value_xk;
    // per-iteration record: [tv_value, data_fidelity, objective] triplets
    double *object_function_array = new double [Iter_num*3];
    bzero(object_function_array, sizeof(double)*Iter_num*3);
    float t_k;
    float t_k_1=1.0f;
    FILE *fp;
    char filename[200];
    char fn[200];
    float endAngle = initialAngle + (Nviews - 1)*us_rate;

    /**************** CPU memory allocation *****************/
    // for 3D reconstructed volume
    float *F_Y_k = new float [M*N*ZETA]; // Y(k)
    bzero(F_Y_k, sizeof(float)*M*N*ZETA);
    float *F_X_k_1 = new float [M*N*ZETA]; // X(k-1)
    bzero(F_X_k_1, sizeof(float)*M*N*ZETA);
    float *F_recon;
    // host pinned memory: needed for fast/async host<->device transfers
    checkCuda( hipHostMalloc((void**)&F_recon, sizeof(float)*M*N*ZETA, hipHostMallocDefault) );
    // for 2D projection dataset
    float *h_proj_forward = new float [R*Z_prj*Nviews];
    bzero(h_proj_forward, sizeof(float)*R*Z_prj*Nviews);
    float *h_proj_measured = new float [R*Z_prj*Nviews];
    bzero(h_proj_measured, sizeof(float)*R*Z_prj*Nviews);

    /**************** GPU memory allocation *****************/
    size_t d_proj_data_size = sizeof(float)*R*Z_prj*Nviews;
    size_t d_volume_size = sizeof(float)*M*N*ZETA;
    // allocate GPU memory for the whole measurement data
    float *d_proj_data = NULL;
    hipMalloc((void**)&d_proj_data, d_proj_data_size);
    hipMemcpy(d_proj_data, h_proj_measured, d_proj_data_size, hipMemcpyHostToDevice);
    // allocate GPU memory for the recon volume
    float *d_recon = NULL;
    hipMalloc((void**)&d_recon, d_volume_size);
    hipMemset(d_recon, 0, d_volume_size);

    /********** Read Projections **************/
    // (disabled: measured-projection loading; this build generates
    //  projections from a phantom instead -- see "Inverse Crime study")
    // printf("Read projection files ...\n");
    //
    // for (int j=0;j<Nviews;j++)
    // {
    // fileAngle = float(j*us_rate + initialAngle);
    // if ((CT_TOMO == 1) && (j>=(Nviews/2)))
    // {
    // fileAngle = 180+ (j-Nviews/2)*us_rate + initialAngle;
    // }
    // if (fileAngle < 0)
    // fileAngle = fileAngle + 360;
    //
    // strcpy(filename,directory);
    // sprintf(fn,"/AnalyticalForwardProjection/CBCT_spheres_Projections/phi_%.02f.proj", fileAngle);
    // strcat(filename,fn);
    // cout<<fn<<endl;
    // if ( (fp = fopen(filename,"rb")) == NULL )
    // {
    // printf("can not open projection files for main function \n");
    // printf("%s\n",filename);
    // exit(0);
    // }
    // // fseek(fp,sizeof(float)*R*(int(2048/2-Z_prj/2)),0); // If you want to read part of the projections
    // fread(h_proj_measured + j*Z_prj*R, sizeof(float)*Z_prj*R,1,fp);
    // fclose(fp);
    // }

    /********** Inverse Crime study **************/
    // load volumetric phantom, then forward-project it to synthesize the
    // "measured" data (inverse-crime setup: same model for data and recon)
    strcpy(filename,directory);
    sprintf(fn,"SLphantom3d_512.dat");
    strcat(filename,fn);
    cout<<"Loading "<<fn<<endl;
    if ( (fp = fopen(filename,"rb")) == NULL )
    {
    printf("Can not load volumetric image \n");
    printf("%s\n",filename);
    goto endProgram;
    }
    fread(F_recon, sizeof(float)*M*N*ZETA,1,fp);
    fclose(fp);
    cout<<"Load Phantom Sucessfully!"<<endl;
    Forward_3D_ray_driven_siddon(F_recon, d_proj_data);
    // SaveDeviceDataToFile(d_proj_data,R*Z_prj*Nviews,"../GeneratedProjection.dat");

    /********** Load initial guess **************/
    // strcpy(filename,directory);
    // sprintf(fn,"ReconTemp.recon");
    // strcat(filename,fn);
    // cout<<"Loading "<<fn<<endl;
    // if ( (fp = fopen(filename,"rb")) == NULL )
    // {
    // printf("Can not load volumetric image \n");
    // printf("%s\n",filename);
    // exit(0);
    // }
    // fread(F_recon, sizeof(float)*M*N*ZETA,1,fp);
    // fclose(fp);
    // cout<<"Load Initial Guess Sucessfully!"<<endl;

    /****************Iteration Reconstruction**************************/
    //Set Timer 1
    struct timeval t1,t2;
    gettimeofday(&t1,NULL);
    for (int k=1;k<=Iter_num;k++)
    {
    // if (FISTA==-2) // "occupancy calculator", check the best execution configuration. Refer to the program guide
    // {
    // int numBlocks; // Occupancy in terms of active blocks
    // int blockSize = 128;
    // int activeWarps;
    // int maxWarps;
    //
    // hipDeviceProp_t prop;
    // hipGetDeviceProperties(&prop, Default_GPU);
    //
    // hipOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocks,backprj_ray_driven_3d_kernel,blockSize,0);
    // activeWarps = numBlocks * blockSize / prop.warpSize;
    // maxWarps = prop.maxThreadsPerMultiProcessor / prop.warpSize;
    // std::cout << "Occupancy: " << (double)activeWarps / maxWarps * 100 << "%" << std::endl;
    // std::cout << "MaxActiveBlocksPerMultiprocessor: " << numBlocks << std::endl;
    // goto endProgram;
    // }

    if (FISTA==-1)
    {
    /*Note: You need to first uncomment the phantom loading code to initialize a valid F_recon*/
    CheckMatchedJointOperator(F_recon);
    goto endProgram;
    }

    if (FISTA==0)
    {
    // plain SART step, optionally followed by a TV denoise of x(k)
    printf("Undergoing SART updating... relaxation = %f\n", step_size);
    Reconstruction_3D_ray_driven_CBCT(F_recon, d_proj_data, step_size);
    if (lambda_TV>0.0f)
    {
    printf("Undergoing TV regularization ...\n");
    hipMemcpy(d_recon, F_recon, d_volume_size, hipMemcpyHostToDevice);
    switch(DenoiseOption) // Denoise options
    {
    case 1 : FGP_denoise_GPUx7_exact(d_recon, lambda_denoise_TV, Niter_denoise);
    break; // require x7 volume size, fast
    case 2 : FGP_denoise_GPUx4_exact(d_recon, lambda_denoise_TV, Niter_denoise);
    break; // require x4 volume size, slowest
    case 3 : FGP_denoise_GPUx4_apprx(d_recon, lambda_denoise_TV, Niter_denoise);
    break; // require x4 volume size, slower
    case 4 : GP_denoise_GPUx4_fast(d_recon, lambda_denoise_TV, Niter_denoise);
    break; // require x4 volume size, fast, slow in convergence
    }
    hipMemcpy(F_recon, d_recon, d_volume_size, hipMemcpyDeviceToHost);
    std::cout<<"TV regularization finished.\n";
    }
    else
    hipMemcpyAsync(d_recon, F_recon, d_volume_size, hipMemcpyHostToDevice);
    }

    if (FISTA==1)
    {
    // FISTA: run the SART(+TV) step from y(k), then extrapolate
    printf("Undergoing SART updating... relaxation = %f\n", step_size);
    memcpy(F_recon, F_Y_k, d_volume_size);
    Reconstruction_3D_ray_driven_CBCT(F_recon, d_proj_data, step_size);
    if (lambda_TV>0.0f)
    {
    hipMemcpy(d_recon, F_recon, d_volume_size, hipMemcpyHostToDevice);
    printf("Undergoing TV regularization ...\n");
    switch(DenoiseOption) // Denoise options
    {
    case 1 : FGP_denoise_GPUx7_exact(d_recon, lambda_denoise_TV, Niter_denoise);
    break; // require x7 volume size, fast
    case 2 : FGP_denoise_GPUx4_exact(d_recon, lambda_denoise_TV, Niter_denoise);
    break; // require x4 volume size, slowest
    case 3 : FGP_denoise_GPUx4_apprx(d_recon, lambda_denoise_TV, Niter_denoise);
    break; // require x4 volume size, slower
    case 4 : GP_denoise_GPUx4_fast(d_recon, lambda_denoise_TV, Niter_denoise);
    break; // require x4 volume size, fast, slow in convergence
    }
    hipMemcpy(F_recon, d_recon, d_volume_size, hipMemcpyDeviceToHost);
    std::cout<<"TV regularization finished.\n";
    }
    else
    hipMemcpyAsync(d_recon, F_recon, d_volume_size, hipMemcpyHostToDevice);

    t_k = (1.0f + sqrt(1.0f + 4.0f*t_k_1*t_k_1) )/2.0f;
    // Note: t(k) = [1+sqrt(1+4*t(k-1)^2)]/2
    for (int i=0;i<NO_VOXEL;i++)
    F_Y_k[i] = F_recon[i] + (t_k_1 -1.0f)/t_k * (F_recon[i] - F_X_k_1[i]);
    // Note: y(k) = x(k) + [t(k-1) -1]/t(k) * [x(k)-x(k-1)]
    t_k_1 = t_k;
    // Note: Update t(k-1): t(k-1) <- t(k)
    memcpy(F_X_k_1,F_recon,sizeof(float)*M*N*ZETA);
    // Note: Update x(k-1): x(k-1) <- x(k)
    }

    /*****************Calculating Obj Func Value ********************/
    std::cout<<"Calculating Object Func Value ...\n";
    //Note: object function value || Ax - b ||_2 + 2*lambda_TV*||f||_tvnorm + lambda_L1*||\phi f ||_L1 ;

    /*** data fidelity ****/
    // NOTE(review): this scratch buffer is allocated and freed every
    // iteration; hoisting it out of the loop would avoid repeated
    // hipMalloc/hipFree overhead (left as-is to keep behavior identical).
    std::cout<<" - calculating data fidelity ... \n";
    float *d_proj_forward = NULL;
    hipMalloc((void**)&d_proj_forward, d_proj_data_size);
    hipMemset(d_proj_forward, 0, d_proj_data_size);
    Forward_3D_ray_driven_siddon(F_recon, d_proj_forward);
    data_fidelity = L2_norm_gpu(d_proj_forward, d_proj_data);
    std::cout<<" * L2 Norm="<<data_fidelity<<endl;
    hipFree(d_proj_forward);

    /*** TV norm ****/
    std::cout<<" - calculating TV norm ... \n";
    tv_value = TV_norm_gpu(d_recon);
    std::cout<<" * TV value="<<tv_value<<endl;

    /***** obj function ******/
    object_function_value_xk = data_fidelity + 2.0f*lambda_TV*tv_value;
    //Note: object_function_value_xk = data_fidelity + 2.0f*lambda_TV*tv_value + 1.0f*lambda_L1*l1_value;
    object_function_array[k*3-3] = tv_value;
    object_function_array[k*3-2] = data_fidelity;
    object_function_array[k*3-1] = object_function_value_xk;
    std::cout<<"Object function value for x(k) = "<< tv_value <<" + "<< data_fidelity <<" = "<<object_function_value_xk <<std::endl;

    /***************** Saving ********************/
    strcpy(filename,directory);
    sprintf(fn,"%s/%s_%d_%d_%d_%.0fum_new_view_%d_(%.0f,%.0f)_TV_%.2f_L1_%.2f_Lip_%.2f.recon",outputFolder, objectName, M,N,ZETA, volumn_x*1000000, Nviews, initialAngle, endAngle, lambda_TV, lambda_L1, Lip_con);
    strcat(filename,fn);
    if ( (fp = fopen(filename,"wb")) == NULL )
    {
    printf("can not open file to write the intermediate reconstructed image \n");
    printf("%s\n",filename);
    exit(0);
    }
    fwrite(F_recon,sizeof(float)*M*N*ZETA,1,fp);
    fclose(fp);

    strcpy(filename,directory);
    sprintf(fn,"%s/object_func_%s_view_%d_(%.0f,%.0f)_TV_%.2f_Lip_%.2f.bin",outputFolder, objectName, Nviews, initialAngle, endAngle, lambda_TV, Lip_con);
    strcat(filename,fn);
    if ( (fp = fopen(filename,"wb")) == NULL )
    {
    printf("can not open file to write the tv_value_file \n");
    printf("%s\n",filename);
    exit(0);
    }
    fwrite(object_function_array,sizeof(double)*k*3,1,fp);
    fclose(fp);

    if (k%5==1)
    {
    strcpy(filename,directory);
    sprintf(fn,"%s/%s_%d_%d_%d_%.0fum_iterative_%d_view_%d_(%.0f,%.0f)_TV_%.2f_L1_%.2f_Lip_%.2f.recon",outputFolder, objectName, M,N,ZETA, volumn_x*1000000, k, Nviews, initialAngle, endAngle, lambda_TV, lambda_L1, Lip_con);
    strcat(filename,fn);
    if ( (fp = fopen(filename,"wb")) == NULL )
    {
    printf("can not open file to write the reconstructed image \n");
    printf("%s\n",filename);
    exit(0);
    }
    fwrite(F_recon,sizeof(float)*M*N*ZETA,1,fp);
    fclose(fp);
    }
    // Note: F[i,j,k] = F [k*M*N+j*M+i]; i:row index; j:column index; k:layer index
    std::cout<<"Have done "<< k <<" iteration(s)"<<std::endl<<endl;
    }

    // End timer
    gettimeofday(&t2,NULL);
    printf("Whole computing (gettimeofday): %f (s)\n\n\n", (t2.tv_sec-t1.tv_sec + (t2.tv_usec-t1.tv_usec)/1000000.0));

    // common exit path (also reached by goto on load failure): release all
    // device, pinned-host and heap allocations before returning
    endProgram: ;
    hipFree(d_proj_data);
    hipFree(d_recon);
    hipHostFree(F_recon);
    delete []F_Y_k;
    delete []F_X_k_1;
    delete []h_proj_forward;
    delete []h_proj_measured;
    delete []object_function_array;

    return 0;
}
| f62811c78829fdc8fc96050c30b6eeccf12c6c82.cu | /* Before you use this version, double check the GPU memory capacity,
* Typically, we need GPU to able to take the size of proj_data_size*2 + volume_size*8.
* Otherwise you have to choose old versions, or try to modify this version to use CPU computing/storage as much as possible
**/
#include <cmath>
#include <fstream>
#include <time.h>
#include <iostream>
#include <stddef.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <assert.h>
#define Default_GPU 0
#define Number_of_Devices 2 // if it is an odd, please modify your volume and projection size
#define FBCT 0 // 0: CBCT; 1: multiple layer FBCT
#define DEBUG 1
using namespace std;
// Mode selection
const int FISTA = 1;
// -2: execution configuration test
// -1: adjoint operator check
// 0: SART + TV
// 1: SART + TV + FISTA
const float lambda_TV = 0.00f; //regularization parameter for the tv norm
const float lambda_L1 = 0.00f; //regularization parameter for the l1 norm
const int Iter_num = 1;
const float Lip_con = 32.0f;
// Distances
const float DSO = 1.0f;
const float DOD = -1.0f;
// Reconstructed volume properties
const int M = 512; // reconstruction volume x range
const int N = 512; // reconstruction volume y range
const int ZETA = 512; // reconstruction volume z range
const int NO_X = M;
const int NO_Y = N;
const int NO_Z = ZETA;
const int NO_VOXEL = M*N*ZETA;
const float volumn_x = 1e-4 ; // (in meter)
const float inv_volumn_x = 1.0/volumn_x;
const int M_Offset = 0; // for half detector use
const float boundary_voxel_x = -volumn_x*(float(M)/2.0f+M_Offset);
const float volumn_y = volumn_x ;
const float inv_volumn_y = 1.0/volumn_y;
const float boundary_voxel_y = -volumn_y*(float(N)/2.0f);
const float volumn_z = volumn_x ;
const float inv_volumn_z = 1.0/volumn_z;
const float boundary_voxel_z = -volumn_z*(float(ZETA)/2.0f);
// parameters for half detector offset
const float Offset = 0;
// Source properties
const float Source_x = DSO;
const float Source_y = Offset;
const float Source_z = 0;
// Projection properties
const int R = 1024; // detector width
const int Z_prj = 1024; // detector height
// Note: for FBCT, Z_prj = ZETA
const float Detector_pixel_x = 1.2e-4;
const float Detector_Ymin = -Detector_pixel_x*(float(R)/2.0f - 0.5f) + Offset;
const float Detector_Zmin = -Detector_pixel_x*(float(Z_prj)/2.0f - 0.5f);
const float PI = 3.141592653589793f;
// acquisition parameters
const int Nviews = 220;
const float us_rate = 1.00f;
const float initialAngle= 0.00f ;
const float shiftAngle= 0.0f;
const float MAX_infi = 1e16;
const int DenoiseOption = 4;
#include "InitGPU.h"
#include "kernel_tool_functions.cu"
#include "host_tool_functions.cu"
/* If you want to use the code in which the backprojection is implemented in pixel-driven,
* please uncomment the follwing two files and comment out the counterparts
*/
// #include "pixel_driven_backprj/kernel_IterativeRecon_CBCT.cu"
// #include "pixel_driven_backprj/host_IterativeRecon_CBCT.c"
// #if FBCT==1
// #include "kernel_IterativeRecon_FBCT.cu"
// #else
// #include "kernel_IterativeRecon_CBCT.cu"
// #endif
#include "kernel_IterativeRecon_universal.cu" //This version intergrate both CBCT and FBCT;
#include "kernel_IterativeRecon_universal_multiGPU_v2.cu" // Always be inlcuded
// #include "host_IterativeRecon_CBCT.c"
#include "host_IterativeRecon_CBCT_multiGPU_v2.c"
#include "host_FGP_Denoise_CPU.h"
#include "kernel_FGP_Denoise_GPUx4.cu"
#include "host_FPG_Denoise_GPUx4.c"
#include "kernel_FGP_Denoise_GPUx7.cu"
#include "host_FGP_Denoise_GPUx7.cu"
main(int argc, char ** argv)
{
// print CUDA information
if (!InitCUDA())
{
return 0;
}
/* ************* User defined parameters ************/
char directory[]="/home/huifeng/CUDA_multiGPU/CBCT/";
char objectName[]="SLPhantom2";
char outputFolder[]="/Recon_Phantom_512/";
int Niter_denoise = 20; //iter number for denoising problem
/*********** other declared variables ************/
float step_size = 2.0f/Lip_con;
float lambda_denoise_TV = 2.0f*lambda_TV/Lip_con;
double data_fidelity = 0.0f;
double tv_value = 0.0f;
double object_function_value_xk;
double *object_function_array = new double [Iter_num*3];
bzero(object_function_array, sizeof(double)*Iter_num*3);
float t_k;
float t_k_1=1.0f;
FILE *fp;
char filename[200];
char fn[200];
float endAngle = initialAngle + (Nviews - 1)*us_rate;
/**************** CPU memory allocation *****************/
// for 3D reconstructed volume
float *F_Y_k = new float [M*N*ZETA]; // Y(k)
bzero(F_Y_k, sizeof(float)*M*N*ZETA);
float *F_X_k_1 = new float [M*N*ZETA]; // X(k-1)
bzero(F_X_k_1, sizeof(float)*M*N*ZETA);
float *F_recon;
checkCuda( cudaHostAlloc((void**)&F_recon, sizeof(float)*M*N*ZETA, cudaHostAllocDefault) ); // host pinned memory
// for 2D projection dataset
float *h_proj_forward = new float [R*Z_prj*Nviews];
bzero(h_proj_forward, sizeof(float)*R*Z_prj*Nviews);
float *h_proj_measured = new float [R*Z_prj*Nviews];
bzero(h_proj_measured, sizeof(float)*R*Z_prj*Nviews);
/**************** GPU memory allocation *****************/
size_t d_proj_data_size = sizeof(float)*R*Z_prj*Nviews;
size_t d_volume_size = sizeof(float)*M*N*ZETA;
// allocate GPU memory for the whole measurement data
float *d_proj_data = NULL;
cudaMalloc((void**)&d_proj_data, d_proj_data_size);
cudaMemcpy(d_proj_data, h_proj_measured, d_proj_data_size, cudaMemcpyHostToDevice);
// allocate GPU memory for the recon volume
float *d_recon = NULL;
cudaMalloc((void**)&d_recon, d_volume_size);
cudaMemset(d_recon, 0, d_volume_size);
/********** Read Projections **************/
// printf("Read projection files ...\n");
//
// for (int j=0;j<Nviews;j++)
// {
// fileAngle = float(j*us_rate + initialAngle);
// if ((CT_TOMO == 1) && (j>=(Nviews/2)))
// {
// fileAngle = 180+ (j-Nviews/2)*us_rate + initialAngle;
// }
// if (fileAngle < 0)
// fileAngle = fileAngle + 360;
//
// strcpy(filename,directory);
// sprintf(fn,"/AnalyticalForwardProjection/CBCT_spheres_Projections/phi_%.02f.proj", fileAngle);
// strcat(filename,fn);
// cout<<fn<<endl;
// if ( (fp = fopen(filename,"rb")) == NULL )
// {
// printf("can not open projection files for main function \n");
// printf("%s\n",filename);
// exit(0);
// }
// // fseek(fp,sizeof(float)*R*(int(2048/2-Z_prj/2)),0); // If you want to read part of the projections
// fread(h_proj_measured + j*Z_prj*R, sizeof(float)*Z_prj*R,1,fp);
// fclose(fp);
// }
/********** Inverse Crime study **************/
// load volumetric image
strcpy(filename,directory);
sprintf(fn,"SLphantom3d_512.dat");
strcat(filename,fn);
cout<<"Loading "<<fn<<endl;
if ( (fp = fopen(filename,"rb")) == NULL )
{
printf("Can not load volumetric image \n");
printf("%s\n",filename);
goto endProgram;
}
fread(F_recon, sizeof(float)*M*N*ZETA,1,fp);
fclose(fp);
cout<<"Load Phantom Sucessfully!"<<endl;
Forward_3D_ray_driven_siddon(F_recon, d_proj_data);
// SaveDeviceDataToFile(d_proj_data,R*Z_prj*Nviews,"../GeneratedProjection.dat");
/********** Load initial guess **************/
// strcpy(filename,directory);
// sprintf(fn,"ReconTemp.recon");
// strcat(filename,fn);
// cout<<"Loading "<<fn<<endl;
// if ( (fp = fopen(filename,"rb")) == NULL )
// {
// printf("Can not load volumetric image \n");
// printf("%s\n",filename);
// exit(0);
// }
// fread(F_recon, sizeof(float)*M*N*ZETA,1,fp);
// fclose(fp);
// cout<<"Load Initial Guess Sucessfully!"<<endl;
/****************Iteration Reconstruction**************************/
//Set Timer 1
struct timeval t1,t2;
gettimeofday(&t1,NULL);
for (int k=1;k<=Iter_num;k++)
{
// if (FISTA==-2) // "occupancy calculator", check the best execution configuration. Refer to the program guide
// {
// int numBlocks; // Occupancy in terms of active blocks
// int blockSize = 128;
// int activeWarps;
// int maxWarps;
//
// cudaDeviceProp prop;
// cudaGetDeviceProperties(&prop, Default_GPU);
//
// cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocks,backprj_ray_driven_3d_kernel,blockSize,0);
// activeWarps = numBlocks * blockSize / prop.warpSize;
// maxWarps = prop.maxThreadsPerMultiProcessor / prop.warpSize;
// std::cout << "Occupancy: " << (double)activeWarps / maxWarps * 100 << "%" << std::endl;
// std::cout << "MaxActiveBlocksPerMultiprocessor: " << numBlocks << std::endl;
// goto endProgram;
// }
if (FISTA==-1)
{
/*Note: You need to first uncomment the phantom loading code to initialize a valid F_recon*/
CheckMatchedJointOperator(F_recon);
goto endProgram;
}
if (FISTA==0)
{
printf("Undergoing SART updating... relaxation = %f\n", step_size);
Reconstruction_3D_ray_driven_CBCT(F_recon, d_proj_data, step_size);
if (lambda_TV>0.0f)
{
printf("Undergoing TV regularization ...\n");
cudaMemcpy(d_recon, F_recon, d_volume_size, cudaMemcpyHostToDevice);
switch(DenoiseOption) // Denoise options
{
case 1 : FGP_denoise_GPUx7_exact(d_recon, lambda_denoise_TV, Niter_denoise);
break; // require x7 volume size, fast
case 2 : FGP_denoise_GPUx4_exact(d_recon, lambda_denoise_TV, Niter_denoise);
break; // require x4 volume size, slowest
case 3 : FGP_denoise_GPUx4_apprx(d_recon, lambda_denoise_TV, Niter_denoise);
break; // require x4 volume size, slower
case 4 : GP_denoise_GPUx4_fast(d_recon, lambda_denoise_TV, Niter_denoise);
break; // require x4 volume size, fast, slow in convergence
}
cudaMemcpy(F_recon, d_recon, d_volume_size, cudaMemcpyDeviceToHost);
std::cout<<"TV regularization finished.\n";
}
else
cudaMemcpyAsync(d_recon, F_recon, d_volume_size, cudaMemcpyHostToDevice);
}
if (FISTA==1)
{
printf("Undergoing SART updating... relaxation = %f\n", step_size);
memcpy(F_recon, F_Y_k, d_volume_size);
Reconstruction_3D_ray_driven_CBCT(F_recon, d_proj_data, step_size);
if (lambda_TV>0.0f)
{
cudaMemcpy(d_recon, F_recon, d_volume_size, cudaMemcpyHostToDevice);
printf("Undergoing TV regularization ...\n");
switch(DenoiseOption) // Denoise options
{
case 1 : FGP_denoise_GPUx7_exact(d_recon, lambda_denoise_TV, Niter_denoise);
break; // require x7 volume size, fast
case 2 : FGP_denoise_GPUx4_exact(d_recon, lambda_denoise_TV, Niter_denoise);
break; // require x4 volume size, slowest
case 3 : FGP_denoise_GPUx4_apprx(d_recon, lambda_denoise_TV, Niter_denoise);
break; // require x4 volume size, slower
case 4 : GP_denoise_GPUx4_fast(d_recon, lambda_denoise_TV, Niter_denoise);
break; // require x4 volume size, fast, slow in convergence
}
cudaMemcpy(F_recon, d_recon, d_volume_size, cudaMemcpyDeviceToHost);
std::cout<<"TV regularization finished.\n";
}
else
cudaMemcpyAsync(d_recon, F_recon, d_volume_size, cudaMemcpyHostToDevice);
t_k = (1.0f + sqrt(1.0f + 4.0f*t_k_1*t_k_1) )/2.0f;
// Note: t(k) = [1+sqrt(1+4*t(k-1)^2)]/2
for (int i=0;i<NO_VOXEL;i++)
F_Y_k[i] = F_recon[i] + (t_k_1 -1.0f)/t_k * (F_recon[i] - F_X_k_1[i]);
// Note: y(k) = x(k) + [t(k-1) -1]/t(k) * [x(k)-x(k-1)]
t_k_1 = t_k;
// Note: Update t(k-1): t(k-1) <- t(k)
memcpy(F_X_k_1,F_recon,sizeof(float)*M*N*ZETA);
// Note: Update x(k-1): x(k-1) <- x(k)
}
/*****************Calculating Obj Func Value ********************/
std::cout<<"Calculating Object Func Value ...\n";
//Note: object function value || Ax - b ||_2 + 2*lambda_TV*||f||_tvnorm + lambda_L1*||\phi f ||_L1 ;
/*** data fidelity ****/
std::cout<<" - calculating data fidelity ... \n";
float *d_proj_forward = NULL;
cudaMalloc((void**)&d_proj_forward, d_proj_data_size);
cudaMemset(d_proj_forward, 0, d_proj_data_size);
Forward_3D_ray_driven_siddon(F_recon, d_proj_forward);
data_fidelity = L2_norm_gpu(d_proj_forward, d_proj_data);
std::cout<<" * L2 Norm="<<data_fidelity<<endl;
cudaFree(d_proj_forward);
/*** TV norm ****/
std::cout<<" - calculating TV norm ... \n";
tv_value = TV_norm_gpu(d_recon);
std::cout<<" * TV value="<<tv_value<<endl;
/***** obj function ******/
object_function_value_xk = data_fidelity + 2.0f*lambda_TV*tv_value;
//Note: object_function_value_xk = data_fidelity + 2.0f*lambda_TV*tv_value + 1.0f*lambda_L1*l1_value;
object_function_array[k*3-3] = tv_value;
object_function_array[k*3-2] = data_fidelity;
object_function_array[k*3-1] = object_function_value_xk;
std::cout<<"Object function value for x(k) = "<< tv_value <<" + "<< data_fidelity <<" = "<<object_function_value_xk <<std::endl;
/***************** Saving ********************/
strcpy(filename,directory);
sprintf(fn,"%s/%s_%d_%d_%d_%.0fum_new_view_%d_(%.0f,%.0f)_TV_%.2f_L1_%.2f_Lip_%.2f.recon",outputFolder, objectName, M,N,ZETA, volumn_x*1000000, Nviews, initialAngle, endAngle, lambda_TV, lambda_L1, Lip_con);
strcat(filename,fn);
if ( (fp = fopen(filename,"wb")) == NULL )
{
printf("can not open file to write the intermediate reconstructed image \n");
printf("%s\n",filename);
exit(0);
}
fwrite(F_recon,sizeof(float)*M*N*ZETA,1,fp);
fclose(fp);
strcpy(filename,directory);
sprintf(fn,"%s/object_func_%s_view_%d_(%.0f,%.0f)_TV_%.2f_Lip_%.2f.bin",outputFolder, objectName, Nviews, initialAngle, endAngle, lambda_TV, Lip_con);
strcat(filename,fn);
if ( (fp = fopen(filename,"wb")) == NULL )
{
printf("can not open file to write the tv_value_file \n");
printf("%s\n",filename);
exit(0);
}
fwrite(object_function_array,sizeof(double)*k*3,1,fp);
fclose(fp);
if (k%5==1)
{
strcpy(filename,directory);
sprintf(fn,"%s/%s_%d_%d_%d_%.0fum_iterative_%d_view_%d_(%.0f,%.0f)_TV_%.2f_L1_%.2f_Lip_%.2f.recon",outputFolder, objectName, M,N,ZETA, volumn_x*1000000, k, Nviews, initialAngle, endAngle, lambda_TV, lambda_L1, Lip_con);
strcat(filename,fn);
if ( (fp = fopen(filename,"wb")) == NULL )
{
printf("can not open file to write the reconstructed image \n");
printf("%s\n",filename);
exit(0);
}
fwrite(F_recon,sizeof(float)*M*N*ZETA,1,fp);
fclose(fp);
}
// Note: F[i,j,k] = F [k*M*N+j*M+i]; i:row index; j:column index; k:layer index
std::cout<<"Have done "<< k <<" iteration(s)"<<std::endl<<endl;
}
// End timer
gettimeofday(&t2,NULL);
printf("Whole computing (gettimeofday): %f (s)\n\n\n", (t2.tv_sec-t1.tv_sec + (t2.tv_usec-t1.tv_usec)/1000000.0));
endProgram: ;
cudaFree(d_proj_data);
cudaFree(d_recon);
cudaFreeHost(F_recon);
delete []F_Y_k;
delete []F_X_k_1;
delete []h_proj_forward;
delete []h_proj_measured;
delete []object_function_array;
return 0;
}
|
f3ef940dd37ebbf3d67dc8441f352dca470a5e26.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* gradientclass.cpp
*
* Created on: Mar 4, 2014
* Author: p054
*/
#include "gradientclass.h"
gradient_class::gradient_class(float *imgIn, float *imgOut, int w, int h,
int nc, int nc_out) {
// TODO Auto-generated constructor stub
hipMalloc(&dev_imgIn, (size_t) w * h * nc);
hipMalloc(&dev_imgOut, (size_t) w * h * nc_out);
hipMalloc(&dx_imgIn, (size_t) w * h * nc);
hipMalloc(&dy_imgIn, (size_t) w * h * nc);
this->w = w;
this->h = h;
this->nc = nc;
this->nc_out = nc_out;
hipMemcpy(dev_imgIn, imgIn, (size_t) w * h * nc, hipMemcpyHostToDevice);
}
gradient_class::~gradient_class() {
// TODO Auto-generated destructor stub
hipFree(dev_imgIn);
hipFree(dev_imgOut);
hipFree(dx_imgIn);
hipFree(dy_imgIn);
}
void gradient_class::create_thread_env(int x, int y, int z) {
block = dim3(x, y, z);
grid = dim3((w + block.x - 1) / block.x, (h + block.y - 1) / block.y, 1);
}
void gradient_class::get_gradient_norm( float * img_Out) {
hipLaunchKernelGGL(( get_norm), dim3(grid), dim3(block), 0, 0, dev_imgIn,dev_imgOut, dx_imgIn, dy_imgIn, w,h,nc);
hipMemcpy(img_Out, dev_imgOut, (size_t) w*h*nc_out, hipMemcpyDeviceToHost);
}
__global__ friend void get_norm(float *dev_imgIn, float *dev_imgOut,
float *dx_imgIn, float *dy_imgIn, int w, int h, int nc) {
// COnsidering 2D block and grid
int base_index = (gridDim.x * blockIdx.y * blockDim) + blockIdx.x * blockDim
+ threadIdx.y * blockDim.x + threadIdx.x;
dev_imgOut[base_index] = 0;
int current_index;
if (base_index < (w * h)) {
for (int i = 0; i < nc; i++) {
current_index = base_index + (size_t) w * h * i;
if ((blockIdx.x == gridDim.x - 1) && (threadIdx.x == blockDim.x - 1)) {
// Making it cyclic
dx_imgIn[current_index] = dev_imgIn[current_index - w + 1]
- dev_imgIn[current_index];
} else {
dx_imgIn[current_index] = dev_imgIn[current_index + 1]
- dev_imgIn[current_index];
}
if ((blockIdx.y == gridDim.y - 1) && (threadIdx.y == blockDim.y - 1)) {
dy_imgIn[current_index] = dev_imgIn[current_index - (h - 1) * w]
- dev_imgIn[current_index];
} else {
dy_imgIn[current_index] = dev_imgIn[current_index + w]
- dev_imgIn[current_index];
}
dev_imgOut[base_index] += ((dx_imgIn[current_index]
* dx_imgIn[current_index])
+ (dy_imgIn[current_index] * dy_imgIn[current_index]));
}
}
}
| f3ef940dd37ebbf3d67dc8441f352dca470a5e26.cu | /*
* gradientclass.cpp
*
* Created on: Mar 4, 2014
* Author: p054
*/
#include "gradientclass.h"
gradient_class::gradient_class(float *imgIn, float *imgOut, int w, int h,
int nc, int nc_out) {
// TODO Auto-generated constructor stub
cudaMalloc(&dev_imgIn, (size_t) w * h * nc);
cudaMalloc(&dev_imgOut, (size_t) w * h * nc_out);
cudaMalloc(&dx_imgIn, (size_t) w * h * nc);
cudaMalloc(&dy_imgIn, (size_t) w * h * nc);
this->w = w;
this->h = h;
this->nc = nc;
this->nc_out = nc_out;
cudaMemcpy(dev_imgIn, imgIn, (size_t) w * h * nc, cudaMemcpyHostToDevice);
}
gradient_class::~gradient_class() {
// TODO Auto-generated destructor stub
cudaFree(dev_imgIn);
cudaFree(dev_imgOut);
cudaFree(dx_imgIn);
cudaFree(dy_imgIn);
}
void gradient_class::create_thread_env(int x, int y, int z) {
block = dim3(x, y, z);
grid = dim3((w + block.x - 1) / block.x, (h + block.y - 1) / block.y, 1);
}
void gradient_class::get_gradient_norm( float * img_Out) {
get_norm<<<grid, block>>>(dev_imgIn,dev_imgOut, dx_imgIn, dy_imgIn, w,h,nc);
cudaMemcpy(img_Out, dev_imgOut, (size_t) w*h*nc_out, cudaMemcpyDeviceToHost);
}
__global__ friend void get_norm(float *dev_imgIn, float *dev_imgOut,
float *dx_imgIn, float *dy_imgIn, int w, int h, int nc) {
// COnsidering 2D block and grid
int base_index = (gridDim.x * blockIdx.y * blockDim) + blockIdx.x * blockDim
+ threadIdx.y * blockDim.x + threadIdx.x;
dev_imgOut[base_index] = 0;
int current_index;
if (base_index < (w * h)) {
for (int i = 0; i < nc; i++) {
current_index = base_index + (size_t) w * h * i;
if ((blockIdx.x == gridDim.x - 1) && (threadIdx.x == blockDim.x - 1)) {
// Making it cyclic
dx_imgIn[current_index] = dev_imgIn[current_index - w + 1]
- dev_imgIn[current_index];
} else {
dx_imgIn[current_index] = dev_imgIn[current_index + 1]
- dev_imgIn[current_index];
}
if ((blockIdx.y == gridDim.y - 1) && (threadIdx.y == blockDim.y - 1)) {
dy_imgIn[current_index] = dev_imgIn[current_index - (h - 1) * w]
- dev_imgIn[current_index];
} else {
dy_imgIn[current_index] = dev_imgIn[current_index + w]
- dev_imgIn[current_index];
}
dev_imgOut[base_index] += ((dx_imgIn[current_index]
* dx_imgIn[current_index])
+ (dy_imgIn[current_index] * dy_imgIn[current_index]));
}
}
}
|
b52678511814d58917790ddd43c7b875612b6acd.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/Pow.h>
namespace at { namespace native {
// Forward declare some unary kernels
void rsqrt_kernel_cuda(TensorIteratorBase& iter);
void sqrt_kernel_cuda(TensorIteratorBase& iter);
void reciprocal_kernel_cuda(TensorIteratorBase& iter);
namespace {
// SFINAE doesn't work well with NVCC under Windows for math functions like pow and sqrt.
// So we need to define the functions with the explicit function signatures.
// As for pow, the following signatures are defined as the device function:
// pow(float, int)
// pow(double, int)
// pow(float, float)
// pow(double, double)
#ifdef _MSC_VER
// Functions for pow
// pow for at::Half
static inline __host__ __device__ at::Half pow_(at::Half base, at::Half exp) {
return static_cast<at::Half>(::pow(static_cast<float>(base), static_cast<float>(exp)));
}
// pow for at::BFloat16
static inline __host__ __device__ at::BFloat16 pow_(at::BFloat16 base, at::BFloat16 exp) {
return static_cast<at::BFloat16>(::pow(static_cast<float>(base), static_cast<float>(exp)));
}
// pow (floating, floating/int)
template <typename Base_type, typename Exp_type>
static inline __host__ __device__ typename std::enable_if<std::is_floating_point<Base_type>::value && (std::is_same<Base_type, Exp_type>::value || std::is_same<Exp_type, int>::value), Base_type>::type
pow_(Base_type base, Exp_type exp) {
return ::pow(base, exp);
}
// pow (Otherwise)
template <typename Base_type, typename Exp_type>
static inline __host__ __device__ typename std::enable_if<!std::is_same<Base_type, Exp_type>::value && !std::is_same<Exp_type, int>::value, Base_type>::type
pow_(Base_type base, Exp_type exp) {
return static_cast<Base_type>(::pow(static_cast<double>(base), static_cast<double>(exp)));
}
#else
template <typename Base_type, typename Exp_type>
static inline __host__ __device__ Base_type pow_(Base_type base, Exp_type exp) {
return ::pow(base, exp);
}
#endif
template <typename T>
static inline __host__ __device__ std::enable_if_t<std::is_integral<T>::value, T> pow_(
T base, T exp) {
return at::native::powi(base, exp);
}
template <typename T>
static inline __host__ __device__ c10::complex<T> pow_(c10::complex<T> base, c10::complex<T> exp) {
return c10_complex_math::pow(base, exp);
}
void pow_tensor_scalar_kernel(TensorIteratorBase& iter, const Scalar& exp_scalar);
template <typename scalar_t>
void pow_scalar_tensor_impl(TensorIteratorBase& iter, scalar_t base) {
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t exp) -> scalar_t {
return pow_(base, exp);
});
}
template <typename value_t>
void pow_scalar_tensor_impl(TensorIteratorBase& iter, c10::complex<value_t> base) {
// For complex, thrust::pow uses the identity
// pow(a, b) = exp(log(a) * b)
const auto fct = ::log(base);
gpu_kernel(iter, [=]GPU_LAMBDA(c10::complex<value_t> exp) -> c10::complex<value_t> {
return ::exp(fct * exp);
});
}
void pow_tensor_tensor_kernel(TensorIteratorBase& iter) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
kHalf, kBFloat16, iter.common_dtype(), "pow_cuda", [&] {
if (iter.is_cpu_scalar(1)) {
const auto base = iter.scalar_value<scalar_t>(1);
iter.remove_operand(1);
pow_scalar_tensor_impl(iter, base);
} else if (iter.is_cpu_scalar(2)) {
const auto exp = iter.scalar_value<scalar_t>(2);
iter.remove_operand(2);
pow_tensor_scalar_kernel(iter, exp);
} else {
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t base, scalar_t exp) -> scalar_t {
return pow_(base, exp);
});
}
});
}
template<typename Base_type, typename Exp_type>
void pow_tensor_scalar_kernel_impl(TensorIteratorBase& iter,
Exp_type exp) {
const auto d_exp = static_cast<double>(exp);
// .5 (sqrt), -.5 (rsqrt) and -1 (reciprocal) specializations are handled
// in pow_tensor_scalar_kernel
if (d_exp == 2) {
gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type {
return base * base;
});
} else if (d_exp == 3) {
gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type {
return base * base * base;
});
} else if (d_exp == -2) {
gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type {
return 1.0 / (base * base);
});
} else {
gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type {
return pow_(base, exp);
});
}
}
void pow_tensor_scalar_kernel(TensorIteratorBase& iter, const Scalar& exp_scalar) {
// Dispatch to fast specialization for sqrt, rsqrt and reciprocal
if (!exp_scalar.isComplex()) {
if (exp_scalar.equal(.5)) {
return sqrt_kernel_cuda(iter);
} else if (exp_scalar.equal(-0.5)) {
return rsqrt_kernel_cuda(iter);
} else if (exp_scalar.equal(-1.0)) {
return reciprocal_kernel_cuda(iter);
}
}
if (isComplexType(iter.common_dtype()) || exp_scalar.isComplex()) {
AT_DISPATCH_COMPLEX_TYPES(iter.common_dtype(), "pow_cuda", [&]() {
const auto exp = exp_scalar.to<scalar_t>();
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t base) -> scalar_t {
return pow_(base, exp);
});
});
} else if (isFloatingType(iter.common_dtype()) || exp_scalar.isIntegral(false)) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "pow_cuda", [&]() {
const auto exp = exp_scalar.to<scalar_t>();
pow_tensor_scalar_kernel_impl<scalar_t>(iter, exp);
});
} else {
const auto exp = exp_scalar.to<float>();
AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), "pow_cuda", [&]() {
pow_tensor_scalar_kernel_impl<scalar_t>(iter, exp);
});
}
}
} // anonymous namespace
REGISTER_DISPATCH(pow_tensor_tensor_stub, &pow_tensor_tensor_kernel);
REGISTER_DISPATCH(pow_tensor_scalar_stub, &pow_tensor_scalar_kernel);
}} // namespace at::native
| b52678511814d58917790ddd43c7b875612b6acd.cu | #include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/Pow.h>
namespace at { namespace native {
// Forward declare some unary kernels
void rsqrt_kernel_cuda(TensorIteratorBase& iter);
void sqrt_kernel_cuda(TensorIteratorBase& iter);
void reciprocal_kernel_cuda(TensorIteratorBase& iter);
namespace {
// SFINAE doesn't work well with NVCC under Windows for math functions like pow and sqrt.
// So we need to define the functions with the explicit function signatures.
// As for pow, the following signatures are defined as the device function:
// pow(float, int)
// pow(double, int)
// pow(float, float)
// pow(double, double)
#ifdef _MSC_VER
// Functions for pow
// pow for at::Half
static inline __host__ __device__ at::Half pow_(at::Half base, at::Half exp) {
return static_cast<at::Half>(std::pow(static_cast<float>(base), static_cast<float>(exp)));
}
// pow for at::BFloat16
static inline __host__ __device__ at::BFloat16 pow_(at::BFloat16 base, at::BFloat16 exp) {
return static_cast<at::BFloat16>(std::pow(static_cast<float>(base), static_cast<float>(exp)));
}
// pow (floating, floating/int)
template <typename Base_type, typename Exp_type>
static inline __host__ __device__ typename std::enable_if<std::is_floating_point<Base_type>::value && (std::is_same<Base_type, Exp_type>::value || std::is_same<Exp_type, int>::value), Base_type>::type
pow_(Base_type base, Exp_type exp) {
return std::pow(base, exp);
}
// pow (Otherwise)
template <typename Base_type, typename Exp_type>
static inline __host__ __device__ typename std::enable_if<!std::is_same<Base_type, Exp_type>::value && !std::is_same<Exp_type, int>::value, Base_type>::type
pow_(Base_type base, Exp_type exp) {
return static_cast<Base_type>(std::pow(static_cast<double>(base), static_cast<double>(exp)));
}
#else
template <typename Base_type, typename Exp_type>
static inline __host__ __device__ Base_type pow_(Base_type base, Exp_type exp) {
return ::pow(base, exp);
}
#endif
template <typename T>
static inline __host__ __device__ std::enable_if_t<std::is_integral<T>::value, T> pow_(
T base, T exp) {
return at::native::powi(base, exp);
}
template <typename T>
static inline __host__ __device__ c10::complex<T> pow_(c10::complex<T> base, c10::complex<T> exp) {
return c10_complex_math::pow(base, exp);
}
void pow_tensor_scalar_kernel(TensorIteratorBase& iter, const Scalar& exp_scalar);
template <typename scalar_t>
void pow_scalar_tensor_impl(TensorIteratorBase& iter, scalar_t base) {
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t exp) -> scalar_t {
return pow_(base, exp);
});
}
template <typename value_t>
void pow_scalar_tensor_impl(TensorIteratorBase& iter, c10::complex<value_t> base) {
// For complex, thrust::pow uses the identity
// pow(a, b) = exp(log(a) * b)
const auto fct = std::log(base);
gpu_kernel(iter, [=]GPU_LAMBDA(c10::complex<value_t> exp) -> c10::complex<value_t> {
return std::exp(fct * exp);
});
}
void pow_tensor_tensor_kernel(TensorIteratorBase& iter) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(
kHalf, kBFloat16, iter.common_dtype(), "pow_cuda", [&] {
if (iter.is_cpu_scalar(1)) {
const auto base = iter.scalar_value<scalar_t>(1);
iter.remove_operand(1);
pow_scalar_tensor_impl(iter, base);
} else if (iter.is_cpu_scalar(2)) {
const auto exp = iter.scalar_value<scalar_t>(2);
iter.remove_operand(2);
pow_tensor_scalar_kernel(iter, exp);
} else {
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t base, scalar_t exp) -> scalar_t {
return pow_(base, exp);
});
}
});
}
template<typename Base_type, typename Exp_type>
void pow_tensor_scalar_kernel_impl(TensorIteratorBase& iter,
Exp_type exp) {
const auto d_exp = static_cast<double>(exp);
// .5 (sqrt), -.5 (rsqrt) and -1 (reciprocal) specializations are handled
// in pow_tensor_scalar_kernel
if (d_exp == 2) {
gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type {
return base * base;
});
} else if (d_exp == 3) {
gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type {
return base * base * base;
});
} else if (d_exp == -2) {
gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type {
return 1.0 / (base * base);
});
} else {
gpu_kernel(iter, [=]GPU_LAMBDA(Base_type base) -> Base_type {
return pow_(base, exp);
});
}
}
void pow_tensor_scalar_kernel(TensorIteratorBase& iter, const Scalar& exp_scalar) {
// Dispatch to fast specialization for sqrt, rsqrt and reciprocal
if (!exp_scalar.isComplex()) {
if (exp_scalar.equal(.5)) {
return sqrt_kernel_cuda(iter);
} else if (exp_scalar.equal(-0.5)) {
return rsqrt_kernel_cuda(iter);
} else if (exp_scalar.equal(-1.0)) {
return reciprocal_kernel_cuda(iter);
}
}
if (isComplexType(iter.common_dtype()) || exp_scalar.isComplex()) {
AT_DISPATCH_COMPLEX_TYPES(iter.common_dtype(), "pow_cuda", [&]() {
const auto exp = exp_scalar.to<scalar_t>();
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t base) -> scalar_t {
return pow_(base, exp);
});
});
} else if (isFloatingType(iter.common_dtype()) || exp_scalar.isIntegral(false)) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "pow_cuda", [&]() {
const auto exp = exp_scalar.to<scalar_t>();
pow_tensor_scalar_kernel_impl<scalar_t>(iter, exp);
});
} else {
const auto exp = exp_scalar.to<float>();
AT_DISPATCH_INTEGRAL_TYPES(iter.common_dtype(), "pow_cuda", [&]() {
pow_tensor_scalar_kernel_impl<scalar_t>(iter, exp);
});
}
}
} // anonymous namespace
REGISTER_DISPATCH(pow_tensor_tensor_stub, &pow_tensor_tensor_kernel);
REGISTER_DISPATCH(pow_tensor_scalar_stub, &pow_tensor_scalar_kernel);
}} // namespace at::native
|
59372389d53afdefda363f309fdeaa75df6d005e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
extern "C" {
#include "bmp.h"
}
typedef struct Color {
unsigned int r, g, b;
} Color;
#define THREADS 1024
void CheckCudaError(char sms[], int line) {
hipError_t error;
error = hipGetLastError();
if (error) {
printf("(ERROR) %s - %s in %s at line %d\n", sms, hipGetErrorString(error), __FILE__, line);
exit(EXIT_FAILURE);
}
}
int square(int value) {
return value * value;
}
void display_means(Color means[], int counts[], int N_colors) {
int i;
for (i = 0; i < N_colors; ++i) {
fprintf(stderr, "mean %d: ", i);
fprintf(stderr, "r: %d, ", means[i].r);
fprintf(stderr, "g: %d, ", means[i].g);
fprintf(stderr, "b: %d, ", means[i].b);
fprintf(stderr, "count: %d\n", counts[i]);
}
fprintf(stderr, "\n");
}
void display_assigns(int assigns[], int Size) {
int i;
for (i = 0; i < Size; ++i) {
fprintf(stderr, "%d: %d\n", i, assigns[i]);
}
}
void init_means(Color means[], unsigned char *im, int Size_row, int N_colors, int Size) {
int r;
int i;
for (i = 0; i < N_colors; ++i) {
r = rand() % Size;
int index = (r*3/Size_row) * Size_row + ((r*3)%Size_row);
means[i].r = im[index+2];
means[i].g = im[index+1];
means[i].b = im[index];
}
}
void find_best_mean_seq(Color means[], int assigns[], unsigned char *im, int N, int ncolors, int Size_row) {
int i;
for (i = 0; i < N; ++i) {
int j;
int index = (i*3/Size_row) * Size_row + ((i*3)%Size_row);
int dist_min = -1;
int dist_act, assign;
for (j = 0; j < ncolors; ++j) {
dist_act = (im[index+2] - means[j].r)*(im[index+2] - means[j].r) + (im[index+1] - means[j].g)*(im[index+1] - means[j].g) + (im[index] - means[j].b)*(im[index] - means[j].b);
if (dist_min == -1 || dist_act < dist_min) {
dist_min = dist_act;
assign = j;
}
}
assigns[i] = assign;
}
}
__global__ void find_best_mean_par(Color means[], int assigns[], unsigned char *im, int N, int ncolors, int Size_row) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < N) {
int j;
int index = (id*3/Size_row) * Size_row + ((id*3)%Size_row);
int dist_min = -1;
int dist_act, assign;
for (j = 0; j < ncolors; ++j) {
dist_act = (im[index+2] - means[j].r)*(im[index+2] - means[j].r) + (im[index+1] - means[j].g)*(im[index+1] - means[j].g) + (im[index] - means[j].b)*(im[index] - means[j].b);
if (dist_min == -1 || dist_act < dist_min) {
dist_min = dist_act;
assign = j;
}
}
assigns[id] = assign;
}
}
void divide_sums_by_counts_seq(Color means_host[], int N_colors, Color new_means[], int counts[]) {
int i;
for (i = 0; i < N_colors; ++i) {
//Turn 0/0 into 0/1 to avoid zero division.
if(counts[i] == 0) counts[i] = 1;
means_host[i].r = new_means[i].r / counts[i];
means_host[i].g = new_means[i].g / counts[i];
means_host[i].b = new_means[i].b / counts[i];
}
}
__global__ void divide_sums_by_counts_par(Color means_device[], int N_colors, Color new_means[], int counts[]) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < N_colors) {
//Turn 0/0 into 0/1 to avoid zero division.
if(counts[id] == 0) counts[id] = 1;
means_device[id].r = new_means[id].r / counts[id];
means_device[id].g = new_means[id].g / counts[id];
means_device[id].b = new_means[id].b / counts[id];
}
}
void sum_up_and_count_points_seq(Color new_means[], int assigns[], unsigned char *im, int counts[], int Size_row, int Size) {
int i;
for (i = 0; i < Size; ++i) {
int index = (i*3/Size_row) * Size_row + ((i*3)%Size_row);
int imeans = assigns[i];
new_means[imeans].r += im[index+2];
new_means[imeans].g += im[index+1];
new_means[imeans].b += im[index];
counts[imeans] += 1;
}
}
__global__ void matrix_reduction_color(Color new_means[], int assigns[], unsigned char *im, int Size_row, int Size, int N_colors, int offset) {
extern __shared__ unsigned int shared[];
unsigned int tid = threadIdx.x;
unsigned int id = blockIdx.x*(blockDim.x * 2) + threadIdx.x;
//init shared
for (int j = 0; j < N_colors; ++j) {
unsigned int aux = 0;
if (id < Size && j == assigns[id]) {
int index = (id*3/Size_row) * Size_row + ((id*3)%Size_row);
aux += im[index+offset];
}
if (id + blockDim.x < Size && j == assigns[id + blockDim.x]) {
int index = ((id + blockDim.x)*3/Size_row) * Size_row + (((id + blockDim.x)*3)%Size_row);
aux += im[index+offset];
}
shared[tid*N_colors + j] = aux;
}
__syncthreads();
//reduccio
unsigned int s;
for(s=blockDim.x/2; s>0; s>>=1) {
if (tid < s) {
for (int j = 0; j < N_colors; ++j) {
shared[tid*N_colors + j] += shared[(tid + s)*N_colors + j];
}
}
__syncthreads();
}
//copiar valors:
if (tid == 0) {
for (int j = 0; j < N_colors; ++j) {
if (offset == 2) new_means[blockIdx.x*N_colors + j].r = shared[j];
else if (offset == 1) new_means[blockIdx.x*N_colors + j].g = shared[j];
else new_means[blockIdx.x*N_colors + j].b = shared[j];
}
}
}
__global__ void matrix_reduction_color_2(Color new_means_2[], Color new_means[], int Size_row, int Size, int N_colors, int offset) {
extern __shared__ unsigned int shared[];
unsigned int tid = threadIdx.x;
unsigned int id = blockIdx.x*(blockDim.x * 2) + threadIdx.x;
//init shared
for (int j = 0; j < N_colors; ++j) {
if (offset == 2) shared[tid*N_colors + j] = new_means[id*N_colors + j].r + new_means[(id + blockDim.x) *N_colors + j].r;
else if (offset == 1) shared[tid*N_colors + j] = new_means[id*N_colors + j].g + new_means[(id + blockDim.x) * N_colors + j].g;
else shared[tid*N_colors + j] = new_means[id*N_colors + j].b + new_means[(id + blockDim.x) *N_colors + j].b;
}
__syncthreads();
//reduccio
unsigned int s;
for(s=blockDim.x/2; s>0; s>>=1) {
if (tid < s) {
for (int j = 0; j < N_colors; ++j) {
shared[tid*N_colors + j] += shared[(tid + s)*N_colors + j];
}
}
__syncthreads();
}
//copiar valors:
if (tid == 0) {
for (int j = 0; j < N_colors; ++j) {
if (offset == 2) new_means_2[blockIdx.x*N_colors + j].r = shared[j];
else if (offset == 1) new_means_2[blockIdx.x*N_colors + j].g = shared[j];
else new_means_2[blockIdx.x*N_colors + j].b = shared[j];
}
}
}
__global__ void matrix_reduction_count(int counts[], int assigns[], unsigned char *im, int Size_row, int Size, int N_colors) {
extern __shared__ unsigned int shared[];
unsigned int tid = threadIdx.x;
unsigned int id = blockIdx.x*(blockDim.x * 2) + threadIdx.x;
//init shared
for (int j = 0; j < N_colors; ++j) {
unsigned int aux = 0;
if (j == assigns[id]) {
aux += 1;
}
if (j == assigns[id + blockDim.x]) {
aux += 1;
}
shared[tid*N_colors + j] = aux;
}
__syncthreads();
unsigned int s;
for(s=blockDim.x/2; s>0; s>>=1) {
if (tid < s) {
for (int j = 0; j < N_colors; ++j) {
shared[tid*N_colors + j] += shared[(tid + s)*N_colors + j];
}
}
__syncthreads();
}
//copiar valors:
if (tid == 0) {
for (int j = 0; j < N_colors; ++j) {
counts[blockIdx.x*N_colors + j] = shared[j];
}
}
}
__global__ void matrix_reduction_count_2(int counts_2[], int counts[], int Size_row, int Size, int N_colors) {
extern __shared__ unsigned int shared[];
unsigned int tid = threadIdx.x;
unsigned int id = blockIdx.x*(blockDim.x * 2) + threadIdx.x;
//init shared
for (int j = 0; j < N_colors; ++j) {
shared[tid*N_colors + j] = counts[id*N_colors + j] + counts[((id + blockDim.x) * N_colors) + j];
}
__syncthreads();
//reduccio
unsigned int s;
for(s=blockDim.x/2; s>0; s>>=1) {
if (tid < s) {
for (int j = 0; j < N_colors; ++j) {
shared[tid*N_colors + j] += shared[(tid + s)*N_colors + j];
}
}
__syncthreads();
}
//copiar valors:
if (tid == 0) {
for (int j = 0; j < N_colors; ++j) {
counts_2[blockIdx.x*N_colors + j] = shared[j];
}
}
}
__global__ void sum_up_and_count_points_par(Color new_means[], int assigns[], unsigned char *im, int counts[],
int Size_row, int Size, int N_colors, int s_counts[], Color s_new_means[]) {
unsigned int tid = threadIdx.x;
unsigned int id = blockIdx.x*blockDim.x + threadIdx.x;
//inicialitzar
for (int j = 0; j < N_colors; ++j) {
if (j == assigns[id]) {
int index = (id*3/Size_row) * Size_row + ((id*3)%Size_row);
s_new_means[blockIdx.x*blockDim.x + tid*N_colors + j].r = im[index+2];
s_new_means[blockIdx.x*blockDim.x + tid*N_colors + j].g = im[index+1];
s_new_means[blockIdx.x*blockDim.x + tid*N_colors + j].b = im[index];
s_counts[blockIdx.x*blockDim.x + tid*N_colors + j] = 1;
}
else {
s_new_means[blockIdx.x*blockDim.x + tid*N_colors + j].r = 0;
s_new_means[blockIdx.x*blockDim.x + tid*N_colors + j].g = 0;
s_new_means[blockIdx.x*blockDim.x + tid*N_colors + j].b = 0;
s_counts[blockIdx.x*blockDim.x + tid*N_colors + j] = 0;
}
}
__syncthreads();
//reduccio
unsigned int s;
for(s=1; s < blockDim.x; s *= 2) {
if (tid % (2*s) == 0) {
for (int j = 0; j < N_colors; ++j) {
s_new_means[blockIdx.x*blockDim.x + tid*N_colors + j].r += s_new_means[(tid + s)*N_colors + j].r;
s_new_means[blockIdx.x*blockDim.x + tid*N_colors + j].g += s_new_means[(tid + s)*N_colors + j].g;
s_new_means[blockIdx.x*blockDim.x + tid*N_colors + j].b += s_new_means[(tid + s)*N_colors + j].b;
s_counts[blockIdx.x*blockDim.x + tid*N_colors + j] += s_counts[(tid + s)*N_colors + j];
}
}
__syncthreads();
}
__syncthreads();
//copiar valors:
if (tid == 0) {
for (int j = 0; j < N_colors; ++j) {
new_means[blockIdx.x*N_colors + j].r = s_new_means[j].r;
new_means[blockIdx.x*N_colors + j].g = s_new_means[j].g;
new_means[blockIdx.x*N_colors + j].b = s_new_means[j].b;
counts[j] = s_counts[j];
}
}
}
__global__ void findandsum(Color means[],Color new_means[], int assigns[], unsigned char *im, int counts[],
    int Size_row, int Size, int ncolors) {
    // Fused pass: classify one pixel to its nearest centroid (squared RGB
    // distance), then atomically fold the pixel into that cluster's component
    // sums and population counter.
    int px = blockIdx.x * blockDim.x + threadIdx.x;
    if (px >= Size) return;
    const int off = (px * 3 / Size_row) * Size_row + ((px * 3) % Size_row);
    int best = 0;
    int best_dist = -1;   // -1 == "no candidate seen yet"
    for (int k = 0; k < ncolors; ++k) {
        int d = (im[off + 2] - means[k].r) * (im[off + 2] - means[k].r)
              + (im[off + 1] - means[k].g) * (im[off + 1] - means[k].g)
              + (im[off]     - means[k].b) * (im[off]     - means[k].b);
        if (best_dist == -1 || d < best_dist) {
            best_dist = d;
            best = k;
        }
    }
    assigns[px] = best;
    // Heavily contended atomics: every pixel of a cluster hits the same words.
    atomicAdd(&new_means[best].r, im[off + 2]);
    atomicAdd(&new_means[best].g, im[off + 1]);
    atomicAdd(&new_means[best].b, im[off]);
    atomicAdd(&counts[best], 1);
}
void assign_colors_seq(Color means[], int assigns[], unsigned char *im, int Size_row, int Size) {
    // CPU reference: overwrite every pixel (BGR order in the BMP buffer)
    // with the centroid of the cluster it was assigned to.
    for (int px = 0; px < Size; ++px) {
        const int off = (px * 3 / Size_row) * Size_row + ((px * 3) % Size_row);
        const Color c = means[assigns[px]];
        im[off]     = c.b;
        im[off + 1] = c.g;
        im[off + 2] = c.r;
    }
}
__global__ void assign_colors_par(Color means[], int assigns[], unsigned char *im, int Size_row, int Size) {
    // One thread per pixel: repaint it (BGR order in the BMP buffer) with
    // the centroid of its assigned cluster.
    int px = blockIdx.x * blockDim.x + threadIdx.x;
    if (px >= Size) return;
    const int off = (px * 3 / Size_row) * Size_row + ((px * 3) % Size_row);
    const Color c = means[assigns[px]];
    im[off]     = c.b;
    im[off + 1] = c.g;
    im[off + 2] = c.r;
}
int main(int c, char *v[])
{
    // HIP k-means color quantization: load a BMP, run N_iterations of k-means
    // over its pixels (N_colors clusters, two GPU reduction stages plus a
    // final host-side merge), repaint the image with the resulting palette
    // and write it to sortida.bmp.
    // usage: prog bmp_file n_iterations seed [n_colors]
    int N_colors;
    if (c < 4 || c > 5) {
        fprintf(stderr, "usage: %s ppm_file n_iterations seed n_colors\n", v[0]);
        return -1;
    }
    else if (c == 4) N_colors = 16;
    else {
        // fix: "atoi(v[4]) ? : 16" relies on a GNU extension; spell it out
        N_colors = atoi(v[4]);
        if (N_colors == 0) N_colors = 16;
    }
    // read image:
    bmpInfoHeader infoHeader;
    unsigned char *im_host = LoadBMP(v[1], &infoHeader);
    // init variables:
    float elapsedTime;
    int N_iterations = atoi(v[2]);
    int Size_row = ((infoHeader.width*3 + 3) / 4) * 4;   // BMP rows are 4-byte aligned
    int width = infoHeader.width;
    int height = infoHeader.height;
    int Size = width * height;                           // pixel count
    // init RNG seed (reproducible centroid initialization)
    srand(atoi(v[3]));
    // init grid, block, nThreads:
    unsigned int nBlocks, nBlocksMeans, nThreads;
    nThreads = THREADS;
    nBlocks = (Size + nThreads - 1)/nThreads;            // ceil-div over pixels
    dim3 dimGrid(nBlocks, 1, 1);
    dim3 dimBlock(nThreads, 1, 1);
    nBlocksMeans = (N_colors + nThreads - 1)/nThreads;   // one thread per cluster
    dim3 dimGridMeans(nBlocksMeans, 1, 1);
    // NOTE(review): nBlocks/(2*nThreads) and nBlocks/(4*nThreads) truncate to 0
    // for small images, which would allocate 0 bytes and launch empty grids
    // below — confirm the minimum image size this version supports.
    // allocate HOST memory:
    Color *means_host;
    means_host = (Color*) malloc(N_colors*sizeof(Color));
    int *counts_host;
    counts_host = (int*) malloc(sizeof(int) * N_colors);
    Color *means_host_red;   // per-block partial sums after the 2nd reduction
    means_host_red = (Color*) malloc((nBlocks/(4*nThreads)) * N_colors*sizeof(Color));
    int *counts_host_red;    // per-block partial counts after the 2nd reduction
    counts_host_red = (int*) malloc((nBlocks/(4*nThreads)) * sizeof(int) * N_colors);
    // initialize means from random pixels:
    init_means(means_host, im_host, Size_row, N_colors, Size);
    // events for GPU timing:
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    // allocate DEVICE memory:
    Color *means_device;
    Color *new_means;
    int *counts;
    Color *new_means_2;
    int *counts_2;
    int *assigns;
    unsigned char *im_device;
    hipMalloc((Color**)&means_device, N_colors*sizeof(Color));
    hipMalloc((Color**)&new_means, (nBlocks/(nThreads)) * N_colors*sizeof(Color));
    hipMalloc((int**)&counts, (nBlocks/(nThreads)) * N_colors * sizeof (int));
    hipMalloc((Color**)&new_means_2, (nBlocks/(2*nThreads)) * N_colors*sizeof(Color));
    hipMalloc((int**)&counts_2, (nBlocks/(2*nThreads)) * N_colors * sizeof (int));
    hipMalloc((int**)&assigns, Size*sizeof(int));
    hipMalloc((unsigned char**)&im_device, infoHeader.imgsize* sizeof(unsigned char));
    CheckCudaError((char *) "Obtener Memoria en el device", __LINE__);
    // copy data to the device:
    hipMemcpy(im_device, im_host, infoHeader.imgsize * sizeof(unsigned char), hipMemcpyHostToDevice);
    hipMemcpy(means_device, means_host, N_colors*sizeof(Color), hipMemcpyHostToDevice);
    CheckCudaError((char *) "Copiar Datos Host --> Device", __LINE__);
    // dynamic shared memory: one uint per (thread, cluster) pair
    int shared_memory_size = N_colors*THREADS * sizeof(unsigned int);
    // START RECORD!!
    hipEventRecord(start, 0);
    // run k-means:
    int it;
    for (it = 0; it < N_iterations; ++it) {
        // set counts and new_means to 0
        hipMemset (counts, 0, nBlocks/(nThreads) * sizeof (int) * N_colors);
        hipMemset (new_means, 0, nBlocks/(nThreads)* sizeof (Color) * N_colors);
        // for each pixel find the best mean.
        hipLaunchKernelGGL(( find_best_mean_par), dim3(dimGrid), dim3(dimBlock), 0, 0, means_device, assigns, im_device, Size, N_colors, Size_row);
        hipDeviceSynchronize();
        // first reduction stage: per-block partial counts and RGB sums
        hipLaunchKernelGGL(( matrix_reduction_count), dim3(nBlocks/(2*nThreads)), dim3(dimBlock), shared_memory_size, 0, counts, assigns, im_device, Size_row, Size, N_colors);
        hipLaunchKernelGGL(( matrix_reduction_color), dim3(nBlocks/(2*nThreads)), dim3(dimBlock), shared_memory_size, 0, new_means, assigns, im_device, Size_row, Size, N_colors, 2);
        hipLaunchKernelGGL(( matrix_reduction_color), dim3(nBlocks/(2*nThreads)), dim3(dimBlock), shared_memory_size, 0, new_means, assigns, im_device, Size_row, Size, N_colors, 1);
        hipLaunchKernelGGL(( matrix_reduction_color), dim3(nBlocks/(2*nThreads)), dim3(dimBlock), shared_memory_size, 0, new_means, assigns, im_device, Size_row, Size, N_colors, 0);
        hipDeviceSynchronize();
        // second reduction stage over the partial results
        hipLaunchKernelGGL(( matrix_reduction_count_2), dim3(nBlocks/(4*nThreads)), dim3(dimBlock), shared_memory_size, 0, counts_2, counts, Size_row, Size, N_colors);
        hipLaunchKernelGGL(( matrix_reduction_color_2), dim3(nBlocks/(4*nThreads)), dim3(dimBlock), shared_memory_size, 0, new_means_2, new_means, Size_row, Size, N_colors, 2);
        hipLaunchKernelGGL(( matrix_reduction_color_2), dim3(nBlocks/(4*nThreads)), dim3(dimBlock), shared_memory_size, 0, new_means_2, new_means, Size_row, Size, N_colors, 1);
        hipLaunchKernelGGL(( matrix_reduction_color_2), dim3(nBlocks/(4*nThreads)), dim3(dimBlock), shared_memory_size, 0, new_means_2, new_means, Size_row, Size, N_colors, 0);
        hipMemcpy(means_host_red, new_means_2, (nBlocks/(4*nThreads)) * N_colors * sizeof(Color), hipMemcpyDeviceToHost);
        hipMemcpy(counts_host_red, counts_2, (nBlocks/(4*nThreads)) * N_colors * sizeof(int), hipMemcpyDeviceToHost);
        // final merge of the per-block partials on the host
        memset(counts_host, 0, sizeof (int) * N_colors);
        memset(means_host, 0, sizeof (Color) * N_colors);
        int i, j;
        for (i = 0; i < nBlocks/(4*nThreads); ++i) {
            for (j = 0; j < N_colors; ++j) {
                counts_host[j] += counts_host_red[i*N_colors + j];
                means_host[j].r += means_host_red[i*N_colors + j].r;
                means_host[j].g += means_host_red[i*N_colors + j].g;
                means_host[j].b += means_host_red[i*N_colors + j].b;
            }
        }
        // fully reduced vectors: push them back to the device
        hipMemcpy(new_means, means_host, N_colors * sizeof(Color), hipMemcpyHostToDevice);
        hipMemcpy(counts, counts_host, N_colors * sizeof(int), hipMemcpyHostToDevice);
        // Divide sums by counts to get new centroids.
        hipLaunchKernelGGL(( divide_sums_by_counts_par), dim3(dimGridMeans), dim3(dimBlock), 0, 0, means_device, N_colors, new_means, counts);
        hipDeviceSynchronize();
    }
    // repaint each pixel with its cluster's final centroid:
    hipLaunchKernelGGL(( assign_colors_par), dim3(dimGrid), dim3(dimBlock), 0, 0, means_device, assigns, im_device, Size_row, Size);
    // copy to host:
    hipMemcpy(im_host, im_device, infoHeader.imgsize * sizeof(unsigned char), hipMemcpyDeviceToHost);
    // STOP RECORD!!
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&elapsedTime, start, stop);
    // save image
    SaveBMP("sortida.bmp", &infoHeader, im_host);
    DisplayInfo("sortida.bmp", &infoHeader);
    // rough traffic estimate (informational only; kept for bandwidth math)
    int bytes_read_written = 2 * infoHeader.imgsize* sizeof(unsigned char) + // read image and copy it back
        N_iterations * (                        // each iteration performs:
            sizeof (int) * 2 * N_colors +       // read and update counts
            sizeof (Color) * N_colors +         // read and update means
            Size * 2 * sizeof(int) +            // read and update assignments
            Size * 3 * sizeof (unsigned char)   // read image data
        );
    (void)bytes_read_written;  // fix: silence unused-variable warning
    printf("\nQuantization CUDA\n");  // fix: "\Q" is not a valid escape sequence
    printf("Image Size: %d\n", Size);
    printf("nThreads: %d\n", nThreads);
    printf("nBlocks: %d\n", nBlocks);
    printf("Tiempo Total Version 5 = %4.6f ms\n", elapsedTime);
    hipEventDestroy(start);
    hipEventDestroy(stop);
    // free HOST memory (fix: counts_host and both reduction buffers leaked):
    free(im_host);
    free(means_host);
    free(counts_host);
    free(means_host_red);
    free(counts_host_red);
    // free DEVICE memory:
    hipFree(means_device);
    hipFree(new_means);
    hipFree(new_means_2);
    hipFree(assigns);
    hipFree(im_device);
    hipFree(counts);
    hipFree(counts_2);
    return 0;
}
| 59372389d53afdefda363f309fdeaa75df6d005e.cu | #include <time.h>
#include <stdio.h>
#include <stdlib.h>
extern "C" {
#include "bmp.h"
}
// One RGB triple. Fields are 32-bit (not 8-bit) because the same struct is
// used to accumulate per-cluster component sums over many pixels, which
// would overflow an unsigned char.
typedef struct Color {
    unsigned int r, g, b;
} Color;
#define THREADS 1024
void CheckCudaError(char sms[], int line) {
    // Abort with a diagnostic if any previous CUDA call or launch failed.
    // 'sms' labels the operation being checked; 'line' is the caller's line.
    cudaError_t status = cudaGetLastError();
    if (status != cudaSuccess) {
        printf("(ERROR) %s - %s in %s at line %d\n", sms, cudaGetErrorString(status), __FILE__, line);
        exit(EXIT_FAILURE);
    }
}
int square(int value) {
    // Return value * value.
    int result = value;
    result *= value;
    return result;
}
void display_means(Color means[], int counts[], int N_colors) {
    // Debug aid: dump every centroid and its population to stderr.
    // Output format is identical to the multi-call version it replaces.
    int idx;
    for (idx = 0; idx < N_colors; ++idx) {
        fprintf(stderr, "mean %d: r: %d, g: %d, b: %d, count: %d\n",
                idx, means[idx].r, means[idx].g, means[idx].b, counts[idx]);
    }
    fprintf(stderr, "\n");
}
void display_assigns(int assigns[], int Size) {
    // Debug aid: print the cluster index chosen for every pixel.
    for (int px = 0; px < Size; ++px) {
        fprintf(stderr, "%d: %d\n", px, assigns[px]);
    }
}
void init_means(Color means[], unsigned char *im, int Size_row, int N_colors, int Size) {
    // Seed each centroid from a uniformly random pixel of the image
    // (the BMP buffer stores pixels in BGR order; rows are Size_row bytes).
    for (int k = 0; k < N_colors; ++k) {
        int pick = rand() % Size;
        int off = (pick*3/Size_row) * Size_row + ((pick*3)%Size_row);
        means[k].r = im[off + 2];
        means[k].g = im[off + 1];
        means[k].b = im[off];
    }
}
void find_best_mean_seq(Color means[], int assigns[], unsigned char *im, int N, int ncolors, int Size_row) {
    // CPU reference: for each of the N pixels, store in assigns[] the index
    // of the centroid with minimum squared RGB distance.
    for (int px = 0; px < N; ++px) {
        const int off = (px*3/Size_row) * Size_row + ((px*3)%Size_row);
        int best = 0;
        int best_dist = -1;   // -1 == "no candidate seen yet"
        for (int k = 0; k < ncolors; ++k) {
            int d = (im[off+2] - means[k].r)*(im[off+2] - means[k].r)
                  + (im[off+1] - means[k].g)*(im[off+1] - means[k].g)
                  + (im[off]   - means[k].b)*(im[off]   - means[k].b);
            if (best_dist == -1 || d < best_dist) {
                best_dist = d;
                best = k;
            }
        }
        assigns[px] = best;
    }
}
__global__ void find_best_mean_par(Color means[], int assigns[], unsigned char *im, int N, int ncolors, int Size_row) {
    // One thread per pixel: record the index of the closest centroid
    // (minimum squared distance in RGB space).
    int px = blockIdx.x * blockDim.x + threadIdx.x;
    if (px >= N) return;
    const int off = (px*3/Size_row) * Size_row + ((px*3)%Size_row);
    int best = 0;
    int best_dist = -1;   // -1 == "no candidate seen yet"
    for (int k = 0; k < ncolors; ++k) {
        int d = (im[off+2] - means[k].r)*(im[off+2] - means[k].r)
              + (im[off+1] - means[k].g)*(im[off+1] - means[k].g)
              + (im[off]   - means[k].b)*(im[off]   - means[k].b);
        if (best_dist == -1 || d < best_dist) {
            best_dist = d;
            best = k;
        }
    }
    assigns[px] = best;
}
void divide_sums_by_counts_seq(Color means_host[], int N_colors, Color new_means[], int counts[]) {
    // Finalize each centroid: component mean = component sum / population.
    for (int k = 0; k < N_colors; ++k) {
        if (counts[k] == 0) counts[k] = 1;   // empty cluster: 0/1 instead of 0/0
        means_host[k].r = new_means[k].r / counts[k];
        means_host[k].g = new_means[k].g / counts[k];
        means_host[k].b = new_means[k].b / counts[k];
    }
}
__global__ void divide_sums_by_counts_par(Color means_device[], int N_colors, Color new_means[], int counts[]) {
    // One thread per cluster: centroid component = sum / population.
    int k = blockIdx.x * blockDim.x + threadIdx.x;
    if (k >= N_colors) return;
    if (counts[k] == 0) counts[k] = 1;   // empty cluster: 0/1 instead of 0/0
    means_device[k].r = new_means[k].r / counts[k];
    means_device[k].g = new_means[k].g / counts[k];
    means_device[k].b = new_means[k].b / counts[k];
}
void sum_up_and_count_points_seq(Color new_means[], int assigns[], unsigned char *im, int counts[], int Size_row, int Size) {
    // CPU reference: accumulate per-cluster RGB sums and member counts
    // over every pixel, according to the current assignments.
    for (int px = 0; px < Size; ++px) {
        int off = (px*3/Size_row) * Size_row + ((px*3)%Size_row);
        int k = assigns[px];
        new_means[k].r += im[off + 2];
        new_means[k].g += im[off + 1];
        new_means[k].b += im[off];
        counts[k] += 1;
    }
}
// First-stage reduction of one color component (offset selects the byte
// within a BGR pixel: 2 = red, 1 = green, 0 = blue). Each block covers
// 2*blockDim.x pixels (every thread loads two), folds the per-cluster sums
// in dynamic shared memory (blockDim.x * N_colors uints) and thread 0
// writes one row of N_colors partials to new_means[blockIdx.x*N_colors ..].
// Requires blockDim.x to be a power of two (halving tree below).
__global__ void matrix_reduction_color(Color new_means[], int assigns[], unsigned char *im, int Size_row, int Size, int N_colors, int offset) {
    extern __shared__ unsigned int shared[];
    unsigned int tid = threadIdx.x;
    unsigned int id = blockIdx.x*(blockDim.x * 2) + threadIdx.x;
    // init shared: this thread's per-cluster contribution from its two pixels
    for (int j = 0; j < N_colors; ++j) {
        unsigned int aux = 0;
        if (id < Size && j == assigns[id]) {
            int index = (id*3/Size_row) * Size_row + ((id*3)%Size_row);
            aux += im[index+offset];
        }
        if (id + blockDim.x < Size && j == assigns[id + blockDim.x]) {
            int index = ((id + blockDim.x)*3/Size_row) * Size_row + (((id + blockDim.x)*3)%Size_row);
            aux += im[index+offset];
        }
        shared[tid*N_colors + j] = aux;
    }
    __syncthreads();
    // tree reduction in shared memory
    unsigned int s;
    for(s=blockDim.x/2; s>0; s>>=1) {
        if (tid < s) {
            for (int j = 0; j < N_colors; ++j) {
                shared[tid*N_colors + j] += shared[(tid + s)*N_colors + j];
            }
        }
        __syncthreads();
    }
    // thread 0 publishes this block's partial sums for the selected component
    if (tid == 0) {
        for (int j = 0; j < N_colors; ++j) {
            if (offset == 2) new_means[blockIdx.x*N_colors + j].r = shared[j];
            else if (offset == 1) new_means[blockIdx.x*N_colors + j].g = shared[j];
            else new_means[blockIdx.x*N_colors + j].b = shared[j];
        }
    }
}
// Second-stage reduction of the per-block partial color sums produced by
// matrix_reduction_color. Each thread folds two N_colors-wide rows of
// new_means[] into shared memory; thread 0 emits one combined row to
// new_means_2[blockIdx.x*N_colors ..]. offset selects the component
// (2 = r, 1 = g, 0 = b). Size_row and Size are unused here.
// NOTE(review): no bounds guard on id — the caller must size new_means[] to
// cover gridDim.x * 2 * blockDim.x rows; confirm against main()'s launch.
// Requires blockDim.x to be a power of two.
__global__ void matrix_reduction_color_2(Color new_means_2[], Color new_means[], int Size_row, int Size, int N_colors, int offset) {
    extern __shared__ unsigned int shared[];
    unsigned int tid = threadIdx.x;
    unsigned int id = blockIdx.x*(blockDim.x * 2) + threadIdx.x;
    // init shared: pairwise sum of this thread's two input rows
    for (int j = 0; j < N_colors; ++j) {
        if (offset == 2) shared[tid*N_colors + j] = new_means[id*N_colors + j].r + new_means[(id + blockDim.x) *N_colors + j].r;
        else if (offset == 1) shared[tid*N_colors + j] = new_means[id*N_colors + j].g + new_means[(id + blockDim.x) * N_colors + j].g;
        else shared[tid*N_colors + j] = new_means[id*N_colors + j].b + new_means[(id + blockDim.x) *N_colors + j].b;
    }
    __syncthreads();
    // tree reduction in shared memory
    unsigned int s;
    for(s=blockDim.x/2; s>0; s>>=1) {
        if (tid < s) {
            for (int j = 0; j < N_colors; ++j) {
                shared[tid*N_colors + j] += shared[(tid + s)*N_colors + j];
            }
        }
        __syncthreads();
    }
    // thread 0 publishes this block's combined row
    if (tid == 0) {
        for (int j = 0; j < N_colors; ++j) {
            if (offset == 2) new_means_2[blockIdx.x*N_colors + j].r = shared[j];
            else if (offset == 1) new_means_2[blockIdx.x*N_colors + j].g = shared[j];
            else new_means_2[blockIdx.x*N_colors + j].b = shared[j];
        }
    }
}
__global__ void matrix_reduction_count(int counts[], int assigns[], unsigned char *im, int Size_row, int Size, int N_colors) {
    // First-stage reduction of cluster populations. Each block covers
    // 2*blockDim.x pixels (every thread tallies two), folds the per-cluster
    // counts in dynamic shared memory (blockDim.x * N_colors uints) and
    // thread 0 writes one row of N_colors partial counts to
    // counts[blockIdx.x*N_colors ..]. Requires blockDim.x power of two.
    // (im and Size_row are unused; kept for signature parity with the
    // matrix_reduction_color kernel.)
    extern __shared__ unsigned int shared[];
    unsigned int tid = threadIdx.x;
    unsigned int id = blockIdx.x*(blockDim.x * 2) + threadIdx.x;
    // init shared: this thread's per-cluster contribution (0, 1 or 2)
    for (int j = 0; j < N_colors; ++j) {
        unsigned int aux = 0;
        // Bug fix: guard both tallies against the grid tail. The original
        // read assigns[id] / assigns[id + blockDim.x] without checking
        // against Size (its sibling matrix_reduction_color has the guard),
        // which is an out-of-bounds read when the grid overshoots.
        if (id < Size && j == assigns[id]) {
            aux += 1;
        }
        if (id + blockDim.x < Size && j == assigns[id + blockDim.x]) {
            aux += 1;
        }
        shared[tid*N_colors + j] = aux;
    }
    __syncthreads();
    // tree reduction in shared memory
    unsigned int s;
    for(s=blockDim.x/2; s>0; s>>=1) {
        if (tid < s) {
            for (int j = 0; j < N_colors; ++j) {
                shared[tid*N_colors + j] += shared[(tid + s)*N_colors + j];
            }
        }
        __syncthreads();
    }
    // thread 0 publishes this block's partial counts
    if (tid == 0) {
        for (int j = 0; j < N_colors; ++j) {
            counts[blockIdx.x*N_colors + j] = shared[j];
        }
    }
}
// Second-stage reduction of the per-block partial counts produced by
// matrix_reduction_count. Each thread folds two N_colors-wide rows of
// counts[] into shared memory; thread 0 emits one combined row to
// counts_2[blockIdx.x*N_colors ..]. Size_row and Size are unused here.
// NOTE(review): no bounds guard on id — the caller must size counts[] to
// cover gridDim.x * 2 * blockDim.x rows; confirm against main()'s launch.
// Requires blockDim.x to be a power of two.
__global__ void matrix_reduction_count_2(int counts_2[], int counts[], int Size_row, int Size, int N_colors) {
    extern __shared__ unsigned int shared[];
    unsigned int tid = threadIdx.x;
    unsigned int id = blockIdx.x*(blockDim.x * 2) + threadIdx.x;
    // init shared: pairwise sum of this thread's two input rows
    for (int j = 0; j < N_colors; ++j) {
        shared[tid*N_colors + j] = counts[id*N_colors + j] + counts[((id + blockDim.x) * N_colors) + j];
    }
    __syncthreads();
    // tree reduction in shared memory
    unsigned int s;
    for(s=blockDim.x/2; s>0; s>>=1) {
        if (tid < s) {
            for (int j = 0; j < N_colors; ++j) {
                shared[tid*N_colors + j] += shared[(tid + s)*N_colors + j];
            }
        }
        __syncthreads();
    }
    // thread 0 publishes this block's combined row
    if (tid == 0) {
        for (int j = 0; j < N_colors; ++j) {
            counts_2[blockIdx.x*N_colors + j] = shared[j];
        }
    }
}
__global__ void sum_up_and_count_points_par(Color new_means[], int assigns[], unsigned char *im, int counts[],
    int Size_row, int Size, int N_colors, int s_counts[], Color s_new_means[]) {
    // Per-block tree reduction of per-cluster RGB sums and member counts,
    // staged in the global scratch arrays s_new_means / s_counts (one
    // blockDim.x * N_colors slice per block). Emits one partial row per block:
    //   new_means[blockIdx.x*N_colors + j] and counts[blockIdx.x*N_colors + j].
    // Requires blockDim.x to be a power of two (tid % (2*s) pairing scheme).
    unsigned int tid = threadIdx.x;
    unsigned int id = blockIdx.x*blockDim.x + threadIdx.x;
    // Base of this block's private slice of the scratch arrays.
    // Bug fix: the block offset must be scaled by N_colors; the original used
    // blockIdx.x*blockDim.x on writes and no offset at all on reads, so
    // blocks overlapped each other's scratch rows.
    unsigned int base = blockIdx.x * blockDim.x * N_colors;
    // Initialize this thread's row: its pixel contributes to exactly one cluster.
    for (int j = 0; j < N_colors; ++j) {
        unsigned int slot = base + tid*N_colors + j;
        // Bug fix: guard against the grid tail (id >= Size read assigns[] OOB).
        if (id < Size && j == assigns[id]) {
            int index = (id*3/Size_row) * Size_row + ((id*3)%Size_row);
            s_new_means[slot].r = im[index+2];
            s_new_means[slot].g = im[index+1];
            s_new_means[slot].b = im[index];
            s_counts[slot] = 1;
        }
        else {
            s_new_means[slot].r = 0;
            s_new_means[slot].g = 0;
            s_new_means[slot].b = 0;
            s_counts[slot] = 0;
        }
    }
    __syncthreads();
    // Tree reduction across this block's rows.
    unsigned int s;
    for(s=1; s < blockDim.x; s *= 2) {
        if (tid % (2*s) == 0) {
            for (int j = 0; j < N_colors; ++j) {
                unsigned int dst = base + tid*N_colors + j;
                // Bug fix: the source row was missing the block base offset.
                unsigned int src = base + (tid + s)*N_colors + j;
                s_new_means[dst].r += s_new_means[src].r;
                s_new_means[dst].g += s_new_means[src].g;
                s_new_means[dst].b += s_new_means[src].b;
                s_counts[dst] += s_counts[src];
            }
        }
        __syncthreads();
    }
    // Thread 0 publishes this block's partial sums.
    if (tid == 0) {
        for (int j = 0; j < N_colors; ++j) {
            new_means[blockIdx.x*N_colors + j].r = s_new_means[base + j].r;
            new_means[blockIdx.x*N_colors + j].g = s_new_means[base + j].g;
            new_means[blockIdx.x*N_colors + j].b = s_new_means[base + j].b;
            // Bug fix: was counts[j], which raced across blocks; write the
            // per-block row just like new_means.
            counts[blockIdx.x*N_colors + j] = s_counts[base + j];
        }
    }
}
__global__ void findandsum(Color means[],Color new_means[], int assigns[], unsigned char *im, int counts[],
    int Size_row, int Size, int ncolors) {
    // Fused pass: classify one pixel to its nearest centroid (squared RGB
    // distance), then atomically fold the pixel into that cluster's component
    // sums and population counter.
    int px = blockIdx.x * blockDim.x + threadIdx.x;
    if (px >= Size) return;
    const int off = (px * 3 / Size_row) * Size_row + ((px * 3) % Size_row);
    int best = 0;
    int best_dist = -1;   // -1 == "no candidate seen yet"
    for (int k = 0; k < ncolors; ++k) {
        int d = (im[off + 2] - means[k].r) * (im[off + 2] - means[k].r)
              + (im[off + 1] - means[k].g) * (im[off + 1] - means[k].g)
              + (im[off]     - means[k].b) * (im[off]     - means[k].b);
        if (best_dist == -1 || d < best_dist) {
            best_dist = d;
            best = k;
        }
    }
    assigns[px] = best;
    // Heavily contended atomics: every pixel of a cluster hits the same words.
    atomicAdd(&new_means[best].r, im[off + 2]);
    atomicAdd(&new_means[best].g, im[off + 1]);
    atomicAdd(&new_means[best].b, im[off]);
    atomicAdd(&counts[best], 1);
}
void assign_colors_seq(Color means[], int assigns[], unsigned char *im, int Size_row, int Size) {
    // CPU reference: overwrite every pixel (BGR order in the BMP buffer)
    // with the centroid of the cluster it was assigned to.
    for (int px = 0; px < Size; ++px) {
        const int off = (px * 3 / Size_row) * Size_row + ((px * 3) % Size_row);
        const Color c = means[assigns[px]];
        im[off]     = c.b;
        im[off + 1] = c.g;
        im[off + 2] = c.r;
    }
}
__global__ void assign_colors_par(Color means[], int assigns[], unsigned char *im, int Size_row, int Size) {
    // One thread per pixel: repaint it (BGR order in the BMP buffer) with
    // the centroid of its assigned cluster.
    int px = blockIdx.x * blockDim.x + threadIdx.x;
    if (px >= Size) return;
    const int off = (px * 3 / Size_row) * Size_row + ((px * 3) % Size_row);
    const Color c = means[assigns[px]];
    im[off]     = c.b;
    im[off + 1] = c.g;
    im[off + 2] = c.r;
}
int main(int c, char *v[])
{
    // CUDA k-means color quantization: load a BMP, run N_iterations of k-means
    // over its pixels (N_colors clusters, two GPU reduction stages plus a
    // final host-side merge), repaint the image with the resulting palette
    // and write it to sortida.bmp.
    // usage: prog bmp_file n_iterations seed [n_colors]
    int N_colors;
    if (c < 4 || c > 5) {
        fprintf(stderr, "usage: %s ppm_file n_iterations seed n_colors\n", v[0]);
        return -1;
    }
    else if (c == 4) N_colors = 16;
    else {
        // fix: "atoi(v[4]) ? : 16" relies on a GNU extension; spell it out
        N_colors = atoi(v[4]);
        if (N_colors == 0) N_colors = 16;
    }
    // read image:
    bmpInfoHeader infoHeader;
    unsigned char *im_host = LoadBMP(v[1], &infoHeader);
    // init variables:
    float elapsedTime;
    int N_iterations = atoi(v[2]);
    int Size_row = ((infoHeader.width*3 + 3) / 4) * 4;   // BMP rows are 4-byte aligned
    int width = infoHeader.width;
    int height = infoHeader.height;
    int Size = width * height;                           // pixel count
    // init RNG seed (reproducible centroid initialization)
    srand(atoi(v[3]));
    // init grid, block, nThreads:
    unsigned int nBlocks, nBlocksMeans, nThreads;
    nThreads = THREADS;
    nBlocks = (Size + nThreads - 1)/nThreads;            // ceil-div over pixels
    dim3 dimGrid(nBlocks, 1, 1);
    dim3 dimBlock(nThreads, 1, 1);
    nBlocksMeans = (N_colors + nThreads - 1)/nThreads;   // one thread per cluster
    dim3 dimGridMeans(nBlocksMeans, 1, 1);
    // NOTE(review): nBlocks/(2*nThreads) and nBlocks/(4*nThreads) truncate to 0
    // for small images, which would allocate 0 bytes and launch empty grids
    // below — confirm the minimum image size this version supports.
    // allocate HOST memory:
    Color *means_host;
    means_host = (Color*) malloc(N_colors*sizeof(Color));
    int *counts_host;
    counts_host = (int*) malloc(sizeof(int) * N_colors);
    Color *means_host_red;   // per-block partial sums after the 2nd reduction
    means_host_red = (Color*) malloc((nBlocks/(4*nThreads)) * N_colors*sizeof(Color));
    int *counts_host_red;    // per-block partial counts after the 2nd reduction
    counts_host_red = (int*) malloc((nBlocks/(4*nThreads)) * sizeof(int) * N_colors);
    // initialize means from random pixels:
    init_means(means_host, im_host, Size_row, N_colors, Size);
    // events for GPU timing:
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // allocate DEVICE memory:
    Color *means_device;
    Color *new_means;
    int *counts;
    Color *new_means_2;
    int *counts_2;
    int *assigns;
    unsigned char *im_device;
    cudaMalloc((Color**)&means_device, N_colors*sizeof(Color));
    cudaMalloc((Color**)&new_means, (nBlocks/(nThreads)) * N_colors*sizeof(Color));
    cudaMalloc((int**)&counts, (nBlocks/(nThreads)) * N_colors * sizeof (int));
    cudaMalloc((Color**)&new_means_2, (nBlocks/(2*nThreads)) * N_colors*sizeof(Color));
    cudaMalloc((int**)&counts_2, (nBlocks/(2*nThreads)) * N_colors * sizeof (int));
    cudaMalloc((int**)&assigns, Size*sizeof(int));
    cudaMalloc((unsigned char**)&im_device, infoHeader.imgsize* sizeof(unsigned char));
    CheckCudaError((char *) "Obtener Memoria en el device", __LINE__);
    // copy data to the device:
    cudaMemcpy(im_device, im_host, infoHeader.imgsize * sizeof(unsigned char), cudaMemcpyHostToDevice);
    cudaMemcpy(means_device, means_host, N_colors*sizeof(Color), cudaMemcpyHostToDevice);
    CheckCudaError((char *) "Copiar Datos Host --> Device", __LINE__);
    // dynamic shared memory: one uint per (thread, cluster) pair
    int shared_memory_size = N_colors*THREADS * sizeof(unsigned int);
    // START RECORD!!
    cudaEventRecord(start, 0);
    // run k-means:
    int it;
    for (it = 0; it < N_iterations; ++it) {
        // set counts and new_means to 0
        cudaMemset (counts, 0, nBlocks/(nThreads) * sizeof (int) * N_colors);
        cudaMemset (new_means, 0, nBlocks/(nThreads)* sizeof (Color) * N_colors);
        // for each pixel find the best mean.
        find_best_mean_par<<<dimGrid, dimBlock>>>(means_device, assigns, im_device, Size, N_colors, Size_row);
        cudaDeviceSynchronize();
        // first reduction stage: per-block partial counts and RGB sums
        matrix_reduction_count<<<nBlocks/(2*nThreads), dimBlock, shared_memory_size>>>(counts, assigns, im_device, Size_row, Size, N_colors);
        matrix_reduction_color<<<nBlocks/(2*nThreads), dimBlock, shared_memory_size>>>(new_means, assigns, im_device, Size_row, Size, N_colors, 2);
        matrix_reduction_color<<<nBlocks/(2*nThreads), dimBlock, shared_memory_size>>>(new_means, assigns, im_device, Size_row, Size, N_colors, 1);
        matrix_reduction_color<<<nBlocks/(2*nThreads), dimBlock, shared_memory_size>>>(new_means, assigns, im_device, Size_row, Size, N_colors, 0);
        cudaDeviceSynchronize();
        // second reduction stage over the partial results
        matrix_reduction_count_2<<<nBlocks/(4*nThreads), dimBlock, shared_memory_size>>>(counts_2, counts, Size_row, Size, N_colors);
        matrix_reduction_color_2<<<nBlocks/(4*nThreads), dimBlock, shared_memory_size>>>(new_means_2, new_means, Size_row, Size, N_colors, 2);
        matrix_reduction_color_2<<<nBlocks/(4*nThreads), dimBlock, shared_memory_size>>>(new_means_2, new_means, Size_row, Size, N_colors, 1);
        matrix_reduction_color_2<<<nBlocks/(4*nThreads), dimBlock, shared_memory_size>>>(new_means_2, new_means, Size_row, Size, N_colors, 0);
        cudaMemcpy(means_host_red, new_means_2, (nBlocks/(4*nThreads)) * N_colors * sizeof(Color), cudaMemcpyDeviceToHost);
        cudaMemcpy(counts_host_red, counts_2, (nBlocks/(4*nThreads)) * N_colors * sizeof(int), cudaMemcpyDeviceToHost);
        // final merge of the per-block partials on the host
        memset(counts_host, 0, sizeof (int) * N_colors);
        memset(means_host, 0, sizeof (Color) * N_colors);
        int i, j;
        for (i = 0; i < nBlocks/(4*nThreads); ++i) {
            for (j = 0; j < N_colors; ++j) {
                counts_host[j] += counts_host_red[i*N_colors + j];
                means_host[j].r += means_host_red[i*N_colors + j].r;
                means_host[j].g += means_host_red[i*N_colors + j].g;
                means_host[j].b += means_host_red[i*N_colors + j].b;
            }
        }
        // fully reduced vectors: push them back to the device
        cudaMemcpy(new_means, means_host, N_colors * sizeof(Color), cudaMemcpyHostToDevice);
        cudaMemcpy(counts, counts_host, N_colors * sizeof(int), cudaMemcpyHostToDevice);
        // Divide sums by counts to get new centroids.
        divide_sums_by_counts_par<<<dimGridMeans, dimBlock>>>(means_device, N_colors, new_means, counts);
        cudaDeviceSynchronize();
    }
    // repaint each pixel with its cluster's final centroid:
    assign_colors_par<<<dimGrid, dimBlock>>>(means_device, assigns, im_device, Size_row, Size);
    // copy to host:
    cudaMemcpy(im_host, im_device, infoHeader.imgsize * sizeof(unsigned char), cudaMemcpyDeviceToHost);
    // STOP RECORD!!
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    // save image
    SaveBMP("sortida.bmp", &infoHeader, im_host);
    DisplayInfo("sortida.bmp", &infoHeader);
    // rough traffic estimate (informational only; kept for bandwidth math)
    int bytes_read_written = 2 * infoHeader.imgsize* sizeof(unsigned char) + // read image and copy it back
        N_iterations * (                        // each iteration performs:
            sizeof (int) * 2 * N_colors +       // read and update counts
            sizeof (Color) * N_colors +         // read and update means
            Size * 2 * sizeof(int) +            // read and update assignments
            Size * 3 * sizeof (unsigned char)   // read image data
        );
    (void)bytes_read_written;  // fix: silence unused-variable warning
    printf("\nQuantization CUDA\n");  // fix: "\Q" is not a valid escape sequence
    printf("Image Size: %d\n", Size);
    printf("nThreads: %d\n", nThreads);
    printf("nBlocks: %d\n", nBlocks);
    printf("Tiempo Total Version 5 = %4.6f ms\n", elapsedTime);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // free HOST memory (fix: counts_host and both reduction buffers leaked):
    free(im_host);
    free(means_host);
    free(counts_host);
    free(means_host_red);
    free(counts_host_red);
    // free DEVICE memory:
    cudaFree(means_device);
    cudaFree(new_means);
    cudaFree(new_means_2);
    cudaFree(assigns);
    cudaFree(im_device);
    cudaFree(counts);
    cudaFree(counts_2);
    return 0;
}
|
8174581a86e79b96544960819f1db9f52fef208f.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright 2014 BVLC and contributors.
#include <rocblas.h>
#include <vector>
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {

// ReshapeLayer has no GPU-specific math: both passes delegate to the CPU
// implementations declared on the class.

// GPU forward pass: defers to Forward_cpu. Returns Dtype(0) (this layer
// produces no loss).
template <typename Dtype>
Dtype ReshapeLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
                                       vector<Blob<Dtype>*>* top) {
  Forward_cpu(bottom, top);
  return Dtype(0);
}

// GPU backward pass: defers to Backward_cpu.
template <typename Dtype>
void ReshapeLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
                                       const bool propagate_down,
                                       vector<Blob<Dtype>*>* bottom) {
  Backward_cpu(top, propagate_down, bottom);
}

// Explicitly instantiate the template (Caffe convention).
INSTANTIATE_CLASS(ReshapeLayer);

}  // namespace caffe
| 8174581a86e79b96544960819f1db9f52fef208f.cu | // Copyright 2014 BVLC and contributors.
#include <cublas_v2.h>
#include <vector>
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {

// ReshapeLayer has no GPU-specific math: both passes delegate to the CPU
// implementations declared on the class.

// GPU forward pass: defers to Forward_cpu. Returns Dtype(0) (this layer
// produces no loss).
template <typename Dtype>
Dtype ReshapeLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
                                       vector<Blob<Dtype>*>* top) {
  Forward_cpu(bottom, top);
  return Dtype(0);
}

// GPU backward pass: defers to Backward_cpu.
template <typename Dtype>
void ReshapeLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
                                       const bool propagate_down,
                                       vector<Blob<Dtype>*>* bottom) {
  Backward_cpu(top, propagate_down, bottom);
}

// Explicitly instantiate the template (Caffe convention).
INSTANTIATE_CLASS(ReshapeLayer);

}  // namespace caffe
|
a98053a784a4df7bf792ec5d46b961a2d800a018.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdbool.h>
#include <hip/hip_runtime.h>
#include "cudahsh.h"
#define UINT4 uint
#define MD5_INPUT_LENGTH 512
/* F, G and H are basic MD5 functions: selection, majority, parity */
#define F(x, y, z) (((x) & (y)) | ((~x) & (z)))
#define G(x, y, z) (((x) & (z)) | ((y) & (~z)))
#define H(x, y, z) ((x) ^ (y) ^ (z))
#define I(x, y, z) ((y) ^ ((x) | (~z)))
/* ROTATE_LEFT rotates x left n bits */
#define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32-(n))))
/* FF, GG, HH, and II transformations for rounds 1, 2, 3, and 4 */
/* Rotation is separate from addition to prevent recomputation */
#define FF(a, b, c, d, x, s, ac) \
{(a) += F ((b), (c), (d)) + (x) + (UINT4)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define GG(a, b, c, d, x, s, ac) \
{(a) += G ((b), (c), (d)) + (x) + (UINT4)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define HH(a, b, c, d, x, s, ac) \
{(a) += H ((b), (c), (d)) + (x) + (UINT4)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define II(a, b, c, d, x, s, ac) \
{(a) += I ((b), (c), (d)) + (x) + (UINT4)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#ifdef __cplusplus
extern "C"
#endif
char *md5_unpad(char *input) {
    // Recover the original string from an MD5-padded buffer produced by
    // md5_pad(): the original bit length is read from 32-bit word 14
    // (byte offset 56). Returns a pointer to a static buffer — not
    // reentrant / not thread safe — or NULL on NULL input.
    static char md5_unpadded[MD5_INPUT_LENGTH];
    unsigned int orig_length;

    if (input == NULL) {
        return NULL;
    }

    memset(md5_unpadded, 0, sizeof(md5_unpadded));
    orig_length = (*((unsigned int *)input + 14) / 8);
    // Robustness fix: clamp a corrupt length field so strncpy cannot
    // overrun the static buffer (leave room for the NUL terminator).
    if (orig_length >= sizeof(md5_unpadded)) {
        orig_length = sizeof(md5_unpadded) - 1;
    }
    strncpy(md5_unpadded, input, orig_length);
    return md5_unpadded;
}
#ifdef __cplusplus
extern "C"
#endif
char *md5_pad(char *input) {
    // MD5-pad a C string into a single 512-bit block: copy up to 56 bytes,
    // append the 0x80 terminator bit, zero-fill, and store the original
    // length in bits at 32-bit word 14 (byte offset 56) — the position the
    // md5() transform and md5_unpad() read it from. Word 15 (the high half
    // of the 64-bit length) stays zero from the memset.
    // Returns a static buffer (not reentrant), or NULL on NULL input.
    static char md5_padded[MD5_INPUT_LENGTH];
    int x;
    unsigned int orig_input_length;

    if (input == NULL) {
        return NULL;
    }

    // we store the length of the input (in bits) for later
    orig_input_length = strlen(input) * 8;
    // the maximum input we support is currently 512 bits as we are not
    // expecting a string password to be larger than this
    memset(md5_padded, 0, MD5_INPUT_LENGTH);
    for(x = 0; x < strlen(input) && x < 56; x++) {
        md5_padded[x] = input[x];
    }
    md5_padded[x] = 0x80;
    // Bug fix: the length must be written as a 32-bit word at word index 14.
    // The original wrote through (unsigned long *)md5_padded + 14, which on
    // LP64 platforms is byte offset 112 — outside the 64-byte block — so
    // word 14 stayed zero and md5()/md5_unpad() saw a zero length.
    *((unsigned int *)md5_padded + 14) = orig_input_length;
    return md5_padded;
}
/*
 * Compute one MD5 transform over a single padded 512-bit message block.
 *   in   : 16 little-endian 32-bit words (output of md5_pad, reinterpreted).
 *   hash : receives the resulting 4-word (128-bit) digest state A,B,C,D.
 * Standard RFC 1321 MD5: 4 rounds of 16 operations each via the FF/GG/HH/II
 * macros defined above. The decimal additive constants are the usual
 * floor(2^32 * abs(sin(i))) table; the S** defines are the per-round
 * rotation amounts. Only valid for single-block (<= 55 byte) messages,
 * since the chaining state is reset to the RFC initial values each call.
 */
__device__ void md5(uint *in, uint *hash) {
    uint a, b, c, d;
    // RFC 1321 initial chaining values
    const uint a0 = 0x67452301;
    const uint b0 = 0xEFCDAB89;
    const uint c0 = 0x98BADCFE;
    const uint d0 = 0x10325476;

    a = a0;
    b = b0;
    c = c0;
    d = d0;

    /* Round 1 */
    #define S11 7
    #define S12 12
    #define S13 17
    #define S14 22
    FF ( a, b, c, d, in[ 0], S11, 3614090360); /* 1 */
    FF ( d, a, b, c, in[ 1], S12, 3905402710); /* 2 */
    FF ( c, d, a, b, in[ 2], S13, 606105819); /* 3 */
    FF ( b, c, d, a, in[ 3], S14, 3250441966); /* 4 */
    FF ( a, b, c, d, in[ 4], S11, 4118548399); /* 5 */
    FF ( d, a, b, c, in[ 5], S12, 1200080426); /* 6 */
    FF ( c, d, a, b, in[ 6], S13, 2821735955); /* 7 */
    FF ( b, c, d, a, in[ 7], S14, 4249261313); /* 8 */
    FF ( a, b, c, d, in[ 8], S11, 1770035416); /* 9 */
    FF ( d, a, b, c, in[ 9], S12, 2336552879); /* 10 */
    FF ( c, d, a, b, in[10], S13, 4294925233); /* 11 */
    FF ( b, c, d, a, in[11], S14, 2304563134); /* 12 */
    FF ( a, b, c, d, in[12], S11, 1804603682); /* 13 */
    FF ( d, a, b, c, in[13], S12, 4254626195); /* 14 */
    FF ( c, d, a, b, in[14], S13, 2792965006); /* 15 */
    FF ( b, c, d, a, in[15], S14, 1236535329); /* 16 */

    /* Round 2 */
    #define S21 5
    #define S22 9
    #define S23 14
    #define S24 20
    GG ( a, b, c, d, in[ 1], S21, 4129170786); /* 17 */
    GG ( d, a, b, c, in[ 6], S22, 3225465664); /* 18 */
    GG ( c, d, a, b, in[11], S23, 643717713); /* 19 */
    GG ( b, c, d, a, in[ 0], S24, 3921069994); /* 20 */
    GG ( a, b, c, d, in[ 5], S21, 3593408605); /* 21 */
    GG ( d, a, b, c, in[10], S22, 38016083); /* 22 */
    GG ( c, d, a, b, in[15], S23, 3634488961); /* 23 */
    GG ( b, c, d, a, in[ 4], S24, 3889429448); /* 24 */
    GG ( a, b, c, d, in[ 9], S21, 568446438); /* 25 */
    GG ( d, a, b, c, in[14], S22, 3275163606); /* 26 */
    GG ( c, d, a, b, in[ 3], S23, 4107603335); /* 27 */
    GG ( b, c, d, a, in[ 8], S24, 1163531501); /* 28 */
    GG ( a, b, c, d, in[13], S21, 2850285829); /* 29 */
    GG ( d, a, b, c, in[ 2], S22, 4243563512); /* 30 */
    GG ( c, d, a, b, in[ 7], S23, 1735328473); /* 31 */
    GG ( b, c, d, a, in[12], S24, 2368359562); /* 32 */

    /* Round 3 */
    #define S31 4
    #define S32 11
    #define S33 16
    #define S34 23
    HH ( a, b, c, d, in[ 5], S31, 4294588738); /* 33 */
    HH ( d, a, b, c, in[ 8], S32, 2272392833); /* 34 */
    HH ( c, d, a, b, in[11], S33, 1839030562); /* 35 */
    HH ( b, c, d, a, in[14], S34, 4259657740); /* 36 */
    HH ( a, b, c, d, in[ 1], S31, 2763975236); /* 37 */
    HH ( d, a, b, c, in[ 4], S32, 1272893353); /* 38 */
    HH ( c, d, a, b, in[ 7], S33, 4139469664); /* 39 */
    HH ( b, c, d, a, in[10], S34, 3200236656); /* 40 */
    HH ( a, b, c, d, in[13], S31, 681279174); /* 41 */
    HH ( d, a, b, c, in[ 0], S32, 3936430074); /* 42 */
    HH ( c, d, a, b, in[ 3], S33, 3572445317); /* 43 */
    HH ( b, c, d, a, in[ 6], S34, 76029189); /* 44 */
    HH ( a, b, c, d, in[ 9], S31, 3654602809); /* 45 */
    HH ( d, a, b, c, in[12], S32, 3873151461); /* 46 */
    HH ( c, d, a, b, in[15], S33, 530742520); /* 47 */
    HH ( b, c, d, a, in[ 2], S34, 3299628645); /* 48 */

    /* Round 4 */
    #define S41 6
    #define S42 10
    #define S43 15
    #define S44 21
    II ( a, b, c, d, in[ 0], S41, 4096336452); /* 49 */
    II ( d, a, b, c, in[ 7], S42, 1126891415); /* 50 */
    II ( c, d, a, b, in[14], S43, 2878612391); /* 51 */
    II ( b, c, d, a, in[ 5], S44, 4237533241); /* 52 */
    II ( a, b, c, d, in[12], S41, 1700485571); /* 53 */
    II ( d, a, b, c, in[ 3], S42, 2399980690); /* 54 */
    II ( c, d, a, b, in[10], S43, 4293915773); /* 55 */
    II ( b, c, d, a, in[ 1], S44, 2240044497); /* 56 */
    II ( a, b, c, d, in[ 8], S41, 1873313359); /* 57 */
    II ( d, a, b, c, in[15], S42, 4264355552); /* 58 */
    II ( c, d, a, b, in[ 6], S43, 2734768916); /* 59 */
    II ( b, c, d, a, in[13], S44, 1309151649); /* 60 */
    II ( a, b, c, d, in[ 4], S41, 4149444226); /* 61 */
    II ( d, a, b, c, in[11], S42, 3174756917); /* 62 */
    II ( c, d, a, b, in[ 2], S43, 718787259); /* 63 */
    II ( b, c, d, a, in[ 9], S44, 3951481745); /* 64 */

    // fold this block into the initial state (Davies-Meyer step)
    a += a0;
    b += b0;
    c += c0;
    d += d0;

    hash[0] = a;
    hash[1] = b;
    hash[2] = c;
    hash[3] = d;
    return;
}
// Brute-force worker kernel: each thread computes the MD5 digest of one padded
// candidate key taken from keyArr and compares it against the target digest.
// On a match, *hash_found is raised and the winning key is copied into `key`.
// NOTE(review): keyArr is indexed at byte offset `tid`; this assumes the host
// laid out one candidate per thread at that offset -- confirm against caller.
__global__ void md5_cuda_calculate(char* hash, char *keyArr, char* key, bool* hash_found) {
    const unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x;  // global thread id
    uint wanted[4];
    uint digest[4];
    // Pull the target digest from global memory into registers.
    memcpy(&wanted[0], hash, HASH_LENGTH);
    // Hash this thread's candidate key (already MD5-padded on the host side).
    md5((uint*)&keyArr[tid], &digest[0]);
    bool match = true;
    for (int w = 0; w < 4; w++)
        match = match && (digest[w] == wanted[w]);
    if (match) {
        // Signal the host and hand back the plaintext key that produced the hash.
        *hash_found = 1;
        memcpy(key, &keyArr[tid], (KEY_LENGTH + 2) * sizeof(char));
    }
}
// Host-side launcher for the MD5 brute-force kernel (HIP build).
//   blocks/threads: launch configuration
//   hash:           target digest (device pointer, 16 bytes)
//   keyArr:         candidate key buffer (device pointer)
//   key:            output buffer for the matching key (device pointer)
//   hash_found:     device flag set by the kernel on a match
extern "C" void md5_calculate(int blocks, int threads, char* hash, char* keyArr, char* key, bool* hash_found) {
#ifdef GPU_BENCHMARK
    hipEvent_t start, stop;
    float time;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, 0);
    hipDeviceSynchronize();
#endif
    hipLaunchKernelGGL(( md5_cuda_calculate) , dim3(blocks), dim3(threads) , 0, 0, hash, keyArr, key, hash_found);
#ifdef GPU_BENCHMARK
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&time, start, stop);
    hipEventDestroy(start);
    hipEventDestroy(stop);
    // BUG FIX: the benchmark printf referenced an undeclared `device` struct
    // (device->max_blocks / device->max_threads), which broke the build when
    // GPU_BENCHMARK was defined. Report the actual launch configuration instead.
    printf("CUDA kernel took %fms to calculate %d x %d (%d) hashes\n", time, blocks, threads, blocks * threads);
    // print GPU stats here
#endif
}
| a98053a784a4df7bf792ec5d46b961a2d800a018.cu | #include <stdio.h>
#include <stdbool.h>
#include <cuda.h>
#include "cudahsh.h"
#define UINT4 uint
#define MD5_INPUT_LENGTH 512
/* F, G and H are basic MD5 functions: selection, majority, parity */
#define F(x, y, z) (((x) & (y)) | ((~x) & (z)))
#define G(x, y, z) (((x) & (z)) | ((y) & (~z)))
#define H(x, y, z) ((x) ^ (y) ^ (z))
#define I(x, y, z) ((y) ^ ((x) | (~z)))
/* ROTATE_LEFT rotates x left n bits */
#define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32-(n))))
/* FF, GG, HH, and II transformations for rounds 1, 2, 3, and 4 */
/* Rotation is separate from addition to prevent recomputation */
#define FF(a, b, c, d, x, s, ac) \
{(a) += F ((b), (c), (d)) + (x) + (UINT4)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define GG(a, b, c, d, x, s, ac) \
{(a) += G ((b), (c), (d)) + (x) + (UINT4)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define HH(a, b, c, d, x, s, ac) \
{(a) += H ((b), (c), (d)) + (x) + (UINT4)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define II(a, b, c, d, x, s, ac) \
{(a) += I ((b), (c), (d)) + (x) + (UINT4)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#ifdef __cplusplus
extern "C"
#endif
// Reverse of md5_pad(): recover the original string from a 512-bit padded
// MD5 message block. Returns a pointer to a static buffer (not thread-safe),
// or NULL if input is NULL.
char *md5_unpad(char *input) {
    static char md5_unpadded[MD5_INPUT_LENGTH];
    unsigned int orig_length;
    if (input == NULL) {
        return NULL;
    }
    memset(md5_unpadded, 0, sizeof(md5_unpadded));
    // The message length in bits lives in 32-bit word 14 of the padded block
    // (byte offset 56), per the MD5 spec; convert it back to a byte count.
    orig_length = (*((unsigned int *)input + 14) / 8);
    // ROBUSTNESS FIX: clamp to the buffer size so a corrupt/hostile length
    // field cannot overflow the static buffer (the strncpy bound was unchecked).
    if (orig_length >= MD5_INPUT_LENGTH) {
        orig_length = MD5_INPUT_LENGTH - 1;
    }
    strncpy(md5_unpadded, input, orig_length);
    return md5_unpadded;
}
#ifdef __cplusplus
extern "C"
#endif
// Pad an input string into a single 512-bit MD5 message block: the message
// bytes, a 0x80 terminator bit, zero fill, and the original length in bits
// stored in 32-bit word 14 (byte offset 56), per RFC 1321.
// Returns a pointer to a static buffer (not thread-safe), or NULL on NULL
// input. Supports messages up to 55 bytes (a single block).
char *md5_pad(char *input) {
    static char md5_padded[MD5_INPUT_LENGTH];
    int x;
    size_t input_length;
    unsigned int orig_input_length;
    if (input == NULL) {
        return NULL;
    }
    input_length = strlen(input);  // hoisted: was re-evaluated every loop iteration
    // we store the length of the input (in bits) for later
    orig_input_length = (unsigned int)(input_length * 8);
    // we would like to split the MD5 into 512 bit chunks with a special ending
    // the maximum input we support is currently 512 bits as we are not expecting a
    // string password to be larger than this
    memset(md5_padded, 0, MD5_INPUT_LENGTH);
    for (x = 0; x < (int)input_length && x < 56; x++) {
        md5_padded[x] = input[x];
    }
    md5_padded[x] = 0x80;  // mandatory single 1-bit terminator
    // BUG FIX: the length must land in 32-bit word 14 (byte 56) so that both
    // md5_unpad() and the md5() device transform (which reads `in[14]` as a
    // uint) find it. The old code wrote through `unsigned long *`, which is
    // 8 bytes wide on LP64 platforms and placed the length at byte 112,
    // producing wrong digests everywhere except 32-bit-long platforms.
    *((unsigned int *)md5_padded + 14) = orig_input_length;
    return md5_padded;
}
// Single-block MD5 transform (RFC 1321): `in` points to one 512-bit
// (16 x 32-bit word) padded message block, `hash` receives the four 32-bit
// state words A..D of the digest. The decimal constants in FF/GG/HH/II are
// the standard T[i] = floor(2^32 * abs(sin(i))) table; S11..S44 are the
// per-round left-rotation amounts.
__device__ void md5(uint *in, uint *hash) {
    uint a, b, c, d;
    // Standard MD5 initialization vector.
    const uint a0 = 0x67452301;
    const uint b0 = 0xEFCDAB89;
    const uint c0 = 0x98BADCFE;
    const uint d0 = 0x10325476;
    a = a0;
    b = b0;
    c = c0;
    d = d0;
    /* Round 1 */
#define S11 7
#define S12 12
#define S13 17
#define S14 22
    FF ( a, b, c, d, in[ 0], S11, 3614090360); /* 1 */
    FF ( d, a, b, c, in[ 1], S12, 3905402710); /* 2 */
    FF ( c, d, a, b, in[ 2], S13, 606105819); /* 3 */
    FF ( b, c, d, a, in[ 3], S14, 3250441966); /* 4 */
    FF ( a, b, c, d, in[ 4], S11, 4118548399); /* 5 */
    FF ( d, a, b, c, in[ 5], S12, 1200080426); /* 6 */
    FF ( c, d, a, b, in[ 6], S13, 2821735955); /* 7 */
    FF ( b, c, d, a, in[ 7], S14, 4249261313); /* 8 */
    FF ( a, b, c, d, in[ 8], S11, 1770035416); /* 9 */
    FF ( d, a, b, c, in[ 9], S12, 2336552879); /* 10 */
    FF ( c, d, a, b, in[10], S13, 4294925233); /* 11 */
    FF ( b, c, d, a, in[11], S14, 2304563134); /* 12 */
    FF ( a, b, c, d, in[12], S11, 1804603682); /* 13 */
    FF ( d, a, b, c, in[13], S12, 4254626195); /* 14 */
    FF ( c, d, a, b, in[14], S13, 2792965006); /* 15 */
    FF ( b, c, d, a, in[15], S14, 1236535329); /* 16 */
    /* Round 2 */
#define S21 5
#define S22 9
#define S23 14
#define S24 20
    GG ( a, b, c, d, in[ 1], S21, 4129170786); /* 17 */
    GG ( d, a, b, c, in[ 6], S22, 3225465664); /* 18 */
    GG ( c, d, a, b, in[11], S23, 643717713); /* 19 */
    GG ( b, c, d, a, in[ 0], S24, 3921069994); /* 20 */
    GG ( a, b, c, d, in[ 5], S21, 3593408605); /* 21 */
    GG ( d, a, b, c, in[10], S22, 38016083); /* 22 */
    GG ( c, d, a, b, in[15], S23, 3634488961); /* 23 */
    GG ( b, c, d, a, in[ 4], S24, 3889429448); /* 24 */
    GG ( a, b, c, d, in[ 9], S21, 568446438); /* 25 */
    GG ( d, a, b, c, in[14], S22, 3275163606); /* 26 */
    GG ( c, d, a, b, in[ 3], S23, 4107603335); /* 27 */
    GG ( b, c, d, a, in[ 8], S24, 1163531501); /* 28 */
    GG ( a, b, c, d, in[13], S21, 2850285829); /* 29 */
    GG ( d, a, b, c, in[ 2], S22, 4243563512); /* 30 */
    GG ( c, d, a, b, in[ 7], S23, 1735328473); /* 31 */
    GG ( b, c, d, a, in[12], S24, 2368359562); /* 32 */
    /* Round 3 */
#define S31 4
#define S32 11
#define S33 16
#define S34 23
    HH ( a, b, c, d, in[ 5], S31, 4294588738); /* 33 */
    HH ( d, a, b, c, in[ 8], S32, 2272392833); /* 34 */
    HH ( c, d, a, b, in[11], S33, 1839030562); /* 35 */
    HH ( b, c, d, a, in[14], S34, 4259657740); /* 36 */
    HH ( a, b, c, d, in[ 1], S31, 2763975236); /* 37 */
    HH ( d, a, b, c, in[ 4], S32, 1272893353); /* 38 */
    HH ( c, d, a, b, in[ 7], S33, 4139469664); /* 39 */
    HH ( b, c, d, a, in[10], S34, 3200236656); /* 40 */
    HH ( a, b, c, d, in[13], S31, 681279174); /* 41 */
    HH ( d, a, b, c, in[ 0], S32, 3936430074); /* 42 */
    HH ( c, d, a, b, in[ 3], S33, 3572445317); /* 43 */
    HH ( b, c, d, a, in[ 6], S34, 76029189); /* 44 */
    HH ( a, b, c, d, in[ 9], S31, 3654602809); /* 45 */
    HH ( d, a, b, c, in[12], S32, 3873151461); /* 46 */
    HH ( c, d, a, b, in[15], S33, 530742520); /* 47 */
    HH ( b, c, d, a, in[ 2], S34, 3299628645); /* 48 */
    /* Round 4 */
#define S41 6
#define S42 10
#define S43 15
#define S44 21
    II ( a, b, c, d, in[ 0], S41, 4096336452); /* 49 */
    II ( d, a, b, c, in[ 7], S42, 1126891415); /* 50 */
    II ( c, d, a, b, in[14], S43, 2878612391); /* 51 */
    II ( b, c, d, a, in[ 5], S44, 4237533241); /* 52 */
    II ( a, b, c, d, in[12], S41, 1700485571); /* 53 */
    II ( d, a, b, c, in[ 3], S42, 2399980690); /* 54 */
    II ( c, d, a, b, in[10], S43, 4293915773); /* 55 */
    II ( b, c, d, a, in[ 1], S44, 2240044497); /* 56 */
    II ( a, b, c, d, in[ 8], S41, 1873313359); /* 57 */
    II ( d, a, b, c, in[15], S42, 4264355552); /* 58 */
    II ( c, d, a, b, in[ 6], S43, 2734768916); /* 59 */
    II ( b, c, d, a, in[13], S44, 1309151649); /* 60 */
    II ( a, b, c, d, in[ 4], S41, 4149444226); /* 61 */
    II ( d, a, b, c, in[11], S42, 3174756917); /* 62 */
    II ( c, d, a, b, in[ 2], S43, 718787259); /* 63 */
    II ( b, c, d, a, in[ 9], S44, 3951481745); /* 64 */
    // Fold this block's result into the chaining state (single-block message,
    // so the chaining state is just the IV).
    a += a0;
    b += b0;
    c += c0;
    d += d0;
    hash[0] = a;
    hash[1] = b;
    hash[2] = c;
    hash[3] = d;
    return;
}
// One thread = one candidate key: compute its MD5 digest and compare against
// the target. A matching thread raises *hash_found and writes its key back.
// NOTE(review): keyArr is indexed at byte offset `id`; assumes the host laid
// out one candidate per thread at that offset -- confirm against the caller.
__global__ void md5_cuda_calculate(char* hash, char *keyArr, char* key, bool* hash_found) {
    unsigned int id = (blockIdx.x * blockDim.x) + threadIdx.x;  // unique thread id
    uint expected[4];
    uint digest[4];
    // Load the target digest into registers before hashing.
    memcpy(&expected[0], hash, HASH_LENGTH);
    md5((uint*)&keyArr[id], &digest[0]);  // compute the MD5 hash of this candidate
    // Branch-free comparison of all four digest words.
    uint diff = 0;
    for (int w = 0; w < 4; w++)
        diff |= (digest[w] ^ expected[w]);
    if (diff == 0) {
        // !! WE HAVE A MATCH !! -- report the key to the host.
        *hash_found = 1;
        memcpy(key, &keyArr[id], (KEY_LENGTH + 2) * sizeof(char));
    }
}
// Host-side launcher for the MD5 brute-force kernel (CUDA build).
//   blocks/threads: launch configuration
//   hash:           target digest (device pointer, 16 bytes)
//   keyArr:         candidate key buffer (device pointer)
//   key:            output buffer for the matching key (device pointer)
//   hash_found:     device flag set by the kernel on a match
extern "C" void md5_calculate(int blocks, int threads, char* hash, char* keyArr, char* key, bool* hash_found) {
#ifdef GPU_BENCHMARK
    cudaEvent_t start, stop;
    float time;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    // IDIOM FIX: cudaThreadSynchronize() is long deprecated;
    // cudaDeviceSynchronize() is the supported equivalent.
    cudaDeviceSynchronize();
#endif
    md5_cuda_calculate <<< blocks, threads >>> (hash, keyArr, key, hash_found);
#ifdef GPU_BENCHMARK
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // BUG FIX: the benchmark printf referenced an undeclared `device` struct
    // (device->max_blocks / device->max_threads), which broke the build when
    // GPU_BENCHMARK was defined. Report the actual launch configuration instead.
    printf("CUDA kernel took %fms to calculate %d x %d (%d) hashes\n", time, blocks, threads, blocks * threads);
    // print GPU stats here
#endif
}
|
ChFluidDynamics.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2014 projectchrono.org
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Author: Milad Rakhsha, Arman Pazouki, Wei Hu
// =============================================================================
//
// Class for performing time integration in fluid system.
// =============================================================================
#include "chrono_fsi/physics/ChFluidDynamics.cuh"
#include "chrono_fsi/physics/ChSphGeneral.cuh"
using std::cout;
using std::endl;
namespace chrono {
namespace fsi {
// -----------------------------------------------------------------------------
// Device function to calculate the share of density influence on a given
// particle from all other particle in a given cell
__device__ void collideCellDensityReInit(Real& numerator,
                                         Real& denominator,
                                         int3 gridPos,
                                         uint index,
                                         Real3 posRadA,
                                         Real4* sortedPosRad,
                                         Real3* sortedVelMas,
                                         Real4* sortedRhoPreMu,
                                         uint* cellStart,
                                         uint* cellEnd) {
    uint gridHash = calcGridHash(gridPos);
    uint first = cellStart[gridHash];
    if (first == 0xffffffff)
        return;  // 0xffffffff marks an empty cell
    // Iterate over every particle stored in this cell and accumulate its
    // kernel-weighted mass (numerator) and volume (denominator) contribution.
    uint last = cellEnd[gridHash];
    for (uint j = first; j < last; j++) {
        Real3 posRadB = mR3(sortedPosRad[j]);
        Real4 rhoPreMuB = sortedRhoPreMu[j];
        Real d = length(Distance(posRadA, posRadB));
        // Neighbors beyond the kernel support radius contribute nothing.
        if (d > RESOLUTION_LENGTH_MULT * paramsD.HSML)
            continue;
        Real w = W3h(d, sortedPosRad[j].w);
        numerator += paramsD.markerMass * w;
        denominator += paramsD.markerMass / rhoPreMuB.x * w;
    }
}
// -----------------------------------------------------------------------------
// Kernel to apply periodic BC along x
// Wrap active fluid particles across the periodic x-faces of the domain and
// apply the prescribed pressure jump deltaPress.x on crossing.
__global__ void ApplyPeriodicBoundaryXKernel(Real4* posRadD,
                                             Real4* rhoPresMuD,
                                             uint* activityIdentifierD) {
    uint index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= numObjectsD.numAllMarkers)
        return;
    if (activityIdentifierD[index] == 0)
        return;  // inactive particle: nothing to do
    Real4 rhoPresMu = rhoPresMuD[index];
    if (fabs(rhoPresMu.w) < .1)
        return;  // boundary (BCE) particle: never wrapped
    Real3 posRad = mR3(posRadD[index]);
    Real h = posRadD[index].w;
    Real period = paramsD.cMax.x - paramsD.cMin.x;
    if (posRad.x > paramsD.cMax.x) {
        // Left through the +x face: re-enter at -x and add the pressure jump.
        posRad.x -= period;
        posRadD[index] = mR4(posRad, h);
        if (rhoPresMu.w < -.1)
            rhoPresMuD[index].y += paramsD.deltaPress.x;
    } else if (posRad.x < paramsD.cMin.x) {
        // Left through the -x face: re-enter at +x and subtract the jump.
        posRad.x += period;
        posRadD[index] = mR4(posRad, h);
        if (rhoPresMu.w < -.1)
            rhoPresMuD[index].y -= paramsD.deltaPress.x;
    }
}
// -----------------------------------------------------------------------------
// Kernel to apply inlet/outlet BC along x
// Inlet/outlet boundary condition along x: wraps escaping particles back into
// the domain with a pressure offset, and enforces the inlet velocity/pressure
// in the inlet region.
__global__ void ApplyInletBoundaryXKernel(Real4* posRadD,
                                          Real3* VelMassD,
                                          Real4* rhoPresMuD) {
    uint index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= numObjectsD.numAllMarkers)
        return;
    Real4 rhoPresMu = rhoPresMuD[index];
    if (rhoPresMu.w > 0.0)
        return;  // no need to do anything if it is a boundary particle
    Real3 posRad = mR3(posRadD[index]);
    Real h = posRadD[index].w;
    // Left through the +x (outlet) face: wrap to the inlet side and raise the
    // pressure by the prescribed inlet/outlet pressure difference.
    if (posRad.x > paramsD.cMax.x) {
        posRad.x -= (paramsD.cMax.x - paramsD.cMin.x);
        posRadD[index] = mR4(posRad, h);
        if (rhoPresMu.w <= 0.0) {
            rhoPresMu.y = rhoPresMu.y + paramsD.deltaPress.x;
            rhoPresMuD[index] = rhoPresMu;
        }
    }
    // Left through the -x face: wrap back, impose the inlet velocity, and
    // lower the pressure accordingly.
    if (posRad.x < paramsD.cMin.x) {
        posRad.x += (paramsD.cMax.x - paramsD.cMin.x);
        posRadD[index] = mR4(posRad, h);
        VelMassD[index] = mR3(paramsD.V_in.x, 0, 0);
        if (rhoPresMu.w <= -.1) {
            rhoPresMu.y = rhoPresMu.y - paramsD.deltaPress.x;
            rhoPresMuD[index] = rhoPresMu;
        }
    }
    // Inlet-region enforcement on the (possibly wrapped) position.
    // NOTE(review): paramsD.x_in appears to mark the inlet plane; the sign
    // conventions (-x_in vs x_in) are inferred from usage -- confirm.
    if (posRad.x > -paramsD.x_in)
        rhoPresMuD[index].y = 0;
    if (posRad.x < paramsD.x_in)
        VelMassD[index] = mR3(paramsD.V_in.x, 0, 0);
}
// -----------------------------------------------------------------------------
// Kernel to apply periodic BC along y
// Wrap active fluid particles across the periodic y-faces of the domain and
// apply the prescribed pressure jump deltaPress.y on crossing.
__global__ void ApplyPeriodicBoundaryYKernel(Real4* posRadD,
                                             Real4* rhoPresMuD,
                                             uint* activityIdentifierD) {
    uint index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= numObjectsD.numAllMarkers)
        return;
    if (activityIdentifierD[index] == 0)
        return;  // inactive particle: nothing to do
    Real4 rhoPresMu = rhoPresMuD[index];
    if (fabs(rhoPresMu.w) < .1)
        return;  // boundary (BCE) particle: never wrapped
    Real3 posRad = mR3(posRadD[index]);
    Real h = posRadD[index].w;
    Real period = paramsD.cMax.y - paramsD.cMin.y;
    if (posRad.y > paramsD.cMax.y) {
        // Left through the +y face: re-enter at -y and add the pressure jump.
        posRad.y -= period;
        posRadD[index] = mR4(posRad, h);
        if (rhoPresMu.w < -.1) {
            rhoPresMu.y = rhoPresMu.y + paramsD.deltaPress.y;
            rhoPresMuD[index] = rhoPresMu;
        }
    } else if (posRad.y < paramsD.cMin.y) {
        // Left through the -y face: re-enter at +y and subtract the jump.
        posRad.y += period;
        posRadD[index] = mR4(posRad, h);
        if (rhoPresMu.w < -.1) {
            rhoPresMu.y = rhoPresMu.y - paramsD.deltaPress.y;
            rhoPresMuD[index] = rhoPresMu;
        }
    }
}
// -----------------------------------------------------------------------------
// Kernel to apply periodic BC along z
// Wrap active fluid particles across the periodic z-faces of the domain and
// apply the prescribed pressure jump deltaPress.z on crossing.
__global__ void ApplyPeriodicBoundaryZKernel(Real4* posRadD,
                                             Real4* rhoPresMuD,
                                             uint* activityIdentifierD) {
    uint index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= numObjectsD.numAllMarkers)
        return;
    if (activityIdentifierD[index] == 0)
        return;  // inactive particle: nothing to do
    Real4 rhoPresMu = rhoPresMuD[index];
    if (fabs(rhoPresMu.w) < .1)
        return;  // boundary (BCE) particle: never wrapped
    Real3 posRad = mR3(posRadD[index]);
    Real h = posRadD[index].w;
    Real period = paramsD.cMax.z - paramsD.cMin.z;
    if (posRad.z > paramsD.cMax.z) {
        // Left through the +z face: re-enter at -z and add the pressure jump.
        posRad.z -= period;
        posRadD[index] = mR4(posRad, h);
        if (rhoPresMu.w < -.1) {
            rhoPresMu.y = rhoPresMu.y + paramsD.deltaPress.z;
            rhoPresMuD[index] = rhoPresMu;
        }
    } else if (posRad.z < paramsD.cMin.z) {
        // Left through the -z face: re-enter at +z and subtract the jump.
        posRad.z += period;
        posRadD[index] = mR4(posRad, h);
        if (rhoPresMu.w < -.1) {
            rhoPresMu.y = rhoPresMu.y - paramsD.deltaPress.z;
            rhoPresMuD[index] = rhoPresMu;
        }
    }
}
// -----------------------------------------------------------------------------
// Kernel to keep particle inside the simulation domain
// Clamp fluid particle positions into the simulation box
// (x,y in [-boxDim/2, +boxDim/2], z in [0, boxDimZ]).
__global__ void ApplyOutOfBoundaryKernel(Real4* posRadD,
                                         Real4* rhoPresMuD,
                                         Real3* velMasD) {
    uint index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= numObjectsD.numAllMarkers)
        return;
    Real4 rhoPresMu = rhoPresMuD[index];
    if (fabs(rhoPresMu.w) < .1)
        return;  // boundary (BCE) particle: left untouched
    Real3 p = mR3(posRadD[index]);
    Real3 v = mR3(velMasD[index]);
    Real h = posRadD[index].w;
    // Clamp each coordinate to its allowed extent.
    Real xLim = 0.5 * paramsD.boxDimX;
    Real yLim = 0.5 * paramsD.boxDimY;
    if (p.x > xLim)
        p.x = xLim;
    if (p.x < -xLim)
        p.x = -xLim;
    if (p.y > yLim)
        p.y = yLim;
    if (p.y < -yLim)
        p.y = -yLim;
    if (p.z > 1.0 * paramsD.boxDimZ)
        p.z = 1.0 * paramsD.boxDimZ;
    if (p.z < -0.0 * paramsD.boxDimZ)
        p.z = -0.0 * paramsD.boxDimZ;
    posRadD[index] = mR4(p, h);
    velMasD[index] = mR3(v);  // velocity written back unchanged
}
// -----------------------------------------------------------------------------
// Kernel to update the fluid properities. It updates the stress tensor,
// density, velocity and position relying on explicit Euler scheme.
// Pressure is obtained from the density and an Equation of State.
// Explicit (WCSPH) state update for one active SPH particle:
//  * granular/elastic mode (paramsD.elastic_SPH): advance the stress tensor,
//    apply the mu(I)-rheology yield cap, and derive the pressure p_tr from
//    the stress trace;
//  * advect position with the XSPH-corrected velocity;
//  * integrate velocity from the acceleration stored in derivVelRhoD;
//  * update density (held at rho0 in granular mode, EOS otherwise).
// Sets *isErrorD on non-finite position or density/pressure.
__global__ void UpdateFluidD(Real4* posRadD,
                             Real3* velMasD,
                             Real4* rhoPresMuD,
                             Real3* tauXxYyZzD,
                             Real3* tauXyXzYzD,
                             Real3* vel_XSPH_D,
                             Real4* derivVelRhoD,
                             Real3* derivTauXxYyZzD,
                             Real3* derivTauXyXzYzD,
                             Real4* sr_tau_I_mu_iD,
                             uint* activityIdentifierD,
                             uint* freeSurfaceIdD,
                             int2 updatePortion,
                             Real dT,
                             volatile bool* isErrorD) {
    uint index = blockIdx.x * blockDim.x + threadIdx.x;
    index += updatePortion.x;  // this kernel covers markers [updatePortion.x, updatePortion.y)
    if (index >= updatePortion.y)
        return;
    uint activity = activityIdentifierD[index];
    if (activity == 0)
        return;  // inactive particles keep their state
    Real4 derivVelRho = derivVelRhoD[index];
    Real4 rhoPresMu = rhoPresMuD[index];
    Real h = posRadD[index].w;
    // Trial (p_tr) and previous-step (p_n) pressures; assigned only inside the
    // elastic_SPH branch below, and read back only when elastic_SPH is true.
    Real p_tr, p_n;
    if (rhoPresMu.w < 0) {  // fluid/granular markers only (w < 0)
        // This is only implemented for granular material
        if (paramsD.elastic_SPH) {
            //--------------------------------
            // ** total stress tau
            //--------------------------------
            Real3 tauXxYyZz = tauXxYyZzD[index];
            Real3 tauXyXzYz = tauXyXzYzD[index];
            Real3 derivTauXxYyZz = derivTauXxYyZzD[index];
            Real3 derivTauXyXzYz = derivTauXyXzYzD[index];
            // Explicit Euler step on the total stress.
            Real3 updatedTauXxYyZz = tauXxYyZz + mR3(derivTauXxYyZz) * dT;
            Real3 updatedTauXyXzYz = tauXyXzYz + mR3(derivTauXyXzYz) * dT;
            // check if there is a plastic flow
            // Split off the pressure (negative trace/3) to obtain the
            // deviatoric parts of the old and trial stress tensors.
            p_n = -1.0 / 3.0 * (tauXxYyZz.x + tauXxYyZz.y + tauXxYyZz.z);
            tauXxYyZz.x += p_n;
            tauXxYyZz.y += p_n;
            tauXxYyZz.z += p_n;
            p_tr = -1.0 / 3.0 * (updatedTauXxYyZz.x + updatedTauXxYyZz.y + updatedTauXxYyZz.z);
            updatedTauXxYyZz.x += p_tr;
            updatedTauXxYyZz.y += p_tr;
            updatedTauXxYyZz.z += p_tr;
            // Second invariants (von Mises-type magnitudes) of the deviatoric
            // trial and old stresses.
            Real tau_tr = square(updatedTauXxYyZz.x) + square(updatedTauXxYyZz.y) +
                          square(updatedTauXxYyZz.z) + 2.0 * square(updatedTauXyXzYz.x) +
                          2.0 * square(updatedTauXyXzYz.y) + 2.0 * square(updatedTauXyXzYz.z);
            Real tau_n = square(tauXxYyZz.x) + square(tauXxYyZz.y) + square(tauXxYyZz.z) +
                         2.0 * square(tauXyXzYz.x) + 2.0 * square(tauXyXzYz.y) + 2.0 * square(tauXyXzYz.z);
            tau_tr = sqrt(0.5 * tau_tr);
            tau_n = sqrt(0.5 * tau_n);
            // Shear-rate estimate from the stress increment and shear modulus.
            Real Chi = abs(tau_tr - tau_n) * paramsD.INV_G_shear / dT;
            // should use the positive magnitude according to "A
            // constitutive law for dense granular flows" Nature 2006
            Real mu_s = paramsD.mu_fric_s;
            Real mu_2 = paramsD.mu_fric_2;
            // Real s_0 = mu_s * p_tr;
            // Real s_2 = mu_2 * p_tr;
            // Real xi = 1.1;
            Real dia = paramsD.ave_diam;
            Real I0 = paramsD.mu_I0; // xi*dia*sqrt(rhoPresMu.x);//
            // Inertial number I; 1.0e9 regularizes the division near p_tr = 0.
            Real I = Chi * dia * sqrt( paramsD.rho0 / ( p_tr + 1.0e9 ) );
            Real coh = paramsD.Coh_coeff;
            // Real Chi_cri = 0.1;
            // if (Chi < Chi_cri){
            //     coh = paramsD.Coh_coeff * (1.0 - sin(-1.57 + 3.14 * (Chi / Chi_cri))) / 2.0;
            //     // coh = paramsD.Coh_coeff * (1.0 - I / I_cri);
            // } else {
            //     coh = 0.0;
            // }
            Real inv_mus = 1.0 / paramsD.mu_fric_s;
            Real p_cri = - coh * inv_mus;  // tensile pressure cutoff from cohesion
            if (p_tr > p_cri) {
                // mu(I) rheology: friction coefficient interpolates between
                // mu_s (quasi-static) and mu_2 (fast flow); 1.0e-9 regularizes.
                Real mu = mu_s + (mu_2 - mu_s) * (I + 1.0e-9) / (I0 + I + 1.0e-9);
                // Real G0 = paramsD.G_shear;
                // Real alpha = xi*G0*I0*(dT)*sqrt(p_tr);
                // Real B0 = s_2 + tau_tr + alpha;
                // Real H0 = s_2*tau_tr + s_0*alpha;
                // Real tau_n1 = (B0+sqrt(B0*B0-4*H0))/(2*H0+1e-9);
                // if(tau_tr>s_0){
                //     Real coeff = tau_n1/(tau_tr+1e-9);
                //     updatedTauXxYyZz = updatedTauXxYyZz*coeff;
                //     updatedTauXyXzYz = updatedTauXyXzYz*coeff;
                // }
                Real tau_max = p_tr * mu + coh; // p_tr*paramsD.Q_FA;
                // should use tau_max instead of s_0 according to
                // "A constitutive law for dense granular flows" Nature 2006
                if (tau_tr > tau_max) {
                    // Plastic flow: radially scale the trial stress back onto
                    // the yield surface.
                    Real coeff = tau_max / (tau_tr + 1e-9);
                    updatedTauXxYyZz = updatedTauXxYyZz * coeff;
                    updatedTauXyXzYz = updatedTauXyXzYz * coeff;
                }
            }
            // Set stress to zero if the pressure is smaller than the threshold
            if (p_tr < p_cri) {
                updatedTauXxYyZz = mR3(0.0);
                updatedTauXyXzYz = mR3(0.0);
                p_tr = 0.0;
                // Real coeff = abs(p_cri / (p_tr + 1e-9));
                // if (p_tr < 2.0 * p_cri){
                //     coeff = 0.0;
                // } else {
                //     coeff = abs(1.0 - (p_tr - p_cri) / p_cri);
                // }
                // updatedTauXxYyZz = updatedTauXxYyZz * coeff;
                // updatedTauXyXzYz = updatedTauXyXzYz * coeff;
                // p_tr = p_cri * coeff;
            }
            // Set stress to zero if the particle is close to free surface
            if (freeSurfaceIdD[index] == 1) {
                updatedTauXxYyZz = mR3(0.0);
                updatedTauXyXzYz = mR3(0.0);
                p_tr = 0.0;
            }
            // Optionally record the final shear-stress magnitude for output.
            if (paramsD.output_length == 2) {
                Real tau_tr = square(updatedTauXxYyZz.x) + square(updatedTauXxYyZz.y) +
                              square(updatedTauXxYyZz.z) + 2.0 * (square(updatedTauXyXzYz.x) +
                              square(updatedTauXyXzYz.y) + square(updatedTauXyXzYz.z));
                tau_tr = sqrt(0.5 * tau_tr);
                sr_tau_I_mu_iD[index].y = tau_tr;
            }
            // Store total stress = deviatoric part minus pressure on the diagonal.
            tauXxYyZzD[index] = updatedTauXxYyZz - mR3(p_tr);
            tauXyXzYzD[index] = updatedTauXyXzYz;
        }
        //-------------
        // ** position
        //-------------
        Real3 vel_XSPH = velMasD[index] + vel_XSPH_D[index]; // paramsD.EPS_XSPH *
        Real3 posRad = mR3(posRadD[index]);
        Real3 updatedPositon = posRad + vel_XSPH * dT;
        if (!(isfinite(updatedPositon.x) && isfinite(updatedPositon.y) && isfinite(updatedPositon.z))) {
            printf("Error! particle position is NAN: thrown from ChFluidDynamics.cu, UpdateFluidDKernel !\n");
            *isErrorD = true;
            return;
        }
        posRadD[index] = mR4(updatedPositon, h);
        //-------------
        // ** velocity
        //-------------
        // Note that the velocity update should not use the XSPH contribution
        // It adds dissipation to the solution, and provides numerical damping
        Real3 velMas = velMasD[index] + 0.0 * vel_XSPH_D[index]; // paramsD.EPS_XSPH * vel_XSPH_D[index]
        Real3 updatedVelocity = velMas + mR3(derivVelRho) * dT;
        velMasD[index] = updatedVelocity;
        //-------------
        // ** density
        //-------------
        if (paramsD.elastic_SPH) { // This is only implemented for granular material
            // Granular mode: pressure comes from the stress trace; density is
            // pinned at the reference value.
            rhoPresMu.y = p_tr;
            rhoPresMu.x = paramsD.rho0;
        } else {
            // Weakly-compressible mode: integrate continuity, then evaluate
            // the equation of state.
            Real rho2 = rhoPresMu.x + derivVelRho.w * dT;
            rhoPresMu.y = Eos(rho2, rhoPresMu.w);
            rhoPresMu.x = rho2;
        }
        if (!(isfinite(rhoPresMu.x) && isfinite(rhoPresMu.y) && isfinite(rhoPresMu.z) && isfinite(rhoPresMu.w))) {
            printf("Error! particle rho pressure is NAN: thrown from ChFluidDynamics.cu, UpdateFluidDKernel !\n");
            *isErrorD = true;
            return;
        }
        rhoPresMuD[index] = rhoPresMu;
    }
    // Important note: the derivVelRhoD that is calculated by the ChForceExplicitSPH is the negative of actual time
    // derivative. That is important to keep the derivVelRhoD to be the force/mass for fsi forces.
    // calculate the force that is f=m dv/dt
    // derivVelRhoD[index] *= paramsD.markerMass;
}
//------------------------------------------------------------------------------
// Adopt the solver-provided velocity field (new_vel) as the marker velocity
// and advect positions with it; NaN checks report (but do not flag) errors.
__global__ void Update_Fluid_State(Real3* new_vel,
                                   Real4* posRad,
                                   Real3* velMas,
                                   Real4* rhoPreMu,
                                   int4 updatePortion,
                                   double dT,
                                   volatile bool* isErrorD) {
    uint tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= updatePortion.y)
        return;
    // Take the new velocity, then move the particle with it over dT.
    velMas[tid] = new_vel[tid];
    Real h = posRad[tid].w;
    Real3 advected = mR3(posRad[tid]) + dT * velMas[tid];
    posRad[tid] = mR4(advected, h);
    // Sanity checks on the stored state.
    if (!(isfinite(posRad[tid].x) &&
          isfinite(posRad[tid].y) && isfinite(posRad[tid].z))) {
        printf("Error! particle %d position is NAN: thrown from UpdateFluidDKernel %f,%f,%f,%f\n",
               tid, posRad[tid].x, posRad[tid].y, posRad[tid].z, posRad[tid].w);
    }
    if (!(isfinite(rhoPreMu[tid].x) &&
          isfinite(rhoPreMu[tid].y) && isfinite(rhoPreMu[tid].z))) {
        printf("Error! particle %d rhoPreMu is NAN: thrown from UpdateFluidDKernel ! %f,%f,%f,%f\n",
               tid, rhoPreMu[tid].x, rhoPreMu[tid].y, rhoPreMu[tid].z, rhoPreMu[tid].w);
    }
    if (!(isfinite(velMas[tid].x) &&
          isfinite(velMas[tid].y) && isfinite(velMas[tid].z))) {
        printf("Error! particle %d velocity is NAN: thrown from UpdateFluidDKernel !%f,%f,%f\n",
               tid, velMas[tid].x, velMas[tid].y, velMas[tid].z);
    }
}
// -----------------------------------------------------------------------------
// Kernel for updating the density.
// It calculates the density of the particle. It does include the normalization
// close to the boundaries and free surface.
__global__ void ReCalcDensityD_F1(Real4* dummySortedRhoPreMu,
                                  Real4* sortedPosRad,
                                  Real3* sortedVelMas,
                                  Real4* sortedRhoPreMu,
                                  uint* gridMarkerIndex,
                                  uint* cellStart,
                                  uint* cellEnd) {
    uint index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= numObjectsD.numAllMarkers)
        return;
    // Load this particle's position and state from the sorted arrays.
    Real3 pos = mR3(sortedPosRad[index]);
    Real4 state = sortedRhoPreMu[index];
    // Visit the 3x3x3 block of grid cells around the particle's own cell and
    // accumulate the kernel-weighted sums from every neighbor found there.
    // (Iteration order kept z-outermost to preserve the accumulation order.)
    int3 cell = calcGridPos(pos);
    Real num = 0.0;
    Real den = 0.0;
    for (int dz = -1; dz <= 1; dz++)
        for (int dy = -1; dy <= 1; dy++)
            for (int dx = -1; dx <= 1; dx++)
                collideCellDensityReInit(num, den, cell + mI3(dx, dy, dz), index, pos,
                                         sortedPosRad, sortedVelMas, sortedRhoPreMu, cellStart, cellEnd);
    state.x = num;  // re-initialized density (denominator normalization disabled)
    // state.y = Eos(state.x, state.w);
    dummySortedRhoPreMu[index] = state;
}
// -----------------------------------------------------------------------------
// Kernel for updating the activity of all particles.
__global__ void UpdateActivityD(Real4* posRadD,
                                Real3* velMasD,
                                Real3* posRigidBodiesD,
                                Real3* pos_fsi_fea_D,
                                uint* activityIdentifierD,
                                uint* extendedActivityIdD,
                                int2 updatePortion,
                                Real Time,
                                volatile bool* isErrorD) {
    uint index = blockIdx.x * blockDim.x + threadIdx.x + updatePortion.x;
    if (index >= updatePortion.y)
        return;
    // Start from "active"; demote below if the particle is far from all solids.
    activityIdentifierD[index] = 1;
    extendedActivityIdD[index] = 1;
    // During the settling phase every particle remains active.
    if (Time < paramsD.settlingTime)
        return;
    size_t nRigid = numObjectsD.numRigidBodies;
    size_t nFlex = numObjectsD.numFlexNodes;
    size_t nSolid = nRigid + nFlex;
    // Count how many solid bodies/nodes this particle lies outside of, both
    // for the regular active domain and the slightly enlarged extended one.
    Real3 box = paramsD.bodyActiveDomain;
    Real3 boxExt = paramsD.bodyActiveDomain +
                   mR3(2 * RESOLUTION_LENGTH_MULT * paramsD.HSML);
    Real3 p = mR3(posRadD[index]);
    uint outside = 0;
    uint outsideExt = 0;
    for (uint b = 0; b < nRigid; b++) {
        Real3 d = p - posRigidBodiesD[b];
        if (abs(d.x) > box.x || abs(d.y) > box.y || abs(d.z) > box.z)
            outside = outside + 1;
        if (abs(d.x) > boxExt.x || abs(d.y) > boxExt.y || abs(d.z) > boxExt.z)
            outsideExt = outsideExt + 1;
    }
    for (uint n = 0; n < nFlex; n++) {
        Real3 d = p - pos_fsi_fea_D[n];
        if (abs(d.x) > box.x || abs(d.y) > box.y || abs(d.z) > box.z)
            outside = outside + 1;
        if (abs(d.x) > boxExt.x || abs(d.y) > boxExt.y || abs(d.z) > boxExt.z)
            outsideExt = outsideExt + 1;
    }
    // Outside every solid's active domain: deactivate and freeze the velocity.
    if (nSolid > 0 && outside == nSolid) {
        activityIdentifierD[index] = 0;
        velMasD[index] = mR3(0.0);
    }
    if (nSolid > 0 && outsideExt == nSolid)
        extendedActivityIdD[index] = 0;
}
// -----------------------------------------------------------------------------
// CLASS FOR FLUID DYNAMICS SYSTEM
// -----------------------------------------------------------------------------
// Construct the fluid-dynamics subsystem and instantiate the force solver
// matching the requested time integrator. Unknown integrator types fall back
// to the explicit WCSPH solver.
ChFluidDynamics::ChFluidDynamics(std::shared_ptr<ChBce> otherBceWorker,
                                 ChSystemFsi_impl& otherFsiSystem,
                                 std::shared_ptr<SimParams> otherParamsH,
                                 std::shared_ptr<ChCounters> otherNumObjects,
                                 TimeIntegrator type,
                                 bool verb)
    : fsiSystem(otherFsiSystem),
      paramsH(otherParamsH),
      numObjectsH(otherNumObjects),
      integrator_type(type),
      verbose(verb) {
    // Three-line creation banner, printed only in verbose mode.
    auto banner = [this](const char* title) {
        if (verbose) {
            cout << "============================================" << endl;
            cout << title << endl;
            cout << "============================================" << endl;
        }
    };
    if (integrator_type == TimeIntegrator::I2SPH) {
        forceSystem = chrono_types::make_shared<ChFsiForceI2SPH>(
            otherBceWorker, fsiSystem.sortedSphMarkersD, fsiSystem.markersProximityD,
            fsiSystem.fsiGeneralData, paramsH, numObjectsH, verb);
        banner("====== Created an I2SPH framework ======");
    } else if (integrator_type == TimeIntegrator::IISPH) {
        forceSystem = chrono_types::make_shared<ChFsiForceIISPH>(
            otherBceWorker, fsiSystem.sortedSphMarkersD, fsiSystem.markersProximityD,
            fsiSystem.fsiGeneralData, paramsH, numObjectsH, verb);
        banner("====== Created an IISPH framework ======");
    } else if (integrator_type == TimeIntegrator::EXPLICITSPH) {
        forceSystem = chrono_types::make_shared<ChFsiForceExplicitSPH>(
            otherBceWorker, fsiSystem.sortedSphMarkersD, fsiSystem.markersProximityD,
            fsiSystem.fsiGeneralData, paramsH, numObjectsH, verb);
        banner("====== Created a WCSPH framework =======");
    } else {
        // Extend this constructor with your own linear solvers.
        forceSystem = chrono_types::make_shared<ChFsiForceExplicitSPH>(
            otherBceWorker, fsiSystem.sortedSphMarkersD, fsiSystem.markersProximityD,
            fsiSystem.fsiGeneralData, paramsH, numObjectsH, verb);
        cout << "Selected integrator type not implemented, reverting back to WCSPH" << endl;
    }
}
// -----------------------------------------------------------------------------
ChFluidDynamics::~ChFluidDynamics() {}
// -----------------------------------------------------------------------------
// Initialize the force system and mirror the host-side parameters and object
// counters into the device __constant__ symbols (paramsD, numObjectsD).
void ChFluidDynamics::Initialize() {
    forceSystem->Initialize();
    // Push host parameters/counters to device constant memory.
    hipMemcpyToSymbolAsync(paramsD, paramsH.get(), sizeof(SimParams));
    hipMemcpyToSymbolAsync(numObjectsD, numObjectsH.get(), sizeof(ChCounters));
    // Read paramsD back so the host view matches the device view.
    // NOTE(review): the two async copies are not explicitly synchronized before
    // this blocking copy-back; presumably all three run on the default stream,
    // which serializes them -- confirm no non-default stream is involved.
    hipMemcpyFromSymbol(paramsH.get(), paramsD, sizeof(SimParams));
}
// -----------------------------------------------------------------------------
// Advance the SPH state by one step of size dT at simulation time Time.
// EXPLICITSPH: refresh activity flags, evaluate forces on sphMarkersD2, then
// integrate sphMarkersD1 explicitly. Implicit schemes: forces are evaluated on
// sphMarkersD1; IISPH then updates sphMarkersD2 implicitly. In all cases,
// periodic boundary conditions are finally applied to sphMarkersD2.
void ChFluidDynamics::IntegrateSPH(std::shared_ptr<SphMarkerDataD> sphMarkersD2,
std::shared_ptr<SphMarkerDataD> sphMarkersD1,
std::shared_ptr<FsiBodiesDataD> fsiBodiesD,
std::shared_ptr<FsiMeshDataD> fsiMeshD,
Real dT,
Real Time) {
if (GetIntegratorType() == TimeIntegrator::EXPLICITSPH) {
this->UpdateActivity(sphMarkersD1, sphMarkersD2, fsiBodiesD, fsiMeshD, Time);
forceSystem->ForceSPH(sphMarkersD2, fsiBodiesD, fsiMeshD);
} else
forceSystem->ForceSPH(sphMarkersD1, fsiBodiesD, fsiMeshD);
if (integrator_type == TimeIntegrator::IISPH)
this->UpdateFluid_Implicit(sphMarkersD2);
else if (GetIntegratorType() == TimeIntegrator::EXPLICITSPH)
this->UpdateFluid(sphMarkersD1, dT);
// Apply periodic BCs to the integrated state (x, y, z directions)
this->ApplyBoundarySPH_Markers(sphMarkersD2);
}
// -----------------------------------------------------------------------------
// Refresh per-particle activity flags (active / extended-active) based on the
// distance to rigid bodies and flexible-mesh nodes; see UpdateActivityD.
// sphMarkersD2 supplies positions, sphMarkersD1 supplies velocities.
// Throws std::runtime_error if the kernel reports an error.
void ChFluidDynamics::UpdateActivity(std::shared_ptr<SphMarkerDataD> sphMarkersD1,
                                     std::shared_ptr<SphMarkerDataD> sphMarkersD2,
                                     std::shared_ptr<FsiBodiesDataD> fsiBodiesD,
                                     std::shared_ptr<FsiMeshDataD> fsiMeshD,
                                     Real Time) {
    // Update portion of the SPH particles (should be all particles here)
    int2 updatePortion = mI2(0, (int)numObjectsH->numAllMarkers);

    // Device-side error flag, initialized to false on the host.
    bool *isErrorH, *isErrorD;
    isErrorH = (bool*)malloc(sizeof(bool));
    hipMalloc((void**)&isErrorD, sizeof(bool));
    *isErrorH = false;
    hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice);

    uint numBlocks, numThreads;
    computeGridSize(updatePortion.y - updatePortion.x, 256, numBlocks, numThreads);
    hipLaunchKernelGGL(( UpdateActivityD), dim3(numBlocks), dim3(numThreads), 0, 0,
        mR4CAST(sphMarkersD2->posRadD), mR3CAST(sphMarkersD1->velMasD),
        mR3CAST(fsiBodiesD->posRigid_fsiBodies_D),
        mR3CAST(fsiMeshD->pos_fsi_fea_D),
        U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD),
        U1CAST(fsiSystem.fsiGeneralData->extendedActivityIdD),
        updatePortion, Time, isErrorD);
    hipDeviceSynchronize();
    cudaCheckError();

    // Read back the error flag and release the temporaries *before* throwing,
    // so the error path no longer leaks the host/device allocations (bug fix:
    // the original threw first and skipped hipFree/free).
    hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost);
    bool hadError = *isErrorH;
    hipFree(isErrorD);
    free(isErrorH);
    if (hadError)
        throw std::runtime_error("Error! program crashed in UpdateActivityD!\n");
}
// -----------------------------------------------------------------------------
// Explicit (WCSPH) state update: integrate position, velocity, density, and
// granular stress for the fluid markers in place, using the derivative arrays
// computed by the force system (see UpdateFluidD).
// sphMarkersD: marker state to advance; dT: integration time step.
// Throws std::runtime_error if the kernel reports a non-finite state.
void ChFluidDynamics::UpdateFluid(std::shared_ptr<SphMarkerDataD> sphMarkersD, Real dT) {
    // Update portion of the SPH particles (should be fluid particles only here)
    int2 updatePortion = mI2(0, fsiSystem.fsiGeneralData->referenceArray[0].y);

    // Device-side error flag, initialized to false on the host.
    bool *isErrorH, *isErrorD;
    isErrorH = (bool*)malloc(sizeof(bool));
    hipMalloc((void**)&isErrorD, sizeof(bool));
    *isErrorH = false;
    hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice);

    uint numBlocks, numThreads;
    computeGridSize(updatePortion.y - updatePortion.x, 256, numBlocks, numThreads);
    hipLaunchKernelGGL(( UpdateFluidD), dim3(numBlocks), dim3(numThreads), 0, 0,
        mR4CAST(sphMarkersD->posRadD),
        mR3CAST(sphMarkersD->velMasD),
        mR4CAST(sphMarkersD->rhoPresMuD),
        mR3CAST(sphMarkersD->tauXxYyZzD),
        mR3CAST(sphMarkersD->tauXyXzYzD),
        mR3CAST(fsiSystem.fsiGeneralData->vel_XSPH_D),
        mR4CAST(fsiSystem.fsiGeneralData->derivVelRhoD),
        mR3CAST(fsiSystem.fsiGeneralData->derivTauXxYyZzD),
        mR3CAST(fsiSystem.fsiGeneralData->derivTauXyXzYzD),
        mR4CAST(fsiSystem.fsiGeneralData->sr_tau_I_mu_i),
        U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD),
        U1CAST(fsiSystem.fsiGeneralData->freeSurfaceIdD),
        updatePortion, dT, isErrorD);
    hipDeviceSynchronize();
    cudaCheckError();

    // Read back the error flag and free the temporaries *before* throwing, so
    // the error path no longer leaks the host/device allocations (bug fix).
    hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost);
    bool hadError = *isErrorH;
    hipFree(isErrorD);
    free(isErrorH);
    if (hadError)
        throw std::runtime_error("Error! program crashed in UpdateFluidD!\n");
}
// -----------------------------------------------------------------------------
// Implicit (IISPH) state update: copy the solver velocities (vel_XSPH_D) into
// the markers and advance positions (see Update_Fluid_State). The update
// portion skips optional helper/ghost marker sections of referenceArray.
// Throws std::runtime_error if the kernel reports an error.
void ChFluidDynamics::UpdateFluid_Implicit(std::shared_ptr<SphMarkerDataD> sphMarkersD) {
    uint numThreads, numBlocks;
    computeGridSize((int)numObjectsH->numAllMarkers, 256, numBlocks, numThreads);

    // Index past helper/ghost sections when they are present.
    int haveGhost = (numObjectsH->numGhostMarkers > 0) ? 1 : 0;
    int haveHelper = (numObjectsH->numHelperMarkers > 0) ? 1 : 0;
    int4 updatePortion = mI4(fsiSystem.fsiGeneralData->referenceArray[haveHelper].x,
                             fsiSystem.fsiGeneralData->referenceArray[haveHelper + haveGhost].y, 0, 0);
    cout << "time step in UpdateFluid_Implicit " << paramsH->dT << endl;

    // Device-side error flag, initialized to false on the host.
    bool *isErrorH, *isErrorD;
    isErrorH = (bool*)malloc(sizeof(bool));
    hipMalloc((void**)&isErrorD, sizeof(bool));
    *isErrorH = false;
    hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( Update_Fluid_State), dim3(numBlocks), dim3(numThreads), 0, 0,
        mR3CAST(fsiSystem.fsiGeneralData->vel_XSPH_D),
        mR4CAST(sphMarkersD->posRadD), mR3CAST(sphMarkersD->velMasD),
        mR4CAST(sphMarkersD->rhoPresMuD), updatePortion, paramsH->dT, isErrorD);
    hipDeviceSynchronize();
    cudaCheckError();

    // Read back the error flag and free the temporaries *before* throwing, so
    // the error path no longer leaks the host/device allocations (bug fix).
    hipMemcpy(isErrorH, isErrorD, sizeof(bool), hipMemcpyDeviceToHost);
    bool hadError = *isErrorH;
    hipFree(isErrorD);
    free(isErrorH);
    if (hadError)
        throw std::runtime_error("Error! program crashed in Update_Fluid_State!\n");
}
// -----------------------------------------------------------------------------
// Apply periodic boundary conditions in x, y, and z directions
// Apply periodic boundary conditions to all markers: three sequential kernel
// launches wrap positions across the x, y, and z domain faces (each adjusts
// pressure by the corresponding deltaPress component). Each launch is followed
// by a device sync + error check before the next one runs.
void ChFluidDynamics::ApplyBoundarySPH_Markers(std::shared_ptr<SphMarkerDataD> sphMarkersD) {
uint numBlocks, numThreads;
computeGridSize((int)numObjectsH->numAllMarkers, 256, numBlocks, numThreads);
hipLaunchKernelGGL(( ApplyPeriodicBoundaryXKernel), dim3(numBlocks), dim3(numThreads), 0, 0,
mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD),
U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD));
hipDeviceSynchronize();
cudaCheckError();
hipLaunchKernelGGL(( ApplyPeriodicBoundaryYKernel), dim3(numBlocks), dim3(numThreads), 0, 0,
mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD),
U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD));
hipDeviceSynchronize();
cudaCheckError();
hipLaunchKernelGGL(( ApplyPeriodicBoundaryZKernel), dim3(numBlocks), dim3(numThreads), 0, 0,
mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD),
U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD));
hipDeviceSynchronize();
cudaCheckError();
// Out-of-bound clamping is currently disabled (kept for reference):
// ApplyOutOfBoundaryKernel<<<numBlocks, numThreads>>>
// (mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD), mR3CAST(sphMarkersD->velMasD));
// hipDeviceSynchronize();
// cudaCheckError();
}
// -----------------------------------------------------------------------------
// Apply periodic boundary conditions in y, and z.
// The inlet/outlet BC is applied in the x direction.
// This functions needs to be tested.
// Apply inlet/outlet BC along x, and periodic BCs along y and z.
// NOTE(review): the original header comment says this function still needs to
// be tested.
void ChFluidDynamics::ApplyModifiedBoundarySPH_Markers(std::shared_ptr<SphMarkerDataD> sphMarkersD) {
uint numBlocks, numThreads;
computeGridSize((int)numObjectsH->numAllMarkers, 256, numBlocks, numThreads);
hipLaunchKernelGGL(( ApplyInletBoundaryXKernel), dim3(numBlocks), dim3(numThreads), 0, 0,
mR4CAST(sphMarkersD->posRadD), mR3CAST(sphMarkersD->velMasD),
mR4CAST(sphMarkersD->rhoPresMuD));
hipDeviceSynchronize();
cudaCheckError();
// these are useful anyway for out of bound particles
hipLaunchKernelGGL(( ApplyPeriodicBoundaryYKernel), dim3(numBlocks), dim3(numThreads), 0, 0,
mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD),
U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD));
hipDeviceSynchronize();
cudaCheckError();
hipLaunchKernelGGL(( ApplyPeriodicBoundaryZKernel), dim3(numBlocks), dim3(numThreads), 0, 0,
mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD),
U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD));
hipDeviceSynchronize();
cudaCheckError();
}
// -----------------------------------------------------------------------------
// Recompute marker densities from the sorted neighbor structure (see
// ReCalcDensityD_F1), then scatter the result back into both double-buffered
// marker arrays (sphMarkersD1 and sphMarkersD2) in original (unsorted) order.
void ChFluidDynamics::DensityReinitialization() {
uint numBlocks, numThreads;
computeGridSize((int)numObjectsH->numAllMarkers, 256, numBlocks, numThreads);
// Temporary sorted-order output buffer, zero-initialized.
thrust::device_vector<Real4> dummySortedRhoPreMu(numObjectsH->numAllMarkers);
thrust::fill(dummySortedRhoPreMu.begin(), dummySortedRhoPreMu.end(), mR4(0.0));
hipLaunchKernelGGL(( ReCalcDensityD_F1), dim3(numBlocks), dim3(numThreads), 0, 0,
mR4CAST(dummySortedRhoPreMu),
mR4CAST(fsiSystem.sortedSphMarkersD->posRadD),
mR3CAST(fsiSystem.sortedSphMarkersD->velMasD),
mR4CAST(fsiSystem.sortedSphMarkersD->rhoPresMuD),
U1CAST(fsiSystem.markersProximityD->gridMarkerIndexD),
U1CAST(fsiSystem.markersProximityD->cellStartD),
U1CAST(fsiSystem.markersProximityD->cellEndD));
hipDeviceSynchronize();
cudaCheckError();
ChFsiForce::CopySortedToOriginal_NonInvasive_R4(
fsiSystem.sphMarkersD1->rhoPresMuD, dummySortedRhoPreMu,
fsiSystem.markersProximityD->gridMarkerIndexD);
ChFsiForce::CopySortedToOriginal_NonInvasive_R4(
fsiSystem.sphMarkersD2->rhoPresMuD, dummySortedRhoPreMu,
fsiSystem.markersProximityD->gridMarkerIndexD);
dummySortedRhoPreMu.clear();
}
} // namespace fsi
} // end namespace chrono
| ChFluidDynamics.cu | // =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2014 projectchrono.org
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Author: Milad Rakhsha, Arman Pazouki, Wei Hu
// =============================================================================
//
// Class for performing time integration in fluid system.
// =============================================================================
#include "chrono_fsi/physics/ChFluidDynamics.cuh"
#include "chrono_fsi/physics/ChSphGeneral.cuh"
using std::cout;
using std::endl;
namespace chrono {
namespace fsi {
// -----------------------------------------------------------------------------
// Device function to calculate the share of density influence on a given
// particle from all other particle in a given cell
// Accumulate (into numerator/denominator, in place) the density contribution
// to particle `index` at position posRadA from every particle in grid cell
// gridPos. Neighbors beyond RESOLUTION_LENGTH_MULT * HSML are skipped.
// numerator accumulates mass-weighted kernel values W3h; denominator
// accumulates the Shepard normalization term (mass/density * W3h).
// NOTE(review): sortedVelMas is accepted but never read in this function.
__device__ void collideCellDensityReInit(Real& numerator,
Real& denominator,
int3 gridPos,
uint index,
Real3 posRadA,
Real4* sortedPosRad,
Real3* sortedVelMas,
Real4* sortedRhoPreMu,
uint* cellStart,
uint* cellEnd) {
uint gridHash = calcGridHash(gridPos);
uint startIndex = cellStart[gridHash];
if (startIndex != 0xffffffff) { // cell is not empty
// iterate over particles in this cell
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
Real3 posRadB = mR3(sortedPosRad[j]);
Real4 rhoPreMuB = sortedRhoPreMu[j];
Real3 dist3 = Distance(posRadA, posRadB);
Real d = length(dist3);
if (d > RESOLUTION_LENGTH_MULT * paramsD.HSML)
continue;
// sortedPosRad[j].w carries the smoothing length of particle j
numerator += paramsD.markerMass * W3h(d, sortedPosRad[j].w);
denominator += paramsD.markerMass / rhoPreMuB.x * W3h(d, sortedPosRad[j].w);
}
}
}
// -----------------------------------------------------------------------------
// Kernel to apply periodic BC along x
// Wrap active, non-boundary markers across the x faces of [cMin.x, cMax.x].
// Fluid markers (rhoPresMu.w < -.1) get their pressure shifted by
// deltaPress.x when crossing, so the pressure field stays consistent.
__global__ void ApplyPeriodicBoundaryXKernel(Real4* posRadD,
Real4* rhoPresMuD,
uint* activityIdentifierD) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numObjectsD.numAllMarkers)
return;
uint activity = activityIdentifierD[index];
if (activity == 0)
return; // no need to do anything if it is not an active particle
Real4 rhoPresMu = rhoPresMuD[index];
if (fabs(rhoPresMu.w) < .1)
return; // no need to do anything if it is a boundary particle
Real3 posRad = mR3(posRadD[index]);
Real h = posRadD[index].w; // preserve the smoothing length in .w
if (posRad.x > paramsD.cMax.x) {
posRad.x -= (paramsD.cMax.x - paramsD.cMin.x);
posRadD[index] = mR4(posRad, h);
if (rhoPresMu.w < -.1)
rhoPresMuD[index].y += paramsD.deltaPress.x;
return;
}
if (posRad.x < paramsD.cMin.x) {
posRad.x += (paramsD.cMax.x - paramsD.cMin.x);
posRadD[index] = mR4(posRad, h);
if (rhoPresMu.w < -.1)
rhoPresMuD[index].y -= paramsD.deltaPress.x;
return;
}
}
// -----------------------------------------------------------------------------
// Kernel to apply inlet/outlet BC along x
// Inlet/outlet boundary along x: markers leaving through cMax.x are recycled
// to the inlet side (with a deltaPress.x pressure shift); markers entering
// through cMin.x are given the inlet velocity V_in.x. Near the inlet plane
// (x_in) the pressure is zeroed and the inlet velocity is enforced.
// NOTE(review): the two crossing branches use different type thresholds
// (w <= 0.0 vs. w <= -.1) — confirm this asymmetry is intentional.
__global__ void ApplyInletBoundaryXKernel(Real4* posRadD,
Real3* VelMassD,
Real4* rhoPresMuD) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numObjectsD.numAllMarkers)
return;
Real4 rhoPresMu = rhoPresMuD[index];
if (rhoPresMu.w > 0.0)
return; // no need to do anything if it is a boundary particle
Real3 posRad = mR3(posRadD[index]);
Real h = posRadD[index].w; // preserve the smoothing length in .w
if (posRad.x > paramsD.cMax.x) {
posRad.x -= (paramsD.cMax.x - paramsD.cMin.x);
posRadD[index] = mR4(posRad, h);
if (rhoPresMu.w <= 0.0) {
rhoPresMu.y = rhoPresMu.y + paramsD.deltaPress.x;
rhoPresMuD[index] = rhoPresMu;
}
}
if (posRad.x < paramsD.cMin.x) {
posRad.x += (paramsD.cMax.x - paramsD.cMin.x);
posRadD[index] = mR4(posRad, h);
VelMassD[index] = mR3(paramsD.V_in.x, 0, 0);
if (rhoPresMu.w <= -.1) {
rhoPresMu.y = rhoPresMu.y - paramsD.deltaPress.x;
rhoPresMuD[index] = rhoPresMu;
}
}
if (posRad.x > -paramsD.x_in)
rhoPresMuD[index].y = 0;
if (posRad.x < paramsD.x_in)
VelMassD[index] = mR3(paramsD.V_in.x, 0, 0);
}
// -----------------------------------------------------------------------------
// Kernel to apply periodic BC along y
// Wrap active, non-boundary markers across the y faces of [cMin.y, cMax.y].
// Fluid markers (rhoPresMu.w < -.1) get their pressure shifted by
// deltaPress.y when crossing.
__global__ void ApplyPeriodicBoundaryYKernel(Real4* posRadD,
Real4* rhoPresMuD,
uint* activityIdentifierD) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numObjectsD.numAllMarkers)
return;
uint activity = activityIdentifierD[index];
if (activity == 0)
return; // no need to do anything if it is not an active particle
Real4 rhoPresMu = rhoPresMuD[index];
if (fabs(rhoPresMu.w) < .1)
return; // no need to do anything if it is a boundary particle
Real3 posRad = mR3(posRadD[index]);
Real h = posRadD[index].w; // preserve the smoothing length in .w
if (posRad.y > paramsD.cMax.y) {
posRad.y -= (paramsD.cMax.y - paramsD.cMin.y);
posRadD[index] = mR4(posRad, h);
if (rhoPresMu.w < -.1) {
rhoPresMu.y = rhoPresMu.y + paramsD.deltaPress.y;
rhoPresMuD[index] = rhoPresMu;
}
return;
}
if (posRad.y < paramsD.cMin.y) {
posRad.y += (paramsD.cMax.y - paramsD.cMin.y);
posRadD[index] = mR4(posRad, h);
if (rhoPresMu.w < -.1) {
rhoPresMu.y = rhoPresMu.y - paramsD.deltaPress.y;
rhoPresMuD[index] = rhoPresMu;
}
return;
}
}
// -----------------------------------------------------------------------------
// Kernel to apply periodic BC along z
// Wrap active, non-boundary markers across the z faces of [cMin.z, cMax.z].
// Fluid markers (rhoPresMu.w < -.1) get their pressure shifted by
// deltaPress.z when crossing.
__global__ void ApplyPeriodicBoundaryZKernel(Real4* posRadD,
Real4* rhoPresMuD,
uint* activityIdentifierD) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numObjectsD.numAllMarkers)
return;
uint activity = activityIdentifierD[index];
if (activity == 0)
return; // no need to do anything if it is not an active particle
Real4 rhoPresMu = rhoPresMuD[index];
if (fabs(rhoPresMu.w) < .1)
return; // no need to do anything if it is a boundary particle
Real3 posRad = mR3(posRadD[index]);
Real h = posRadD[index].w; // preserve the smoothing length in .w
if (posRad.z > paramsD.cMax.z) {
posRad.z -= (paramsD.cMax.z - paramsD.cMin.z);
posRadD[index] = mR4(posRad, h);
if (rhoPresMu.w < -.1) {
rhoPresMu.y = rhoPresMu.y + paramsD.deltaPress.z;
rhoPresMuD[index] = rhoPresMu;
}
return;
}
if (posRad.z < paramsD.cMin.z) {
posRad.z += (paramsD.cMax.z - paramsD.cMin.z);
posRadD[index] = mR4(posRad, h);
if (rhoPresMu.w < -.1) {
rhoPresMu.y = rhoPresMu.y - paramsD.deltaPress.z;
rhoPresMuD[index] = rhoPresMu;
}
return;
}
}
// -----------------------------------------------------------------------------
// Kernel to keep particle inside the simulation domain
// Clamp non-boundary markers into the box [-boxDim/2, boxDim/2] in x and y,
// and [0, boxDimZ] in z. Currently unused (its launch site is commented out
// in ApplyBoundarySPH_Markers).
// NOTE(review): velMasD is read and written back unchanged — velocity is not
// actually modified here.
__global__ void ApplyOutOfBoundaryKernel(Real4* posRadD,
Real4* rhoPresMuD,
Real3* velMasD) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numObjectsD.numAllMarkers)
return;
Real4 rhoPresMu = rhoPresMuD[index];
if (fabs(rhoPresMu.w) < .1)
return; // no need to do anything if it is a boundary particle
Real3 posRad = mR3(posRadD[index]);
Real3 vel = mR3(velMasD[index]);
Real h = posRadD[index].w; // preserve the smoothing length in .w
if (posRad.x > 0.5 * paramsD.boxDimX)
posRad.x = 0.5 * paramsD.boxDimX;
if (posRad.x < -0.5 * paramsD.boxDimX)
posRad.x = -0.5 * paramsD.boxDimX;
if (posRad.y > 0.5 * paramsD.boxDimY)
posRad.y = 0.5 * paramsD.boxDimY;
if (posRad.y < -0.5 * paramsD.boxDimY)
posRad.y = -0.5 * paramsD.boxDimY;
if (posRad.z > 1.0 * paramsD.boxDimZ)
posRad.z = 1.0 * paramsD.boxDimZ;
if (posRad.z < -0.0 * paramsD.boxDimZ)
posRad.z = -0.0 * paramsD.boxDimZ;
posRadD[index] = mR4(posRad, h);
velMasD[index] = mR3(vel);
return;
}
// -----------------------------------------------------------------------------
// Kernel to update the fluid properities. It updates the stress tensor,
// density, velocity and position relying on explicit Euler scheme.
// Pressure is obtained from the density and an Equation of State.
// Explicit Euler update of one SPH marker in [updatePortion.x, updatePortion.y).
// For fluid markers (rhoPresMu.w < 0):
//  1. (elastic_SPH / granular only) integrate the deviatoric stress, apply the
//     mu(I) plasticity clamp, and zero stress near the free surface;
//  2. advance position with the XSPH-corrected velocity;
//  3. advance velocity with derivVelRhoD (no XSPH contribution — see comment);
//  4. update density/pressure: granular path pins rho to rho0 and uses p_tr,
//     otherwise integrate density and evaluate the EOS.
// Sets *isErrorD and returns early on non-finite position or density.
// NOTE(review): p_tr/p_n are only assigned inside the elastic_SPH branch; the
// density branch below reads p_tr only when elastic_SPH is also true.
__global__ void UpdateFluidD(Real4* posRadD,
Real3* velMasD,
Real4* rhoPresMuD,
Real3* tauXxYyZzD,
Real3* tauXyXzYzD,
Real3* vel_XSPH_D,
Real4* derivVelRhoD,
Real3* derivTauXxYyZzD,
Real3* derivTauXyXzYzD,
Real4* sr_tau_I_mu_iD,
uint* activityIdentifierD,
uint* freeSurfaceIdD,
int2 updatePortion,
Real dT,
volatile bool* isErrorD) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
index += updatePortion.x;
if (index >= updatePortion.y)
return;
uint activity = activityIdentifierD[index];
if (activity == 0)
return; // inactive markers are frozen
Real4 derivVelRho = derivVelRhoD[index];
Real4 rhoPresMu = rhoPresMuD[index];
Real h = posRadD[index].w;
Real p_tr, p_n;
if (rhoPresMu.w < 0) {
// This is only implemented for granular material
if (paramsD.elastic_SPH) {
//--------------------------------
// ** total stress tau
//--------------------------------
Real3 tauXxYyZz = tauXxYyZzD[index];
Real3 tauXyXzYz = tauXyXzYzD[index];
Real3 derivTauXxYyZz = derivTauXxYyZzD[index];
Real3 derivTauXyXzYz = derivTauXyXzYzD[index];
Real3 updatedTauXxYyZz = tauXxYyZz + mR3(derivTauXxYyZz) * dT;
Real3 updatedTauXyXzYz = tauXyXzYz + mR3(derivTauXyXzYz) * dT;
// check if there is a plastic flow
// p_n / p_tr: pressures extracted from the old / trial stress;
// adding them back makes the tensors deviatoric.
p_n = -1.0 / 3.0 * (tauXxYyZz.x + tauXxYyZz.y + tauXxYyZz.z);
tauXxYyZz.x += p_n;
tauXxYyZz.y += p_n;
tauXxYyZz.z += p_n;
p_tr = -1.0 / 3.0 * (updatedTauXxYyZz.x + updatedTauXxYyZz.y + updatedTauXxYyZz.z);
updatedTauXxYyZz.x += p_tr;
updatedTauXxYyZz.y += p_tr;
updatedTauXxYyZz.z += p_tr;
// Second-invariant (von Mises-like) magnitudes of trial and old stress
Real tau_tr = square(updatedTauXxYyZz.x) + square(updatedTauXxYyZz.y) +
square(updatedTauXxYyZz.z) + 2.0 * square(updatedTauXyXzYz.x) +
2.0 * square(updatedTauXyXzYz.y) + 2.0 * square(updatedTauXyXzYz.z);
Real tau_n = square(tauXxYyZz.x) + square(tauXxYyZz.y) + square(tauXxYyZz.z) +
2.0 * square(tauXyXzYz.x) + 2.0 * square(tauXyXzYz.y) + 2.0 * square(tauXyXzYz.z);
tau_tr = sqrt(0.5 * tau_tr);
tau_n = sqrt(0.5 * tau_n);
Real Chi = abs(tau_tr - tau_n) * paramsD.INV_G_shear / dT;
// should use the positive magnitude according to "A
// constitutive law for dense granular flows" Nature 2006
Real mu_s = paramsD.mu_fric_s;
Real mu_2 = paramsD.mu_fric_2;
// Real s_0 = mu_s * p_tr;
// Real s_2 = mu_2 * p_tr;
// Real xi = 1.1;
Real dia = paramsD.ave_diam;
Real I0 = paramsD.mu_I0; // xi*dia*sqrt(rhoPresMu.x);//
Real I = Chi * dia * sqrt( paramsD.rho0 / ( p_tr + 1.0e9 ) );
Real coh = paramsD.Coh_coeff;
// Real Chi_cri = 0.1;
// if (Chi < Chi_cri){
// coh = paramsD.Coh_coeff * (1.0 - sin(-1.57 + 3.14 * (Chi / Chi_cri))) / 2.0;
// // coh = paramsD.Coh_coeff * (1.0 - I / I_cri);
// } else {
// coh = 0.0;
// }
Real inv_mus = 1.0 / paramsD.mu_fric_s;
Real p_cri = - coh * inv_mus;
if (p_tr > p_cri) {
// mu(I) rheology: friction coefficient interpolated between mu_s and mu_2
Real mu = mu_s + (mu_2 - mu_s) * (I + 1.0e-9) / (I0 + I + 1.0e-9);
// Real G0 = paramsD.G_shear;
// Real alpha = xi*G0*I0*(dT)*sqrt(p_tr);
// Real B0 = s_2 + tau_tr + alpha;
// Real H0 = s_2*tau_tr + s_0*alpha;
// Real tau_n1 = (B0+sqrt(B0*B0-4*H0))/(2*H0+1e-9);
// if(tau_tr>s_0){
// Real coeff = tau_n1/(tau_tr+1e-9);
// updatedTauXxYyZz = updatedTauXxYyZz*coeff;
// updatedTauXyXzYz = updatedTauXyXzYz*coeff;
// }
Real tau_max = p_tr * mu + coh; // p_tr*paramsD.Q_FA;
// should use tau_max instead of s_0 according to
// "A constitutive law for dense granular flows" Nature 2006
if (tau_tr > tau_max) {
// Radial return: scale the trial stress back onto the yield surface
Real coeff = tau_max / (tau_tr + 1e-9);
updatedTauXxYyZz = updatedTauXxYyZz * coeff;
updatedTauXyXzYz = updatedTauXyXzYz * coeff;
}
}
// Set stress to zero if the pressure is smaller than the threshold
if (p_tr < p_cri) {
updatedTauXxYyZz = mR3(0.0);
updatedTauXyXzYz = mR3(0.0);
p_tr = 0.0;
// Real coeff = abs(p_cri / (p_tr + 1e-9));
// if (p_tr < 2.0 * p_cri){
// coeff = 0.0;
// } else {
// coeff = abs(1.0 - (p_tr - p_cri) / p_cri);
// }
// updatedTauXxYyZz = updatedTauXxYyZz * coeff;
// updatedTauXyXzYz = updatedTauXyXzYz * coeff;
// p_tr = p_cri * coeff;
}
// Set stress to zero if the particle is close to free surface
if (freeSurfaceIdD[index] == 1) {
updatedTauXxYyZz = mR3(0.0);
updatedTauXyXzYz = mR3(0.0);
p_tr = 0.0;
}
// Optionally export the final shear-stress magnitude for output
if (paramsD.output_length == 2) {
Real tau_tr = square(updatedTauXxYyZz.x) + square(updatedTauXxYyZz.y) +
square(updatedTauXxYyZz.z) + 2.0 * (square(updatedTauXyXzYz.x) +
square(updatedTauXyXzYz.y) + square(updatedTauXyXzYz.z));
tau_tr = sqrt(0.5 * tau_tr);
sr_tau_I_mu_iD[index].y = tau_tr;
}
// Store total stress = deviatoric part minus pressure on the diagonal
tauXxYyZzD[index] = updatedTauXxYyZz - mR3(p_tr);
tauXyXzYzD[index] = updatedTauXyXzYz;
}
//-------------
// ** position
//-------------
Real3 vel_XSPH = velMasD[index] + vel_XSPH_D[index]; // paramsD.EPS_XSPH *
Real3 posRad = mR3(posRadD[index]);
Real3 updatedPositon = posRad + vel_XSPH * dT;
if (!(isfinite(updatedPositon.x) && isfinite(updatedPositon.y) && isfinite(updatedPositon.z))) {
printf("Error! particle position is NAN: thrown from ChFluidDynamics.cu, UpdateFluidDKernel !\n");
*isErrorD = true;
return;
}
posRadD[index] = mR4(updatedPositon, h);
//-------------
// ** velocity
//-------------
// Note that the velocity update should not use the XSPH contribution
// It adds dissipation to the solution, and provides numerical damping
Real3 velMas = velMasD[index] + 0.0 * vel_XSPH_D[index]; // paramsD.EPS_XSPH * vel_XSPH_D[index]
Real3 updatedVelocity = velMas + mR3(derivVelRho) * dT;
velMasD[index] = updatedVelocity;
//-------------
// ** density
//-------------
if (paramsD.elastic_SPH) { // This is only implemented for granular material
rhoPresMu.y = p_tr;
rhoPresMu.x = paramsD.rho0;
} else {
Real rho2 = rhoPresMu.x + derivVelRho.w * dT;
rhoPresMu.y = Eos(rho2, rhoPresMu.w);
rhoPresMu.x = rho2;
}
if (!(isfinite(rhoPresMu.x) && isfinite(rhoPresMu.y) && isfinite(rhoPresMu.z) && isfinite(rhoPresMu.w))) {
printf("Error! particle rho pressure is NAN: thrown from ChFluidDynamics.cu, UpdateFluidDKernel !\n");
*isErrorD = true;
return;
}
rhoPresMuD[index] = rhoPresMu;
}
// Important note: the derivVelRhoD that is calculated by the ChForceExplicitSPH is the negative of actual time
// derivative. That is important to keep the derivVelRhoD to be the force/mass for fsi forces.
// calculate the force that is f=m dv/dt
// derivVelRhoD[index] *= paramsD.markerMass;
}
//------------------------------------------------------------------------------
// Implicit-scheme state update: copy the solver velocity field (new_vel) into
// the markers and advance positions by dT.
// updatePortion: [x, y) marker range; only the upper bound is enforced here
//   (threads start at index 0, preserved from the original implementation).
// isErrorD: raised when a non-finite position/density/velocity is detected.
__global__ void Update_Fluid_State(Real3* new_vel,
                                   Real4* posRad,
                                   Real3* velMas,
                                   Real4* rhoPreMu,
                                   int4 updatePortion,
                                   double dT,
                                   volatile bool* isErrorD) {
    uint i_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (i_idx >= updatePortion.y)
        return;

    velMas[i_idx] = new_vel[i_idx];
    Real3 newpos = mR3(posRad[i_idx]) + dT * velMas[i_idx];
    Real h = posRad[i_idx].w;  // preserve the smoothing length in .w
    posRad[i_idx] = mR4(newpos, h);

    // Bug fix: the error flag is now actually raised on NaN/Inf so the host
    // check in UpdateFluid_Implicit can detect the failure (previously the
    // flag was passed in but never written, so errors were only printed).
    if (!(isfinite(posRad[i_idx].x) &&
          isfinite(posRad[i_idx].y) && isfinite(posRad[i_idx].z))) {
        printf("Error! particle %d position is NAN: thrown from UpdateFluidDKernel %f,%f,%f,%f\n",
               i_idx, posRad[i_idx].x, posRad[i_idx].y, posRad[i_idx].z, posRad[i_idx].w);
        *isErrorD = true;
    }
    if (!(isfinite(rhoPreMu[i_idx].x) &&
          isfinite(rhoPreMu[i_idx].y) && isfinite(rhoPreMu[i_idx].z))) {
        printf("Error! particle %d rhoPreMu is NAN: thrown from UpdateFluidDKernel ! %f,%f,%f,%f\n",
               i_idx, rhoPreMu[i_idx].x, rhoPreMu[i_idx].y, rhoPreMu[i_idx].z, rhoPreMu[i_idx].w);
        *isErrorD = true;
    }
    if (!(isfinite(velMas[i_idx].x) &&
          isfinite(velMas[i_idx].y) && isfinite(velMas[i_idx].z))) {
        printf("Error! particle %d velocity is NAN: thrown from UpdateFluidDKernel !%f,%f,%f\n",
               i_idx, velMas[i_idx].x, velMas[i_idx].y, velMas[i_idx].z);
        *isErrorD = true;
    }
}
// -----------------------------------------------------------------------------
// Kernel for updating the density.
// It calculates the density of the particle. It does include the normalization
// close to the boundaries and free surface.
// Recompute the density of each (sorted) marker by summing kernel-weighted
// contributions from the 3x3x3 neighborhood of grid cells around it.
// NOTE(review): the Shepard normalization (division by `denominator`) and the
// EOS pressure update are currently disabled — see the commented code below;
// the raw kernel sum is written as the density.
__global__ void ReCalcDensityD_F1(Real4* dummySortedRhoPreMu,
Real4* sortedPosRad,
Real3* sortedVelMas,
Real4* sortedRhoPreMu,
uint* gridMarkerIndex,
uint* cellStart,
uint* cellEnd) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numObjectsD.numAllMarkers)
return;
// read particle data from sorted arrays
Real3 posRadA = mR3(sortedPosRad[index]);
Real4 rhoPreMuA = sortedRhoPreMu[index];
// get address in grid
int3 gridPos = calcGridPos(posRadA);
Real numerator = 0.0;
Real denominator = 0.0;
// examine neighbouring cells
for (int z = -1; z <= 1; z++) {
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
collideCellDensityReInit(numerator, denominator, neighbourPos, index,
posRadA, sortedPosRad, sortedVelMas, sortedRhoPreMu, cellStart, cellEnd);
}
}
}
rhoPreMuA.x = numerator; // denominator;
// rhoPreMuA.y = Eos(rhoPreMuA.x, rhoPreMuA.w);
dummySortedRhoPreMu[index] = rhoPreMuA;
}
// -----------------------------------------------------------------------------
// Kernel for updating the activity of all particles.
// Mark each SPH marker active/inactive based on its distance to every rigid
// body and flexible node: a marker is deactivated only if it lies outside the
// active domain of ALL of them. During the settling phase (Time <
// settlingTime), or when there are no bodies/nodes, everything stays active.
// Deactivated markers also get their velocity zeroed.
// NOTE(review): isErrorD is accepted but never written by this kernel.
__global__ void UpdateActivityD(Real4* posRadD,
Real3* velMasD,
Real3* posRigidBodiesD,
Real3* pos_fsi_fea_D,
uint* activityIdentifierD,
uint* extendedActivityIdD,
int2 updatePortion,
Real Time,
volatile bool* isErrorD) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
index += updatePortion.x;
if (index >= updatePortion.y)
return;
// Set the particle as an active particle
activityIdentifierD[index] = 1;
extendedActivityIdD[index] = 1;
// If during the settling phase, all particles are active
if (Time < paramsD.settlingTime)
return;
size_t numRigidBodies = numObjectsD.numRigidBodies;
size_t numFlexNodes = numObjectsD.numFlexNodes;
size_t numTotal = numRigidBodies + numFlexNodes;
// Check the activity of this particle
uint isNotActive = 0;
uint isNotExtended = 0;
Real3 Acdomain = paramsD.bodyActiveDomain;
// Extended domain: active domain grown by one neighbor-search radius
Real3 ExAcdomain = paramsD.bodyActiveDomain +
mR3(2 * RESOLUTION_LENGTH_MULT * paramsD.HSML);
Real3 posRadA = mR3(posRadD[index]);
for (uint num = 0; num < numRigidBodies; num++) {
Real3 detPos = posRadA - posRigidBodiesD[num];
if (abs(detPos.x) > Acdomain.x || abs(detPos.y) > Acdomain.y ||
abs(detPos.z) > Acdomain.z)
isNotActive = isNotActive + 1;
if (abs(detPos.x) > ExAcdomain.x || abs(detPos.y) > ExAcdomain.y ||
abs(detPos.z) > ExAcdomain.z)
isNotExtended = isNotExtended + 1;
}
for (uint num = 0; num < numFlexNodes; num++) {
Real3 detPos = posRadA - pos_fsi_fea_D[num];
if (abs(detPos.x) > Acdomain.x || abs(detPos.y) > Acdomain.y ||
abs(detPos.z) > Acdomain.z)
isNotActive = isNotActive + 1;
if (abs(detPos.x) > ExAcdomain.x || abs(detPos.y) > ExAcdomain.y ||
abs(detPos.z) > ExAcdomain.z)
isNotExtended = isNotExtended + 1;
}
// Set the particle as an inactive particle if needed
if (isNotActive == numTotal && numTotal > 0) {
activityIdentifierD[index] = 0;
velMasD[index] = mR3(0.0);
}
if (isNotExtended == numTotal && numTotal > 0)
extendedActivityIdD[index] = 0;
return;
}
// -----------------------------------------------------------------------------
// CLASS FOR FLUID DYNAMICS SYSTEM
// -----------------------------------------------------------------------------
// Construct the fluid-dynamics driver: store references to the FSI system,
// parameters, and counters, then instantiate the force system that matches
// the requested integrator (I2SPH, IISPH, or explicit WCSPH). An unknown
// integrator type falls back to explicit WCSPH with a console warning.
ChFluidDynamics::ChFluidDynamics(std::shared_ptr<ChBce> otherBceWorker,
ChSystemFsi_impl& otherFsiSystem,
std::shared_ptr<SimParams> otherParamsH,
std::shared_ptr<ChCounters> otherNumObjects,
TimeIntegrator type,
bool verb)
: fsiSystem(otherFsiSystem),
paramsH(otherParamsH),
numObjectsH(otherNumObjects),
integrator_type(type),
verbose(verb) {
switch (integrator_type) {
case TimeIntegrator::I2SPH:
forceSystem = chrono_types::make_shared<ChFsiForceI2SPH>(
otherBceWorker, fsiSystem.sortedSphMarkersD, fsiSystem.markersProximityD,
fsiSystem.fsiGeneralData, paramsH, numObjectsH, verb);
if (verbose) {
cout << "============================================" << endl;
cout << "====== Created an I2SPH framework ======" << endl;
cout << "============================================" << endl;
}
break;
case TimeIntegrator::IISPH:
forceSystem = chrono_types::make_shared<ChFsiForceIISPH>(
otherBceWorker, fsiSystem.sortedSphMarkersD, fsiSystem.markersProximityD,
fsiSystem.fsiGeneralData, paramsH, numObjectsH, verb);
if (verbose) {
cout << "============================================" << endl;
cout << "====== Created an IISPH framework ======" << endl;
cout << "============================================" << endl;
}
break;
case TimeIntegrator::EXPLICITSPH:
forceSystem = chrono_types::make_shared<ChFsiForceExplicitSPH>(
otherBceWorker, fsiSystem.sortedSphMarkersD, fsiSystem.markersProximityD,
fsiSystem.fsiGeneralData, paramsH, numObjectsH, verb);
if (verbose) {
cout << "============================================" << endl;
cout << "====== Created a WCSPH framework =======" << endl;
cout << "============================================" << endl;
}
break;
// Extend this function with your own linear solvers
default:
forceSystem = chrono_types::make_shared<ChFsiForceExplicitSPH>(
otherBceWorker, fsiSystem.sortedSphMarkersD, fsiSystem.markersProximityD,
fsiSystem.fsiGeneralData, paramsH, numObjectsH, verb);
cout << "Selected integrator type not implemented, reverting back to WCSPH" << endl;
}
}
// -----------------------------------------------------------------------------
// Out-of-line defaulted destructor; the definition stays in this translation
// unit with the rest of the class implementation.
ChFluidDynamics::~ChFluidDynamics() = default;
// -----------------------------------------------------------------------------
// Initialize the force system, then mirror the host-side simulation parameters
// and object counters into the device constant-memory symbols (paramsD,
// numObjectsD). The final blocking copy refreshes paramsH from the device —
// presumably to pick up values adjusted during initialization (TODO confirm).
void ChFluidDynamics::Initialize() {
forceSystem->Initialize();
// NOTE(review): no stream argument — these async copies go to the default
// stream; the blocking cudaMemcpyFromSymbol below orders after them.
cudaMemcpyToSymbolAsync(paramsD, paramsH.get(), sizeof(SimParams));
cudaMemcpyToSymbolAsync(numObjectsD, numObjectsH.get(), sizeof(ChCounters));
cudaMemcpyFromSymbol(paramsH.get(), paramsD, sizeof(SimParams));
}
// -----------------------------------------------------------------------------
// Advance the SPH state by one step of size dT at simulation time Time.
// EXPLICITSPH: refresh activity flags, evaluate forces on sphMarkersD2, then
// integrate sphMarkersD1 explicitly. Implicit schemes: forces are evaluated on
// sphMarkersD1; IISPH then updates sphMarkersD2 implicitly. In all cases,
// periodic boundary conditions are finally applied to sphMarkersD2.
void ChFluidDynamics::IntegrateSPH(std::shared_ptr<SphMarkerDataD> sphMarkersD2,
std::shared_ptr<SphMarkerDataD> sphMarkersD1,
std::shared_ptr<FsiBodiesDataD> fsiBodiesD,
std::shared_ptr<FsiMeshDataD> fsiMeshD,
Real dT,
Real Time) {
if (GetIntegratorType() == TimeIntegrator::EXPLICITSPH) {
this->UpdateActivity(sphMarkersD1, sphMarkersD2, fsiBodiesD, fsiMeshD, Time);
forceSystem->ForceSPH(sphMarkersD2, fsiBodiesD, fsiMeshD);
} else
forceSystem->ForceSPH(sphMarkersD1, fsiBodiesD, fsiMeshD);
if (integrator_type == TimeIntegrator::IISPH)
this->UpdateFluid_Implicit(sphMarkersD2);
else if (GetIntegratorType() == TimeIntegrator::EXPLICITSPH)
this->UpdateFluid(sphMarkersD1, dT);
// Apply periodic BCs to the integrated state (x, y, z directions)
this->ApplyBoundarySPH_Markers(sphMarkersD2);
}
// -----------------------------------------------------------------------------
// Refresh per-particle activity flags (active / extended-active) based on the
// distance to rigid bodies and flexible-mesh nodes; see UpdateActivityD.
// sphMarkersD2 supplies positions, sphMarkersD1 supplies velocities.
// Throws std::runtime_error if the kernel reports an error.
void ChFluidDynamics::UpdateActivity(std::shared_ptr<SphMarkerDataD> sphMarkersD1,
                                     std::shared_ptr<SphMarkerDataD> sphMarkersD2,
                                     std::shared_ptr<FsiBodiesDataD> fsiBodiesD,
                                     std::shared_ptr<FsiMeshDataD> fsiMeshD,
                                     Real Time) {
    // Update portion of the SPH particles (should be all particles here)
    int2 updatePortion = mI2(0, (int)numObjectsH->numAllMarkers);

    // Device-side error flag, initialized to false on the host.
    bool *isErrorH, *isErrorD;
    isErrorH = (bool*)malloc(sizeof(bool));
    cudaMalloc((void**)&isErrorD, sizeof(bool));
    *isErrorH = false;
    cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice);

    uint numBlocks, numThreads;
    computeGridSize(updatePortion.y - updatePortion.x, 256, numBlocks, numThreads);
    UpdateActivityD<<<numBlocks, numThreads>>>(
        mR4CAST(sphMarkersD2->posRadD), mR3CAST(sphMarkersD1->velMasD),
        mR3CAST(fsiBodiesD->posRigid_fsiBodies_D),
        mR3CAST(fsiMeshD->pos_fsi_fea_D),
        U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD),
        U1CAST(fsiSystem.fsiGeneralData->extendedActivityIdD),
        updatePortion, Time, isErrorD);
    cudaDeviceSynchronize();
    cudaCheckError();

    // Read back the error flag and release the temporaries *before* throwing,
    // so the error path no longer leaks the host/device allocations (bug fix:
    // the original threw first and skipped cudaFree/free).
    cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost);
    bool hadError = *isErrorH;
    cudaFree(isErrorD);
    free(isErrorH);
    if (hadError)
        throw std::runtime_error("Error! program crashed in UpdateActivityD!\n");
}
// -----------------------------------------------------------------------------
void ChFluidDynamics::UpdateFluid(std::shared_ptr<SphMarkerDataD> sphMarkersD, Real dT) {
// Update portion of the SPH particles (should be fluid particles only here)
int2 updatePortion = mI2(0, fsiSystem.fsiGeneralData->referenceArray[0].y);
bool *isErrorH, *isErrorD;
isErrorH = (bool*)malloc(sizeof(bool));
cudaMalloc((void**)&isErrorD, sizeof(bool));
*isErrorH = false;
cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice);
//------------------------
uint numBlocks, numThreads;
computeGridSize(updatePortion.y - updatePortion.x, 256, numBlocks, numThreads);
UpdateFluidD<<<numBlocks, numThreads>>>(
mR4CAST(sphMarkersD->posRadD),
mR3CAST(sphMarkersD->velMasD),
mR4CAST(sphMarkersD->rhoPresMuD),
mR3CAST(sphMarkersD->tauXxYyZzD),
mR3CAST(sphMarkersD->tauXyXzYzD),
mR3CAST(fsiSystem.fsiGeneralData->vel_XSPH_D),
mR4CAST(fsiSystem.fsiGeneralData->derivVelRhoD),
mR3CAST(fsiSystem.fsiGeneralData->derivTauXxYyZzD),
mR3CAST(fsiSystem.fsiGeneralData->derivTauXyXzYzD),
mR4CAST(fsiSystem.fsiGeneralData->sr_tau_I_mu_i),
U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD),
U1CAST(fsiSystem.fsiGeneralData->freeSurfaceIdD),
updatePortion, dT, isErrorD);
cudaDeviceSynchronize();
cudaCheckError();
//------------------------
cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost);
if (*isErrorH == true)
throw std::runtime_error("Error! program crashed in UpdateFluidD!\n");
cudaFree(isErrorD);
free(isErrorH);
}
// -----------------------------------------------------------------------------
void ChFluidDynamics::UpdateFluid_Implicit(std::shared_ptr<SphMarkerDataD> sphMarkersD) {
uint numThreads, numBlocks;
computeGridSize((int)numObjectsH->numAllMarkers, 256, numBlocks, numThreads);
int haveGhost = (numObjectsH->numGhostMarkers > 0) ? 1 : 0;
int haveHelper = (numObjectsH->numHelperMarkers > 0) ? 1 : 0;
int4 updatePortion = mI4(fsiSystem.fsiGeneralData->referenceArray[haveHelper].x,
fsiSystem.fsiGeneralData->referenceArray[haveHelper + haveGhost].y, 0, 0);
cout << "time step in UpdateFluid_Implicit " << paramsH->dT << endl;
bool *isErrorH, *isErrorD;
isErrorH = (bool*)malloc(sizeof(bool));
cudaMalloc((void**)&isErrorD, sizeof(bool));
*isErrorH = false;
cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice);
Update_Fluid_State<<<numBlocks, numThreads>>>(
mR3CAST(fsiSystem.fsiGeneralData->vel_XSPH_D),
mR4CAST(sphMarkersD->posRadD), mR3CAST(sphMarkersD->velMasD),
mR4CAST(sphMarkersD->rhoPresMuD), updatePortion, paramsH->dT, isErrorD);
cudaDeviceSynchronize();
cudaCheckError();
cudaMemcpy(isErrorH, isErrorD, sizeof(bool), cudaMemcpyDeviceToHost);
if (*isErrorH == true)
throw std::runtime_error("Error! program crashed in Update_Fluid_State!\n");
cudaFree(isErrorD);
free(isErrorH);
}
// -----------------------------------------------------------------------------
// Apply periodic boundary conditions in x, y, and z directions
void ChFluidDynamics::ApplyBoundarySPH_Markers(std::shared_ptr<SphMarkerDataD> sphMarkersD) {
uint numBlocks, numThreads;
computeGridSize((int)numObjectsH->numAllMarkers, 256, numBlocks, numThreads);
ApplyPeriodicBoundaryXKernel<<<numBlocks, numThreads>>>(
mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD),
U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD));
cudaDeviceSynchronize();
cudaCheckError();
ApplyPeriodicBoundaryYKernel<<<numBlocks, numThreads>>>(
mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD),
U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD));
cudaDeviceSynchronize();
cudaCheckError();
ApplyPeriodicBoundaryZKernel<<<numBlocks, numThreads>>>(
mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD),
U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD));
cudaDeviceSynchronize();
cudaCheckError();
// ApplyOutOfBoundaryKernel<<<numBlocks, numThreads>>>
// (mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD), mR3CAST(sphMarkersD->velMasD));
// cudaDeviceSynchronize();
// cudaCheckError();
}
// -----------------------------------------------------------------------------
// Apply periodic boundary conditions in y, and z.
// The inlet/outlet BC is applied in the x direction.
// This functions needs to be tested.
void ChFluidDynamics::ApplyModifiedBoundarySPH_Markers(std::shared_ptr<SphMarkerDataD> sphMarkersD) {
uint numBlocks, numThreads;
computeGridSize((int)numObjectsH->numAllMarkers, 256, numBlocks, numThreads);
ApplyInletBoundaryXKernel<<<numBlocks, numThreads>>>(
mR4CAST(sphMarkersD->posRadD), mR3CAST(sphMarkersD->velMasD),
mR4CAST(sphMarkersD->rhoPresMuD));
cudaDeviceSynchronize();
cudaCheckError();
// these are useful anyway for out of bound particles
ApplyPeriodicBoundaryYKernel<<<numBlocks, numThreads>>>(
mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD),
U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD));
cudaDeviceSynchronize();
cudaCheckError();
ApplyPeriodicBoundaryZKernel<<<numBlocks, numThreads>>>(
mR4CAST(sphMarkersD->posRadD), mR4CAST(sphMarkersD->rhoPresMuD),
U1CAST(fsiSystem.fsiGeneralData->activityIdentifierD));
cudaDeviceSynchronize();
cudaCheckError();
}
// -----------------------------------------------------------------------------
void ChFluidDynamics::DensityReinitialization() {
uint numBlocks, numThreads;
computeGridSize((int)numObjectsH->numAllMarkers, 256, numBlocks, numThreads);
thrust::device_vector<Real4> dummySortedRhoPreMu(numObjectsH->numAllMarkers);
thrust::fill(dummySortedRhoPreMu.begin(), dummySortedRhoPreMu.end(), mR4(0.0));
ReCalcDensityD_F1<<<numBlocks, numThreads>>>(
mR4CAST(dummySortedRhoPreMu),
mR4CAST(fsiSystem.sortedSphMarkersD->posRadD),
mR3CAST(fsiSystem.sortedSphMarkersD->velMasD),
mR4CAST(fsiSystem.sortedSphMarkersD->rhoPresMuD),
U1CAST(fsiSystem.markersProximityD->gridMarkerIndexD),
U1CAST(fsiSystem.markersProximityD->cellStartD),
U1CAST(fsiSystem.markersProximityD->cellEndD));
cudaDeviceSynchronize();
cudaCheckError();
ChFsiForce::CopySortedToOriginal_NonInvasive_R4(
fsiSystem.sphMarkersD1->rhoPresMuD, dummySortedRhoPreMu,
fsiSystem.markersProximityD->gridMarkerIndexD);
ChFsiForce::CopySortedToOriginal_NonInvasive_R4(
fsiSystem.sphMarkersD2->rhoPresMuD, dummySortedRhoPreMu,
fsiSystem.markersProximityD->gridMarkerIndexD);
dummySortedRhoPreMu.clear();
}
} // namespace fsi
} // end namespace chrono
|
08a00e6a0bde16a68c214b4e15b05ca7f9dbdcfc.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <string>
#include <math.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include "cta_config.h"
#include "../common/cuda_check.h"
extern __global__ void kNN(float* x, float* y, float* dist, int N,
float point_x, float point_y);
void RandFloatArray(float* ptr, int length) {
for (int i = 0; i < length; ++i) {
float val = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
ptr[i] = val;
}
return;
}
void AssertArrayEqual(float* ptr1, float* ptr2, int length, float precision = 1e-5) {
for (int i = 0; i < length; ++i) {
assert(fabs(ptr1[i] - ptr2[i]) <
precision * max(fabs(ptr1[i]), fabs(ptr2[i]))
);
}
return;
}
int main(int argc, char** argv) {
if (argc < 2) {
printf("Usage ./knn <number of data points>");
return -1;
}
int num_points = atoi(argv[1]);
printf("Running the knn for %d points\n", num_points);
float* host_x = (float*) malloc(num_points * sizeof(float));
float* host_y = (float*) malloc(num_points * sizeof(float));
float* host_dist = (float*) malloc(num_points * sizeof(float));
RandFloatArray(host_x, num_points);
RandFloatArray(host_y, num_points);
float point_x = 0.5;
float point_y = 0.5;
for (int i = 0; i < num_points; ++i) {
float diff_x = host_x[i] - point_x;
float diff_y = host_y[i] - point_y;
host_dist[i] = (diff_x * diff_x + diff_y * diff_y);
}
printf("Completed ground truth computation!\n");
float* device_x;
float* device_y;
float* device_dist;
CUDA_CHECK(hipMalloc((void**) &device_x, num_points * sizeof(float)));
CUDA_CHECK(hipMalloc((void**) &device_y, num_points * sizeof(float)));
CUDA_CHECK(hipMalloc((void**) &device_dist, num_points * sizeof(float)));
float* results = (float*) malloc(num_points * sizeof(float));
CUDA_CHECK(hipMemcpy(device_x, host_x, num_points * sizeof(float),
hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpy(device_y, host_y, num_points * sizeof(float),
hipMemcpyHostToDevice));
#ifdef MEASURE_POWER
while (true) {
#endif
hipLaunchKernelGGL(( kNN), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0,
device_x, device_y, device_dist, num_points, point_x, point_y);
hipDeviceSynchronize();
#ifdef MEASURE_POWER
}
#endif
printf("Completed GPU computation!\n");
CUDA_CHECK(hipMemcpy(results, device_dist, num_points * sizeof(float),
hipMemcpyDeviceToHost));
AssertArrayEqual(results, host_dist, num_points);
printf("Correctness Check: Accepted!\n");
free(host_x);
free(host_y);
free(host_dist);
free(results);
CUDA_CHECK(hipFree(device_x));
CUDA_CHECK(hipFree(device_y));
CUDA_CHECK(hipFree(device_dist));
return 0;
}
| 08a00e6a0bde16a68c214b4e15b05ca7f9dbdcfc.cu | #include <stdlib.h>
#include <stdio.h>
#include <string>
#include <math.h>
#include <assert.h>
#include <cuda.h>
#include "cta_config.h"
#include "../common/cuda_check.h"
extern __global__ void kNN(float* x, float* y, float* dist, int N,
float point_x, float point_y);
void RandFloatArray(float* ptr, int length) {
for (int i = 0; i < length; ++i) {
float val = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
ptr[i] = val;
}
return;
}
void AssertArrayEqual(float* ptr1, float* ptr2, int length, float precision = 1e-5) {
for (int i = 0; i < length; ++i) {
assert(fabs(ptr1[i] - ptr2[i]) <
precision * max(fabs(ptr1[i]), fabs(ptr2[i]))
);
}
return;
}
int main(int argc, char** argv) {
if (argc < 2) {
printf("Usage ./knn <number of data points>");
return -1;
}
int num_points = atoi(argv[1]);
printf("Running the knn for %d points\n", num_points);
float* host_x = (float*) malloc(num_points * sizeof(float));
float* host_y = (float*) malloc(num_points * sizeof(float));
float* host_dist = (float*) malloc(num_points * sizeof(float));
RandFloatArray(host_x, num_points);
RandFloatArray(host_y, num_points);
float point_x = 0.5;
float point_y = 0.5;
for (int i = 0; i < num_points; ++i) {
float diff_x = host_x[i] - point_x;
float diff_y = host_y[i] - point_y;
host_dist[i] = (diff_x * diff_x + diff_y * diff_y);
}
printf("Completed ground truth computation!\n");
float* device_x;
float* device_y;
float* device_dist;
CUDA_CHECK(cudaMalloc((void**) &device_x, num_points * sizeof(float)));
CUDA_CHECK(cudaMalloc((void**) &device_y, num_points * sizeof(float)));
CUDA_CHECK(cudaMalloc((void**) &device_dist, num_points * sizeof(float)));
float* results = (float*) malloc(num_points * sizeof(float));
CUDA_CHECK(cudaMemcpy(device_x, host_x, num_points * sizeof(float),
cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(device_y, host_y, num_points * sizeof(float),
cudaMemcpyHostToDevice));
#ifdef MEASURE_POWER
while (true) {
#endif
kNN<<<NUM_BLOCKS, NUM_THREADS>>>(
device_x, device_y, device_dist, num_points, point_x, point_y);
cudaDeviceSynchronize();
#ifdef MEASURE_POWER
}
#endif
printf("Completed GPU computation!\n");
CUDA_CHECK(cudaMemcpy(results, device_dist, num_points * sizeof(float),
cudaMemcpyDeviceToHost));
AssertArrayEqual(results, host_dist, num_points);
printf("Correctness Check: Accepted!\n");
free(host_x);
free(host_y);
free(host_dist);
free(results);
CUDA_CHECK(cudaFree(device_x));
CUDA_CHECK(cudaFree(device_y));
CUDA_CHECK(cudaFree(device_dist));
return 0;
}
|
DST_III_Column_Inverse.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Inverse Discrete Sine Transform in Column wise (DST three)
* DST_III_Column_Inverse
* This CUDA code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array}
* gpuArray output, B=DST_III_Column_Inverse(A)=mexFunction(A).
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "DST_III_Column_Inverse.cuh"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, char *file, int line, bool abort = true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#define TILE_DIM 16
#define DEFAULT_DIM 32 // Tile dimension
#define DELTA(i, j) ((i==j)?1:0)
//const double PI_d = 3.141592653589793238462643383279502884; //pi
__global__ void DSTIII_Column_Inverse_Kernel(double *A, double *C,
int numARows, int numAColumns,
int numCRows, int numCColumns)
{
double CValue = 0.0;
const double PI_d = 3.141592653589793238462643383279502884; //pi
int Row = blockIdx.y*TILE_DIM + threadIdx.y;
int Col = blockIdx.x*TILE_DIM + threadIdx.x;
__shared__ double As[TILE_DIM][TILE_DIM];
__shared__ double Bs[TILE_DIM][TILE_DIM];
for (int k = 0; k < (TILE_DIM + numARows - 1) / TILE_DIM; k++) {
//As[threadIdx.y][threadIdx.x] = cos(((2 * Row + 1) / (2.0 * numARows))*PI_d*(threadIdx.x + (k*TILE_DIM)))*sqrt(1.0 / (1 + DELTA((threadIdx.x + (k*TILE_DIM)) + 1, 1)))*sqrt(2.0 / numARows);
if (k*TILE_DIM + threadIdx.x < numARows && Row < numARows) { As[threadIdx.y][threadIdx.x] = sin(((Row + 0.5)*PI_d*((threadIdx.x + (k*TILE_DIM)) + 1)) / (numARows))*sqrt((2.0 - DELTA((threadIdx.x + (k*TILE_DIM)) + 1, numARows)) / (numARows)); }
else { As[threadIdx.y][threadIdx.x] = 0.0; }
if (k*TILE_DIM + threadIdx.y < numARows && Col < numAColumns){ Bs[threadIdx.y][threadIdx.x] = A[(k*TILE_DIM + threadIdx.y)*numAColumns + Col]; }
else { Bs[threadIdx.y][threadIdx.x] = 0.0; }
__syncthreads();
for (int n = 0; n < TILE_DIM; ++n) { CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x]; }
__syncthreads();
}
if (Row < numCRows && Col < numCColumns) { C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue; }
}
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
extern "C" void CalculateTransformDSTInverseColumnThree(double * A, double * C, int numARows,
int numAColumns, int numCRows, int numCColumns)
{
double * hostA = A; // The A matrix
//double * hostB = B; // The B matrix
double * hostC = C; // The output C matrix
//float * hostComputedC;
double * deviceA;
//double * deviceB;
double * deviceC;
//hostA = (float *)malloc(sizeof(float)*numARows*numAColumns);
//hostB = (float *)malloc(sizeof(float)*numBRows*numBColumns);
// Setting numCRows and numCColumns
numCRows = numARows;
numCColumns = numAColumns;
//hostC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
//hostComputedC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
// Allocating GPU memory
gpuErrchk(hipMalloc((void **)&deviceA, sizeof(double)*numARows*numAColumns));
//hipMalloc((void **)&deviceB, sizeof(double)*numBRows*numBColumns);
gpuErrchk(hipMalloc((void **)&deviceC, sizeof(double)*numCRows*numCColumns));
//thrust::device_ptr< double >dev_ptr_A(deviceA);
//thrust::device_ptr< double >dev_ptr_C(deviceC);
// Copy memory to the GPU
gpuErrchk(hipMemcpy(deviceA, hostA, sizeof(double)*numARows*numAColumns, hipMemcpyHostToDevice));
//hipMemcpy(deviceB, hostB, sizeof(double)*numBRows*numBColumns, hipMemcpyHostToDevice);
/////////////////////////////////////////////////////////
dim3 dimBlock(TILE_DIM, TILE_DIM, 1);
dim3 dimGrid;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DSTIII_Column_Inverse_Kernel << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
// Copy the results in GPU memory back to the CPU
gpuErrchk(hipMemcpy(hostC, deviceC, sizeof(double)*numCRows*numCColumns, hipMemcpyDeviceToHost));
C = hostC;
//thrust::device_free(dev_ptr_A);
//thrust::device_free(dev_ptr_C);
gpuErrchk(hipFree(deviceA));
//hipFree(deviceB);
gpuErrchk(hipFree(deviceC));
return;
}
| DST_III_Column_Inverse.cu | /*
* Inverse Discrete Sine Transform in Column wise (DST three)
* DST_III_Column_Inverse
* This CUDA code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array}
* gpuArray output, B=DST_III_Column_Inverse(A)=mexFunction(A).
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "DST_III_Column_Inverse.cuh"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <cuda.h>
#include <cuda_runtime.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, char *file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#define TILE_DIM 16
#define DEFAULT_DIM 32 // Tile dimension
#define DELTA(i, j) ((i==j)?1:0)
//const double PI_d = 3.141592653589793238462643383279502884; //pi
__global__ void DSTIII_Column_Inverse_Kernel(double *A, double *C,
int numARows, int numAColumns,
int numCRows, int numCColumns)
{
double CValue = 0.0;
const double PI_d = 3.141592653589793238462643383279502884; //pi
int Row = blockIdx.y*TILE_DIM + threadIdx.y;
int Col = blockIdx.x*TILE_DIM + threadIdx.x;
__shared__ double As[TILE_DIM][TILE_DIM];
__shared__ double Bs[TILE_DIM][TILE_DIM];
for (int k = 0; k < (TILE_DIM + numARows - 1) / TILE_DIM; k++) {
//As[threadIdx.y][threadIdx.x] = cos(((2 * Row + 1) / (2.0 * numARows))*PI_d*(threadIdx.x + (k*TILE_DIM)))*sqrt(1.0 / (1 + DELTA((threadIdx.x + (k*TILE_DIM)) + 1, 1)))*sqrt(2.0 / numARows);
if (k*TILE_DIM + threadIdx.x < numARows && Row < numARows) { As[threadIdx.y][threadIdx.x] = sin(((Row + 0.5)*PI_d*((threadIdx.x + (k*TILE_DIM)) + 1)) / (numARows))*sqrt((2.0 - DELTA((threadIdx.x + (k*TILE_DIM)) + 1, numARows)) / (numARows)); }
else { As[threadIdx.y][threadIdx.x] = 0.0; }
if (k*TILE_DIM + threadIdx.y < numARows && Col < numAColumns){ Bs[threadIdx.y][threadIdx.x] = A[(k*TILE_DIM + threadIdx.y)*numAColumns + Col]; }
else { Bs[threadIdx.y][threadIdx.x] = 0.0; }
__syncthreads();
for (int n = 0; n < TILE_DIM; ++n) { CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x]; }
__syncthreads();
}
if (Row < numCRows && Col < numCColumns) { C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue; }
}
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
extern "C" void CalculateTransformDSTInverseColumnThree(double * A, double * C, int numARows,
int numAColumns, int numCRows, int numCColumns)
{
double * hostA = A; // The A matrix
//double * hostB = B; // The B matrix
double * hostC = C; // The output C matrix
//float * hostComputedC;
double * deviceA;
//double * deviceB;
double * deviceC;
//hostA = (float *)malloc(sizeof(float)*numARows*numAColumns);
//hostB = (float *)malloc(sizeof(float)*numBRows*numBColumns);
// Setting numCRows and numCColumns
numCRows = numARows;
numCColumns = numAColumns;
//hostC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
//hostComputedC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
// Allocating GPU memory
gpuErrchk(cudaMalloc((void **)&deviceA, sizeof(double)*numARows*numAColumns));
//cudaMalloc((void **)&deviceB, sizeof(double)*numBRows*numBColumns);
gpuErrchk(cudaMalloc((void **)&deviceC, sizeof(double)*numCRows*numCColumns));
//thrust::device_ptr< double >dev_ptr_A(deviceA);
//thrust::device_ptr< double >dev_ptr_C(deviceC);
// Copy memory to the GPU
gpuErrchk(cudaMemcpy(deviceA, hostA, sizeof(double)*numARows*numAColumns, cudaMemcpyHostToDevice));
//cudaMemcpy(deviceB, hostB, sizeof(double)*numBRows*numBColumns, cudaMemcpyHostToDevice);
/////////////////////////////////////////////////////////
dim3 dimBlock(TILE_DIM, TILE_DIM, 1);
dim3 dimGrid;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DSTIII_Column_Inverse_Kernel << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
// Copy the results in GPU memory back to the CPU
gpuErrchk(cudaMemcpy(hostC, deviceC, sizeof(double)*numCRows*numCColumns, cudaMemcpyDeviceToHost));
C = hostC;
//thrust::device_free(dev_ptr_A);
//thrust::device_free(dev_ptr_C);
gpuErrchk(cudaFree(deviceA));
//cudaFree(deviceB);
gpuErrchk(cudaFree(deviceC));
return;
}
|
9674384084a5f7d855f2a6c9443462e527cabf00.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "reference_calc.cpp"
#include "utils.h"
#define max(a,b) \
({ __typeof__ (a) _a = (a); \
__typeof__ (b) _b = (b); \
_a > _b ? _a : _b; })
#define min(a,b) \
({ __typeof__ (a) _a = (a); \
__typeof__ (b) _b = (b); \
_a > _b ? _b : _a; })
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows){
return;
}
float sum = 0.0f;
float pixvalue = 0.0f;
int i = 0;
int j = 0;
const int filter_radius = filterWidth >> 1;
const int numSize = numRows * numCols;
for(j = -filter_radius; j <= filter_radius; j++){
for(i = -filter_radius; i <= filter_radius; i++){
int pose_r = min(max(thread_2D_pos.y + j, 0), numRows - 1);
int pose_c = min(max(thread_2D_pos.x + i, 0), numCols - 1);
int pos = pose_r * numCols + pose_c;
pixvalue = (float)inputChannel[pos];
sum += pixvalue * filter[i + filter_radius + (j + filter_radius) * filterWidth];
}
}
outputChannel[thread_1D_pos] = (unsigned char)sum;
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows){
return;
}
redChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].x;
greenChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].y;
blueChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with hipMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc
checkCudaErrors(hipMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(hipMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, hipMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
//TODO: Set reasonable block size (i.e., number of threads per block)
//const dim3 blockSize((numCols >> 5) + 1 , (numRows >> 5) + 1);
const dim3 blockSize(32 , 32);
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and and block size.
//const dim3 gridSize(31, 32);
const dim3 gridSize(18, 10);
//TODO: Launch a kernel for separating the RGBA image into different color channelshipLaunchKernelGGL((
separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols,
d_red, d_green, d_blue);
// Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
//TODO: Call your convolution kernel here 3 times, once for each color channel.hipLaunchKernelGGL((
gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);hipLaunchKernelGGL((
gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);hipLaunchKernelGGL((
gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
// Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.hipLaunchKernelGGL((
recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(hipFree(d_red));
checkCudaErrors(hipFree(d_green));
checkCudaErrors(hipFree(d_blue));
}
| 9674384084a5f7d855f2a6c9443462e527cabf00.cu | // Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "reference_calc.cpp"
#include "utils.h"
#define max(a,b) \
({ __typeof__ (a) _a = (a); \
__typeof__ (b) _b = (b); \
_a > _b ? _a : _b; })
#define min(a,b) \
({ __typeof__ (a) _a = (a); \
__typeof__ (b) _b = (b); \
_a > _b ? _b : _a; })
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows){
return;
}
float sum = 0.0f;
float pixvalue = 0.0f;
int i = 0;
int j = 0;
const int filter_radius = filterWidth >> 1;
const int numSize = numRows * numCols;
for(j = -filter_radius; j <= filter_radius; j++){
for(i = -filter_radius; i <= filter_radius; i++){
int pose_r = min(max(thread_2D_pos.y + j, 0), numRows - 1);
int pose_c = min(max(thread_2D_pos.x + i, 0), numCols - 1);
int pos = pose_r * numCols + pose_c;
pixvalue = (float)inputChannel[pos];
sum += pixvalue * filter[i + filter_radius + (j + filter_radius) * filterWidth];
}
}
outputChannel[thread_1D_pos] = (unsigned char)sum;
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows){
return;
}
redChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].x;
greenChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].y;
blueChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with cudaMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc
checkCudaErrors(cudaMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(cudaMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, cudaMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
//TODO: Set reasonable block size (i.e., number of threads per block)
//const dim3 blockSize((numCols >> 5) + 1 , (numRows >> 5) + 1);
const dim3 blockSize(32 , 32);
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and and block size.
//const dim3 gridSize(31, 32);
const dim3 gridSize(18, 10);
//TODO: Launch a kernel for separating the RGBA image into different color channels
separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA, numRows, numCols,
d_red, d_green, d_blue);
// Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
//TODO: Call your convolution kernel here 3 times, once for each color channel.
gaussian_blur<<<gridSize, blockSize>>>(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize>>>(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize>>>(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
// Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
recombineChannels<<<gridSize, blockSize>>>(d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
}
|
089a640817e425f4ace753aa1495c4a7c082d900.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "sapporo_defs.h"
#include "dev_evaluate_gravity.cu"
double get_time();
inline int n_norm(int n, int j) {
n = ((n-1)/j) * j + j;
if (n == 0) n = j;
return n;
}
#include "sapporo_multi.h"
extern "C"
{
int get_device_count() {
int gpuCount = 0;
CUDA_SAFE_CALL(hipGetDeviceCount(&gpuCount));
CUT_CHECK_ERROR("Failed to get CUDA device count\n");
return gpuCount;
}
//#define _DEBUG_PRINT_
hipError_t host_evaluate_gravity(sapporo_multi_struct gpu) {
double t0 = get_time();
int ofs = gpu.offset;
DS t_i = (DS){gpu.t_i_x, gpu.t_i_y};
if (gpu.nj_modified > 0) {
int nj_scaled = n_norm(gpu.nj_modified, NTHREADS);
dim3 threads(NTHREADS, 1, 1);
dim3 grid(nj_scaled/NTHREADS, 1, 1);
if (nj_scaled < threads.x) {
threads.x = nj_scaled;
grid.x = 1;
};
#ifdef _DEBUG_PRINT_
double t1 = get_time();
#endif
hipLaunchKernelGGL(( dev_copy_particles), dim3(grid), dim3(threads), 0, 0, gpu.nj_modified,
gpu.nj_max,
gpu.address_j,
gpu.t_j,
gpu.Ppos_j,
gpu.Pvel_j,
gpu.pos_j,
gpu.vel_j,
gpu.acc_j,
gpu.jrk_j);
#ifdef _DEBUG_PRINT_
fprintf(stderr, " dev_copy_particles: %lf sec\n", get_time() - t1);
#endif
}
if (gpu.predict) {
int nj_scaled = n_norm(gpu.nj, NTHREADS);
dim3 threads_p(NTHREADS, 1, 1);
dim3 grid_p(nj_scaled/NTHREADS, 1, 1);
double t1 = get_time();
hipLaunchKernelGGL(( dev_predictor), dim3(grid_p), dim3(threads_p), 0, 0, gpu.nj,
t_i,
gpu.t_j + ofs,
gpu.Ppos_j + ofs,
gpu.Pvel_j + ofs,
gpu.pos_j + ofs,
gpu.vel_j + ofs,
gpu.acc_j + ofs,
gpu.jrk_j + ofs);
#ifdef _DEBUG_PRINT_
fprintf(stderr, " dev_predict: %lf sec\n", get_time() - t1);
#endif
};
int p = gpu.ni;
int q = min(NTHREADS/gpu.ni, 32);
// q = 1;
dim3 threads(p, q, 1);
dim3 grid(NBLOCKS, 1, 1);
int shared_mem_size = p*q*(sizeof(DS4) + sizeof(float4));
int nj_scaled = n_norm(gpu.nj, q*NBLOCKS);
#if CUDART_VERSION < 5000
CUDA_SAFE_CALL(hipMemcpyToSymbol(
"EPS2",
&gpu.EPS2,
sizeof(float),
0,
hipMemcpyHostToDevice)
);
#else
CUDA_SAFE_CALL(hipMemcpyToSymbol(
EPS2,
&gpu.EPS2,
sizeof(float),
0,
hipMemcpyHostToDevice)
);
#endif
double t1 = get_time();
if (gpu.ngb)
hipLaunchKernelGGL(( dev_evaluate_gravity<true>), dim3(grid), dim3(threads), shared_mem_size, 0, gpu.nj,
nj_scaled/(NBLOCKS*q),
NTHREADS,
gpu.Ppos_j+ ofs,
gpu.Pvel_j + ofs,
gpu.pos_i, gpu.vel_i,
gpu.acc_i, gpu.jrk_i,
gpu.ngb_list);
else
hipLaunchKernelGGL(( dev_evaluate_gravity<false>), dim3(grid), dim3(threads), shared_mem_size, 0, gpu.nj,
nj_scaled/(NBLOCKS*q),
NTHREADS,
gpu.Ppos_j + ofs,
gpu.Pvel_j + ofs,
gpu.pos_i, gpu.vel_i,
gpu.acc_i, gpu.jrk_i,
gpu.ngb_list);
#ifdef _DEBUG_PRINT_
fprintf(stderr, " dev_evaluate_gravity: %lf sec\n", get_time() - t1);
#endif
dim3 threads_r(NBLOCKS, 1, 1);
dim3 grid_r(gpu.ni, 1, 1);
int shared_mem_size_r= NBLOCKS*(2*sizeof(float4) + 3*sizeof(int));
t1 = get_time();
hipLaunchKernelGGL(( dev_reduce_forces), dim3(grid_r), dim3(threads_r), shared_mem_size_r, 0, gpu.acc_i,
gpu.jrk_i,
gpu.ds_i,
gpu.vel_i,
NTHREADS,
NGB_PB*NBLOCKS*NTHREADS,
gpu.ngb_list);
#ifdef _DEBUG_PRINT_
fprintf(stderr, " dev_reduce_forces: %lf sec\n", get_time() - t1);
#endif
return hipSuccess;
}
}
| 089a640817e425f4ace753aa1495c4a7c082d900.cu | #include <stdio.h>
#include "sapporo_defs.h"
#include "dev_evaluate_gravity.cu"
double get_time();
inline int n_norm(int n, int j) {
n = ((n-1)/j) * j + j;
if (n == 0) n = j;
return n;
}
#include "sapporo_multi.h"
extern "C"
{
int get_device_count() {
int gpuCount = 0;
CUDA_SAFE_CALL(cudaGetDeviceCount(&gpuCount));
CUT_CHECK_ERROR("Failed to get CUDA device count\n");
return gpuCount;
}
//#define _DEBUG_PRINT_
cudaError_t host_evaluate_gravity(sapporo_multi_struct gpu) {
double t0 = get_time();
int ofs = gpu.offset;
DS t_i = (DS){gpu.t_i_x, gpu.t_i_y};
if (gpu.nj_modified > 0) {
int nj_scaled = n_norm(gpu.nj_modified, NTHREADS);
dim3 threads(NTHREADS, 1, 1);
dim3 grid(nj_scaled/NTHREADS, 1, 1);
if (nj_scaled < threads.x) {
threads.x = nj_scaled;
grid.x = 1;
};
#ifdef _DEBUG_PRINT_
double t1 = get_time();
#endif
dev_copy_particles<<<grid, threads>>>(gpu.nj_modified,
gpu.nj_max,
gpu.address_j,
gpu.t_j,
gpu.Ppos_j,
gpu.Pvel_j,
gpu.pos_j,
gpu.vel_j,
gpu.acc_j,
gpu.jrk_j);
#ifdef _DEBUG_PRINT_
fprintf(stderr, " dev_copy_particles: %lf sec\n", get_time() - t1);
#endif
}
if (gpu.predict) {
int nj_scaled = n_norm(gpu.nj, NTHREADS);
dim3 threads_p(NTHREADS, 1, 1);
dim3 grid_p(nj_scaled/NTHREADS, 1, 1);
double t1 = get_time();
dev_predictor<<<grid_p, threads_p>>>(gpu.nj,
t_i,
gpu.t_j + ofs,
gpu.Ppos_j + ofs,
gpu.Pvel_j + ofs,
gpu.pos_j + ofs,
gpu.vel_j + ofs,
gpu.acc_j + ofs,
gpu.jrk_j + ofs);
#ifdef _DEBUG_PRINT_
fprintf(stderr, " dev_predict: %lf sec\n", get_time() - t1);
#endif
};
int p = gpu.ni;
int q = min(NTHREADS/gpu.ni, 32);
// q = 1;
dim3 threads(p, q, 1);
dim3 grid(NBLOCKS, 1, 1);
int shared_mem_size = p*q*(sizeof(DS4) + sizeof(float4));
int nj_scaled = n_norm(gpu.nj, q*NBLOCKS);
#if CUDART_VERSION < 5000
CUDA_SAFE_CALL(cudaMemcpyToSymbol(
"EPS2",
&gpu.EPS2,
sizeof(float),
0,
cudaMemcpyHostToDevice)
);
#else
CUDA_SAFE_CALL(cudaMemcpyToSymbol(
EPS2,
&gpu.EPS2,
sizeof(float),
0,
cudaMemcpyHostToDevice)
);
#endif
double t1 = get_time();
if (gpu.ngb)
dev_evaluate_gravity<true><<<grid, threads, shared_mem_size>>>(gpu.nj,
nj_scaled/(NBLOCKS*q),
NTHREADS,
gpu.Ppos_j+ ofs,
gpu.Pvel_j + ofs,
gpu.pos_i, gpu.vel_i,
gpu.acc_i, gpu.jrk_i,
gpu.ngb_list);
else
dev_evaluate_gravity<false><<<grid, threads, shared_mem_size>>>(gpu.nj,
nj_scaled/(NBLOCKS*q),
NTHREADS,
gpu.Ppos_j + ofs,
gpu.Pvel_j + ofs,
gpu.pos_i, gpu.vel_i,
gpu.acc_i, gpu.jrk_i,
gpu.ngb_list);
#ifdef _DEBUG_PRINT_
fprintf(stderr, " dev_evaluate_gravity: %lf sec\n", get_time() - t1);
#endif
dim3 threads_r(NBLOCKS, 1, 1);
dim3 grid_r(gpu.ni, 1, 1);
int shared_mem_size_r= NBLOCKS*(2*sizeof(float4) + 3*sizeof(int));
t1 = get_time();
dev_reduce_forces<<<grid_r, threads_r, shared_mem_size_r>>>(gpu.acc_i,
gpu.jrk_i,
gpu.ds_i,
gpu.vel_i,
NTHREADS,
NGB_PB*NBLOCKS*NTHREADS,
gpu.ngb_list);
#ifdef _DEBUG_PRINT_
fprintf(stderr, " dev_reduce_forces: %lf sec\n", get_time() - t1);
#endif
return cudaSuccess;
}
}
|
c62a36f43b01f80d80fd7d3240c57b09d2f5cc96.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <wb.h>
#define NUM_BINS 4096
#define CUDA_CHECK(ans) \
{ gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line,
bool abort = true) {
if (code != hipSuccess) {
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code),
file, line);
if (abort)
exit(code);
}
}
__global__ void binning(unsigned int* input_array, unsigned int* bin_array, int input_length, int bin_num) {
int i = blockDim.x*blockIdx.x + threadIdx.x;
//int stride = blockDim.x*gridDim.x;
if (i < input_length) {
unsigned int position = input_array[i];
if (position >= 0 && position < NUM_BINS) {
if (bin_array[position] < 127)
atomicAdd(&bin_array[position], 1);
}
}
}
__global__ void binningFast(unsigned int* input_array, unsigned int* bin_array, int input_length, int bin_num) {
__shared__ unsigned int temp[NUM_BINS];
temp[threadIdx.x] = 0;
__syncthreads();
//int i = threadIdx.x;
//int stride = blockDim.x;
int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i < input_length) {
// atomicAdd(&temp[input_array[i]], 1);
unsigned int position = input_array[i];
if (position >= 0 && position < NUM_BINS) {
if (bin_array[position] < 127)
atomicAdd(&temp[position], 1);
}
}
__syncthreads();
// int k = threadIdx.x;
int k = blockDim.x*blockIdx.x + threadIdx.x;
if (k < input_length) {
bin_array[k] = temp[k];
//atomicAdd(&bin_array[k], temp[k]);
}
}
__global__ void limmiting(unsigned int* bin_array, int bin_num) {
int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i < bin_num) {
if (bin_array[i] > 127) {
bin_array[i] = 127;
}
}
}
int main(int argc, char *argv[]) {
wbArg_t args;
int inputLength;
unsigned int *hostInput;
unsigned int *hostBins;
unsigned int *deviceInput;
unsigned int *deviceBins;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput = (unsigned int *)wbImport(wbArg_getInputFile(args, 0),
&inputLength, "Integer");
hostBins = (unsigned int *)malloc(NUM_BINS * sizeof(unsigned int));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The input length is ", inputLength);
wbLog(TRACE, "The number of bins is ", NUM_BINS);
for (int i = 0; i < NUM_BINS; i++) {
hostBins[i] = 0;
}
wbTime_start(GPU, "Allocating GPU memory.");
// TODO: Allocate GPU memory here
hipMalloc((void**)&deviceInput, inputLength*sizeof(unsigned int));
hipMalloc((void**)&deviceBins, NUM_BINS*sizeof(unsigned int));
CUDA_CHECK(hipDeviceSynchronize());
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
// TODO: Copy memory to the GPU here
hipMemcpy(deviceInput, hostInput, inputLength*sizeof(unsigned int), hipMemcpyHostToDevice);
hipMemcpy(deviceBins, hostBins, NUM_BINS*sizeof(unsigned int), hipMemcpyHostToDevice);
CUDA_CHECK(hipDeviceSynchronize());
wbTime_stop(GPU, "Copying input memory to the GPU.");
// Launch kernel
// ----------------------------------------------------------
wbLog(TRACE, "Launching kernel");
wbTime_start(Compute, "Performing CUDA computation");
// TODO: Perform kernel computation here
int thread_num = 256;
int block_num = (NUM_BINS + inputLength + thread_num - 1) / thread_num;
//binning<<<block_num, thread_num>>>(deviceInput, deviceBins, inputLength, NUM_BINS);
binning << <block_num, thread_num >> >(deviceInput, deviceBins, inputLength, NUM_BINS);
CUDA_CHECK(hipDeviceSynchronize());
//limmiting << <block_num, thread_num >> >(deviceBins, NUM_BINS);
// You should call the following lines after you call the kernel.
CUDA_CHECK(hipGetLastError());
CUDA_CHECK(hipDeviceSynchronize());
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
// TODO: Copy the GPU memory back to the CPU here
hipMemcpy(hostBins, deviceBins, NUM_BINS*sizeof(unsigned int), hipMemcpyDeviceToHost);
CUDA_CHECK(hipDeviceSynchronize());
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
// TODO: Free the GPU memory here
hipFree(deviceBins);
hipFree(deviceInput);
wbTime_stop(GPU, "Freeing GPU Memory");
// Verify correctness
// -----------------------------------------------------
wbSolution(args, hostBins, NUM_BINS);
free(hostBins);
free(hostInput);
#if LAB_DEBUG
system("pause");
#endif
return 0;
}
| c62a36f43b01f80d80fd7d3240c57b09d2f5cc96.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <wb.h>
#define NUM_BINS 4096
#define CUDA_CHECK(ans) \
{ gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line,
bool abort = true) {
if (code != cudaSuccess) {
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code),
file, line);
if (abort)
exit(code);
}
}
__global__ void binning(unsigned int* input_array, unsigned int* bin_array, int input_length, int bin_num) {
int i = blockDim.x*blockIdx.x + threadIdx.x;
//int stride = blockDim.x*gridDim.x;
if (i < input_length) {
unsigned int position = input_array[i];
if (position >= 0 && position < NUM_BINS) {
if (bin_array[position] < 127)
atomicAdd(&bin_array[position], 1);
}
}
}
__global__ void binningFast(unsigned int* input_array, unsigned int* bin_array, int input_length, int bin_num) {
__shared__ unsigned int temp[NUM_BINS];
temp[threadIdx.x] = 0;
__syncthreads();
//int i = threadIdx.x;
//int stride = blockDim.x;
int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i < input_length) {
// atomicAdd(&temp[input_array[i]], 1);
unsigned int position = input_array[i];
if (position >= 0 && position < NUM_BINS) {
if (bin_array[position] < 127)
atomicAdd(&temp[position], 1);
}
}
__syncthreads();
// int k = threadIdx.x;
int k = blockDim.x*blockIdx.x + threadIdx.x;
if (k < input_length) {
bin_array[k] = temp[k];
//atomicAdd(&bin_array[k], temp[k]);
}
}
__global__ void limmiting(unsigned int* bin_array, int bin_num) {
int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i < bin_num) {
if (bin_array[i] > 127) {
bin_array[i] = 127;
}
}
}
int main(int argc, char *argv[]) {
wbArg_t args;
int inputLength;
unsigned int *hostInput;
unsigned int *hostBins;
unsigned int *deviceInput;
unsigned int *deviceBins;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput = (unsigned int *)wbImport(wbArg_getInputFile(args, 0),
&inputLength, "Integer");
hostBins = (unsigned int *)malloc(NUM_BINS * sizeof(unsigned int));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The input length is ", inputLength);
wbLog(TRACE, "The number of bins is ", NUM_BINS);
for (int i = 0; i < NUM_BINS; i++) {
hostBins[i] = 0;
}
wbTime_start(GPU, "Allocating GPU memory.");
// TODO: Allocate GPU memory here
cudaMalloc((void**)&deviceInput, inputLength*sizeof(unsigned int));
cudaMalloc((void**)&deviceBins, NUM_BINS*sizeof(unsigned int));
CUDA_CHECK(cudaDeviceSynchronize());
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
// TODO: Copy memory to the GPU here
cudaMemcpy(deviceInput, hostInput, inputLength*sizeof(unsigned int), cudaMemcpyHostToDevice);
cudaMemcpy(deviceBins, hostBins, NUM_BINS*sizeof(unsigned int), cudaMemcpyHostToDevice);
CUDA_CHECK(cudaDeviceSynchronize());
wbTime_stop(GPU, "Copying input memory to the GPU.");
// Launch kernel
// ----------------------------------------------------------
wbLog(TRACE, "Launching kernel");
wbTime_start(Compute, "Performing CUDA computation");
// TODO: Perform kernel computation here
int thread_num = 256;
int block_num = (NUM_BINS + inputLength + thread_num - 1) / thread_num;
//binning<<<block_num, thread_num>>>(deviceInput, deviceBins, inputLength, NUM_BINS);
binning << <block_num, thread_num >> >(deviceInput, deviceBins, inputLength, NUM_BINS);
CUDA_CHECK(cudaDeviceSynchronize());
//limmiting << <block_num, thread_num >> >(deviceBins, NUM_BINS);
// You should call the following lines after you call the kernel.
CUDA_CHECK(cudaGetLastError());
CUDA_CHECK(cudaDeviceSynchronize());
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
// TODO: Copy the GPU memory back to the CPU here
cudaMemcpy(hostBins, deviceBins, NUM_BINS*sizeof(unsigned int), cudaMemcpyDeviceToHost);
CUDA_CHECK(cudaDeviceSynchronize());
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
// TODO: Free the GPU memory here
cudaFree(deviceBins);
cudaFree(deviceInput);
wbTime_stop(GPU, "Freeing GPU Memory");
// Verify correctness
// -----------------------------------------------------
wbSolution(args, hostBins, NUM_BINS);
free(hostBins);
free(hostInput);
#if LAB_DEBUG
system("pause");
#endif
return 0;
}
|
4ae0b64ec6a7357c28b98174a66fa82995797aed.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void getLoss(float* dat, float* rst){
*rst = -logf(*dat);
} | 4ae0b64ec6a7357c28b98174a66fa82995797aed.cu | #include "includes.h"
__global__ void getLoss(float* dat, float* rst){
*rst = -logf(*dat);
} |
8a0992c81914defae3e2b2a5a8c716a712d8a2b2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void my2DConvKernel(float *d_Result, float *d_Data, int dataW, int dataH)
{
// original image based coordinate
int y = blockIdx.y * blockDim.y threadIdx.y;
int x = blockIdx.x * blockDim.x threadIdx.x;
int BiasY = y - KERNEL_RADIUS;
int BiasX = x - KERNEL_RADIUS;
float sum = 0;
for(int j = 0; j < KERNEL_LENGTH; j)
{
//out of image range
if (BiasY j < 0 || BiasY j >= dataH)
continue;
for(int i = 0; i < KERNEL_LENGTH; i)
{
//out of image range
if (BiasX i < 0 || BiasX i >= dataW)
continue;
sum = d_Data[(BiasY j) * dataW BiasX i] *
c_Kernel[KERNEL_LENGTH * j i];
}
}
d_Result[y * dataW x] = sum;
}
| 8a0992c81914defae3e2b2a5a8c716a712d8a2b2.cu | __global__ void my2DConvKernel(float *d_Result, float *d_Data, int dataW, int dataH)
{
// original image based coordinate
int y = blockIdx.y * blockDim.y threadIdx.y;
int x = blockIdx.x * blockDim.x threadIdx.x;
int BiasY = y - KERNEL_RADIUS;
int BiasX = x - KERNEL_RADIUS;
float sum = 0;
for(int j = 0; j < KERNEL_LENGTH; j)
{
//out of image range
if (BiasY j < 0 || BiasY j >= dataH)
continue;
for(int i = 0; i < KERNEL_LENGTH; i)
{
//out of image range
if (BiasX i < 0 || BiasX i >= dataW)
continue;
sum = d_Data[(BiasY j) * dataW BiasX i] *
c_Kernel[KERNEL_LENGTH * j i];
}
}
d_Result[y * dataW x] = sum;
}
|
cef7b1acb34d5b00f15b23becfb482faa9c863dc.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
__global__
void tensor_transpose(int dim_input, int dim_output, int nblocks, int tile_size,
int *shape_input, int *shape_output,
float *shape_input_r, float *shape_output_r,
int *stride_input,
int *stride_output_local, int *stride_output_global,
double *input, double *output) {
__shared__ double tile[TILE_SIZE];
for (int block_idx = blockIdx.x; block_idx < nblocks; block_idx += gridDim.x) {
int it = block_idx, im = 0, offset1 = 0;
for (int i = 0; i < dim_input; i++) {
im = it / shape_input[i];
offset1 += stride_input[i] * (it - im * shape_input[i]);
it = im;
}
for (int i = threadIdx.x; i < tile_size; i += blockDim.x) {
tile[i] = input[i + block_idx * tile_size];
}
__syncthreads();
for (int i = threadIdx.x; i < tile_size; i += blockDim.x) {
it = i;
int offset2 = 0, local_offset = 0;
for (int j = 0; j < dim_output; j++) {
im = it / shape_output[j];
int tmp = it - im * shape_output[j];
offset2 += stride_output_global[j] * tmp;
local_offset += stride_output_local[j] * tmp;
it = im;
}
output[offset1 + offset2] = tile[local_offset];
}
__syncthreads();
}
}
| cef7b1acb34d5b00f15b23becfb482faa9c863dc.cu | #include <cuda_runtime.h>
#include <cuda.h>
__global__
void tensor_transpose(int dim_input, int dim_output, int nblocks, int tile_size,
int *shape_input, int *shape_output,
float *shape_input_r, float *shape_output_r,
int *stride_input,
int *stride_output_local, int *stride_output_global,
double *input, double *output) {
__shared__ double tile[TILE_SIZE];
for (int block_idx = blockIdx.x; block_idx < nblocks; block_idx += gridDim.x) {
int it = block_idx, im = 0, offset1 = 0;
for (int i = 0; i < dim_input; i++) {
im = it / shape_input[i];
offset1 += stride_input[i] * (it - im * shape_input[i]);
it = im;
}
for (int i = threadIdx.x; i < tile_size; i += blockDim.x) {
tile[i] = input[i + block_idx * tile_size];
}
__syncthreads();
for (int i = threadIdx.x; i < tile_size; i += blockDim.x) {
it = i;
int offset2 = 0, local_offset = 0;
for (int j = 0; j < dim_output; j++) {
im = it / shape_output[j];
int tmp = it - im * shape_output[j];
offset2 += stride_output_global[j] * tmp;
local_offset += stride_output_local[j] * tmp;
it = im;
}
output[offset1 + offset2] = tile[local_offset];
}
__syncthreads();
}
}
|
cc2c129eda8736f0d3301080fee794729ed68f4d.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
The convolution version of 12_gemm_bias_relu. Similarly, we put bias vector in Operand C and the
rest is the same as normal convolution.
*/
#include <iostream>
#include <sstream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/conv/kernel/default_conv2d_fprop.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/host_reorder.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/device/convolution.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
// The code section below describes datatype for input, output tensors and computation between
// elements
using ElementAccumulator = float; // Data type of accumulator
using ElementComputeEpilogue = ElementAccumulator; // Data type of epilogue computation
using ElementInputA = cutlass::half_t; // Data type of elements in input tensor
using ElementInputB = cutlass::half_t; // Data type of elements in input tensor
using ElementOutput = float; // Data type of elements in output tensor
using LayoutInputA = cutlass::layout::TensorNHWC;
using LayoutInputB = cutlass::layout::TensorNHWC;
using LayoutOutput = cutlass::layout::TensorNHWC;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm80;
// This code section describes the tile size a thread block will compute
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; // Threadblock tile shape
// This code section describes tile size a warp will compute
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; // Warp tile shape
// This code section describes the size of MMA op
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; // TensorCore instruction shape
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;
// Number of pipelines you want to use
constexpr int NumStages = 4;
// This code section describe iterator algorithm selected is Analytic or Optimized
static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm = cutlass::conv::IteratorAlgorithm::kOptimized;
// This code section describes the epilogue part of the kernel, we use default value
using EpilogueOp = cutlass::epilogue::thread::LinearCombinationRelu<
ElementOutput, // Data type of output matrix.
128 / cutlass::sizeof_bits<ElementOutput>::value, // The number of elements per vectorized.
// memory access. This becomes the vector width of
// math instructions in the epilogue too.
ElementAccumulator, // Data type of accumulator
ElementComputeEpilogue, // Data type for alpha in linear combination
cutlass::epilogue::thread::ScaleType::NoBetaScaling>; // alpha X C + per channel bias
using Conv2dFpropKernel = typename cutlass::conv::kernel::DefaultConv2dFprop<
ElementInputA, LayoutInputA,
ElementInputB, LayoutInputB,
ElementOutput, LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
cutlass::arch::OpMultiplyAdd,
IteratorAlgorithm
>::Kernel;
using ImplicitGemm = cutlass::conv::device::ImplicitGemmConvolution<Conv2dFpropKernel>;
/////////////////////////////////////////////////////////////////////////////////////////////////
// Runs one fused Conv2dFprop (implicit GEMM) with a per-channel bias held in
// operand C and a ReLU epilogue, on random data, then validates against a
// device reference convolution followed by a host-side bias+ReLU pass.
// Prints "Passed"/"Failed" on stdout and always returns 0.
int run() {
// Construct Conv2dProblemSize with user defined output size
cutlass::conv::Conv2dProblemSize problem_size(
{1, 7, 7, 512}, // activation
{512, 3, 3, 512}, // filter
{1, 1, 1, 1}, // padding
{1, 1}, // striding
{1, 1}, // dilation
cutlass::conv::Mode::kCrossCorrelation, // mode (convolution or cross-correlation)
1 // split-k slices
);
// Initialize tensors using CUTLASS helper functions
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(problem_size.activation_extent());
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(problem_size.filter_extent());
// Create tensor C with dimensions 1x1x1xk which is the bias vector
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c_bias({1, 1, 1, problem_size.K});
// Create tensor D used to store output from CUTLASS kernel
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(problem_size.output_extent());
// Create matrix D with dimensions M x N used to store output from reference
// kernel
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(problem_size.output_extent());
// Fill input and output matrices on host using CUTLASS helper functions
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
ElementInputA(4),
ElementInputA(-4),
0); // <- Fill tensor A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
ElementInputB(4),
ElementInputB(-4),
0); // <- Fill tensor B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c_bias.host_view(),
1,
ElementOutput(4),
ElementOutput(-4),
0); // <- Fill matrix C on host with uniform-distribution random data
cutlass::reference::host::TensorFill(
tensor_d.host_view()); // <- fill matrix D on host with zeros
cutlass::reference::host::TensorFill(
tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_c_bias.sync_device();
tensor_d.sync_device();
tensor_ref_d.sync_device();
// Initialize alpha for dot product computation
ElementComputeEpilogue alpha = ElementComputeEpilogue(1);
// Create a tuple of gemm kernel arguments. This is later passed as arguments to launch
// instantiated CUTLASS kernel
typename ImplicitGemm::Arguments arguments{
problem_size,
tensor_a.device_ref(), // <- reference to tensor A on device
tensor_b.device_ref(), // <- reference to tensor B on device
// tensor C is treated as the bias vector. We can enable the CONV
// to project away the N, H, W dimension by setting the stride to zero.
{tensor_c_bias.device_data(), LayoutOutput::Stride(0)},
tensor_d.device_ref(), // <- reference to tensor D on device
{alpha} };
// Instantiate CUTLASS kernel depending on templates
ImplicitGemm implicit_gemm_op;
// Using the arguments, query for extra workspace required for matrix multiplication computation
size_t workspace_size = implicit_gemm_op.get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
// Check the problem size is supported or not
cutlass::Status status = implicit_gemm_op.can_implement(arguments);
CUTLASS_CHECK(status);
// Initialize CUTLASS kernel with arguments and workspace pointer
status = implicit_gemm_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(status);
// Launch initialized CUTLASS kernel
status = implicit_gemm_op();
CUTLASS_CHECK(status);
//
// Create instantiation for device reference conv kernel
//
// Launch device reference to compute strictly the product A * B
cutlass::reference::device::Conv2d<
ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementComputeEpilogue,
ElementAccumulator,
cutlass::NumericConverter<ElementOutput, ElementComputeEpilogue>>
(
cutlass::conv::Operator::kFprop,
problem_size,
tensor_a.device_ref(),
tensor_b.device_ref(),
tensor_c_bias.device_ref(),
tensor_ref_d.device_ref(),
alpha, ElementComputeEpilogue(0)
);
// Wait for kernels to finish
hipDeviceSynchronize();
// Copy output data from CUTLASS and reference kernel to host for comparison
tensor_d.sync_host();
tensor_ref_d.sync_host();
// Compute bias + relu in host code
for (int n = 0; n < problem_size.N; ++n) {
for (int p = 0; p < problem_size.P; ++p) {
for (int q = 0; q < problem_size.Q; ++q) {
for (int k = 0; k < problem_size.K; ++k) {
tensor_ref_d.at({n, p, q, k}) =
// NOTE(review): hipify rewrote std::max as ::max in this host loop;
// presumably a global max(float, float) overload is in scope via the
// HIP headers -- std::max would be the portable spelling. TODO confirm.
::max(ElementOutput(0),
ElementOutput(tensor_ref_d.at({n, p, q, k}) +
tensor_c_bias.at({0, 0, 0, k})));
}
}
}
}
// Check if output from CUTLASS kernel and reference kernel are equal or not
std::cout << (cutlass::reference::host::TensorEquals(tensor_d.host_view(),
tensor_ref_d.host_view())
? "Passed"
: "Failed")
<< std::endl;
// NOTE(review): this re-checks the status left over from implicit_gemm_op()
// above; it does not validate the comparison result.
CUTLASS_CHECK(status);
return 0;
}
// Entry point: verify toolchain and device requirements, then run the example.
// Exits quietly (return 0, not an error) when requirements are not met.
int main(int argc, char const **args) {
bool notSupported = false;
// Ampere Tensor Core operations exposed with mma.sync are first available in CUDA 11.0.
//
// CUTLASS must be compiled with CUDA 11 Toolkit to run Conv2dFprop examples.
// NOTE(review): __CUDACC_VER_MAJOR__ / __CUDACC_VER_MINOR__ are nvcc macros;
// when this hipified file is built with the AMD HIP toolchain they are
// presumably undefined and this line will not compile -- TODO confirm/guard.
if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0))) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
hipDeviceProp_t props;
CUDA_CHECK(hipGetDeviceProperties(&props, 0));
// The kernel is instantiated for Sm80, so require compute capability >= 8.0.
if (!(props.major > 8 || (props.major == 8 && props.minor >= 0))) {
std::cerr << "Ampere Tensor Ops must be run on a machine with compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
return 0;
}
return run();
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| cc2c129eda8736f0d3301080fee794729ed68f4d.cu | /***************************************************************************************************
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
The convolution version of 12_gemm_bias_relu. Similarly, we put bias vector in Operand C and the
rest is the same as normal convolution.
*/
#include <iostream>
#include <sstream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/conv/kernel/default_conv2d_fprop.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/host_reorder.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/device/convolution.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
// The code section below describes datatype for input, output tensors and computation between
// elements
using ElementAccumulator = float; // Data type of accumulator
using ElementComputeEpilogue = ElementAccumulator; // Data type of epilogue computation
using ElementInputA = cutlass::half_t; // Data type of elements in input tensor
using ElementInputB = cutlass::half_t; // Data type of elements in input tensor
using ElementOutput = float; // Data type of elements in output tensor
using LayoutInputA = cutlass::layout::TensorNHWC;
using LayoutInputB = cutlass::layout::TensorNHWC;
using LayoutOutput = cutlass::layout::TensorNHWC;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm80;
// This code section describes the tile size a thread block will compute
using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; // Threadblock tile shape
// This code section describes tile size a warp will compute
using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; // Warp tile shape
// This code section describes the size of MMA op
using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; // TensorCore instruction shape
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;
// Number of pipelines you want to use
constexpr int NumStages = 4;
// This code section describe iterator algorithm selected is Analytic or Optimized
static cutlass::conv::IteratorAlgorithm const IteratorAlgorithm = cutlass::conv::IteratorAlgorithm::kOptimized;
// This code section describes the epilogue part of the kernel, we use default value
using EpilogueOp = cutlass::epilogue::thread::LinearCombinationRelu<
ElementOutput, // Data type of output matrix.
128 / cutlass::sizeof_bits<ElementOutput>::value, // The number of elements per vectorized.
// memory access. This becomes the vector width of
// math instructions in the epilogue too.
ElementAccumulator, // Data type of accumulator
ElementComputeEpilogue, // Data type for alpha in linear combination
cutlass::epilogue::thread::ScaleType::NoBetaScaling>; // alpha X C + per channel bias
using Conv2dFpropKernel = typename cutlass::conv::kernel::DefaultConv2dFprop<
ElementInputA, LayoutInputA,
ElementInputB, LayoutInputB,
ElementOutput, LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ThreadblockShape,
WarpShape,
InstructionShape,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
cutlass::arch::OpMultiplyAdd,
IteratorAlgorithm
>::Kernel;
using ImplicitGemm = cutlass::conv::device::ImplicitGemmConvolution<Conv2dFpropKernel>;
/////////////////////////////////////////////////////////////////////////////////////////////////
// Runs one fused Conv2dFprop (implicit GEMM) with a per-channel bias held in
// operand C and a ReLU epilogue, on random data, then validates against a
// device reference convolution followed by a host-side bias+ReLU pass.
// Prints "Passed"/"Failed" on stdout and always returns 0.
int run() {
// Construct Conv2dProblemSize with user defined output size
cutlass::conv::Conv2dProblemSize problem_size(
{1, 7, 7, 512}, // activation
{512, 3, 3, 512}, // filter
{1, 1, 1, 1}, // padding
{1, 1}, // striding
{1, 1}, // dilation
cutlass::conv::Mode::kCrossCorrelation, // mode (convolution or cross-correlation)
1 // split-k slices
);
// Initialize tensors using CUTLASS helper functions
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(problem_size.activation_extent());
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(problem_size.filter_extent());
// Create tensor C with dimensions 1x1x1xk which is the bias vector
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c_bias({1, 1, 1, problem_size.K});
// Create tensor D used to store output from CUTLASS kernel
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(problem_size.output_extent());
// Create matrix D with dimensions M x N used to store output from reference
// kernel
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(problem_size.output_extent());
// Fill input and output matrices on host using CUTLASS helper functions
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
ElementInputA(4),
ElementInputA(-4),
0); // <- Fill tensor A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
ElementInputB(4),
ElementInputB(-4),
0); // <- Fill tensor B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c_bias.host_view(),
1,
ElementOutput(4),
ElementOutput(-4),
0); // <- Fill matrix C on host with uniform-distribution random data
cutlass::reference::host::TensorFill(
tensor_d.host_view()); // <- fill matrix D on host with zeros
cutlass::reference::host::TensorFill(
tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_c_bias.sync_device();
tensor_d.sync_device();
tensor_ref_d.sync_device();
// Initialize alpha for dot product computation
ElementComputeEpilogue alpha = ElementComputeEpilogue(1);
// Create a tuple of gemm kernel arguments. This is later passed as arguments to launch
// instantiated CUTLASS kernel
typename ImplicitGemm::Arguments arguments{
problem_size,
tensor_a.device_ref(), // <- reference to tensor A on device
tensor_b.device_ref(), // <- reference to tensor B on device
// tensor C is treated as the bias vector. We can enable the CONV
// to project away the N, H, W dimension by setting the stride to zero.
{tensor_c_bias.device_data(), LayoutOutput::Stride(0)},
tensor_d.device_ref(), // <- reference to tensor D on device
{alpha} };
// Instantiate CUTLASS kernel depending on templates
ImplicitGemm implicit_gemm_op;
// Using the arguments, query for extra workspace required for matrix multiplication computation
size_t workspace_size = implicit_gemm_op.get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
// Check the problem size is supported or not
cutlass::Status status = implicit_gemm_op.can_implement(arguments);
CUTLASS_CHECK(status);
// Initialize CUTLASS kernel with arguments and workspace pointer
status = implicit_gemm_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(status);
// Launch initialized CUTLASS kernel
status = implicit_gemm_op();
CUTLASS_CHECK(status);
//
// Create instantiation for device reference conv kernel
//
// Launch device reference to compute strictly the product A * B
cutlass::reference::device::Conv2d<
ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementComputeEpilogue,
ElementAccumulator,
cutlass::NumericConverter<ElementOutput, ElementComputeEpilogue>>
(
cutlass::conv::Operator::kFprop,
problem_size,
tensor_a.device_ref(),
tensor_b.device_ref(),
tensor_c_bias.device_ref(),
tensor_ref_d.device_ref(),
alpha, ElementComputeEpilogue(0)
);
// Wait for kernels to finish
cudaDeviceSynchronize();
// Copy output data from CUTLASS and reference kernel to host for comparison
tensor_d.sync_host();
tensor_ref_d.sync_host();
// Compute bias + relu in host code
for (int n = 0; n < problem_size.N; ++n) {
for (int p = 0; p < problem_size.P; ++p) {
for (int q = 0; q < problem_size.Q; ++q) {
for (int k = 0; k < problem_size.K; ++k) {
tensor_ref_d.at({n, p, q, k}) =
std::max(ElementOutput(0),
ElementOutput(tensor_ref_d.at({n, p, q, k}) +
tensor_c_bias.at({0, 0, 0, k})));
}
}
}
}
// Check if output from CUTLASS kernel and reference kernel are equal or not
std::cout << (cutlass::reference::host::TensorEquals(tensor_d.host_view(),
tensor_ref_d.host_view())
? "Passed"
: "Failed")
<< std::endl;
// NOTE(review): this re-checks the status left over from implicit_gemm_op()
// above; it does not validate the comparison result.
CUTLASS_CHECK(status);
return 0;
}
// Entry point: check toolchain and device requirements before running the
// example. Unsupported configurations exit with 0 rather than failing.
int main(int argc, char const **args) {
  bool supported = true;
  // Ampere Tensor Core operations exposed with mma.sync are first available in
  // CUDA 11.0, so CUTLASS must be compiled with the CUDA 11 Toolkit or later.
  if (__CUDACC_VER_MAJOR__ < 11) {
    std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
    supported = false;
  }
  // The kernel is instantiated for Sm80: require compute capability >= 8.0.
  cudaDeviceProp props;
  CUDA_CHECK(cudaGetDeviceProperties(&props, 0));
  if (props.major < 8) {
    std::cerr << "Ampere Tensor Ops must be run on a machine with compute capability at least 80."
              << std::endl;
    supported = false;
  }
  return supported ? run() : 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
|
6dea2fa33c19a72abe2fae50cea27df4dc3cc3ea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "lab3.h"
#include <cstdio>
// Integer ceiling division, written as (a-1)/b + 1 (NOT (a+b-1)/b, which
// would give a different answer at a == 0 under truncating division).
__device__ __host__ int CeilDiv(int a, int b) {
	int quotient = (a - 1) / b;
	return quotient + 1;
}
// Round a up to the next multiple of b.
__device__ __host__ int CeilAlign(int a, int b) {
	return CeilDiv(a, b) * b;
}
// Copy target pixels whose mask value exceeds 127 onto the background image,
// offset by (oy, ox). One thread per target pixel; threads outside the target
// or whose background position is out of range do nothing. Images are 3-channel
// interleaved float.
__global__ void SimpleClone(
	const float *background,
	const float *target,
	const float *mask,
	float *output,
	const int wb, const int hb, const int wt, const int ht,
	const int oy, const int ox
)
{
	const int row = blockIdx.y * blockDim.y + threadIdx.y;
	const int col = blockIdx.x * blockDim.x + threadIdx.x;
	if (row >= ht || col >= wt) return;
	const int tIdx = wt * row + col;
	if (!(mask[tIdx] > 127.0f)) return;   // unmasked pixel: keep background
	const int brow = oy + row, bcol = ox + col;
	if (brow < 0 || brow >= hb || bcol < 0 || bcol >= wb) return;
	const int bIdx = wb * brow + bcol;
	for (int ch = 0; ch < 3; ++ch)
		output[bIdx * 3 + ch] = target[tIdx * 3 + ch];
}
// Build the constant ("fixed") part of the Poisson equation for each target
// pixel: 4*t minus the in-bounds target neighbors, plus background values for
// neighbors that fall outside the mask (Dirichlet boundary).
//
// Fix: the original filled the condition[] table -- which dereferences
// mask[northt] etc. -- BEFORE any bounds check, so ragged-edge threads of the
// CeilDiv-rounded grid read mask[] out of bounds. Out-of-range threads now
// return immediately; in-bounds behavior is unchanged.
__global__ void CalculateFixed(
	const float *background,
	const float *buf1, // target
	const float *mask,
	float *output, //fixed
	const int wb, const int hb, const int wt, const int ht,
	const int oy, const int ox
) {
	const int yt = blockIdx.y * blockDim.y + threadIdx.y;
	const int xt = blockIdx.x * blockDim.x + threadIdx.x;
	if (yt >= ht || xt >= wt) return; // guard BEFORE any mask[] access
	const int curt = wt*yt+xt;
	const int yb = oy+yt, xb = ox+xt;
	int northt = wt*(yt-1)+xt, northb = wb*(yb-1)+xb;
	int southt = wt*(yt+1)+xt, southb = wb*(yb+1)+xb;
	int eastt = wt*yt+xt+1, eastb = wb*yb+xb+1;
	int westt = wt*yt+xt-1, westb = wb*yb+xb-1;
	int direction[8] = {northt*3, westt*3, eastt*3, southt*3,
						northb*3, westb*3, eastb*3, southb*3};
	// Entries 0-3: neighbor lies inside the target image.
	// Entries 4-7: neighbor is inside the target but OUTSIDE the mask, so its
	// background value acts as a fixed boundary condition.
	// NOTE(review): "outside the mask" is tested here as mask == 0, while the
	// other kernels use mask > 127 -- confirm the mask really is binary 0/255.
	// NOTE(review): the northb/westb/... background reads below are only
	// guarded by the center pixel's bounds check, not their own -- TODO verify
	// the offsets keep them in range.
	bool condition[8] = {yt-1 >= 0, xt-1 >= 0, xt+1 < wt, yt+1 < ht,
						yt-1 >= 0 && !mask[northt],
						xt-1 >= 0 && !mask[westt],
						xt+1 < wt && !mask[eastt],
						yt+1 < ht && !mask[southt]
						};
	// fixed = 4*target - sum of in-bounds target neighbors
	for(int j = 0; j < 3; ++j)
		output[curt*3+j] = 4*buf1[curt*3+j];
	for(int i = 0; i < 4; ++i) { // check if Nt, St, Wt or Et is inbound
		if(condition[i])
			for(int j = 0; j < 3; ++j)
				output[curt*3+j] -= buf1[direction[i]+j];
	}
	if (0 <= yb && yb < hb && 0 <= xb && xb < wb) {
		for(int i = 4; i < 8; ++i) // check if Nb, Sb, Wb or Eb is fixed
			if(condition[i])
				for(int j = 0; j < 3; ++j)
					output[curt*3+j] += background[direction[i]+j];
			else if(!condition[i-4])
				for(int j = 0; j < 3; ++j)
					output[curt*3+j] = background[direction[i]+j];
	}
}
// One Jacobi sweep of the Poisson solve:
//   buf2 = (fixed + sum of masked (mask > 127) neighbors from buf1) / 4
// for every target pixel, 3 channels each.
//
// Fix: the original wrote buf2[curt*3+j] (the initial copy and the final /= 4)
// and read mask[] for the condition[] table BEFORE the bounds test -- only the
// accumulation loop was guarded -- so ragged-edge threads of the
// CeilDiv-rounded grid performed out-of-bounds global reads AND writes.
// Out-of-range threads now return immediately; in-bounds behavior is unchanged.
__global__ void PoissonImageCloningIteration(
	const float *fixed,
	const float *mask,
	const float *buf1,
	float *buf2,
	const int wt,
	const int ht
) {
	const int yt = blockIdx.y * blockDim.y + threadIdx.y;
	const int xt = blockIdx.x * blockDim.x + threadIdx.x;
	if (yt >= ht || xt >= wt) return; // guard BEFORE any global access
	const int curt = wt*yt+xt;
	int northt = wt*(yt-1)+xt;
	int southt = wt*(yt+1)+xt;
	int eastt = wt*yt+xt+1;
	int westt = wt*yt+xt-1;
	int direction[4] = {northt*3, westt*3, eastt*3, southt*3};
	// A neighbor contributes only if it is inside the target image and masked.
	bool condition[4] = {
		yt-1 >= 0 && mask[northt] > 127.0f,
		xt-1 >= 0 && mask[westt] > 127.0f,
		xt+1 < wt && mask[eastt] > 127.0f,
		yt+1 < ht && mask[southt] > 127.0f
	};
	for(int j = 0; j < 3; ++j)
		buf2[curt*3+j] = fixed[curt*3+j];
	for(int i = 0; i < 4; ++i) {
		if(condition[i])
			for(int j = 0; j < 3; ++j)
				buf2[curt*3+j] += buf1[direction[i]+j];
	}
	for(int j = 0; j < 3; ++j)
		buf2[curt*3+j] /= 4;
}
// Host driver: seamlessly clones `target` into `background` at offset
// (oy, ox) using `mask`, solving the Poisson equation with 2 * 20000 Jacobi
// sweeps (ping-ponging buf1 <-> buf2, so the converged result ends in buf1),
// then composites the solution over the background into `output`.
// All pointers are device pointers to 3-channel interleaved float images.
void PoissonImageCloning(
const float *background,
const float *target,
const float *mask,
float *output,
const int wb, const int hb, const int wt, const int ht,
const int oy, const int ox
) {
// set up
float *fixed, *buf1, *buf2;
// NOTE(review): no hip API call or kernel launch below is error-checked; a
// failed hipMalloc here would only surface as a later illegal-address fault.
hipMalloc(&fixed, 3*wt*ht*sizeof(float));
hipMalloc(&buf1, 3*wt*ht*sizeof(float));
hipMalloc(&buf2, 3*wt*ht*sizeof(float));
// initialize the iteration
// 32x16 threads per block; CeilDiv rounds the grid up to cover the target.
dim3 gdim(CeilDiv(wt,32), CeilDiv(ht,16)), bdim(32,16);
hipLaunchKernelGGL(( CalculateFixed), dim3(gdim), dim3(bdim), 0, 0,
background, target, mask, fixed,
wb, hb, wt, ht, oy, ox
);
hipMemcpy(buf1, target, sizeof(float)*3*wt*ht, hipMemcpyDeviceToDevice);
// iterate
for (int i = 0; i < 20000; ++i) {
hipLaunchKernelGGL(( PoissonImageCloningIteration), dim3(gdim), dim3(bdim), 0, 0,
fixed, mask, buf1, buf2, wt, ht
);
hipLaunchKernelGGL(( PoissonImageCloningIteration), dim3(gdim), dim3(bdim), 0, 0,
fixed, mask, buf2, buf1, wt, ht
);
}
// copy the image back: start from the untouched background, then overlay the
// solved region through the mask.
hipMemcpy(output, background, wb*hb*sizeof(float)*3, hipMemcpyDeviceToDevice);
hipLaunchKernelGGL(( SimpleClone), dim3(gdim), dim3(bdim), 0, 0,
background, buf1, mask, output,
wb, hb, wt, ht, oy, ox
);
// clean up
hipFree(fixed);
hipFree(buf1);
hipFree(buf2);
} | 6dea2fa33c19a72abe2fae50cea27df4dc3cc3ea.cu | #include "lab3.h"
#include <cstdio>
// Integer ceiling division, written as (a-1)/b + 1 (NOT (a+b-1)/b, which
// would give a different answer at a == 0 under truncating division).
__device__ __host__ int CeilDiv(int a, int b) {
	int quotient = (a - 1) / b;
	return quotient + 1;
}
// Round a up to the next multiple of b.
__device__ __host__ int CeilAlign(int a, int b) {
	return CeilDiv(a, b) * b;
}
// Copy target pixels whose mask value exceeds 127 onto the background image,
// offset by (oy, ox). One thread per target pixel; threads outside the target
// or whose background position is out of range do nothing. Images are 3-channel
// interleaved float.
__global__ void SimpleClone(
	const float *background,
	const float *target,
	const float *mask,
	float *output,
	const int wb, const int hb, const int wt, const int ht,
	const int oy, const int ox
)
{
	const int row = blockIdx.y * blockDim.y + threadIdx.y;
	const int col = blockIdx.x * blockDim.x + threadIdx.x;
	if (row >= ht || col >= wt) return;
	const int tIdx = wt * row + col;
	if (!(mask[tIdx] > 127.0f)) return;   // unmasked pixel: keep background
	const int brow = oy + row, bcol = ox + col;
	if (brow < 0 || brow >= hb || bcol < 0 || bcol >= wb) return;
	const int bIdx = wb * brow + bcol;
	for (int ch = 0; ch < 3; ++ch)
		output[bIdx * 3 + ch] = target[tIdx * 3 + ch];
}
// Build the constant ("fixed") part of the Poisson equation for each target
// pixel: 4*t minus the in-bounds target neighbors, plus background values for
// neighbors that fall outside the mask (Dirichlet boundary).
//
// Fix: the original filled the condition[] table -- which dereferences
// mask[northt] etc. -- BEFORE any bounds check, so ragged-edge threads of the
// CeilDiv-rounded grid read mask[] out of bounds. Out-of-range threads now
// return immediately; in-bounds behavior is unchanged.
__global__ void CalculateFixed(
	const float *background,
	const float *buf1, // target
	const float *mask,
	float *output, //fixed
	const int wb, const int hb, const int wt, const int ht,
	const int oy, const int ox
) {
	const int yt = blockIdx.y * blockDim.y + threadIdx.y;
	const int xt = blockIdx.x * blockDim.x + threadIdx.x;
	if (yt >= ht || xt >= wt) return; // guard BEFORE any mask[] access
	const int curt = wt*yt+xt;
	const int yb = oy+yt, xb = ox+xt;
	int northt = wt*(yt-1)+xt, northb = wb*(yb-1)+xb;
	int southt = wt*(yt+1)+xt, southb = wb*(yb+1)+xb;
	int eastt = wt*yt+xt+1, eastb = wb*yb+xb+1;
	int westt = wt*yt+xt-1, westb = wb*yb+xb-1;
	int direction[8] = {northt*3, westt*3, eastt*3, southt*3,
						northb*3, westb*3, eastb*3, southb*3};
	// Entries 0-3: neighbor lies inside the target image.
	// Entries 4-7: neighbor is inside the target but OUTSIDE the mask, so its
	// background value acts as a fixed boundary condition.
	// NOTE(review): "outside the mask" is tested here as mask == 0, while the
	// other kernels use mask > 127 -- confirm the mask really is binary 0/255.
	// NOTE(review): the northb/westb/... background reads below are only
	// guarded by the center pixel's bounds check, not their own -- TODO verify
	// the offsets keep them in range.
	bool condition[8] = {yt-1 >= 0, xt-1 >= 0, xt+1 < wt, yt+1 < ht,
						yt-1 >= 0 && !mask[northt],
						xt-1 >= 0 && !mask[westt],
						xt+1 < wt && !mask[eastt],
						yt+1 < ht && !mask[southt]
						};
	// fixed = 4*target - sum of in-bounds target neighbors
	for(int j = 0; j < 3; ++j)
		output[curt*3+j] = 4*buf1[curt*3+j];
	for(int i = 0; i < 4; ++i) { // check if Nt, St, Wt or Et is inbound
		if(condition[i])
			for(int j = 0; j < 3; ++j)
				output[curt*3+j] -= buf1[direction[i]+j];
	}
	if (0 <= yb && yb < hb && 0 <= xb && xb < wb) {
		for(int i = 4; i < 8; ++i) // check if Nb, Sb, Wb or Eb is fixed
			if(condition[i])
				for(int j = 0; j < 3; ++j)
					output[curt*3+j] += background[direction[i]+j];
			else if(!condition[i-4])
				for(int j = 0; j < 3; ++j)
					output[curt*3+j] = background[direction[i]+j];
	}
}
// One Jacobi sweep of the Poisson solve:
//   buf2 = (fixed + sum of masked (mask > 127) neighbors from buf1) / 4
// for every target pixel, 3 channels each.
//
// Fix: the original wrote buf2[curt*3+j] (the initial copy and the final /= 4)
// and read mask[] for the condition[] table BEFORE the bounds test -- only the
// accumulation loop was guarded -- so ragged-edge threads of the
// CeilDiv-rounded grid performed out-of-bounds global reads AND writes.
// Out-of-range threads now return immediately; in-bounds behavior is unchanged.
__global__ void PoissonImageCloningIteration(
	const float *fixed,
	const float *mask,
	const float *buf1,
	float *buf2,
	const int wt,
	const int ht
) {
	const int yt = blockIdx.y * blockDim.y + threadIdx.y;
	const int xt = blockIdx.x * blockDim.x + threadIdx.x;
	if (yt >= ht || xt >= wt) return; // guard BEFORE any global access
	const int curt = wt*yt+xt;
	int northt = wt*(yt-1)+xt;
	int southt = wt*(yt+1)+xt;
	int eastt = wt*yt+xt+1;
	int westt = wt*yt+xt-1;
	int direction[4] = {northt*3, westt*3, eastt*3, southt*3};
	// A neighbor contributes only if it is inside the target image and masked.
	bool condition[4] = {
		yt-1 >= 0 && mask[northt] > 127.0f,
		xt-1 >= 0 && mask[westt] > 127.0f,
		xt+1 < wt && mask[eastt] > 127.0f,
		yt+1 < ht && mask[southt] > 127.0f
	};
	for(int j = 0; j < 3; ++j)
		buf2[curt*3+j] = fixed[curt*3+j];
	for(int i = 0; i < 4; ++i) {
		if(condition[i])
			for(int j = 0; j < 3; ++j)
				buf2[curt*3+j] += buf1[direction[i]+j];
	}
	for(int j = 0; j < 3; ++j)
		buf2[curt*3+j] /= 4;
}
// Host driver: seamlessly clones `target` into `background` at offset
// (oy, ox) using `mask`, solving the Poisson equation with 2 * 20000 Jacobi
// sweeps (ping-ponging buf1 <-> buf2, so the converged result ends in buf1),
// then composites the solution over the background into `output`.
// All pointers are device pointers to 3-channel interleaved float images.
void PoissonImageCloning(
const float *background,
const float *target,
const float *mask,
float *output,
const int wb, const int hb, const int wt, const int ht,
const int oy, const int ox
) {
// set up
float *fixed, *buf1, *buf2;
// NOTE(review): no CUDA API call or kernel launch below is error-checked; a
// failed cudaMalloc here would only surface as a later illegal-address fault.
cudaMalloc(&fixed, 3*wt*ht*sizeof(float));
cudaMalloc(&buf1, 3*wt*ht*sizeof(float));
cudaMalloc(&buf2, 3*wt*ht*sizeof(float));
// initialize the iteration
// 32x16 threads per block; CeilDiv rounds the grid up to cover the target.
dim3 gdim(CeilDiv(wt,32), CeilDiv(ht,16)), bdim(32,16);
CalculateFixed<<<gdim, bdim>>>(
background, target, mask, fixed,
wb, hb, wt, ht, oy, ox
);
cudaMemcpy(buf1, target, sizeof(float)*3*wt*ht, cudaMemcpyDeviceToDevice);
// iterate
for (int i = 0; i < 20000; ++i) {
PoissonImageCloningIteration<<<gdim, bdim>>>(
fixed, mask, buf1, buf2, wt, ht
);
PoissonImageCloningIteration<<<gdim, bdim>>>(
fixed, mask, buf2, buf1, wt, ht
);
}
// copy the image back: start from the untouched background, then overlay the
// solved region through the mask.
cudaMemcpy(output, background, wb*hb*sizeof(float)*3, cudaMemcpyDeviceToDevice);
SimpleClone<<<gdim, bdim>>>(
background, buf1, mask, output,
wb, hb, wt, ht, oy, ox
);
// clean up
cudaFree(fixed);
cudaFree(buf1);
cudaFree(buf2);
} |
b165a30037a0582381faccfa650584c22dd0d331.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* The basic MD5 functions.
*
* F and G are optimized compared to their RFC 1321 definitions for
* architectures that lack an AND-NOT instruction, just like in Colin Plumb's
* implementation.
*/
typedef unsigned int MD5_u32plus;
/* MD5 hashing context. */
typedef struct {
MD5_u32plus lo, hi; /* message bit counters (the body() routine below explicitly does NOT update these) */
MD5_u32plus a, b, c, d; /* the four 32-bit MD5 chaining variables */
unsigned char buffer[64]; /* presumably a partial-input block held between updates -- the update path is not in view */
MD5_u32plus block[16]; /* scratch words filled by SET()/GET() on big-endian / strict-alignment targets */
} MD5_CTX;
#define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
#define G(x, y, z) ((y) ^ ((z) & ((x) ^ (y))))
#define H(x, y, z) (((x) ^ (y)) ^ (z))
#define H2(x, y, z) ((x) ^ ((y) ^ (z)))
#define I(x, y, z) ((y) ^ ((x) | ~(z)))
/*
* The MD5 transformation for all four rounds.
*/
#define STEP(f, a, b, c, d, x, t, s) \
(a) += f((b), (c), (d)) + (x) + (t); \
(a) = (((a) << (s)) | (((a) & 0xffffffff) >> (32 - (s)))); \
(a) += (b);
/*
* SET reads 4 input bytes in little-endian byte order and stores them
* in a properly aligned word in host byte order.
*
* The check for little-endian architectures that tolerate unaligned
* memory accesses is just an optimization. Nothing will break if it
* doesn't work.
*/
#if defined(__i386__) || defined(__x86_64__) || defined(__vax__)
#define SET(n) \
(*(MD5_u32plus *)&ptr[(n) * 4])
#define GET(n) \
SET(n)
#else
#define SET(n) \
(ctx->block[(n)] = \
(MD5_u32plus)ptr[(n) * 4] | \
((MD5_u32plus)ptr[(n) * 4 + 1] << 8) | \
((MD5_u32plus)ptr[(n) * 4 + 2] << 16) | \
((MD5_u32plus)ptr[(n) * 4 + 3] << 24))
#define GET(n) \
(ctx->block[(n)])
#endif
/*
* This processes one or more 64-byte data blocks, but does NOT update
* the bit counters. There are no alignment requirements.
*/
/*
 * MD5 compression core: consumes one or more complete 64-byte blocks from
 * `data` and folds them into the a/b/c/d chaining state in `ctx`.
 * Does NOT update the lo/hi length counters (MD5_Update maintains those).
 * Returns a pointer just past the last block consumed.
 * Precondition: `size` is a non-zero multiple of 64 -- the loop exit test
 * `size -= 64` would wrap for any other value.
 */
__host__ __device__
const void *body(MD5_CTX *ctx, const void *data, unsigned long size)
{
const unsigned char *ptr;
MD5_u32plus a, b, c, d;
MD5_u32plus saved_a, saved_b, saved_c, saved_d;
ptr = (const unsigned char *)data;
/* Work on local copies of the chaining state (registers, not memory). */
a = ctx->a;
b = ctx->b;
c = ctx->c;
d = ctx->d;
do {
/* Save the input state so it can be added back after the four rounds
 * (the feed-forward required by the MD5 specification). */
saved_a = a;
saved_b = b;
saved_c = c;
saved_d = d;
/* Round 1 */
STEP(F, a, b, c, d, SET(0), 0xd76aa478, 7)
STEP(F, d, a, b, c, SET(1), 0xe8c7b756, 12)
STEP(F, c, d, a, b, SET(2), 0x242070db, 17)
STEP(F, b, c, d, a, SET(3), 0xc1bdceee, 22)
STEP(F, a, b, c, d, SET(4), 0xf57c0faf, 7)
STEP(F, d, a, b, c, SET(5), 0x4787c62a, 12)
STEP(F, c, d, a, b, SET(6), 0xa8304613, 17)
STEP(F, b, c, d, a, SET(7), 0xfd469501, 22)
STEP(F, a, b, c, d, SET(8), 0x698098d8, 7)
STEP(F, d, a, b, c, SET(9), 0x8b44f7af, 12)
STEP(F, c, d, a, b, SET(10), 0xffff5bb1, 17)
STEP(F, b, c, d, a, SET(11), 0x895cd7be, 22)
STEP(F, a, b, c, d, SET(12), 0x6b901122, 7)
STEP(F, d, a, b, c, SET(13), 0xfd987193, 12)
STEP(F, c, d, a, b, SET(14), 0xa679438e, 17)
STEP(F, b, c, d, a, SET(15), 0x49b40821, 22)
/* Round 2 */
STEP(G, a, b, c, d, GET(1), 0xf61e2562, 5)
STEP(G, d, a, b, c, GET(6), 0xc040b340, 9)
STEP(G, c, d, a, b, GET(11), 0x265e5a51, 14)
STEP(G, b, c, d, a, GET(0), 0xe9b6c7aa, 20)
STEP(G, a, b, c, d, GET(5), 0xd62f105d, 5)
STEP(G, d, a, b, c, GET(10), 0x02441453, 9)
STEP(G, c, d, a, b, GET(15), 0xd8a1e681, 14)
STEP(G, b, c, d, a, GET(4), 0xe7d3fbc8, 20)
STEP(G, a, b, c, d, GET(9), 0x21e1cde6, 5)
STEP(G, d, a, b, c, GET(14), 0xc33707d6, 9)
STEP(G, c, d, a, b, GET(3), 0xf4d50d87, 14)
STEP(G, b, c, d, a, GET(8), 0x455a14ed, 20)
STEP(G, a, b, c, d, GET(13), 0xa9e3e905, 5)
STEP(G, d, a, b, c, GET(2), 0xfcefa3f8, 9)
STEP(G, c, d, a, b, GET(7), 0x676f02d9, 14)
STEP(G, b, c, d, a, GET(12), 0x8d2a4c8a, 20)
/* Round 3 */
STEP(H, a, b, c, d, GET(5), 0xfffa3942, 4)
STEP(H2, d, a, b, c, GET(8), 0x8771f681, 11)
STEP(H, c, d, a, b, GET(11), 0x6d9d6122, 16)
STEP(H2, b, c, d, a, GET(14), 0xfde5380c, 23)
STEP(H, a, b, c, d, GET(1), 0xa4beea44, 4)
STEP(H2, d, a, b, c, GET(4), 0x4bdecfa9, 11)
STEP(H, c, d, a, b, GET(7), 0xf6bb4b60, 16)
STEP(H2, b, c, d, a, GET(10), 0xbebfbc70, 23)
STEP(H, a, b, c, d, GET(13), 0x289b7ec6, 4)
STEP(H2, d, a, b, c, GET(0), 0xeaa127fa, 11)
STEP(H, c, d, a, b, GET(3), 0xd4ef3085, 16)
STEP(H2, b, c, d, a, GET(6), 0x04881d05, 23)
STEP(H, a, b, c, d, GET(9), 0xd9d4d039, 4)
STEP(H2, d, a, b, c, GET(12), 0xe6db99e5, 11)
STEP(H, c, d, a, b, GET(15), 0x1fa27cf8, 16)
STEP(H2, b, c, d, a, GET(2), 0xc4ac5665, 23)
/* Round 4 */
STEP(I, a, b, c, d, GET(0), 0xf4292244, 6)
STEP(I, d, a, b, c, GET(7), 0x432aff97, 10)
STEP(I, c, d, a, b, GET(14), 0xab9423a7, 15)
STEP(I, b, c, d, a, GET(5), 0xfc93a039, 21)
STEP(I, a, b, c, d, GET(12), 0x655b59c3, 6)
STEP(I, d, a, b, c, GET(3), 0x8f0ccc92, 10)
STEP(I, c, d, a, b, GET(10), 0xffeff47d, 15)
STEP(I, b, c, d, a, GET(1), 0x85845dd1, 21)
STEP(I, a, b, c, d, GET(8), 0x6fa87e4f, 6)
STEP(I, d, a, b, c, GET(15), 0xfe2ce6e0, 10)
STEP(I, c, d, a, b, GET(6), 0xa3014314, 15)
STEP(I, b, c, d, a, GET(13), 0x4e0811a1, 21)
STEP(I, a, b, c, d, GET(4), 0xf7537e82, 6)
STEP(I, d, a, b, c, GET(11), 0xbd3af235, 10)
STEP(I, c, d, a, b, GET(2), 0x2ad7d2bb, 15)
STEP(I, b, c, d, a, GET(9), 0xeb86d391, 21)
/* Feed-forward: add the saved input state into the round output. */
a += saved_a;
b += saved_b;
c += saved_c;
d += saved_d;
ptr += 64;
} while (size -= 64);
/* Publish the updated chaining state back to the context. */
ctx->a = a;
ctx->b = b;
ctx->c = c;
ctx->d = d;
return ptr;
}
/* Reset `ctx` for a new digest: zero the 64-bit message-length counter and
 * load the RFC 1321 initial chaining values into a/b/c/d. */
__host__ __device__
void MD5_Init(MD5_CTX *ctx)
{
ctx->lo = 0;
ctx->hi = 0;
ctx->a = 0x67452301;
ctx->b = 0xefcdab89;
ctx->c = 0x98badcfe;
ctx->d = 0x10325476;
}
/*
 * Streaming update: absorbs `size` bytes from `data` into the digest,
 * buffering any partial (<64-byte) block in ctx->buffer between calls.
 */
__host__ __device__
void MD5_Update(MD5_CTX *ctx, const void *data, unsigned long size)
{
MD5_u32plus saved_lo;
unsigned long used, available;
/* Maintain the 64-bit message byte count: `lo` keeps the low 29 bits,
 * `hi` the overflow (MD5_Final converts bytes to bits via lo <<= 3). */
saved_lo = ctx->lo;
if ((ctx->lo = (saved_lo + size) & 0x1fffffff) < saved_lo)
ctx->hi++;
ctx->hi += size >> 29;
/* Bytes already waiting in ctx->buffer from a previous call. */
used = saved_lo & 0x3f;
if (used) {
available = 64 - used;
if (size < available) {
/* Still short of a full block: just accumulate and return. */
memcpy(&ctx->buffer[used], data, size);
return;
}
/* Top up the buffered block and compress it. */
memcpy(&ctx->buffer[used], data, available);
data = (const unsigned char *)data + available;
size -= available;
body(ctx, ctx->buffer, 64);
}
if (size >= 64) {
/* Compress all complete 64-byte blocks straight from the input. */
data = body(ctx, data, size & ~(unsigned long)0x3f);
size &= 0x3f;
}
/* Stash the trailing partial block (possibly 0 bytes) for next time. */
memcpy(ctx->buffer, data, size);
}
/*
 * Finalize: append the 0x80 terminator, zero padding and the 64-bit message
 * length in bits (little-endian), run the last compression, then serialize
 * a/b/c/d little-endian into result[16] and wipe the context.
 */
__host__ __device__
void MD5_Final(unsigned char *result, MD5_CTX *ctx)
{
unsigned long used, available;
used = ctx->lo & 0x3f;
/* Mandatory 0x80 terminator byte directly after the message. */
ctx->buffer[used++] = 0x80;
available = 64 - used;
if (available < 8) {
/* Not enough room for the 8-byte length field: pad out and compress
 * an extra block first. */
memset(&ctx->buffer[used], 0, available);
body(ctx, ctx->buffer, 64);
used = 0;
available = 64;
}
memset(&ctx->buffer[used], 0, available - 8);
/* Convert the byte count to a bit count; `hi` already holds bytes >> 29,
 * which equals the high 32 bits of the bit count. */
ctx->lo <<= 3;
ctx->buffer[56] = ctx->lo;
ctx->buffer[57] = ctx->lo >> 8;
ctx->buffer[58] = ctx->lo >> 16;
ctx->buffer[59] = ctx->lo >> 24;
ctx->buffer[60] = ctx->hi;
ctx->buffer[61] = ctx->hi >> 8;
ctx->buffer[62] = ctx->hi >> 16;
ctx->buffer[63] = ctx->hi >> 24;
body(ctx, ctx->buffer, 64);
/* Emit the digest little-endian, one state word at a time. */
result[0] = ctx->a;
result[1] = ctx->a >> 8;
result[2] = ctx->a >> 16;
result[3] = ctx->a >> 24;
result[4] = ctx->b;
result[5] = ctx->b >> 8;
result[6] = ctx->b >> 16;
result[7] = ctx->b >> 24;
result[8] = ctx->c;
result[9] = ctx->c >> 8;
result[10] = ctx->c >> 16;
result[11] = ctx->c >> 24;
result[12] = ctx->d;
result[13] = ctx->d >> 8;
result[14] = ctx->d >> 16;
result[15] = ctx->d >> 24;
/* Scrub key-derived state from memory. */
memset(ctx, 0, sizeof(*ctx));
}
/* Single-thread kernel: hashes `text` (text_length-1 bytes, i.e. excluding
 * the trailing NUL) and writes the 16-byte digest to `result`. */
__global__
void count_md5(unsigned char * text, unsigned char* result, int text_length)
{
    MD5_CTX state;
    MD5_Init(&state);
    MD5_Update(&state, text, text_length - 1);
    MD5_Final(result, &state);
}
// Fixed-capacity candidate word for GPU-side hashing.
struct Word
{
char data[32]; // raw bytes, not NUL-terminated
int length; // number of valid bytes in `data`
};
// A target MD5 digest plus the plaintext found for it (if any).
struct MDHash
{
unsigned char data[16]; // the MD5 digest itself
unsigned char matchedWord[32]; // plaintext that produced `data`; valid only when wordMatched
bool wordMatched = false; // set once a matching word has been found
};
/*
 * One thread per candidate word: hash words[tid] and compare the digest
 * against every not-yet-cracked entry in `hashes`; on a full 16-byte match,
 * record the plaintext and mark the entry as cracked.
 * Launch with at least `wordsSize` threads in total.
 *
 * Fixes vs. original: `wordsSize` had no type, the kernel referenced the
 * undefined `text`/`text_length` instead of `words`, and the digest
 * comparison tested a byte against the array pointer `hashResult`.
 */
__global__
void check_md5(MDHash * hashes, int hashesSize, Word * words, int wordsSize)
{
    // Grid-wide thread index selects the candidate word; guard the tail.
    int w = blockIdx.x * blockDim.x + threadIdx.x;
    if (w >= wordsSize)
        return;
    Word checkedWord = words[w];
    // Hash the candidate word.
    unsigned char hashResult[16];
    MD5_CTX ctx;
    MD5_Init(&ctx);
    MD5_Update(&ctx, checkedWord.data, checkedWord.length);
    MD5_Final(hashResult, &ctx);
    // Compare against every target digest still unmatched.
    for (int i = 0; i < hashesSize; ++i)
    {
        if (hashes[i].wordMatched)
            continue;
        int j;
        for (j = 0; j < 16; ++j)
            if (hashes[i].data[j] != hashResult[j]) // compare bytes, not the array pointer
                break;
        if (j == 16)
        {
            // NOTE(review): two threads whose words both hash to the same
            // target could race on this write; harmless for this demo.
            hashes[i].wordMatched = true;
            for (j = 0; j < checkedWord.length; ++j)
                hashes[i].matchedWord[j] = checkedWord.data[j];
        }
    }
}
// Functor for thrust::transform: maps a candidate Word to its MD5 digest.
// Fix vs. original: operator() had no return type (ill-formed C++).
struct MD5Hasher
{
    __host__ __device__
    MDHash operator()(const Word & word) const
    {
        MDHash hash;
        MD5_CTX ctx;
        MD5_Init(&ctx);
        MD5_Update(&ctx, word.data, word.length);
        MD5_Final(hash.data, &ctx);
        return hash;
    }
};
#include <stdio.h>
#include <string.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
/*
 * Demo driver (HIP API).
 * 1) Hashes the string "text" with a 1x1 count_md5 launch and prints the hex digest.
 * 2) Hashes a batch of two words via thrust::transform + MD5Hasher and prints both digests.
 * Fixes vs. original: the unused `hashesFound` vector (whose declaration was
 * missing a semicolon -- a compile error) is removed, the hex loops use a
 * plain `i < 16` bound instead of a signed/unsigned `16*sizeof(...)`
 * comparison, and the device allocations are freed.
 */
int main(int argc, char const *argv[])
{
    // ---- Demo 1: single string hashed on the device ----
    unsigned char host_text[] = "text";
    int text_mem_size = 5; // strlen("text") + trailing NUL
    unsigned char host_result[16];
    unsigned char* device_text;
    unsigned char* device_result;
    hipMalloc((void **) &device_text, text_mem_size * sizeof(unsigned char));
    hipMemcpy(device_text, host_text, text_mem_size * sizeof(unsigned char), hipMemcpyHostToDevice);
    hipMalloc((void **) &device_result, 16 * sizeof(unsigned char));
    hipLaunchKernelGGL(count_md5, dim3(1), dim3(1), 0, 0,
                       device_text, device_result, text_mem_size);
    hipMemcpy(host_result, device_result, 16 * sizeof(unsigned char), hipMemcpyDeviceToHost);
    for (int i = 0; i < 16; ++i)
        printf("%02x", host_result[i]);
    printf("\n");
    hipFree(device_text);
    hipFree(device_result);
    // ---- Demo 2: batch of words hashed with thrust ----
    thrust::host_vector<Word> host_words(2);
    char word1[] = "mom";
    for (int i = 0; word1[i] != '\0'; ++i)
        host_words[0].data[i] = word1[i];
    host_words[0].length = strlen(word1);
    char word2[] = "help";
    for (int i = 0; word2[i] != '\0'; ++i)
        host_words[1].data[i] = word2[i];
    host_words[1].length = strlen(word2);
    thrust::device_vector<Word> device_words = host_words;
    // Size the output from the input batch.
    thrust::device_vector<MDHash> device_passwords(host_words.size());
    thrust::transform(device_words.begin(), device_words.end(), device_passwords.begin(), MD5Hasher());
    for (int n = 0; n < 2; ++n)
    {
        MDHash h = device_passwords[n]; // device->host copy of one element
        for (int i = 0; i < 16; ++i)
            printf("%02x", h.data[i]);
        printf("\n");
    }
    return 0;
}
| b165a30037a0582381faccfa650584c22dd0d331.cu |
/*
* The basic MD5 functions.
*
* F and G are optimized compared to their RFC 1321 definitions for
* architectures that lack an AND-NOT instruction, just like in Colin Plumb's
* implementation.
*/
typedef unsigned int MD5_u32plus;
typedef struct {
MD5_u32plus lo, hi;
MD5_u32plus a, b, c, d;
unsigned char buffer[64];
MD5_u32plus block[16];
} MD5_CTX;
#define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
#define G(x, y, z) ((y) ^ ((z) & ((x) ^ (y))))
#define H(x, y, z) (((x) ^ (y)) ^ (z))
#define H2(x, y, z) ((x) ^ ((y) ^ (z)))
#define I(x, y, z) ((y) ^ ((x) | ~(z)))
/*
* The MD5 transformation for all four rounds.
*/
#define STEP(f, a, b, c, d, x, t, s) \
(a) += f((b), (c), (d)) + (x) + (t); \
(a) = (((a) << (s)) | (((a) & 0xffffffff) >> (32 - (s)))); \
(a) += (b);
/*
* SET reads 4 input bytes in little-endian byte order and stores them
* in a properly aligned word in host byte order.
*
* The check for little-endian architectures that tolerate unaligned
* memory accesses is just an optimization. Nothing will break if it
* doesn't work.
*/
#if defined(__i386__) || defined(__x86_64__) || defined(__vax__)
#define SET(n) \
(*(MD5_u32plus *)&ptr[(n) * 4])
#define GET(n) \
SET(n)
#else
#define SET(n) \
(ctx->block[(n)] = \
(MD5_u32plus)ptr[(n) * 4] | \
((MD5_u32plus)ptr[(n) * 4 + 1] << 8) | \
((MD5_u32plus)ptr[(n) * 4 + 2] << 16) | \
((MD5_u32plus)ptr[(n) * 4 + 3] << 24))
#define GET(n) \
(ctx->block[(n)])
#endif
/*
* This processes one or more 64-byte data blocks, but does NOT update
* the bit counters. There are no alignment requirements.
*/
/*
 * MD5 compression core: consumes one or more complete 64-byte blocks from
 * `data` and folds them into the a/b/c/d chaining state in `ctx`.
 * Does NOT update the lo/hi length counters (MD5_Update maintains those).
 * Returns a pointer just past the last block consumed.
 * Precondition: `size` is a non-zero multiple of 64 -- the loop exit test
 * `size -= 64` would wrap for any other value.
 */
__host__ __device__
const void *body(MD5_CTX *ctx, const void *data, unsigned long size)
{
const unsigned char *ptr;
MD5_u32plus a, b, c, d;
MD5_u32plus saved_a, saved_b, saved_c, saved_d;
ptr = (const unsigned char *)data;
/* Work on local copies of the chaining state (registers, not memory). */
a = ctx->a;
b = ctx->b;
c = ctx->c;
d = ctx->d;
do {
/* Save the input state so it can be added back after the four rounds
 * (the feed-forward required by the MD5 specification). */
saved_a = a;
saved_b = b;
saved_c = c;
saved_d = d;
/* Round 1 */
STEP(F, a, b, c, d, SET(0), 0xd76aa478, 7)
STEP(F, d, a, b, c, SET(1), 0xe8c7b756, 12)
STEP(F, c, d, a, b, SET(2), 0x242070db, 17)
STEP(F, b, c, d, a, SET(3), 0xc1bdceee, 22)
STEP(F, a, b, c, d, SET(4), 0xf57c0faf, 7)
STEP(F, d, a, b, c, SET(5), 0x4787c62a, 12)
STEP(F, c, d, a, b, SET(6), 0xa8304613, 17)
STEP(F, b, c, d, a, SET(7), 0xfd469501, 22)
STEP(F, a, b, c, d, SET(8), 0x698098d8, 7)
STEP(F, d, a, b, c, SET(9), 0x8b44f7af, 12)
STEP(F, c, d, a, b, SET(10), 0xffff5bb1, 17)
STEP(F, b, c, d, a, SET(11), 0x895cd7be, 22)
STEP(F, a, b, c, d, SET(12), 0x6b901122, 7)
STEP(F, d, a, b, c, SET(13), 0xfd987193, 12)
STEP(F, c, d, a, b, SET(14), 0xa679438e, 17)
STEP(F, b, c, d, a, SET(15), 0x49b40821, 22)
/* Round 2 */
STEP(G, a, b, c, d, GET(1), 0xf61e2562, 5)
STEP(G, d, a, b, c, GET(6), 0xc040b340, 9)
STEP(G, c, d, a, b, GET(11), 0x265e5a51, 14)
STEP(G, b, c, d, a, GET(0), 0xe9b6c7aa, 20)
STEP(G, a, b, c, d, GET(5), 0xd62f105d, 5)
STEP(G, d, a, b, c, GET(10), 0x02441453, 9)
STEP(G, c, d, a, b, GET(15), 0xd8a1e681, 14)
STEP(G, b, c, d, a, GET(4), 0xe7d3fbc8, 20)
STEP(G, a, b, c, d, GET(9), 0x21e1cde6, 5)
STEP(G, d, a, b, c, GET(14), 0xc33707d6, 9)
STEP(G, c, d, a, b, GET(3), 0xf4d50d87, 14)
STEP(G, b, c, d, a, GET(8), 0x455a14ed, 20)
STEP(G, a, b, c, d, GET(13), 0xa9e3e905, 5)
STEP(G, d, a, b, c, GET(2), 0xfcefa3f8, 9)
STEP(G, c, d, a, b, GET(7), 0x676f02d9, 14)
STEP(G, b, c, d, a, GET(12), 0x8d2a4c8a, 20)
/* Round 3 */
STEP(H, a, b, c, d, GET(5), 0xfffa3942, 4)
STEP(H2, d, a, b, c, GET(8), 0x8771f681, 11)
STEP(H, c, d, a, b, GET(11), 0x6d9d6122, 16)
STEP(H2, b, c, d, a, GET(14), 0xfde5380c, 23)
STEP(H, a, b, c, d, GET(1), 0xa4beea44, 4)
STEP(H2, d, a, b, c, GET(4), 0x4bdecfa9, 11)
STEP(H, c, d, a, b, GET(7), 0xf6bb4b60, 16)
STEP(H2, b, c, d, a, GET(10), 0xbebfbc70, 23)
STEP(H, a, b, c, d, GET(13), 0x289b7ec6, 4)
STEP(H2, d, a, b, c, GET(0), 0xeaa127fa, 11)
STEP(H, c, d, a, b, GET(3), 0xd4ef3085, 16)
STEP(H2, b, c, d, a, GET(6), 0x04881d05, 23)
STEP(H, a, b, c, d, GET(9), 0xd9d4d039, 4)
STEP(H2, d, a, b, c, GET(12), 0xe6db99e5, 11)
STEP(H, c, d, a, b, GET(15), 0x1fa27cf8, 16)
STEP(H2, b, c, d, a, GET(2), 0xc4ac5665, 23)
/* Round 4 */
STEP(I, a, b, c, d, GET(0), 0xf4292244, 6)
STEP(I, d, a, b, c, GET(7), 0x432aff97, 10)
STEP(I, c, d, a, b, GET(14), 0xab9423a7, 15)
STEP(I, b, c, d, a, GET(5), 0xfc93a039, 21)
STEP(I, a, b, c, d, GET(12), 0x655b59c3, 6)
STEP(I, d, a, b, c, GET(3), 0x8f0ccc92, 10)
STEP(I, c, d, a, b, GET(10), 0xffeff47d, 15)
STEP(I, b, c, d, a, GET(1), 0x85845dd1, 21)
STEP(I, a, b, c, d, GET(8), 0x6fa87e4f, 6)
STEP(I, d, a, b, c, GET(15), 0xfe2ce6e0, 10)
STEP(I, c, d, a, b, GET(6), 0xa3014314, 15)
STEP(I, b, c, d, a, GET(13), 0x4e0811a1, 21)
STEP(I, a, b, c, d, GET(4), 0xf7537e82, 6)
STEP(I, d, a, b, c, GET(11), 0xbd3af235, 10)
STEP(I, c, d, a, b, GET(2), 0x2ad7d2bb, 15)
STEP(I, b, c, d, a, GET(9), 0xeb86d391, 21)
/* Feed-forward: add the saved input state into the round output. */
a += saved_a;
b += saved_b;
c += saved_c;
d += saved_d;
ptr += 64;
} while (size -= 64);
/* Publish the updated chaining state back to the context. */
ctx->a = a;
ctx->b = b;
ctx->c = c;
ctx->d = d;
return ptr;
}
/* Reset `ctx` for a new digest: zero the 64-bit message-length counter and
 * load the RFC 1321 initial chaining values into a/b/c/d. */
__host__ __device__
void MD5_Init(MD5_CTX *ctx)
{
ctx->lo = 0;
ctx->hi = 0;
ctx->a = 0x67452301;
ctx->b = 0xefcdab89;
ctx->c = 0x98badcfe;
ctx->d = 0x10325476;
}
/*
 * Streaming update: absorbs `size` bytes from `data` into the digest,
 * buffering any partial (<64-byte) block in ctx->buffer between calls.
 */
__host__ __device__
void MD5_Update(MD5_CTX *ctx, const void *data, unsigned long size)
{
MD5_u32plus saved_lo;
unsigned long used, available;
/* Maintain the 64-bit message byte count: `lo` keeps the low 29 bits,
 * `hi` the overflow (MD5_Final converts bytes to bits via lo <<= 3). */
saved_lo = ctx->lo;
if ((ctx->lo = (saved_lo + size) & 0x1fffffff) < saved_lo)
ctx->hi++;
ctx->hi += size >> 29;
/* Bytes already waiting in ctx->buffer from a previous call. */
used = saved_lo & 0x3f;
if (used) {
available = 64 - used;
if (size < available) {
/* Still short of a full block: just accumulate and return. */
memcpy(&ctx->buffer[used], data, size);
return;
}
/* Top up the buffered block and compress it. */
memcpy(&ctx->buffer[used], data, available);
data = (const unsigned char *)data + available;
size -= available;
body(ctx, ctx->buffer, 64);
}
if (size >= 64) {
/* Compress all complete 64-byte blocks straight from the input. */
data = body(ctx, data, size & ~(unsigned long)0x3f);
size &= 0x3f;
}
/* Stash the trailing partial block (possibly 0 bytes) for next time. */
memcpy(ctx->buffer, data, size);
}
/*
 * Finalize: append the 0x80 terminator, zero padding and the 64-bit message
 * length in bits (little-endian), run the last compression, then serialize
 * a/b/c/d little-endian into result[16] and wipe the context.
 */
__host__ __device__
void MD5_Final(unsigned char *result, MD5_CTX *ctx)
{
unsigned long used, available;
used = ctx->lo & 0x3f;
/* Mandatory 0x80 terminator byte directly after the message. */
ctx->buffer[used++] = 0x80;
available = 64 - used;
if (available < 8) {
/* Not enough room for the 8-byte length field: pad out and compress
 * an extra block first. */
memset(&ctx->buffer[used], 0, available);
body(ctx, ctx->buffer, 64);
used = 0;
available = 64;
}
memset(&ctx->buffer[used], 0, available - 8);
/* Convert the byte count to a bit count; `hi` already holds bytes >> 29,
 * which equals the high 32 bits of the bit count. */
ctx->lo <<= 3;
ctx->buffer[56] = ctx->lo;
ctx->buffer[57] = ctx->lo >> 8;
ctx->buffer[58] = ctx->lo >> 16;
ctx->buffer[59] = ctx->lo >> 24;
ctx->buffer[60] = ctx->hi;
ctx->buffer[61] = ctx->hi >> 8;
ctx->buffer[62] = ctx->hi >> 16;
ctx->buffer[63] = ctx->hi >> 24;
body(ctx, ctx->buffer, 64);
/* Emit the digest little-endian, one state word at a time. */
result[0] = ctx->a;
result[1] = ctx->a >> 8;
result[2] = ctx->a >> 16;
result[3] = ctx->a >> 24;
result[4] = ctx->b;
result[5] = ctx->b >> 8;
result[6] = ctx->b >> 16;
result[7] = ctx->b >> 24;
result[8] = ctx->c;
result[9] = ctx->c >> 8;
result[10] = ctx->c >> 16;
result[11] = ctx->c >> 24;
result[12] = ctx->d;
result[13] = ctx->d >> 8;
result[14] = ctx->d >> 16;
result[15] = ctx->d >> 24;
/* Scrub key-derived state from memory. */
memset(ctx, 0, sizeof(*ctx));
}
/* Single-thread kernel: hashes `text` (text_length-1 bytes, i.e. excluding
 * the trailing NUL) and writes the 16-byte digest to `result`. */
__global__
void count_md5(unsigned char * text, unsigned char* result, int text_length)
{
    MD5_CTX state;
    MD5_Init(&state);
    MD5_Update(&state, text, text_length - 1);
    MD5_Final(result, &state);
}
// Fixed-capacity candidate word for GPU-side hashing.
struct Word
{
char data[32]; // raw bytes, not NUL-terminated
int length; // number of valid bytes in `data`
};
// A target MD5 digest plus the plaintext found for it (if any).
struct MDHash
{
unsigned char data[16]; // the MD5 digest itself
unsigned char matchedWord[32]; // plaintext that produced `data`; valid only when wordMatched
bool wordMatched = false; // set once a matching word has been found
};
/*
 * One thread per candidate word: hash words[tid] and compare the digest
 * against every not-yet-cracked entry in `hashes`; on a full 16-byte match,
 * record the plaintext and mark the entry as cracked.
 * Launch with at least `wordsSize` threads in total.
 *
 * Fixes vs. original: `wordsSize` had no type, the kernel referenced the
 * undefined `text`/`text_length` instead of `words`, and the digest
 * comparison tested a byte against the array pointer `hashResult`.
 */
__global__
void check_md5(MDHash * hashes, int hashesSize, Word * words, int wordsSize)
{
    // Grid-wide thread index selects the candidate word; guard the tail.
    int w = blockIdx.x * blockDim.x + threadIdx.x;
    if (w >= wordsSize)
        return;
    Word checkedWord = words[w];
    // Hash the candidate word.
    unsigned char hashResult[16];
    MD5_CTX ctx;
    MD5_Init(&ctx);
    MD5_Update(&ctx, checkedWord.data, checkedWord.length);
    MD5_Final(hashResult, &ctx);
    // Compare against every target digest still unmatched.
    for (int i = 0; i < hashesSize; ++i)
    {
        if (hashes[i].wordMatched)
            continue;
        int j;
        for (j = 0; j < 16; ++j)
            if (hashes[i].data[j] != hashResult[j]) // compare bytes, not the array pointer
                break;
        if (j == 16)
        {
            // NOTE(review): two threads whose words both hash to the same
            // target could race on this write; harmless for this demo.
            hashes[i].wordMatched = true;
            for (j = 0; j < checkedWord.length; ++j)
                hashes[i].matchedWord[j] = checkedWord.data[j];
        }
    }
}
// Functor for thrust::transform: maps a candidate Word to its MD5 digest.
// Fix vs. original: operator() had no return type (ill-formed C++).
struct MD5Hasher
{
    __host__ __device__
    MDHash operator()(const Word & word) const
    {
        MDHash hash;
        MD5_CTX ctx;
        MD5_Init(&ctx);
        MD5_Update(&ctx, word.data, word.length);
        MD5_Final(hash.data, &ctx);
        return hash;
    }
};
#include <stdio.h>
#include <string.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
/*
 * Demo driver (CUDA API).
 * 1) Hashes the string "text" with a 1x1 count_md5 launch and prints the hex digest.
 * 2) Hashes a batch of two words via thrust::transform + MD5Hasher and prints both digests.
 * Fixes vs. original: the unused `hashesFound` vector (whose declaration was
 * missing a semicolon -- a compile error) is removed, the hex loops use a
 * plain `i < 16` bound instead of a signed/unsigned `16*sizeof(...)`
 * comparison, and the device allocations are freed.
 */
int main(int argc, char const *argv[])
{
    // ---- Demo 1: single string hashed on the device ----
    unsigned char host_text[] = "text";
    int text_mem_size = 5; // strlen("text") + trailing NUL
    unsigned char host_result[16];
    unsigned char* device_text;
    unsigned char* device_result;
    cudaMalloc((void **) &device_text, text_mem_size * sizeof(unsigned char));
    cudaMemcpy(device_text, host_text, text_mem_size * sizeof(unsigned char), cudaMemcpyHostToDevice);
    cudaMalloc((void **) &device_result, 16 * sizeof(unsigned char));
    count_md5<<<1, 1>>>(device_text, device_result, text_mem_size);
    cudaMemcpy(host_result, device_result, 16 * sizeof(unsigned char), cudaMemcpyDeviceToHost);
    for (int i = 0; i < 16; ++i)
        printf("%02x", host_result[i]);
    printf("\n");
    cudaFree(device_text);
    cudaFree(device_result);
    // ---- Demo 2: batch of words hashed with thrust ----
    thrust::host_vector<Word> host_words(2);
    char word1[] = "mom";
    for (int i = 0; word1[i] != '\0'; ++i)
        host_words[0].data[i] = word1[i];
    host_words[0].length = strlen(word1);
    char word2[] = "help";
    for (int i = 0; word2[i] != '\0'; ++i)
        host_words[1].data[i] = word2[i];
    host_words[1].length = strlen(word2);
    thrust::device_vector<Word> device_words = host_words;
    // Size the output from the input batch.
    thrust::device_vector<MDHash> device_passwords(host_words.size());
    thrust::transform(device_words.begin(), device_words.end(), device_passwords.begin(), MD5Hasher());
    for (int n = 0; n < 2; ++n)
    {
        MDHash h = device_passwords[n]; // device->host copy of one element
        for (int i = 0; i < 16; ++i)
            printf("%02x", h.data[i]);
        printf("\n");
    }
    return 0;
}
|
33e04eb2f9b644988ddc39b437ec1ea8be31e854.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h> | 33e04eb2f9b644988ddc39b437ec1ea8be31e854.cu |
#include <cuda_runtime.h> |
51265b5e4dcd4cf172beca2dacf1c5afa3344168.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2021 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "IntegratorHPMCMonoGPU.cuh"
#include "hoomd/RandomNumbers.h"
namespace hpmc
{
namespace gpu
{
namespace kernel
{
//! Kernel to generate expanded cells
/*! \param d_excell_idx Output array to list the particle indices in the expanded cells
\param d_excell_size Output array to list the number of particles in each expanded cell
\param excli Indexer for the expanded cells
\param d_cell_idx Particle indices in the normal cells
\param d_cell_size Number of particles in each cell
\param d_cell_adj Cell adjacency list
\param ci Cell indexer
\param cli Cell list indexer
\param cadji Cell adjacency indexer
\param ngpu Number of active devices
gpu_hpmc_excell_kernel executes one thread per cell. It gathers the particle indices from all neighboring cells
into the output expanded cell.
*/
__global__ void hpmc_excell(unsigned int *d_excell_idx,
                            unsigned int *d_excell_size,
                            const Index2D excli,
                            const unsigned int *d_cell_idx,
                            const unsigned int *d_cell_size,
                            const unsigned int *d_cell_adj,
                            const Index3D ci,
                            const Index2D cli,
                            const Index2D cadji,
                            const unsigned int ngpu)
{
    // one thread builds one output (expanded) cell
    const unsigned int cell = blockDim.x * blockIdx.x + threadIdx.x;
    if (cell >= ci.getNumElements())
        return;
    unsigned int count = 0;
    // walk this cell's adjacency list and gather every neighbor's contents
    for (unsigned int adj = 0; adj < cadji.getW(); adj++)
    {
        const unsigned int neigh = d_cell_adj[cadji(adj, cell)];
        // each active GPU keeps its own per-cell list; merge them all
        for (unsigned int igpu = 0; igpu < ngpu; ++igpu)
        {
            const unsigned int neigh_size = d_cell_size[neigh + igpu * ci.getNumElements()];
            for (unsigned int k = 0; k < neigh_size; k++)
            {
                // append this neighbor particle to the expanded cell
                const unsigned int pidx = d_cell_idx[cli(k, neigh) + igpu * cli.getNumElements()];
                d_excell_idx[excli(count, cell)] = pidx;
                count++;
            }
        }
    }
    // publish how many indices were written for this cell
    d_excell_size[cell] = count;
}
//! Kernel for grid shift
/*! \param d_postype postype of each particle
\param d_image Image flags for each particle
\param N number of particles
\param box Simulation box
\param shift Vector by which to translate the particles
Shift all the particles by a given vector.
\ingroup hpmc_kernels
*/
__global__ void hpmc_shift(Scalar4 *d_postype,
                           int3 *d_image,
                           const unsigned int N,
                           const BoxDim box,
                           const Scalar3 shift)
{
    // one thread per particle; guard the grid tail
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N)
        return;
    // translate the particle by the global shift vector
    const Scalar4 postype = d_postype[idx];
    Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
    pos += shift;
    // fold back into the primary box, updating the image flags
    int3 img = d_image[idx];
    box.wrap(pos, img);
    // store the shifted position (type stays in .w) and the new image
    d_postype[idx] = make_scalar4(pos.x, pos.y, pos.z, postype.w);
    d_image[idx] = img;
}
//!< Kernel to accept/reject trial moves using the per-particle neighbor lists
//!< built during the overlap/energy checks. A group of blockDim.x threads
//!< cooperates on one particle; blockDim.y groups per block. Requires
//!< n_groups*(2*sizeof(float)+sizeof(unsigned int)) bytes of dynamic shared memory.
__global__ void hpmc_accept(const unsigned int *d_update_order_by_ptl,
const unsigned int *d_trial_move_type,
const unsigned int *d_reject_out_of_cell,
unsigned int *d_reject,
unsigned int *d_reject_out,
const unsigned int *d_nneigh,
const unsigned int *d_nlist,
const unsigned int N_old,
const unsigned int N,
const unsigned int nwork,
const unsigned work_offset,
const unsigned int maxn,
bool patch,
const unsigned int *d_nlist_patch_old,
const unsigned int *d_nlist_patch_new,
const unsigned int *d_nneigh_patch_old,
const unsigned int *d_nneigh_patch_new,
const float *d_energy_old,
const float *d_energy_new,
const unsigned int maxn_patch,
unsigned int *d_condition,
const unsigned int seed,
const unsigned int select,
const unsigned int timestep)
{
// thread layout: x = lane within the group, y = group within the block
unsigned offset = threadIdx.x;
unsigned int group_size = blockDim.x;
unsigned int group = threadIdx.y;
unsigned int n_groups = blockDim.y;
bool master = offset == 0;
// the particle we are handling
unsigned int i = blockIdx.x*n_groups + group;
bool active = true;
if (i >= nwork)
active = false;
i += work_offset;
// per-group accumulators in dynamic shared memory:
// [energy_old | energy_new | reject flag]
extern __shared__ char sdata[];
float *s_energy_old = (float *) sdata;
float *s_energy_new = (float *) (s_energy_old + n_groups);
unsigned int *s_reject = (unsigned int *) (s_energy_new + n_groups);
bool move_active = false;
if (active && master)
{
// seed the reject flag with the out-of-cell pre-rejection
s_reject[group] = d_reject_out_of_cell[i];
s_energy_old[group] = 0.0f;
s_energy_new[group] = 0.0f;
}
if (active)
{
move_active = d_trial_move_type[i] > 0;
}
// all threads must see the initialized shared accumulators
__syncthreads();
if (active && move_active)
{
unsigned int update_order_i = d_update_order_by_ptl[i];
// iterate over overlapping neighbors in old configuration
unsigned int nneigh = d_nneigh[i];
bool accept = true;
// lanes of the group stride over this particle's neighbor list
for (unsigned int cur_neigh = offset; cur_neigh < nneigh; cur_neigh += group_size)
{
unsigned int primitive = d_nlist[cur_neigh+maxn*i];
// indices >= N_old encode the neighbor's *new* (trial) configuration
unsigned int j = primitive;
bool old = true;
if (j >= N_old)
{
j -= N_old;
old = false;
}
// has j been updated? ghost particles are not updated
bool j_has_been_updated = j < N && d_trial_move_type[j]
&& d_update_order_by_ptl[j] < update_order_i && !d_reject[j];
// acceptance, reject if current configuration of particle overlaps
if ((old && !j_has_been_updated) || (!old && j_has_been_updated))
{
accept = false;
break;
}
} // end loop over neighbors
if (!accept)
{
// any lane can veto the move for the whole group
atomicMax(&s_reject[group], 1);
}
if (patch)
{
// iterate over overlapping neighbors in old configuration
float energy_old = 0.0f;
unsigned int nneigh = d_nneigh_patch_old[i];
bool evaluated = false;
for (unsigned int cur_neigh = offset; cur_neigh < nneigh; cur_neigh += group_size)
{
unsigned int primitive = d_nlist_patch_old[cur_neigh+maxn_patch*i];
unsigned int j = primitive;
bool old = true;
if (j >= N_old)
{
j -= N_old;
old = false;
}
// has j been updated? ghost particles are not updated
bool j_has_been_updated = j < N && d_trial_move_type[j]
&& d_update_order_by_ptl[j] < update_order_i && !d_reject[j];
if ((old && !j_has_been_updated) || (!old && j_has_been_updated))
{
energy_old += d_energy_old[cur_neigh+maxn_patch*i];
evaluated = true;
}
} // end loop over neighbors
if (evaluated)
atomicAdd(&s_energy_old[group], energy_old);
// iterate over overlapping neighbors in new configuration
float energy_new = 0.0f;
nneigh = d_nneigh_patch_new[i];
evaluated = false;
for (unsigned int cur_neigh = offset; cur_neigh < nneigh; cur_neigh += group_size)
{
unsigned int primitive = d_nlist_patch_new[cur_neigh+maxn_patch*i];
unsigned int j = primitive;
bool old = true;
if (j >= N_old)
{
j -= N_old;
old = false;
}
// has j been updated? ghost particles are not updated
bool j_has_been_updated = j < N && d_trial_move_type[j]
&& d_update_order_by_ptl[j] < update_order_i && !d_reject[j];
if ((old && !j_has_been_updated) || (!old && j_has_been_updated))
{
energy_new += d_energy_new[cur_neigh+maxn_patch*i];
evaluated = true;
}
} // end loop over neighbors
if (evaluated)
atomicAdd(&s_energy_new[group], energy_new);
}
} // end if (active && move_active)
// wait until every lane's contribution has landed in shared memory
__syncthreads();
// group leader makes the final accept/reject decision for particle i
if (master && active && move_active)
{
float delta_U = s_energy_new[group] - s_energy_old[group];
// Metropolis-Hastings
hoomd::RandomGenerator rng_i(hoomd::RNGIdentifier::HPMCMonoAccept, seed, i, select, timestep);
bool accept = !s_reject[group] && (!patch || (hoomd::detail::generate_canonical<double>(rng_i) < slow::exp(-delta_U)));
if ((accept && d_reject[i]) || (!accept && !d_reject[i]))
{
// flag that we're not done yet (a trivial race condition upon write)
*d_condition = 1;
}
// write out to device memory
d_reject_out[i] = accept ? 0 : 1;
}
}
} // end namespace kernel
//! Driver for kernel::hpmc_excell()
//! Host-side driver for kernel::hpmc_excell(): clamps the requested block
//! size to the kernel's hardware limit and launches one thread per cell.
void hpmc_excell(unsigned int *d_excell_idx,
                 unsigned int *d_excell_size,
                 const Index2D& excli,
                 const unsigned int *d_cell_idx,
                 const unsigned int *d_cell_size,
                 const unsigned int *d_cell_adj,
                 const Index3D& ci,
                 const Index2D& cli,
                 const Index2D& cadji,
                 const unsigned int ngpu,
                 const unsigned int block_size)
{
    assert(d_excell_idx);
    assert(d_excell_size);
    assert(d_cell_idx);
    assert(d_cell_size);
    assert(d_cell_adj);
    // query the kernel's per-block thread limit once and cache it
    static int max_block_size = -1;
    if (max_block_size == -1)
    {
        hipFuncAttributes attr;
        hipFuncGetAttributes(&attr, reinterpret_cast<const void*>(kernel::hpmc_excell));
        max_block_size = attr.maxThreadsPerBlock;
    }
    // never exceed what the hardware allows for this kernel
    const unsigned int run_block_size = min(block_size, (unsigned int)max_block_size);
    // one thread per cell, rounded up to whole blocks
    const dim3 threads(run_block_size, 1, 1);
    const dim3 grid(ci.getNumElements() / run_block_size + 1, 1, 1);
    hipLaunchKernelGGL(kernel::hpmc_excell, grid, threads, 0, 0,
                       d_excell_idx,
                       d_excell_size,
                       excli,
                       d_cell_idx,
                       d_cell_size,
                       d_cell_adj,
                       ci,
                       cli,
                       cadji,
                       ngpu);
}
//! Kernel driver for kernel::hpmc_shift()
//! Host-side driver for kernel::hpmc_shift(): launches one thread per
//! particle, then synchronizes so managed memory is host-accessible again.
void hpmc_shift(Scalar4 *d_postype,
                int3 *d_image,
                const unsigned int N,
                const BoxDim& box,
                const Scalar3 shift,
                const unsigned int block_size)
{
    assert(d_postype);
    assert(d_image);
    // one thread per particle, rounded up to whole blocks
    const dim3 threads_shift(block_size, 1, 1);
    const dim3 grid_shift(N / block_size + 1, 1, 1);
    hipLaunchKernelGGL(kernel::hpmc_shift, grid_shift, threads_shift, 0, 0,
                       d_postype,
                       d_image,
                       N,
                       box,
                       shift);
    // after this kernel we return control of cuda managed memory to the host
    hipDeviceSynchronize();
}
//! Host-side driver for kernel::hpmc_accept(): picks a launch configuration
//! (tpp lanes per particle, grouped into blocks) and launches once per
//! active GPU over that GPU's particle range.
void hpmc_accept(const unsigned int *d_update_order_by_ptl,
const unsigned int *d_trial_move_type,
const unsigned int *d_reject_out_of_cell,
unsigned int *d_reject,
unsigned int *d_reject_out,
const unsigned int *d_nneigh,
const unsigned int *d_nlist,
const unsigned int N_old,
const unsigned int N,
const GPUPartition& gpu_partition,
const unsigned int maxn,
bool patch,
const unsigned int *d_nlist_patch_old,
const unsigned int *d_nlist_patch_new,
const unsigned int *d_nneigh_patch_old,
const unsigned int *d_nneigh_patch_new,
const float *d_energy_old,
const float *d_energy_new,
const unsigned int maxn_patch,
unsigned int *d_condition,
const unsigned int seed,
const unsigned int select,
const unsigned int timestep,
const unsigned int block_size,
const unsigned int tpp)
{
// determine the maximum block size and clamp the input block size down
static int max_block_size = -1;
if (max_block_size == -1)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, reinterpret_cast<const void*>(kernel::hpmc_accept));
max_block_size = attr.maxThreadsPerBlock;
}
// setup the grid to run the kernel
unsigned int run_block_size = min(block_size, (unsigned int)max_block_size);
// threads per particle
unsigned int cur_tpp = min(run_block_size,tpp);
// shrink tpp until it divides the block size evenly (kernel assumes this)
while (run_block_size % cur_tpp != 0)
cur_tpp--;
// number of particle groups per block (kernel's blockDim.y)
unsigned int n_groups = run_block_size/cur_tpp;
dim3 threads(cur_tpp, n_groups, 1);
// launch over each active GPU's contiguous particle range
for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev)
{
auto range = gpu_partition.getRangeAndSetGPU(idev);
unsigned int nwork = range.second - range.first;
const unsigned int num_blocks = (nwork + n_groups - 1)/n_groups;
dim3 grid(num_blocks, 1, 1);
// per-group shared memory: two float energy accumulators + reject flag
unsigned int shared_bytes = (unsigned int)(n_groups * (2*sizeof(float) + sizeof(unsigned int)));
hipLaunchKernelGGL(kernel::hpmc_accept, grid, threads, shared_bytes, 0,
d_update_order_by_ptl,
d_trial_move_type,
d_reject_out_of_cell,
d_reject,
d_reject_out,
d_nneigh,
d_nlist,
N_old,
N,
nwork,
range.first,
maxn,
patch,
d_nlist_patch_old,
d_nlist_patch_new,
d_nneigh_patch_old,
d_nneigh_patch_new,
d_energy_old,
d_energy_new,
maxn_patch,
d_condition,
seed,
select,
timestep);
}
}
} // end namespace gpu
} // end namespace hpmc
| 51265b5e4dcd4cf172beca2dacf1c5afa3344168.cu | // Copyright (c) 2009-2021 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "IntegratorHPMCMonoGPU.cuh"
#include "hoomd/RandomNumbers.h"
namespace hpmc
{
namespace gpu
{
namespace kernel
{
//! Kernel to generate expanded cells
/*! \param d_excell_idx Output array to list the particle indices in the expanded cells
\param d_excell_size Output array to list the number of particles in each expanded cell
\param excli Indexer for the expanded cells
\param d_cell_idx Particle indices in the normal cells
\param d_cell_size Number of particles in each cell
\param d_cell_adj Cell adjacency list
\param ci Cell indexer
\param cli Cell list indexer
\param cadji Cell adjacency indexer
\param ngpu Number of active devices
gpu_hpmc_excell_kernel executes one thread per cell. It gathers the particle indices from all neighboring cells
into the output expanded cell.
*/
__global__ void hpmc_excell(unsigned int *d_excell_idx,
                            unsigned int *d_excell_size,
                            const Index2D excli,
                            const unsigned int *d_cell_idx,
                            const unsigned int *d_cell_size,
                            const unsigned int *d_cell_adj,
                            const Index3D ci,
                            const Index2D cli,
                            const Index2D cadji,
                            const unsigned int ngpu)
{
    // one thread builds one output (expanded) cell
    const unsigned int cell = blockDim.x * blockIdx.x + threadIdx.x;
    if (cell >= ci.getNumElements())
        return;
    unsigned int count = 0;
    // walk this cell's adjacency list and gather every neighbor's contents
    for (unsigned int adj = 0; adj < cadji.getW(); adj++)
    {
        const unsigned int neigh = d_cell_adj[cadji(adj, cell)];
        // each active GPU keeps its own per-cell list; merge them all
        for (unsigned int igpu = 0; igpu < ngpu; ++igpu)
        {
            const unsigned int neigh_size = d_cell_size[neigh + igpu * ci.getNumElements()];
            for (unsigned int k = 0; k < neigh_size; k++)
            {
                // append this neighbor particle to the expanded cell
                const unsigned int pidx = d_cell_idx[cli(k, neigh) + igpu * cli.getNumElements()];
                d_excell_idx[excli(count, cell)] = pidx;
                count++;
            }
        }
    }
    // publish how many indices were written for this cell
    d_excell_size[cell] = count;
}
//! Kernel for grid shift
/*! \param d_postype postype of each particle
\param d_image Image flags for each particle
\param N number of particles
\param box Simulation box
\param shift Vector by which to translate the particles
Shift all the particles by a given vector.
\ingroup hpmc_kernels
*/
__global__ void hpmc_shift(Scalar4 *d_postype,
                           int3 *d_image,
                           const unsigned int N,
                           const BoxDim box,
                           const Scalar3 shift)
{
    // one thread per particle; guard the grid tail
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N)
        return;
    // translate the particle by the global shift vector
    const Scalar4 postype = d_postype[idx];
    Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
    pos += shift;
    // fold back into the primary box, updating the image flags
    int3 img = d_image[idx];
    box.wrap(pos, img);
    // store the shifted position (type stays in .w) and the new image
    d_postype[idx] = make_scalar4(pos.x, pos.y, pos.z, postype.w);
    d_image[idx] = img;
}
//!< Kernel to accept/reject
//!< Kernel to accept/reject
/*! One group of blockDim.x threads handles one trial-moved particle. The group
    cooperatively scans the particle's dependency neighbor list (and, when
    patch energies are enabled, the old/new patch neighbor lists), accumulating
    a per-group reject flag and energy sums in shared memory. The group master
    then applies the Metropolis criterion; *d_condition is raised whenever a
    particle's accept/reject flag changes, requesting another resolution pass.

    Shared memory layout (per block): n_groups floats (old energy), n_groups
    floats (new energy), n_groups unsigned ints (reject flag).
*/
__global__ void hpmc_accept(const unsigned int *d_update_order_by_ptl,
                            const unsigned int *d_trial_move_type,
                            const unsigned int *d_reject_out_of_cell,
                            unsigned int *d_reject,
                            unsigned int *d_reject_out,
                            const unsigned int *d_nneigh,
                            const unsigned int *d_nlist,
                            const unsigned int N_old,
                            const unsigned int N,
                            const unsigned int nwork,
                            const unsigned work_offset,
                            const unsigned int maxn,
                            bool patch,
                            const unsigned int *d_nlist_patch_old,
                            const unsigned int *d_nlist_patch_new,
                            const unsigned int *d_nneigh_patch_old,
                            const unsigned int *d_nneigh_patch_new,
                            const float *d_energy_old,
                            const float *d_energy_new,
                            const unsigned int maxn_patch,
                            unsigned int *d_condition,
                            const unsigned int seed,
                            const unsigned int select,
                            const unsigned int timestep)
    {
    // lane within the group, and the group's position within the block
    unsigned offset = threadIdx.x;
    unsigned int group_size = blockDim.x;
    unsigned int group = threadIdx.y;
    unsigned int n_groups = blockDim.y;
    bool master = offset == 0;

    // the particle we are handling
    unsigned int i = blockIdx.x*n_groups + group;

    bool active = true;
    if (i >= nwork)
        active = false;
    i += work_offset;

    // carve the dynamic shared memory into the three per-group arrays
    extern __shared__ char sdata[];
    float *s_energy_old = (float *) sdata;
    float *s_energy_new = (float *) (s_energy_old + n_groups);
    unsigned int *s_reject = (unsigned int *) (s_energy_new + n_groups);

    bool move_active = false;

    if (active && master)
        {
        // seed the flag with the out-of-cell rejection; zero the accumulators
        s_reject[group] = d_reject_out_of_cell[i];
        s_energy_old[group] = 0.0f;
        s_energy_new[group] = 0.0f;
        }

    if (active)
        {
        move_active = d_trial_move_type[i] > 0;
        }

    // make the master's initialization visible to the whole group
    __syncthreads();

    if (active && move_active)
        {
        unsigned int update_order_i = d_update_order_by_ptl[i];

        // iterate over overlapping neighbors in old configuration
        unsigned int nneigh = d_nneigh[i];
        bool accept = true;
        for (unsigned int cur_neigh = offset; cur_neigh < nneigh; cur_neigh += group_size)
            {
            unsigned int primitive = d_nlist[cur_neigh+maxn*i];

            // indices >= N_old encode the neighbor's *new* (trial) configuration
            unsigned int j = primitive;
            bool old = true;
            if (j >= N_old)
                {
                j -= N_old;
                old = false;
                }

            // has j been updated? ghost particles are not updated
            bool j_has_been_updated = j < N && d_trial_move_type[j]
                && d_update_order_by_ptl[j] < update_order_i && !d_reject[j];

            // acceptance, reject if current configuration of particle overlaps
            if ((old && !j_has_been_updated) || (!old && j_has_been_updated))
                {
                accept = false;
                break;
                }
            } // end loop over neighbors

        if (!accept)
            {
            atomicMax(&s_reject[group], 1);
            }

        if (patch)
            {
            // iterate over overlapping neighbors in old configuration
            float energy_old = 0.0f;
            unsigned int nneigh = d_nneigh_patch_old[i];
            bool evaluated = false;
            for (unsigned int cur_neigh = offset; cur_neigh < nneigh; cur_neigh += group_size)
                {
                unsigned int primitive = d_nlist_patch_old[cur_neigh+maxn_patch*i];

                unsigned int j = primitive;
                bool old = true;
                if (j >= N_old)
                    {
                    j -= N_old;
                    old = false;
                    }

                // has j been updated? ghost particles are not updated
                bool j_has_been_updated = j < N && d_trial_move_type[j]
                    && d_update_order_by_ptl[j] < update_order_i && !d_reject[j];

                // only count interactions with the neighbor's *current* configuration
                if ((old && !j_has_been_updated) || (!old && j_has_been_updated))
                    {
                    energy_old += d_energy_old[cur_neigh+maxn_patch*i];
                    evaluated = true;
                    }
                } // end loop over neighbors

            if (evaluated)
                atomicAdd(&s_energy_old[group], energy_old);

            // iterate over overlapping neighbors in new configuration
            float energy_new = 0.0f;
            nneigh = d_nneigh_patch_new[i];
            evaluated = false;
            for (unsigned int cur_neigh = offset; cur_neigh < nneigh; cur_neigh += group_size)
                {
                unsigned int primitive = d_nlist_patch_new[cur_neigh+maxn_patch*i];

                unsigned int j = primitive;
                bool old = true;
                if (j >= N_old)
                    {
                    j -= N_old;
                    old = false;
                    }

                // has j been updated? ghost particles are not updated
                bool j_has_been_updated = j < N && d_trial_move_type[j]
                    && d_update_order_by_ptl[j] < update_order_i && !d_reject[j];

                if ((old && !j_has_been_updated) || (!old && j_has_been_updated))
                    {
                    energy_new += d_energy_new[cur_neigh+maxn_patch*i];
                    evaluated = true;
                    }
                } // end loop over neighbors

            if (evaluated)
                atomicAdd(&s_energy_new[group], energy_new);
            }
        } // end if (active && move_active)

    // wait for all lanes' contributions before the master reads the totals
    __syncthreads();

    if (master && active && move_active)
        {
        float delta_U = s_energy_new[group] - s_energy_old[group];

        // Metropolis-Hastings
        hoomd::RandomGenerator rng_i(hoomd::RNGIdentifier::HPMCMonoAccept, seed, i, select, timestep);
        bool accept = !s_reject[group] && (!patch || (hoomd::detail::generate_canonical<double>(rng_i) < slow::exp(-delta_U)));

        if ((accept && d_reject[i]) || (!accept && !d_reject[i]))
            {
            // flag that we're not done yet (a trivial race condition upon write)
            *d_condition = 1;
            }

        // write out to device memory
        d_reject_out[i] = accept ? 0 : 1;
        }
    }
} // end namespace kernel
//! Driver for kernel::hpmc_excell()
//! Driver for kernel::hpmc_excell()
/*! Launches one thread per cell with a 1D grid. The kernel's maximum block
    size is queried once and cached (the function attributes do not change at
    runtime), and the caller-requested block size is clamped to it.
*/
void hpmc_excell(unsigned int *d_excell_idx,
                 unsigned int *d_excell_size,
                 const Index2D& excli,
                 const unsigned int *d_cell_idx,
                 const unsigned int *d_cell_size,
                 const unsigned int *d_cell_adj,
                 const Index3D& ci,
                 const Index2D& cli,
                 const Index2D& cadji,
                 const unsigned int ngpu,
                 const unsigned int block_size)
    {
    assert(d_excell_idx);
    assert(d_excell_size);
    assert(d_cell_idx);
    assert(d_cell_size);
    assert(d_cell_adj);

    // determine the maximum block size and clamp the input block size down
    // (cached in a function-local static; queried only on the first call)
    static int max_block_size = -1;
    if (max_block_size == -1)
        {
        hipFuncAttributes attr;
        hipFuncGetAttributes(&attr, reinterpret_cast<const void*>(kernel::hpmc_excell));
        max_block_size = attr.maxThreadsPerBlock;
        }

    // setup the grid to run the kernel: round the grid up so every cell is covered
    unsigned int run_block_size = min(block_size, (unsigned int)max_block_size);
    dim3 threads(run_block_size, 1, 1);
    dim3 grid(ci.getNumElements() / run_block_size + 1, 1, 1);

    hipLaunchKernelGGL(kernel::hpmc_excell, dim3(grid), dim3(threads), 0, 0, d_excell_idx,
                                           d_excell_size,
                                           excli,
                                           d_cell_idx,
                                           d_cell_size,
                                           d_cell_adj,
                                           ci,
                                           cli,
                                           cadji,
                                           ngpu);
    }
//! Kernel driver for kernel::hpmc_shift()
//! Kernel driver for kernel::hpmc_shift()
/*! Launches one thread per particle (grid rounded up to cover all N) and then
    blocks on the device so that CUDA managed memory may be safely accessed
    from the host after this call returns.
*/
void hpmc_shift(Scalar4 *d_postype,
                int3 *d_image,
                const unsigned int N,
                const BoxDim& box,
                const Scalar3 shift,
                const unsigned int block_size)
    {
    assert(d_postype);
    assert(d_image);

    // one-dimensional launch configuration covering all N particles
    const dim3 block_dims(block_size, 1, 1);
    const dim3 grid_dims(N / block_size + 1, 1, 1);

    hipLaunchKernelGGL(kernel::hpmc_shift, grid_dims, block_dims, 0, 0,
                       d_postype,
                       d_image,
                       N,
                       box,
                       shift);

    // after this kernel we return control of cuda managed memory to the host
    hipDeviceSynchronize();
    }
//! Driver for kernel::hpmc_accept()
/*! Splits the particle range across the active GPUs via \a gpu_partition and
    launches one kernel per device. Each block runs n_groups groups of
    \a tpp (threads-per-particle) lanes; tpp is reduced until it divides the
    block size evenly. Dynamic shared memory holds the per-group accumulators
    (two floats and one unsigned int per group).
*/
void hpmc_accept(const unsigned int *d_update_order_by_ptl,
                 const unsigned int *d_trial_move_type,
                 const unsigned int *d_reject_out_of_cell,
                 unsigned int *d_reject,
                 unsigned int *d_reject_out,
                 const unsigned int *d_nneigh,
                 const unsigned int *d_nlist,
                 const unsigned int N_old,
                 const unsigned int N,
                 const GPUPartition& gpu_partition,
                 const unsigned int maxn,
                 bool patch,
                 const unsigned int *d_nlist_patch_old,
                 const unsigned int *d_nlist_patch_new,
                 const unsigned int *d_nneigh_patch_old,
                 const unsigned int *d_nneigh_patch_new,
                 const float *d_energy_old,
                 const float *d_energy_new,
                 const unsigned int maxn_patch,
                 unsigned int *d_condition,
                 const unsigned int seed,
                 const unsigned int select,
                 const unsigned int timestep,
                 const unsigned int block_size,
                 const unsigned int tpp)
    {
    // determine the maximum block size and clamp the input block size down
    // (queried once and cached; kernel attributes are fixed at runtime)
    static int max_block_size = -1;
    if (max_block_size == -1)
        {
        hipFuncAttributes attr;
        hipFuncGetAttributes(&attr, reinterpret_cast<const void*>(kernel::hpmc_accept));
        max_block_size = attr.maxThreadsPerBlock;
        }

    // setup the grid to run the kernel
    unsigned int run_block_size = min(block_size, (unsigned int)max_block_size);

    // threads per particle: shrink until it evenly divides the block size
    unsigned int cur_tpp = min(run_block_size,tpp);
    while (run_block_size % cur_tpp != 0)
        cur_tpp--;

    unsigned int n_groups = run_block_size/cur_tpp;
    dim3 threads(cur_tpp, n_groups, 1);

    // launch one kernel per active device over that device's particle range
    for (int idev = gpu_partition.getNumActiveGPUs() - 1; idev >= 0; --idev)
        {
        auto range = gpu_partition.getRangeAndSetGPU(idev);

        unsigned int nwork = range.second - range.first;
        const unsigned int num_blocks = (nwork + n_groups - 1)/n_groups;

        dim3 grid(num_blocks, 1, 1);

        // per-group shared memory: old energy, new energy, reject flag
        unsigned int shared_bytes = (unsigned int)(n_groups * (2*sizeof(float) + sizeof(unsigned int)));

        hipLaunchKernelGGL(kernel::hpmc_accept, grid, threads, shared_bytes, 0,
            d_update_order_by_ptl,
            d_trial_move_type,
            d_reject_out_of_cell,
            d_reject,
            d_reject_out,
            d_nneigh,
            d_nlist,
            N_old,
            N,
            nwork,
            range.first,
            maxn,
            patch,
            d_nlist_patch_old,
            d_nlist_patch_new,
            d_nneigh_patch_old,
            d_nneigh_patch_new,
            d_energy_old,
            d_energy_new,
            maxn_patch,
            d_condition,
            seed,
            select,
            timestep);
        }
    }
} // end namespace gpu
} // end namespace hpmc
|
bc78380ecbb2f56b20e59468ed6a69200f72ae13.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cstdlib>
#include <time.h>
#include <math.h>
// Assertion to check for errors
#define CUDA_SAFE_CALL(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a failed HIP runtime call (and abort by default).
// file/line identify the call site; normally supplied by CUDA_SAFE_CALL.
// `file` is const-qualified: callers pass the __FILE__ string literal, and
// binding a literal to a non-const char* is ill-formed in C++11 and later.
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
    if (code != hipSuccess)
    {
        fprintf(stderr,"CUDA_SAFE_CALL: %s %s %d\n", hipGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}
#define NUM_THREADS_PER_BLOCK 256
#define PRINT_TIME 1
#define SM_ARR_LEN 1024
#define NUM_BLOCKS (SM_ARR_LEN*SM_ARR_LEN + NUM_THREADS_PER_BLOCK-1)/NUM_THREADS_PER_BLOCK
#define TOL 1//0.00001
#define OMEGA 1.60
#define IMUL(a, b) __mul24(a, b)
void initializeArray1D(float *arr, int len, int seed);
// Naive dense matrix-matrix multiply C = A * B for square N x N matrices in
// row-major layout. One thread computes one element of C; expects a 2D launch
// configuration covering the N x N output.
__global__ void kernel_mmm (float* A, float* B, float* C, int N) {
    int row = blockIdx.y*blockDim.y+threadIdx.y;
    int col = blockIdx.x*blockDim.x+threadIdx.x;

    float tmpSum = 0;
    if(row < N && col<N){
        // dot product of row `row` of A with column `col` of B
        for(int i = 0; i<N; i++){
            tmpSum += A[row * N + i] * B[i * N + col];
        }
        // store inside the bounds check: the original wrote unconditionally,
        // which is an out-of-bounds global write for threads past the edge
        C[row * N + col] = tmpSum;
    }
}
// Multiply two random arrLen x arrLen matrices on the GPU, recompute the
// product on the host, and print a sample of both results plus timings.
// Optional argv[1] overrides the matrix dimension (default SM_ARR_LEN).
int main(int argc, char **argv){
    int arrLen = 0;

    // GPU Timing variables
    hipEvent_t start, stop;
    float elapsed_gpu;

    // Arrays on GPU global memory
    float *d_x;
    float *d_y;
    float *d_result;

    // Arrays on the host memory
    float *h_x;
    float *h_y;
    float *h_result;
    float *h_result_gold;

    int i;

    if (argc > 1) {
        arrLen = atoi(argv[1]);
    }
    else {
        arrLen = SM_ARR_LEN;
    }

    printf("Length of the array = %d\n", arrLen);

    // Select GPU
    CUDA_SAFE_CALL(hipSetDevice(0));

    // Allocate GPU memory for the three arrLen x arrLen matrices
    size_t allocSize = arrLen*arrLen * sizeof(float);
    CUDA_SAFE_CALL(hipMalloc((void **)&d_x, allocSize));
    CUDA_SAFE_CALL(hipMalloc((void **)&d_y, allocSize));
    CUDA_SAFE_CALL(hipMalloc((void **)&d_result, allocSize));

    // Allocate arrays on host memory
    h_x = (float *) malloc(allocSize);
    h_y = (float *) malloc(allocSize);
    h_result = (float *) malloc(allocSize);
    h_result_gold = (float *) malloc(allocSize);

    // Initialize the host arrays
    printf("\nInitializing the arrays ...");
    // Arrays are initialized with a known seed for reproducability
    initializeArray1D(h_x, arrLen, 2453);
    initializeArray1D(h_y, arrLen, 1467);
    printf("\t... done\n\n");

#if PRINT_TIME
    // Create the cuda events and record the start on the default stream
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, 0);
#endif

    // Transfer the input arrays to the GPU memory. (No copy of h_result is
    // needed: the kernel overwrites every element of d_result.)
    CUDA_SAFE_CALL(hipMemcpy(d_y, h_y, allocSize, hipMemcpyHostToDevice));
    CUDA_SAFE_CALL(hipMemcpy(d_x, h_x, allocSize, hipMemcpyHostToDevice));

    // Launch the kernel with a 2D grid that covers the whole output matrix.
    // (The original 1D grid left blockIdx.y == 0 for every block, so only
    // the first 16 rows of the result were ever computed.)
    dim3 dimBlock(16,16);
    dim3 dimGrid((arrLen + dimBlock.x - 1) / dimBlock.x,
                 (arrLen + dimBlock.y - 1) / dimBlock.y);
    hipLaunchKernelGGL(( kernel_mmm), dim3(dimGrid), dim3(dimBlock), 0, 0, d_x, d_y, d_result, arrLen);

    // Check for errors during launch
    CUDA_SAFE_CALL(hipPeekAtLastError());

    // Transfer the results back to the host
    CUDA_SAFE_CALL(hipMemcpy(h_result, d_result, allocSize, hipMemcpyDeviceToHost));

#if PRINT_TIME
    // Stop and destroy the timer (the event sync also waits for the copies)
    hipEventRecord(stop,0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&elapsed_gpu, start, stop);
    printf("\nGPU time: %f (msec)\n", elapsed_gpu);
    hipEventDestroy(start);
    hipEventDestroy(stop);
#endif

    clock_t begin = clock();

    // Compute the reference result on the host
    int l, m, n;
    int length = arrLen;
    float sum;
    for (l = 0; l < length; l++) {
        for (m = 0; m < length; m++) {
            sum = 0;
            for (n = 0; n < length; n++){
                sum += h_x[l*length+n] * h_y[n*length+m];
            }
            // assign rather than accumulate: the buffer comes from malloc and
            // its initial contents are indeterminate
            h_result_gold[l*length+m] = sum;
        }
    }
    clock_t end = clock();

    // Print a sample of both results for manual comparison
    for(i = 0; i < 50; i++) {
        printf("%d:\t%.8f\t%.8f\n", i, h_result_gold[i], h_result[i]);
    }

    // initialize before accumulating; reading an uninitialized double is UB
    double time_spent = 0.0;
    time_spent += (double)(end - begin) / CLOCKS_PER_SEC;
    printf("Time elpased is %f seconds", time_spent);

    // Free-up device and host memory (including the previously-leaked gold buffer)
    CUDA_SAFE_CALL(hipFree(d_x));
    CUDA_SAFE_CALL(hipFree(d_y));
    CUDA_SAFE_CALL(hipFree(d_result));

    free(h_x);
    free(h_y);
    free(h_result);
    free(h_result_gold);

    return 0;
}
// Fill a len x len row-major matrix with pseudo-random values in [0, 1].
// The same seed always reproduces the same sequence (srand/rand).
void initializeArray1D(float *arr, int len, int seed) {
    srand(seed);
    // rand() is consumed in row-major order, one value per element
    for (int idx = 0; idx < len * len; idx++) {
        arr[idx] = (float) rand() / RAND_MAX;
    }
}
| bc78380ecbb2f56b20e59468ed6a69200f72ae13.cu | #include <cstdio>
#include <cstdlib>
#include <time.h>
#include <math.h>
// Assertion to check for errors
#define CUDA_SAFE_CALL(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a failed CUDA runtime call (and abort by default).
// file/line identify the call site; normally supplied by CUDA_SAFE_CALL.
// `file` is const-qualified: callers pass the __FILE__ string literal, and
// binding a literal to a non-const char* is ill-formed in C++11 and later.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"CUDA_SAFE_CALL: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}
#define NUM_THREADS_PER_BLOCK 256
#define PRINT_TIME 1
#define SM_ARR_LEN 1024
#define NUM_BLOCKS (SM_ARR_LEN*SM_ARR_LEN + NUM_THREADS_PER_BLOCK-1)/NUM_THREADS_PER_BLOCK
#define TOL 1//0.00001
#define OMEGA 1.60
#define IMUL(a, b) __mul24(a, b)
void initializeArray1D(float *arr, int len, int seed);
// Naive dense matrix-matrix multiply C = A * B for square N x N matrices in
// row-major layout. One thread computes one element of C; expects a 2D launch
// configuration covering the N x N output.
__global__ void kernel_mmm (float* A, float* B, float* C, int N) {
    int row = blockIdx.y*blockDim.y+threadIdx.y;
    int col = blockIdx.x*blockDim.x+threadIdx.x;

    float tmpSum = 0;
    if(row < N && col<N){
        // dot product of row `row` of A with column `col` of B
        for(int i = 0; i<N; i++){
            tmpSum += A[row * N + i] * B[i * N + col];
        }
        // store inside the bounds check: the original wrote unconditionally,
        // which is an out-of-bounds global write for threads past the edge
        C[row * N + col] = tmpSum;
    }
}
// Multiply two random arrLen x arrLen matrices on the GPU, recompute the
// product on the host, and print a sample of both results plus timings.
// Optional argv[1] overrides the matrix dimension (default SM_ARR_LEN).
int main(int argc, char **argv){
    int arrLen = 0;

    // GPU Timing variables
    cudaEvent_t start, stop;
    float elapsed_gpu;

    // Arrays on GPU global memory
    float *d_x;
    float *d_y;
    float *d_result;

    // Arrays on the host memory
    float *h_x;
    float *h_y;
    float *h_result;
    float *h_result_gold;

    int i;

    if (argc > 1) {
        arrLen = atoi(argv[1]);
    }
    else {
        arrLen = SM_ARR_LEN;
    }

    printf("Length of the array = %d\n", arrLen);

    // Select GPU
    CUDA_SAFE_CALL(cudaSetDevice(0));

    // Allocate GPU memory for the three arrLen x arrLen matrices
    size_t allocSize = arrLen*arrLen * sizeof(float);
    CUDA_SAFE_CALL(cudaMalloc((void **)&d_x, allocSize));
    CUDA_SAFE_CALL(cudaMalloc((void **)&d_y, allocSize));
    CUDA_SAFE_CALL(cudaMalloc((void **)&d_result, allocSize));

    // Allocate arrays on host memory
    h_x = (float *) malloc(allocSize);
    h_y = (float *) malloc(allocSize);
    h_result = (float *) malloc(allocSize);
    h_result_gold = (float *) malloc(allocSize);

    // Initialize the host arrays
    printf("\nInitializing the arrays ...");
    // Arrays are initialized with a known seed for reproducability
    initializeArray1D(h_x, arrLen, 2453);
    initializeArray1D(h_y, arrLen, 1467);
    printf("\t... done\n\n");

#if PRINT_TIME
    // Create the cuda events and record the start on the default stream
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
#endif

    // Transfer the input arrays to the GPU memory. (No copy of h_result is
    // needed: the kernel overwrites every element of d_result.)
    CUDA_SAFE_CALL(cudaMemcpy(d_y, h_y, allocSize, cudaMemcpyHostToDevice));
    CUDA_SAFE_CALL(cudaMemcpy(d_x, h_x, allocSize, cudaMemcpyHostToDevice));

    // Launch the kernel with a 2D grid that covers the whole output matrix.
    // (The original 1D grid left blockIdx.y == 0 for every block, so only
    // the first 16 rows of the result were ever computed.)
    dim3 dimBlock(16,16);
    dim3 dimGrid((arrLen + dimBlock.x - 1) / dimBlock.x,
                 (arrLen + dimBlock.y - 1) / dimBlock.y);
    kernel_mmm<<<dimGrid, dimBlock>>>(d_x, d_y, d_result, arrLen);

    // Check for errors during launch
    CUDA_SAFE_CALL(cudaPeekAtLastError());

    // Transfer the results back to the host
    CUDA_SAFE_CALL(cudaMemcpy(h_result, d_result, allocSize, cudaMemcpyDeviceToHost));

#if PRINT_TIME
    // Stop and destroy the timer (the event sync also waits for the copies)
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed_gpu, start, stop);
    printf("\nGPU time: %f (msec)\n", elapsed_gpu);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
#endif

    clock_t begin = clock();

    // Compute the reference result on the host
    int l, m, n;
    int length = arrLen;
    float sum;
    for (l = 0; l < length; l++) {
        for (m = 0; m < length; m++) {
            sum = 0;
            for (n = 0; n < length; n++){
                sum += h_x[l*length+n] * h_y[n*length+m];
            }
            // assign rather than accumulate: the buffer comes from malloc and
            // its initial contents are indeterminate
            h_result_gold[l*length+m] = sum;
        }
    }
    clock_t end = clock();

    // Print a sample of both results for manual comparison
    for(i = 0; i < 50; i++) {
        printf("%d:\t%.8f\t%.8f\n", i, h_result_gold[i], h_result[i]);
    }

    // initialize before accumulating; reading an uninitialized double is UB
    double time_spent = 0.0;
    time_spent += (double)(end - begin) / CLOCKS_PER_SEC;
    printf("Time elpased is %f seconds", time_spent);

    // Free-up device and host memory (including the previously-leaked gold buffer)
    CUDA_SAFE_CALL(cudaFree(d_x));
    CUDA_SAFE_CALL(cudaFree(d_y));
    CUDA_SAFE_CALL(cudaFree(d_result));

    free(h_x);
    free(h_y);
    free(h_result);
    free(h_result_gold);

    return 0;
}
// Fill a len x len row-major matrix with pseudo-random values in [0, 1].
// The same seed always reproduces the same sequence (srand/rand).
void initializeArray1D(float *arr, int len, int seed) {
    srand(seed);
    // rand() is consumed in row-major order, one value per element
    for (int idx = 0; idx < len * len; idx++) {
        arr[idx] = (float) rand() / RAND_MAX;
    }
}
|
bb84fcc4afd8c1573130a238154c1727ae0bdabf.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This sample demonstrates how to use texture fetches from layered 2D textures in CUDA C
*
* This sample first generates a 3D input data array for the layered texture
* and the expected output. Then it starts CUDA C kernels, one for each layer,
* which fetch their layer's texture data (using normalized texture coordinates)
* transform it to the expected output, and write it to a 3D output data array.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, kernels
#include <hip/hip_runtime.h>
// includes, project
#include <helper_cuda.h>
#include <helper_functions.h> // helper for shared that are common to CUDA Samples
// Sample name used in status messages. const-qualified: binding a string
// literal to a non-const char* is ill-formed in C++11 and later.
static const char *sSDKname = "simpleLayeredTexture";
// includes, kernels
// declare texture reference for layered 2D float texture
// Note: The "dim" field in the texture reference template is now deprecated.
// Instead, please use a texture type macro such as hipTextureType1D, etc.
texture<float, hipTextureType2DLayered> tex;
////////////////////////////////////////////////////////////////////////////////
//! Transform a layer of a layered 2D texture using texture lookups
//! @param g_odata output data in global memory (width*height floats per layer)
//! @param width   image width in texels
//! @param height  image height in texels
//! @param layer   which layer of the global texture reference `tex` to read
////////////////////////////////////////////////////////////////////////////////
__global__ void
transformKernel(float *g_odata, int width, int height, int layer)
{
    // calculate this thread's data point
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;

    // guard the tail: a grid that overcovers the image must not write out of
    // bounds (the sample launches an exact grid, but this keeps the kernel
    // safe for arbitrary width/height)
    if (x >= (unsigned int)width || y >= (unsigned int)height)
        return;

    // 0.5f offset and division are necessary to access the original data points
    // in the texture (such that bilinear interpolation will not be activated).
    // For details, see also CUDA Programming Guide, Appendix D
    float u = (x+0.5f) / (float) width;
    float v = (y+0.5f) / (float) height;

    // read from texture, do expected transformation and write to global memory
    g_odata[layer*width*height + y*width + x] = -tex2DLayered(tex, u, v, layer) + layer;
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Generate a 3D data array, upload it into a layered 2D texture, run one
// transform kernel per layer, and compare the output against a host-computed
// reference. Exit code reflects the comparison (or waives on SM < 2.0).
int
main(int argc, char **argv)
{
    printf("[%s] - Starting...\n", sSDKname);

    // use command-line specified CUDA device, otherwise use device with highest Gflops/s
    int devID = findCudaDevice(argc, (const char **)argv);

    bool bResult = true;

    // get number of SMs on this GPU
    hipDeviceProp_t deviceProps;

    checkCudaErrors(hipGetDeviceProperties(&deviceProps, devID));
    printf("CUDA device [%s] has %d Multi-Processors ", deviceProps.name, deviceProps.multiProcessorCount);
    printf("SM %d.%d\n", deviceProps.major, deviceProps.minor);

    // layered texture arrays require compute capability 2.0 or higher
    if (deviceProps.major < 2)
    {
        printf("%s requires SM >= 2.0 to support Texture Arrays.  Test will be waived... \n", sSDKname);

        // hipDeviceReset causes the driver to clean up all state. While
        // not mandatory in normal operation, it is good practice.  It is also
        // needed to ensure correct operation when the application is being
        // profiled. Calling hipDeviceReset causes all profile data to be
        // flushed before the application exits
        hipDeviceReset();
        exit(EXIT_WAIVED);
    }

    // generate input data for layered texture: layer*width*height + i -> i
    unsigned int width=512, height=512, num_layers = 5;
    unsigned int size = width * height * num_layers * sizeof(float);
    float *h_data = (float *) malloc(size);

    for (unsigned int layer = 0; layer < num_layers; layer++)
        for (int i = 0; i < (int)(width * height); i++)
        {
            h_data[layer*width*height + i] = (float)i;
        }

    // this is the expected transformation of the input data (the expected output)
    // mirroring the kernel: out = -in + layer
    float *h_data_ref = (float *) malloc(size);

    for (unsigned int layer = 0; layer < num_layers; layer++)
        for (int i = 0; i < (int)(width * height); i++)
        {
            h_data_ref[layer*width*height + i] = -h_data[layer*width*height + i] + layer;
        }

    // allocate device memory for result
    float *d_data = NULL;
    checkCudaErrors(hipMalloc((void **) &d_data, size));

    // allocate array and copy image data into a layered 3D CUDA array
    hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
    hipArray *cu_3darray;
    checkCudaErrors(hipMalloc3DArray(&cu_3darray, &channelDesc, make_hipExtent(width, height, num_layers), hipArrayLayered));
    // describe the host->array 3D copy (pitched source, layered destination)
    hipMemcpy3DParms myparms = {0};
    myparms.srcPos = make_hipPos(0,0,0);
    myparms.dstPos = make_hipPos(0,0,0);
    myparms.srcPtr = make_hipPitchedPtr(h_data, width * sizeof(float), width, height);
    myparms.dstArray = cu_3darray;
    myparms.extent = make_hipExtent(width, height, num_layers);
    myparms.kind = hipMemcpyHostToDevice;
    checkCudaErrors(hipMemcpy3D(&myparms));

    // set texture parameters (texture-reference API, deprecated in newer CUDA)
    tex.addressMode[0] = hipAddressModeWrap;
    tex.addressMode[1] = hipAddressModeWrap;
    tex.filterMode = hipFilterModeLinear;
    tex.normalized = true;  // access with normalized texture coordinates

    // Bind the array to the texture
    checkCudaErrors(hipBindTextureToArray(tex, cu_3darray, channelDesc));

    dim3 dimBlock(8, 8, 1);
    dim3 dimGrid(width / dimBlock.x, height / dimBlock.y, 1);

    printf("Covering 2D data array of %d x %d: Grid size is %d x %d, each block has 8 x 8 threads\n",
           width, height, dimGrid.x, dimGrid.y);

    hipLaunchKernelGGL(( transformKernel), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_data, width, height, 0);  // warmup (for better timing)

    // check if kernel execution generated an error
    getLastCudaError("warmup Kernel execution failed");

    checkCudaErrors(hipDeviceSynchronize());

    StopWatchInterface *timer = NULL;
    sdkCreateTimer(&timer);
    sdkStartTimer(&timer);

    // execute the kernel once per layer
    for (unsigned int layer = 0; layer < num_layers; layer++)
        hipLaunchKernelGGL(( transformKernel), dim3(dimGrid), dim3(dimBlock), 0 , 0, d_data, width, height, layer);

    // check if kernel execution generated an error
    getLastCudaError("Kernel execution failed");

    // wait for completion before reading the timer
    checkCudaErrors(hipDeviceSynchronize());
    sdkStopTimer(&timer);
    printf("Processing time: %.3f msec\n", sdkGetTimerValue(&timer));
    printf("%.2f Mtexlookups/sec\n", (width *height *num_layers / (sdkGetTimerValue(&timer) / 1000.0f) / 1e6));
    sdkDeleteTimer(&timer);

    // allocate mem for the result on host side
    float *h_odata = (float *) malloc(size);
    // copy result from device to host
    checkCudaErrors(hipMemcpy(h_odata, d_data, size, hipMemcpyDeviceToHost));

    // write regression file if necessary
    if (checkCmdLineFlag(argc, (const char **)argv, "regression"))
    {
        // write file for regression test
        sdkWriteFile<float>("./data/regression.dat", h_odata, width*height, 0.0f, false);
    }
    else
    {
        printf("Comparing kernel output to expected data\n");

        // tolerance-based comparison: texture filtering is not bit-exact
#define MIN_EPSILON_ERROR 5e-3f
        bResult = compareData(h_odata, h_data_ref, width*height*num_layers, MIN_EPSILON_ERROR, 0.0f);
    }

    // cleanup memory
    free(h_data);
    free(h_data_ref);
    free(h_odata);

    checkCudaErrors(hipFree(d_data));
    checkCudaErrors(hipFreeArray(cu_3darray));

    // hipDeviceReset causes the driver to clean up all state. While
    // not mandatory in normal operation, it is good practice.  It is also
    // needed to ensure correct operation when the application is being
    // profiled. Calling hipDeviceReset causes all profile data to be
    // flushed before the application exits
    hipDeviceReset();

    exit(bResult ? EXIT_SUCCESS : EXIT_FAILURE);
}
| bb84fcc4afd8c1573130a238154c1727ae0bdabf.cu | /*
* Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This sample demonstrates how to use texture fetches from layered 2D textures in CUDA C
*
* This sample first generates a 3D input data array for the layered texture
* and the expected output. Then it starts CUDA C kernels, one for each layer,
* which fetch their layer's texture data (using normalized texture coordinates)
* transform it to the expected output, and write it to a 3D output data array.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, kernels
#include <cuda_runtime.h>
// includes, project
#include <helper_cuda.h>
#include <helper_functions.h> // helper for shared that are common to CUDA Samples
// Sample name used in status messages. const-qualified: binding a string
// literal to a non-const char* is ill-formed in C++11 and later.
static const char *sSDKname = "simpleLayeredTexture";
// includes, kernels
// declare texture reference for layered 2D float texture
// Note: The "dim" field in the texture reference template is now deprecated.
// Instead, please use a texture type macro such as cudaTextureType1D, etc.
texture<float, cudaTextureType2DLayered> tex;
////////////////////////////////////////////////////////////////////////////////
//! Transform a layer of a layered 2D texture using texture lookups
//! @param g_odata output data in global memory (width*height floats per layer)
//! @param width   image width in texels
//! @param height  image height in texels
//! @param layer   which layer of the global texture reference `tex` to read
////////////////////////////////////////////////////////////////////////////////
__global__ void
transformKernel(float *g_odata, int width, int height, int layer)
{
    // calculate this thread's data point
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;

    // guard the tail: a grid that overcovers the image must not write out of
    // bounds (the sample launches an exact grid, but this keeps the kernel
    // safe for arbitrary width/height)
    if (x >= (unsigned int)width || y >= (unsigned int)height)
        return;

    // 0.5f offset and division are necessary to access the original data points
    // in the texture (such that bilinear interpolation will not be activated).
    // For details, see also CUDA Programming Guide, Appendix D
    float u = (x+0.5f) / (float) width;
    float v = (y+0.5f) / (float) height;

    // read from texture, do expected transformation and write to global memory
    g_odata[layer*width*height + y*width + x] = -tex2DLayered(tex, u, v, layer) + layer;
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Generate a 3D data array, upload it into a layered 2D texture, run one
// transform kernel per layer, and compare the output against a host-computed
// reference. Exit code reflects the comparison (or waives on SM < 2.0).
int
main(int argc, char **argv)
{
    printf("[%s] - Starting...\n", sSDKname);

    // use command-line specified CUDA device, otherwise use device with highest Gflops/s
    int devID = findCudaDevice(argc, (const char **)argv);

    bool bResult = true;

    // get number of SMs on this GPU
    cudaDeviceProp deviceProps;

    checkCudaErrors(cudaGetDeviceProperties(&deviceProps, devID));
    printf("CUDA device [%s] has %d Multi-Processors ", deviceProps.name, deviceProps.multiProcessorCount);
    printf("SM %d.%d\n", deviceProps.major, deviceProps.minor);

    // layered texture arrays require compute capability 2.0 or higher
    if (deviceProps.major < 2)
    {
        printf("%s requires SM >= 2.0 to support Texture Arrays.  Test will be waived... \n", sSDKname);

        // cudaDeviceReset causes the driver to clean up all state. While
        // not mandatory in normal operation, it is good practice.  It is also
        // needed to ensure correct operation when the application is being
        // profiled. Calling cudaDeviceReset causes all profile data to be
        // flushed before the application exits
        cudaDeviceReset();
        exit(EXIT_WAIVED);
    }

    // generate input data for layered texture: layer*width*height + i -> i
    unsigned int width=512, height=512, num_layers = 5;
    unsigned int size = width * height * num_layers * sizeof(float);
    float *h_data = (float *) malloc(size);

    for (unsigned int layer = 0; layer < num_layers; layer++)
        for (int i = 0; i < (int)(width * height); i++)
        {
            h_data[layer*width*height + i] = (float)i;
        }

    // this is the expected transformation of the input data (the expected output)
    // mirroring the kernel: out = -in + layer
    float *h_data_ref = (float *) malloc(size);

    for (unsigned int layer = 0; layer < num_layers; layer++)
        for (int i = 0; i < (int)(width * height); i++)
        {
            h_data_ref[layer*width*height + i] = -h_data[layer*width*height + i] + layer;
        }

    // allocate device memory for result
    float *d_data = NULL;
    checkCudaErrors(cudaMalloc((void **) &d_data, size));

    // allocate array and copy image data into a layered 3D CUDA array
    cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
    cudaArray *cu_3darray;
    checkCudaErrors(cudaMalloc3DArray(&cu_3darray, &channelDesc, make_cudaExtent(width, height, num_layers), cudaArrayLayered));
    // describe the host->array 3D copy (pitched source, layered destination)
    cudaMemcpy3DParms myparms = {0};
    myparms.srcPos = make_cudaPos(0,0,0);
    myparms.dstPos = make_cudaPos(0,0,0);
    myparms.srcPtr = make_cudaPitchedPtr(h_data, width * sizeof(float), width, height);
    myparms.dstArray = cu_3darray;
    myparms.extent = make_cudaExtent(width, height, num_layers);
    myparms.kind = cudaMemcpyHostToDevice;
    checkCudaErrors(cudaMemcpy3D(&myparms));

    // set texture parameters (texture-reference API, deprecated in newer CUDA)
    tex.addressMode[0] = cudaAddressModeWrap;
    tex.addressMode[1] = cudaAddressModeWrap;
    tex.filterMode = cudaFilterModeLinear;
    tex.normalized = true;  // access with normalized texture coordinates

    // Bind the array to the texture
    checkCudaErrors(cudaBindTextureToArray(tex, cu_3darray, channelDesc));

    dim3 dimBlock(8, 8, 1);
    dim3 dimGrid(width / dimBlock.x, height / dimBlock.y, 1);

    printf("Covering 2D data array of %d x %d: Grid size is %d x %d, each block has 8 x 8 threads\n",
           width, height, dimGrid.x, dimGrid.y);

    transformKernel<<< dimGrid, dimBlock >>>(d_data, width, height, 0);  // warmup (for better timing)

    // check if kernel execution generated an error
    getLastCudaError("warmup Kernel execution failed");

    checkCudaErrors(cudaDeviceSynchronize());

    StopWatchInterface *timer = NULL;
    sdkCreateTimer(&timer);
    sdkStartTimer(&timer);

    // execute the kernel once per layer
    for (unsigned int layer = 0; layer < num_layers; layer++)
        transformKernel<<< dimGrid, dimBlock, 0 >>>(d_data, width, height, layer);

    // check if kernel execution generated an error
    getLastCudaError("Kernel execution failed");

    // wait for completion before reading the timer
    checkCudaErrors(cudaDeviceSynchronize());
    sdkStopTimer(&timer);
    printf("Processing time: %.3f msec\n", sdkGetTimerValue(&timer));
    printf("%.2f Mtexlookups/sec\n", (width *height *num_layers / (sdkGetTimerValue(&timer) / 1000.0f) / 1e6));
    sdkDeleteTimer(&timer);

    // allocate mem for the result on host side
    float *h_odata = (float *) malloc(size);
    // copy result from device to host
    checkCudaErrors(cudaMemcpy(h_odata, d_data, size, cudaMemcpyDeviceToHost));

    // write regression file if necessary
    if (checkCmdLineFlag(argc, (const char **)argv, "regression"))
    {
        // write file for regression test
        sdkWriteFile<float>("./data/regression.dat", h_odata, width*height, 0.0f, false);
    }
    else
    {
        printf("Comparing kernel output to expected data\n");

        // tolerance-based comparison: texture filtering is not bit-exact
#define MIN_EPSILON_ERROR 5e-3f
        bResult = compareData(h_odata, h_data_ref, width*height*num_layers, MIN_EPSILON_ERROR, 0.0f);
    }

    // cleanup memory
    free(h_data);
    free(h_data_ref);
    free(h_odata);

    checkCudaErrors(cudaFree(d_data));
    checkCudaErrors(cudaFreeArray(cu_3darray));

    // cudaDeviceReset causes the driver to clean up all state. While
    // not mandatory in normal operation, it is good practice.  It is also
    // needed to ensure correct operation when the application is being
    // profiled. Calling cudaDeviceReset causes all profile data to be
    // flushed before the application exits
    cudaDeviceReset();

    exit(bResult ? EXIT_SUCCESS : EXIT_FAILURE);
}
|
49d13b1079da071d213f21027f992f0ea8c34fc7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "a_kernel.hu"
__global__ void kernel0(int *a, int *b, int *c)
{
int b0 = blockIdx.x;
int t0 = threadIdx.x;
int private_a[1];
#define ppcg_fdiv_q(n,d) (((n)<0) ? -((-(n)+(d)-1)/(d)) : (n)/(d))
{
private_a[0] = a[t0];
private_a[0] = (private_a[0] + b[t0]);
c[t0] = private_a[0];
}
}
| 49d13b1079da071d213f21027f992f0ea8c34fc7.cu | #include "a_kernel.hu"
__global__ void kernel0(int *a, int *b, int *c)
{
int b0 = blockIdx.x;
int t0 = threadIdx.x;
int private_a[1];
#define ppcg_fdiv_q(n,d) (((n)<0) ? -((-(n)+(d)-1)/(d)) : (n)/(d))
{
private_a[0] = a[t0];
private_a[0] = (private_a[0] + b[t0]);
c[t0] = private_a[0];
}
}
|
4fdd571fabe3c4f36fe378589118c7e1a6a2e6c7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// helper for CUDA error handling
__global__ void getSufficientComponentNum(const double* eigenvalues, std::size_t* componentNum, std::size_t eigenRows, double epsilon)
{
double variance = 0;
for(std::size_t i = 0; i < eigenRows; ++i)
{
variance += eigenvalues[i];
}
variance *= eigenRows;
(*componentNum) = 1;
double subVariance = eigenRows * eigenvalues[eigenRows - 1];
double explanatoryScore = subVariance / variance;
for(; (*componentNum) < eigenRows && explanatoryScore <= epsilon; (*componentNum) += 1)
{
subVariance += eigenRows * eigenvalues[eigenRows - (*componentNum) - 1];
explanatoryScore = subVariance / variance;
}
} | 4fdd571fabe3c4f36fe378589118c7e1a6a2e6c7.cu | #include "includes.h"
// helper for CUDA error handling
__global__ void getSufficientComponentNum(const double* eigenvalues, std::size_t* componentNum, std::size_t eigenRows, double epsilon)
{
double variance = 0;
for(std::size_t i = 0; i < eigenRows; ++i)
{
variance += eigenvalues[i];
}
variance *= eigenRows;
(*componentNum) = 1;
double subVariance = eigenRows * eigenvalues[eigenRows - 1];
double explanatoryScore = subVariance / variance;
for(; (*componentNum) < eigenRows && explanatoryScore <= epsilon; (*componentNum) += 1)
{
subVariance += eigenRows * eigenvalues[eigenRows - (*componentNum) - 1];
explanatoryScore = subVariance / variance;
}
} |
9cbdce536e55169dfd05c8979f35802dc56ffdb2.hip | // !!! This is a file automatically generated by hipify!!!
/*
* ht.cu
*
* Created on: Feb 17, 2010
* Author: zhmurov
*/
#include "../Util/ran2.h"
#include "../Util/Log.h"
namespace hybrid_taus {
#define jflone 0x3f800000
#define jflmsk 0x007fffff
#define EPS 1.0e-8f
#include "../Util/Log.h"
class Log: public ILog {
virtual void Write(const char* message) const {
std::cout << makeTimePrefix() << "<hybrid_taus> " << message << std::endl;
}
} log;
#define LOG LogStream(log)
int hasGauss = 0;
float gauss;
struct HybTau{
uint4* h_seeds;
uint4* d_seeds;
uint4 mseed;
};
HybTau ht;
__device__ __constant__ HybTau c_ht;
void generateSeeds(uint4* seeds, int seed, int Np);
int initRand(int seed, int Np){
LOG << "Initializing Hybrid Taus PRNG...";
allocateCPU((void**)&ht.h_seeds, Np*sizeof(uint4));
generateSeeds(ht.h_seeds, seed, Np);
allocateGPU((void**)&ht.d_seeds, Np*sizeof(uint4));
hipMemcpy(ht.d_seeds, ht.h_seeds, Np*sizeof(uint4), hipMemcpyHostToDevice);
hipMemcpyToSymbol(c_ht, &ht, sizeof(HybTau), 0, hipMemcpyHostToDevice);
LOG << "done";
return 1;
}
void generateSeeds(uint4* seeds, int seed, int Np){
for(int i = 0; i < Np; i++){
do{
seeds[i].x = (unsigned)(ran2::ran2(&seed)*UINT_MAX);
} while(seeds[i].x < 128);
do{
seeds[i].y = (unsigned)(ran2::ran2(&seed)*UINT_MAX);
} while(seeds[i].y < 128);
do{
seeds[i].z = (unsigned)(ran2::ran2(&seed)*UINT_MAX);
} while(seeds[i].z < 128);
do{
seeds[i].w = (unsigned)(ran2::ran2(&seed)*UINT_MAX);
} while(seeds[i].w < 128);
}
ht.mseed = seeds[0];
}
// Random number generator.
// Taked from GPU Gems 3, Chapter 37
__device__ inline float uintToFloat(unsigned uint){
unsigned itemp = jflone | (jflmsk & uint);
float result = (*(float *)&itemp) - 1.0f;
if(result == 0){
return EPS;
} else {
return result;
}
}
__device__ inline unsigned TausStep(unsigned &z, int S1, int S2, int S3, unsigned M){
unsigned b = (((z << S1)^z) >> S2);
return z = (((z & M) << S3) ^b);
}
__device__ inline unsigned LCGStep(unsigned &z, unsigned A, unsigned C){
return z = (A * z + C);
}
__device__ inline unsigned HybridTaus(uint4 &seed){
return TausStep(seed.x, 13, 19, 12, 4294967294UL) ^
TausStep(seed.y, 2, 25, 4, 4294967288UL) ^
TausStep(seed.z, 3, 11, 17, 4294967280UL) ^
LCGStep(seed.w, 1664525, 1013904223UL);
/*return 2.3283064365387e-10 * (
TausStep(seed.x, 13, 19, 12, 4294967294UL) ^
TausStep(seed.y, 2, 25, 4, 4294967288UL) ^
TausStep(seed.z, 3, 11, 17, 4294967280UL) ^
LCGStep(seed.w, 1664525, 1013904223UL));*/
}
__device__ inline float4 rforce(int d_i){
uint4 seed = c_ht.d_seeds[d_i];
float4 result;
float r = sqrtf(-2.0f * logf(uintToFloat(HybridTaus(seed))));
float theta = 2.0f*M_PI*uintToFloat(HybridTaus(seed));
result.x = r*__sinf(theta);
result.y = r*__cosf(theta);
r = sqrtf(-2.0f * logf(uintToFloat(HybridTaus(seed))));
theta = 2.0f*M_PI*uintToFloat(HybridTaus(seed));
result.z = r*__sinf(theta);
result.w = r*__cosf(theta);
c_ht.d_seeds[d_i] = seed;
return result;
}
#undef jflone
#undef jflmsk
#undef EPS
#undef LOG
} // namespace hybrid_taus
| 9cbdce536e55169dfd05c8979f35802dc56ffdb2.cu | /*
* ht.cu
*
* Created on: Feb 17, 2010
* Author: zhmurov
*/
#include "../Util/ran2.h"
#include "../Util/Log.h"
namespace hybrid_taus {
#define jflone 0x3f800000
#define jflmsk 0x007fffff
#define EPS 1.0e-8f
#include "../Util/Log.h"
class Log: public ILog {
virtual void Write(const char* message) const {
std::cout << makeTimePrefix() << "<hybrid_taus> " << message << std::endl;
}
} log;
#define LOG LogStream(log)
int hasGauss = 0;
float gauss;
struct HybTau{
uint4* h_seeds;
uint4* d_seeds;
uint4 mseed;
};
HybTau ht;
__device__ __constant__ HybTau c_ht;
void generateSeeds(uint4* seeds, int seed, int Np);
int initRand(int seed, int Np){
LOG << "Initializing Hybrid Taus PRNG...";
allocateCPU((void**)&ht.h_seeds, Np*sizeof(uint4));
generateSeeds(ht.h_seeds, seed, Np);
allocateGPU((void**)&ht.d_seeds, Np*sizeof(uint4));
cudaMemcpy(ht.d_seeds, ht.h_seeds, Np*sizeof(uint4), cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(c_ht, &ht, sizeof(HybTau), 0, cudaMemcpyHostToDevice);
LOG << "done";
return 1;
}
void generateSeeds(uint4* seeds, int seed, int Np){
for(int i = 0; i < Np; i++){
do{
seeds[i].x = (unsigned)(ran2::ran2(&seed)*UINT_MAX);
} while(seeds[i].x < 128);
do{
seeds[i].y = (unsigned)(ran2::ran2(&seed)*UINT_MAX);
} while(seeds[i].y < 128);
do{
seeds[i].z = (unsigned)(ran2::ran2(&seed)*UINT_MAX);
} while(seeds[i].z < 128);
do{
seeds[i].w = (unsigned)(ran2::ran2(&seed)*UINT_MAX);
} while(seeds[i].w < 128);
}
ht.mseed = seeds[0];
}
// Random number generator.
// Taked from GPU Gems 3, Chapter 37
__device__ inline float uintToFloat(unsigned uint){
unsigned itemp = jflone | (jflmsk & uint);
float result = (*(float *)&itemp) - 1.0f;
if(result == 0){
return EPS;
} else {
return result;
}
}
__device__ inline unsigned TausStep(unsigned &z, int S1, int S2, int S3, unsigned M){
unsigned b = (((z << S1)^z) >> S2);
return z = (((z & M) << S3) ^b);
}
__device__ inline unsigned LCGStep(unsigned &z, unsigned A, unsigned C){
return z = (A * z + C);
}
__device__ inline unsigned HybridTaus(uint4 &seed){
return TausStep(seed.x, 13, 19, 12, 4294967294UL) ^
TausStep(seed.y, 2, 25, 4, 4294967288UL) ^
TausStep(seed.z, 3, 11, 17, 4294967280UL) ^
LCGStep(seed.w, 1664525, 1013904223UL);
/*return 2.3283064365387e-10 * (
TausStep(seed.x, 13, 19, 12, 4294967294UL) ^
TausStep(seed.y, 2, 25, 4, 4294967288UL) ^
TausStep(seed.z, 3, 11, 17, 4294967280UL) ^
LCGStep(seed.w, 1664525, 1013904223UL));*/
}
__device__ inline float4 rforce(int d_i){
uint4 seed = c_ht.d_seeds[d_i];
float4 result;
float r = sqrtf(-2.0f * logf(uintToFloat(HybridTaus(seed))));
float theta = 2.0f*M_PI*uintToFloat(HybridTaus(seed));
result.x = r*__sinf(theta);
result.y = r*__cosf(theta);
r = sqrtf(-2.0f * logf(uintToFloat(HybridTaus(seed))));
theta = 2.0f*M_PI*uintToFloat(HybridTaus(seed));
result.z = r*__sinf(theta);
result.w = r*__cosf(theta);
c_ht.d_seeds[d_i] = seed;
return result;
}
#undef jflone
#undef jflmsk
#undef EPS
#undef LOG
} // namespace hybrid_taus
|
2061560911b11823bb9237f3afa3256ade4ac6e9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include "caffe/common.hpp"
#include "caffe/util/im2col.hpp"
namespace caffe {
template <typename Dtype>
__global__ void im2col_gpu_kernel(const int n, const Dtype* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
Dtype* data_col) {
CUDA_KERNEL_LOOP(index, n) {
const int h_index = index / width_col;
const int h_col = h_index % height_col;
const int w_col = index % width_col;
const int c_im = h_index / height_col;
const int c_col = c_im * kernel_h * kernel_w;
const int h_offset = h_col * stride_h - pad_h;
const int w_offset = w_col * stride_w - pad_w;
Dtype* data_col_ptr = data_col;
data_col_ptr += (c_col * height_col + h_col) * width_col + w_col;
const Dtype* data_im_ptr = data_im;
data_im_ptr += (c_im * height + h_offset) * width + w_offset;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
int h_im = h_offset + i * dilation_h;
int w_im = w_offset + j * dilation_w;
*data_col_ptr =
(h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?
data_im_ptr[i * dilation_h * width + j * dilation_w] : 0;
data_col_ptr += height_col * width_col;
}
}
}
}
template <typename Dtype>
void im2col_gpu(const Dtype* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
Dtype* data_col) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad_h -
(dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w -
(dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
int num_kernels = channels * height_col * width_col;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h,
pad_w, stride_h, stride_w, dilation_h, dilation_w, height_col,
width_col, data_col);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void im2col_gpu<float>(const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, float* data_col);
template void im2col_gpu<double>(const double* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, double* data_col);
template <typename Dtype>
__global__ void im2col_gpu_kernel(const int n, const Dtype* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int pad_c,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
Dtype* data_col) {
CUDA_KERNEL_LOOP(index, n + (2 * pad_c*height_col*width_col)){
const int h_index = index / width_col;
const int h_col = h_index % height_col;
const int w_col = index % width_col;
const int c_im = h_index / height_col;
const int c_col = c_im * kernel_h * kernel_w;
const int h_offset = h_col * stride_h - pad_h;
const int w_offset = w_col * stride_w - pad_w;
Dtype* data_col_ptr = data_col;
data_col_ptr += (c_col * height_col + h_col) * width_col + w_col;
if (index<(pad_c*height_col*width_col) || index >= (n + (pad_c*height_col*width_col))){
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
*data_col_ptr = 0;
data_col_ptr += height_col * width_col;
}
}
}
else{
const Dtype* data_im_ptr = data_im;
data_im_ptr += ((c_im - pad_c) * height + h_offset) * width + w_offset;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
int h_im = h_offset + i * dilation_h;
int w_im = w_offset + j * dilation_w;
*data_col_ptr =
(h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?
data_im_ptr[i * dilation_h * width + j * dilation_w] : 0;
data_col_ptr += height_col * width_col;
}
}
}
}
}
template <typename Dtype>
void im2col_gpu(const Dtype* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int pad_c,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
Dtype* data_col) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad_h -
(dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w -
(dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
int num_kernels = channels * height_col * width_col;
// NOLINT_NEXT_LINE(whitespace/operators)
im2col_gpu_kernel<Dtype> << <CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS >> >(
num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h,
pad_w, pad_c, stride_h, stride_w, dilation_h, dilation_w, height_col,
width_col, data_col);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void im2col_gpu<float>(const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int pad_c, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, float* data_col);
template void im2col_gpu<double>(const double* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int pad_c, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, double* data_col);
template <typename Dtype, int num_axes>
__global__ void im2col_nd_gpu_kernel(const int n, const Dtype* data_im,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, Dtype* data_col) {
int d_temp[num_axes]; // NOLINT(runtime/arrays)
int d_iter[num_axes]; // NOLINT(runtime/arrays)
__shared__ int shared_dilation[num_axes];
__shared__ int shared_kernel_shape[num_axes];
__shared__ int shared_pad[num_axes];
__shared__ int shared_stride[num_axes];
__shared__ int shared_col_shape[num_axes + 1];
__shared__ int shared_im_shape[num_axes + 1];
if (threadIdx.x < num_axes) {
shared_dilation[threadIdx.x] = dilation[threadIdx.x];
shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x];
shared_pad[threadIdx.x] = pad[threadIdx.x];
shared_stride[threadIdx.x] = stride[threadIdx.x];
}
if (threadIdx.x < num_axes + 1) {
shared_col_shape[threadIdx.x] = col_shape[threadIdx.x];
shared_im_shape[threadIdx.x] = im_shape[threadIdx.x];
}
__syncthreads();
int i;
CUDA_KERNEL_LOOP(index, n) {
// Initialize channel_in, computed in the loop below, with intermediate
// computations used to compute the spatial indices.
int channel_in = index;
int channel_out = 1;
for (i = num_axes - 1; i >= 0; --i) {
d_temp[i] = channel_in % shared_col_shape[i + 1];
channel_in /= shared_col_shape[i + 1];
channel_out *= shared_kernel_shape[i];
}
channel_out *= channel_in;
int data_col_inc = 1;
for (i = 0; i < num_axes; ++i) {
channel_out *= shared_col_shape[i + 1];
channel_out += d_temp[i];
d_temp[i] = d_temp[i] * shared_stride[i] - shared_pad[i];
channel_in *= shared_im_shape[i + 1];
channel_in += d_temp[i];
data_col_inc *= shared_col_shape[i + 1];
d_iter[i] = 0;
}
Dtype* data_col_ptr = data_col + channel_out;
const Dtype* data_im_ptr = data_im + channel_in;
bool incremented;
do {
bool in_range = true;
for (i = 0; i < num_axes; ++i) {
const int d_iter_im = d_iter[i] * shared_dilation[i] + d_temp[i];
in_range &= d_iter_im >= 0 && d_iter_im < shared_im_shape[i + 1];
if (!in_range) { break; }
}
if (in_range) {
int data_im_offset = d_iter[0] * shared_dilation[0];
for (i = 1; i < num_axes; ++i) {
data_im_offset *= shared_im_shape[i + 1];
data_im_offset += d_iter[i] * shared_dilation[i];
}
*data_col_ptr = data_im_ptr[data_im_offset];
} else {
*data_col_ptr = 0;
}
data_col_ptr += data_col_inc;
incremented = false;
for (i = num_axes - 1; i >= 0; --i) {
const int d_max = shared_kernel_shape[i];
if (d_iter[i] == d_max - 1) {
d_iter[i] = 0;
} else { // d_iter[i] < d_max - 1
++d_iter[i];
incremented = true;
break;
}
} // for (int i = num_axes - 1; i >= 0; --i)
} while (incremented); // do
} // CUDA_KERNEL_LOOP(index, n)
}
template <typename Dtype>
void im2col_nd_gpu(const Dtype* data_im, const int num_spatial_axes,
const int num_kernels, const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, Dtype* data_col) {
// num_axes should be smaller than block size
DCHECK_LT(num_spatial_axes, CAFFE_CUDA_NUM_THREADS);
switch (num_spatial_axes) {
case 1:
im2col_nd_gpu_kernel<Dtype, 1> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators))
, dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 2:
im2col_nd_gpu_kernel<Dtype, 2> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators))
, dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 3:
im2col_nd_gpu_kernel<Dtype, 3> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators))
, dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 4:
im2col_nd_gpu_kernel<Dtype, 4> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators))
, dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 5:
im2col_nd_gpu_kernel<Dtype, 5> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators))
, dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 6:
im2col_nd_gpu_kernel<Dtype, 6> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators))
, dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 7:
im2col_nd_gpu_kernel<Dtype, 7> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators))
, dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 8:
im2col_nd_gpu_kernel<Dtype, 8> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators))
, dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 9:
im2col_nd_gpu_kernel<Dtype, 9> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators))
, dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 10:
im2col_nd_gpu_kernel<Dtype, 10> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators))
, dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
default:
LOG(FATAL) << "im2col_nd_gpu does not support computation with "
<< num_spatial_axes << " spatial axes";
}
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void im2col_nd_gpu<float>(const float* data_im,
const int num_spatial_axes, const int col_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, float* data_col);
template void im2col_nd_gpu<double>(const double* data_im,
const int num_spatial_axes, const int col_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, double* data_col);
template <typename Dtype>
__global__ void col2im_gpu_kernel(const int n, const Dtype* data_col,
const int height, const int width, const int channels,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
Dtype* data_im) {
CUDA_KERNEL_LOOP(index, n) {
Dtype val = 0;
const int w_im = index % width + pad_w;
const int h_im = (index / width) % height + pad_h;
const int c_im = index / (width * height);
int kernel_extent_w = (kernel_w - 1) * dilation_w + 1;
int kernel_extent_h = (kernel_h - 1) * dilation_h + 1;
// compute the start and end of the output
const int w_col_start =
(w_im < kernel_extent_w) ? 0 : (w_im - kernel_extent_w) / stride_w + 1;
const int w_col_end = min(w_im / stride_w + 1, width_col);
const int h_col_start =
(h_im < kernel_extent_h) ? 0 : (h_im - kernel_extent_h) / stride_h + 1;
const int h_col_end = min(h_im / stride_h + 1, height_col);
// TODO: use LCM of stride and dilation to avoid unnecessary loops
for (int h_col = h_col_start; h_col < h_col_end; h_col += 1) {
for (int w_col = w_col_start; w_col < w_col_end; w_col += 1) {
int h_k = (h_im - h_col * stride_h);
int w_k = (w_im - w_col * stride_w);
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
int data_col_index = (((c_im * kernel_h + h_k) * kernel_w + w_k) *
height_col + h_col) * width_col + w_col;
val += data_col[data_col_index];
}
}
}
data_im[index] = val;
}
}
template <typename Dtype>
void col2im_gpu(const Dtype* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
Dtype* data_im) {
int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) /
stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) /
stride_w + 1;
int num_kernels = channels * height * width;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( col2im_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_col, height, width, channels, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
height_col, width_col, data_im);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void col2im_gpu<float>(const float* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
float* data_im);
template void col2im_gpu<double>(const double* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
double* data_im);
template <typename Dtype>
__global__ void col2im_gpu_kernel(const int n, const Dtype* data_col,
const int height, const int width, const int channels,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int pad_c,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
Dtype* data_im) {
CUDA_KERNEL_LOOP(index, n) {
Dtype val = 0;
const int w_im = index % width + pad_w;
const int h_im = (index / width) % height + pad_h;
const int c_im = index / (width * height);
int kernel_extent_w = (kernel_w - 1) * dilation_w + 1;
int kernel_extent_h = (kernel_h - 1) * dilation_h + 1;
// compute the start and end of the output
const int w_col_start =
(w_im < kernel_extent_w) ? 0 : (w_im - kernel_extent_w) / stride_w + 1;
const int w_col_end = min(w_im / stride_w + 1, width_col);
const int h_col_start =
(h_im < kernel_extent_h) ? 0 : (h_im - kernel_extent_h) / stride_h + 1;
const int h_col_end = min(h_im / stride_h + 1, height_col);
// TODO: use LCM of stride and dilation to avoid unnecessary loops
for (int h_col = h_col_start; h_col < h_col_end; h_col += 1) {
for (int w_col = w_col_start; w_col < w_col_end; w_col += 1) {
int h_k = (h_im - h_col * stride_h);
int w_k = (w_im - w_col * stride_w);
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
int data_col_index = ((((c_im + pad_c) * kernel_h + h_k) * kernel_w + w_k) *
height_col + h_col) * width_col + w_col;
val += data_col[data_col_index];
}
}
}
data_im[index] = val;
}
}
template <typename Dtype>
void col2im_gpu(const Dtype* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int pad_c, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
Dtype* data_im) {
int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) /
stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) /
stride_w + 1;
int num_kernels = channels * height * width;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
col2im_gpu_kernel<Dtype> << <CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS >> >(
num_kernels, data_col, height, width, channels, kernel_h, kernel_w,
pad_h, pad_w, pad_c, stride_h, stride_w, dilation_h, dilation_w,
height_col, width_col, data_im);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void col2im_gpu<float>(const float* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int pad_c, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
float* data_im);
template void col2im_gpu<double>(const double* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int pad_c, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
double* data_im);
template <typename Dtype, int num_axes>
__global__ void col2im_nd_gpu_kernel(const int n, const Dtype* data_col,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, Dtype* data_im) {
int d_im[num_axes]; // NOLINT(runtime/arrays)
int d_col_iter[num_axes]; // NOLINT(runtime/arrays)
int d_col_start[num_axes]; // NOLINT(runtime/arrays)
int d_col_end[num_axes]; // NOLINT(runtime/arrays)
__shared__ int shared_dilation[num_axes];
__shared__ int shared_kernel_shape[num_axes];
__shared__ int shared_pad[num_axes];
__shared__ int shared_stride[num_axes];
__shared__ int shared_col_shape[num_axes + 1];
__shared__ int shared_im_shape[num_axes + 1];
if (threadIdx.x < num_axes) {
shared_dilation[threadIdx.x] = dilation[threadIdx.x];
shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x];
shared_pad[threadIdx.x] = pad[threadIdx.x];
shared_stride[threadIdx.x] = stride[threadIdx.x];
}
if (threadIdx.x < num_axes + 1) {
shared_col_shape[threadIdx.x] = col_shape[threadIdx.x];
shared_im_shape[threadIdx.x] = im_shape[threadIdx.x];
}
__syncthreads();
CUDA_KERNEL_LOOP(index, n) {
// Initialize channel_in, computed in the loop below, with intermediate
// computations used to compute the spatial indices.
int c_im = index;
// Calculate d_im (image dimensions).
for (int i = num_axes - 1; i >= 0; --i) {
d_im[i] = c_im % shared_im_shape[i + 1] + shared_pad[i];
c_im /= shared_im_shape[i + 1];
}
// Calculate col start/end indices.
bool done = false;
for (int i = 0; i < num_axes; ++i) {
const int kernel_extent =
shared_dilation[i] * (shared_kernel_shape[i] - 1) + 1;
d_col_start[i] = d_col_iter[i] =
(d_im[i] < kernel_extent) ? 0 :
(d_im[i] - kernel_extent) / shared_stride[i] + 1;
d_col_end[i] =
min(d_im[i] / shared_stride[i] + 1, shared_col_shape[i + 1]);
if (d_col_start[i] >= d_col_end[i]) {
// Skip computation if the dimension is 0 at any spatial axis --
// final val will be 0.
data_im[index] = 0;
done = true;
break; // for (int i = 0; i < num_axes; ++i)
}
}
if (done) {
continue; // CUDA_KERNEL_LOOP(index, n)
}
// Loop over the col to compute the output val.
Dtype val = 0;
bool incremented = true;
bool skip = false;
do {
// Compute the final offset.
int final_offset = 0;
int kernel_shape_prod = 1;
int kernel_index;
for (int i = num_axes - 1; i >= 0; --i) {
kernel_index = d_im[i] - d_col_iter[i] * shared_stride[i];
if (kernel_index % shared_dilation[i]) {
skip = true;
break;
} else {
kernel_index /= shared_dilation[i];
final_offset += kernel_index * kernel_shape_prod;
kernel_shape_prod *= shared_kernel_shape[i];
}
}
if (!skip) {
final_offset += kernel_shape_prod * c_im;
for (int i = 0; i < num_axes; ++i) {
final_offset *= shared_col_shape[i + 1];
final_offset += d_col_iter[i];
}
val += data_col[final_offset];
}
skip = false;
incremented = false;
for (int i = num_axes - 1; i >= 0; --i) {
const int d_max = d_col_end[i];
if (d_col_iter[i] == d_max - 1) {
d_col_iter[i] = d_col_start[i];
} else { // d_col_iter[i] < d_max - 1
++d_col_iter[i];
incremented = true;
break; // for (int i = num_axes - 1; i >= 0; --i)
}
} // for (int i = num_axes - 1; i >= 0; --i)
} while (incremented);
data_im[index] = val;
} // CUDA_KERNEL_LOOP(index, n)
}
template <typename Dtype>
void col2im_nd_gpu(const Dtype* data_col, const int num_spatial_axes,
    const int im_size, const int* im_shape, const int* col_shape,
    const int* kernel_shape, const int* pad, const int* stride,
    const int* dilation, Dtype* data_im) {
  // N-D col2im launcher (inverse of im2col). The kernel is templated on the
  // number of spatial axes so the axis count must be a compile-time constant,
  // hence one switch case per supported count (1..10).
  // Launches one thread per image element (im_size total).
  //
  // FIX: the hipify tool had spliced "hipLaunchKernelGGL((" into the NOLINT
  // comments, which left every launch statement syntactically invalid
  // (a dangling "kernel<Dtype, N> , dim3(...), ..." with no call). Each case
  // below is now a well-formed HIP kernel launch.
  // num_axes should be smaller than block size
  DCHECK_LT(num_spatial_axes, CAFFE_CUDA_NUM_THREADS);
#define COL2IM_ND_GPU_LAUNCH_CASE(AXES) \
  case AXES: \
    hipLaunchKernelGGL((col2im_nd_gpu_kernel<Dtype, AXES>), \
        dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, \
        im_size, data_col, im_shape, col_shape, \
        kernel_shape, pad, stride, dilation, data_im); \
    break
  switch (num_spatial_axes) {
  COL2IM_ND_GPU_LAUNCH_CASE(1);
  COL2IM_ND_GPU_LAUNCH_CASE(2);
  COL2IM_ND_GPU_LAUNCH_CASE(3);
  COL2IM_ND_GPU_LAUNCH_CASE(4);
  COL2IM_ND_GPU_LAUNCH_CASE(5);
  COL2IM_ND_GPU_LAUNCH_CASE(6);
  COL2IM_ND_GPU_LAUNCH_CASE(7);
  COL2IM_ND_GPU_LAUNCH_CASE(8);
  COL2IM_ND_GPU_LAUNCH_CASE(9);
  COL2IM_ND_GPU_LAUNCH_CASE(10);
  default:
    LOG(FATAL) << "col2im_nd_gpu does not support computation with "
               << num_spatial_axes << " spatial axes";
  }
#undef COL2IM_ND_GPU_LAUNCH_CASE
  CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void col2im_nd_gpu<float>(const float* data_col,
const int num_spatial_axes, const int im_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, float* data_im);
template void col2im_nd_gpu<double>(const double* data_col,
const int num_spatial_axes, const int im_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, double* data_im);
} // namespace caffe
| 2061560911b11823bb9237f3afa3256ade4ac6e9.cu | #include <algorithm>
#include "caffe/common.hpp"
#include "caffe/util/im2col.hpp"
namespace caffe {
// Kernel: unroll 2-D image patches into the "col" matrix (one column per
// output spatial location, one row per (channel, kernel-offset) pair).
// Each of the n threads handles one (channel, output-row, output-col) triple
// and copies its kernel_h x kernel_w receptive field, writing zeros where
// the (dilated) window falls into the padding region.
template <typename Dtype>
__global__ void im2col_gpu_kernel(const int n, const Dtype* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
Dtype* data_col) {
CUDA_KERNEL_LOOP(index, n) {
// Decompose the flat index into (channel, output row, output column).
const int h_index = index / width_col;
const int h_col = h_index % height_col;
const int w_col = index % width_col;
const int c_im = h_index / height_col;
// First col row belonging to this channel's kernel_h*kernel_w block.
const int c_col = c_im * kernel_h * kernel_w;
// Top-left corner of the receptive field in image coordinates
// (may be negative because of padding).
const int h_offset = h_col * stride_h - pad_h;
const int w_offset = w_col * stride_w - pad_w;
Dtype* data_col_ptr = data_col;
data_col_ptr += (c_col * height_col + h_col) * width_col + w_col;
const Dtype* data_im_ptr = data_im;
data_im_ptr += (c_im * height + h_offset) * width + w_offset;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
int h_im = h_offset + i * dilation_h;
int w_im = w_offset + j * dilation_w;
// In-bounds positions read the image; padded positions become 0.
*data_col_ptr =
(h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?
data_im_ptr[i * dilation_h * width + j * dilation_w] : 0;
// Consecutive kernel offsets are height_col*width_col apart in col.
data_col_ptr += height_col * width_col;
}
}
}
}
template <typename Dtype>
void im2col_gpu(const Dtype* data_im, const int channels,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w,
    const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w,
    Dtype* data_col) {
  // Host-side launcher for im2col_gpu_kernel. Schedules one thread per
  // element of the output spatial grid, for every input channel; each thread
  // copies one full receptive field.
  const int ext_h = dilation_h * (kernel_h - 1) + 1;  // effective kernel height
  const int ext_w = dilation_w * (kernel_w - 1) + 1;  // effective kernel width
  const int height_col = (height + 2 * pad_h - ext_h) / stride_h + 1;
  const int width_col = (width + 2 * pad_w - ext_w) / stride_w + 1;
  const int num_kernels = channels * height_col * width_col;
  // NOLINT_NEXT_LINE(whitespace/operators)
  im2col_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
                             CAFFE_CUDA_NUM_THREADS>>>(
      num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h,
      pad_w, stride_h, stride_w, dilation_h, dilation_w, height_col,
      width_col, data_col);
  CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void im2col_gpu<float>(const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, float* data_col);
template void im2col_gpu<double>(const double* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, double* data_col);
// Variant of im2col_gpu_kernel that additionally zero-pads the CHANNEL axis
// by pad_c slices on each side: threads whose channel falls in the padded
// range write zeros into col, and real channels are shifted down by pad_c
// when reading the image.
// NOTE(review): the loop bound is extended here to
// n + 2*pad_c*height_col*width_col -- confirm the launcher sizes the grid
// for this extended range, since n alone covers only the real channels.
template <typename Dtype>
__global__ void im2col_gpu_kernel(const int n, const Dtype* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int pad_c,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
Dtype* data_col) {
CUDA_KERNEL_LOOP(index, n + (2 * pad_c*height_col*width_col)){
// Decompose the flat index into (channel, output row, output column);
// here c_im counts channels INCLUDING the leading pad_c padded slices.
const int h_index = index / width_col;
const int h_col = h_index % height_col;
const int w_col = index % width_col;
const int c_im = h_index / height_col;
const int c_col = c_im * kernel_h * kernel_w;
const int h_offset = h_col * stride_h - pad_h;
const int w_offset = w_col * stride_w - pad_w;
Dtype* data_col_ptr = data_col;
data_col_ptr += (c_col * height_col + h_col) * width_col + w_col;
// Padded channel slice (before the first or after the last real channel):
// emit a zero for every kernel offset.
if (index<(pad_c*height_col*width_col) || index >= (n + (pad_c*height_col*width_col))){
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
*data_col_ptr = 0;
data_col_ptr += height_col * width_col;
}
}
}
else{
// Real channel: same as the unpadded kernel, but the image is read at
// channel (c_im - pad_c) to undo the channel-padding shift.
const Dtype* data_im_ptr = data_im;
data_im_ptr += ((c_im - pad_c) * height + h_offset) * width + w_offset;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
int h_im = h_offset + i * dilation_h;
int w_im = w_offset + j * dilation_w;
*data_col_ptr =
(h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?
data_im_ptr[i * dilation_h * width + j * dilation_w] : 0;
data_col_ptr += height_col * width_col;
}
}
}
}
}
template <typename Dtype>
void im2col_gpu(const Dtype* data_im, const int channels,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w, const int pad_c,
    const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w,
    Dtype* data_col) {
  // Launcher for the channel-padding im2col variant. num_kernels counts only
  // the real channels; the kernel itself extends its loop to cover the
  // 2*pad_c zero-filled channel slices.
  const int ext_h = dilation_h * (kernel_h - 1) + 1;  // effective kernel height
  const int ext_w = dilation_w * (kernel_w - 1) + 1;  // effective kernel width
  const int height_col = (height + 2 * pad_h - ext_h) / stride_h + 1;
  const int width_col = (width + 2 * pad_w - ext_w) / stride_w + 1;
  const int num_kernels = channels * height_col * width_col;
  // NOLINT_NEXT_LINE(whitespace/operators)
  im2col_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
                             CAFFE_CUDA_NUM_THREADS>>>(
      num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h,
      pad_w, pad_c, stride_h, stride_w, dilation_h, dilation_w, height_col,
      width_col, data_col);
  CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void im2col_gpu<float>(const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int pad_c, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, float* data_col);
template void im2col_gpu<double>(const double* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int pad_c, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, double* data_col);
// N-dimensional im2col kernel. num_axes is a compile-time constant so the
// per-thread index arrays below can live in registers/local memory.
// Each thread handles one (channel, output-position) pair and iterates over
// all kernel offsets odometer-style, copying image values (or zeros for
// padded positions) into the col buffer.
// im_shape/col_shape are indexed from 1 because index 0 holds the channel
// dimension.
template <typename Dtype, int num_axes>
__global__ void im2col_nd_gpu_kernel(const int n, const Dtype* data_im,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, Dtype* data_col) {
int d_temp[num_axes]; // NOLINT(runtime/arrays)
int d_iter[num_axes]; // NOLINT(runtime/arrays)
// Stage the small shape/param arrays in shared memory; the first num_axes
// (+1) threads of the block do the loads, everyone syncs before use.
__shared__ int shared_dilation[num_axes];
__shared__ int shared_kernel_shape[num_axes];
__shared__ int shared_pad[num_axes];
__shared__ int shared_stride[num_axes];
__shared__ int shared_col_shape[num_axes + 1];
__shared__ int shared_im_shape[num_axes + 1];
if (threadIdx.x < num_axes) {
shared_dilation[threadIdx.x] = dilation[threadIdx.x];
shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x];
shared_pad[threadIdx.x] = pad[threadIdx.x];
shared_stride[threadIdx.x] = stride[threadIdx.x];
}
if (threadIdx.x < num_axes + 1) {
shared_col_shape[threadIdx.x] = col_shape[threadIdx.x];
shared_im_shape[threadIdx.x] = im_shape[threadIdx.x];
}
__syncthreads();
int i;
CUDA_KERNEL_LOOP(index, n) {
// Initialize channel_in, computed in the loop below, with intermediate
// computations used to compute the spatial indices.
int channel_in = index;
int channel_out = 1;
// Peel off the spatial output coordinates (d_temp) from the flat index;
// what remains in channel_in is the input channel.
for (i = num_axes - 1; i >= 0; --i) {
d_temp[i] = channel_in % shared_col_shape[i + 1];
channel_in /= shared_col_shape[i + 1];
channel_out *= shared_kernel_shape[i];
}
channel_out *= channel_in;
int data_col_inc = 1;
// Turn the coordinates into flat offsets: channel_out becomes the col
// offset for kernel offset 0, channel_in the image offset of the
// receptive field's origin; d_temp becomes the (possibly negative)
// image-space origin per axis.
for (i = 0; i < num_axes; ++i) {
channel_out *= shared_col_shape[i + 1];
channel_out += d_temp[i];
d_temp[i] = d_temp[i] * shared_stride[i] - shared_pad[i];
channel_in *= shared_im_shape[i + 1];
channel_in += d_temp[i];
data_col_inc *= shared_col_shape[i + 1];
d_iter[i] = 0;
}
Dtype* data_col_ptr = data_col + channel_out;
const Dtype* data_im_ptr = data_im + channel_in;
bool incremented;
// Odometer loop over every kernel offset combination in d_iter.
do {
bool in_range = true;
for (i = 0; i < num_axes; ++i) {
const int d_iter_im = d_iter[i] * shared_dilation[i] + d_temp[i];
in_range &= d_iter_im >= 0 && d_iter_im < shared_im_shape[i + 1];
if (!in_range) { break; }
}
if (in_range) {
// Flat image offset of the current kernel tap, relative to the
// receptive-field origin.
int data_im_offset = d_iter[0] * shared_dilation[0];
for (i = 1; i < num_axes; ++i) {
data_im_offset *= shared_im_shape[i + 1];
data_im_offset += d_iter[i] * shared_dilation[i];
}
*data_col_ptr = data_im_ptr[data_im_offset];
} else {
// Tap falls into the padding region.
*data_col_ptr = 0;
}
data_col_ptr += data_col_inc;
incremented = false;
// Advance d_iter like an odometer; when every axis wraps, we are done.
for (i = num_axes - 1; i >= 0; --i) {
const int d_max = shared_kernel_shape[i];
if (d_iter[i] == d_max - 1) {
d_iter[i] = 0;
} else { // d_iter[i] < d_max - 1
++d_iter[i];
incremented = true;
break;
}
} // for (int i = num_axes - 1; i >= 0; --i)
} while (incremented); // do
} // CUDA_KERNEL_LOOP(index, n)
}
template <typename Dtype>
void im2col_nd_gpu(const Dtype* data_im, const int num_spatial_axes,
    const int num_kernels, const int* im_shape, const int* col_shape,
    const int* kernel_shape, const int* pad, const int* stride,
    const int* dilation, Dtype* data_col) {
  // N-D im2col launcher. The kernel is templated on the axis count, which
  // must be a compile-time constant; the macro below stamps out one switch
  // case per supported count (1..10) instead of ten hand-written copies.
  // num_axes should be smaller than block size
  DCHECK_LT(num_spatial_axes, CAFFE_CUDA_NUM_THREADS);
#define IM2COL_ND_GPU_LAUNCH_CASE(AXES) \
  case AXES: \
    im2col_nd_gpu_kernel<Dtype, AXES> \
        <<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( \
        num_kernels, data_im, im_shape, col_shape, \
        kernel_shape, pad, stride, dilation, data_col); \
    break
  switch (num_spatial_axes) {
  IM2COL_ND_GPU_LAUNCH_CASE(1);
  IM2COL_ND_GPU_LAUNCH_CASE(2);
  IM2COL_ND_GPU_LAUNCH_CASE(3);
  IM2COL_ND_GPU_LAUNCH_CASE(4);
  IM2COL_ND_GPU_LAUNCH_CASE(5);
  IM2COL_ND_GPU_LAUNCH_CASE(6);
  IM2COL_ND_GPU_LAUNCH_CASE(7);
  IM2COL_ND_GPU_LAUNCH_CASE(8);
  IM2COL_ND_GPU_LAUNCH_CASE(9);
  IM2COL_ND_GPU_LAUNCH_CASE(10);
  default:
    LOG(FATAL) << "im2col_nd_gpu does not support computation with "
               << num_spatial_axes << " spatial axes";
  }
#undef IM2COL_ND_GPU_LAUNCH_CASE
  CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void im2col_nd_gpu<float>(const float* data_im,
const int num_spatial_axes, const int col_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, float* data_col);
template void im2col_nd_gpu<double>(const double* data_im,
const int num_spatial_axes, const int col_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, double* data_col);
// Kernel: fold the "col" matrix back into the image (inverse of im2col).
// One thread per image element; each thread sums every col entry whose
// (dilated, strided) kernel window covers its pixel, so overlapping windows
// accumulate without atomics.
template <typename Dtype>
__global__ void col2im_gpu_kernel(const int n, const Dtype* data_col,
const int height, const int width, const int channels,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
Dtype* data_im) {
CUDA_KERNEL_LOOP(index, n) {
Dtype val = 0;
// Pixel coordinates shifted into the padded coordinate system.
const int w_im = index % width + pad_w;
const int h_im = (index / width) % height + pad_h;
const int c_im = index / (width * height);
int kernel_extent_w = (kernel_w - 1) * dilation_w + 1;
int kernel_extent_h = (kernel_h - 1) * dilation_h + 1;
// compute the start and end of the output
// i.e. the range of output positions whose window can reach this pixel.
const int w_col_start =
(w_im < kernel_extent_w) ? 0 : (w_im - kernel_extent_w) / stride_w + 1;
const int w_col_end = min(w_im / stride_w + 1, width_col);
const int h_col_start =
(h_im < kernel_extent_h) ? 0 : (h_im - kernel_extent_h) / stride_h + 1;
const int h_col_end = min(h_im / stride_h + 1, height_col);
// TODO: use LCM of stride and dilation to avoid unnecessary loops
for (int h_col = h_col_start; h_col < h_col_end; h_col += 1) {
for (int w_col = w_col_start; w_col < w_col_end; w_col += 1) {
// Kernel tap (h_k, w_k) that maps this output position to this pixel;
// it only exists if the offset is a multiple of the dilation.
int h_k = (h_im - h_col * stride_h);
int w_k = (w_im - w_col * stride_w);
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
int data_col_index = (((c_im * kernel_h + h_k) * kernel_w + w_k) *
height_col + h_col) * width_col + w_col;
val += data_col[data_col_index];
}
}
}
data_im[index] = val;
}
}
template <typename Dtype>
void col2im_gpu(const Dtype* data_col, const int channels,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w, const int stride_h,
    const int stride_w, const int dilation_h, const int dilation_w,
    Dtype* data_im) {
  // Host-side launcher for col2im_gpu_kernel. One thread per image element
  // (not per col element), so each output pixel is owned by exactly one
  // thread and no atomic accumulation is needed.
  const int ext_h = dilation_h * (kernel_h - 1) + 1;  // effective kernel height
  const int ext_w = dilation_w * (kernel_w - 1) + 1;  // effective kernel width
  const int height_col = (height + 2 * pad_h - ext_h) / stride_h + 1;
  const int width_col = (width + 2 * pad_w - ext_w) / stride_w + 1;
  const int num_kernels = channels * height * width;
  // NOLINT_NEXT_LINE(whitespace/operators)
  col2im_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
                             CAFFE_CUDA_NUM_THREADS>>>(
      num_kernels, data_col, height, width, channels, kernel_h, kernel_w,
      pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
      height_col, width_col, data_im);
  CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void col2im_gpu<float>(const float* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
float* data_im);
template void col2im_gpu<double>(const double* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
double* data_im);
// Variant of col2im_gpu_kernel matching the channel-padded im2col above:
// the col buffer contains pad_c zero slices before and after the real
// channels, so reads are offset by (c_im + pad_c) to skip the leading pad.
template <typename Dtype>
__global__ void col2im_gpu_kernel(const int n, const Dtype* data_col,
const int height, const int width, const int channels,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int pad_c,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
Dtype* data_im) {
CUDA_KERNEL_LOOP(index, n) {
Dtype val = 0;
// Pixel coordinates shifted into the padded coordinate system.
const int w_im = index % width + pad_w;
const int h_im = (index / width) % height + pad_h;
const int c_im = index / (width * height);
int kernel_extent_w = (kernel_w - 1) * dilation_w + 1;
int kernel_extent_h = (kernel_h - 1) * dilation_h + 1;
// compute the start and end of the output
const int w_col_start =
(w_im < kernel_extent_w) ? 0 : (w_im - kernel_extent_w) / stride_w + 1;
const int w_col_end = min(w_im / stride_w + 1, width_col);
const int h_col_start =
(h_im < kernel_extent_h) ? 0 : (h_im - kernel_extent_h) / stride_h + 1;
const int h_col_end = min(h_im / stride_h + 1, height_col);
// TODO: use LCM of stride and dilation to avoid unnecessary loops
for (int h_col = h_col_start; h_col < h_col_end; h_col += 1) {
for (int w_col = w_col_start; w_col < w_col_end; w_col += 1) {
int h_k = (h_im - h_col * stride_h);
int w_k = (w_im - w_col * stride_w);
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
// (c_im + pad_c): skip the pad_c zero channel-slices at the front
// of the col buffer.
int data_col_index = ((((c_im + pad_c) * kernel_h + h_k) * kernel_w + w_k) *
height_col + h_col) * width_col + w_col;
val += data_col[data_col_index];
}
}
}
data_im[index] = val;
}
}
template <typename Dtype>
void col2im_gpu(const Dtype* data_col, const int channels,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w, const int pad_c, const int stride_h,
    const int stride_w, const int dilation_h, const int dilation_w,
    Dtype* data_im) {
  // Launcher for the channel-padding col2im variant: one thread per image
  // element, accumulation done in-thread so no atomics are required.
  const int ext_h = dilation_h * (kernel_h - 1) + 1;  // effective kernel height
  const int ext_w = dilation_w * (kernel_w - 1) + 1;  // effective kernel width
  const int height_col = (height + 2 * pad_h - ext_h) / stride_h + 1;
  const int width_col = (width + 2 * pad_w - ext_w) / stride_w + 1;
  const int num_kernels = channels * height * width;
  // NOLINT_NEXT_LINE(whitespace/operators)
  col2im_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
                             CAFFE_CUDA_NUM_THREADS>>>(
      num_kernels, data_col, height, width, channels, kernel_h, kernel_w,
      pad_h, pad_w, pad_c, stride_h, stride_w, dilation_h, dilation_w,
      height_col, width_col, data_im);
  CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void col2im_gpu<float>(const float* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int pad_c, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
float* data_im);
template void col2im_gpu<double>(const double* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int pad_c, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
double* data_im);
// N-dimensional col2im kernel (inverse of im2col_nd_gpu_kernel).
// One thread per image element; each thread walks, odometer-style, over all
// output positions whose kernel window covers its pixel and sums the
// matching col entries, so no atomics are needed.
// im_shape/col_shape are indexed from 1 because index 0 holds the channel
// dimension.
template <typename Dtype, int num_axes>
__global__ void col2im_nd_gpu_kernel(const int n, const Dtype* data_col,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, Dtype* data_im) {
int d_im[num_axes]; // NOLINT(runtime/arrays)
int d_col_iter[num_axes]; // NOLINT(runtime/arrays)
int d_col_start[num_axes]; // NOLINT(runtime/arrays)
int d_col_end[num_axes]; // NOLINT(runtime/arrays)
// Stage the small shape/param arrays in shared memory; the first num_axes
// (+1) threads of the block do the loads, everyone syncs before use.
__shared__ int shared_dilation[num_axes];
__shared__ int shared_kernel_shape[num_axes];
__shared__ int shared_pad[num_axes];
__shared__ int shared_stride[num_axes];
__shared__ int shared_col_shape[num_axes + 1];
__shared__ int shared_im_shape[num_axes + 1];
if (threadIdx.x < num_axes) {
shared_dilation[threadIdx.x] = dilation[threadIdx.x];
shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x];
shared_pad[threadIdx.x] = pad[threadIdx.x];
shared_stride[threadIdx.x] = stride[threadIdx.x];
}
if (threadIdx.x < num_axes + 1) {
shared_col_shape[threadIdx.x] = col_shape[threadIdx.x];
shared_im_shape[threadIdx.x] = im_shape[threadIdx.x];
}
__syncthreads();
CUDA_KERNEL_LOOP(index, n) {
// Initialize channel_in, computed in the loop below, with intermediate
// computations used to compute the spatial indices.
int c_im = index;
// Calculate d_im (image dimensions).
// d_im[i] becomes the pixel coordinate on axis i in the PADDED coordinate
// system; what remains in c_im afterwards is the channel.
for (int i = num_axes - 1; i >= 0; --i) {
d_im[i] = c_im % shared_im_shape[i + 1] + shared_pad[i];
c_im /= shared_im_shape[i + 1];
}
// Calculate col start/end indices.
// Per axis: the half-open range of output positions whose (dilated)
// kernel window can reach this pixel.
bool done = false;
for (int i = 0; i < num_axes; ++i) {
const int kernel_extent =
shared_dilation[i] * (shared_kernel_shape[i] - 1) + 1;
d_col_start[i] = d_col_iter[i] =
(d_im[i] < kernel_extent) ? 0 :
(d_im[i] - kernel_extent) / shared_stride[i] + 1;
d_col_end[i] =
min(d_im[i] / shared_stride[i] + 1, shared_col_shape[i + 1]);
if (d_col_start[i] >= d_col_end[i]) {
// Skip computation if the dimension is 0 at any spatial axis --
// final val will be 0.
data_im[index] = 0;
done = true;
break; // for (int i = 0; i < num_axes; ++i)
}
}
if (done) {
continue; // CUDA_KERNEL_LOOP(index, n)
}
// Loop over the col to compute the output val.
Dtype val = 0;
bool incremented = true;
bool skip = false;
do {
// Compute the final offset.
int final_offset = 0;
int kernel_shape_prod = 1;
int kernel_index;
// Recover the kernel tap per axis; if the offset is not a multiple of
// the dilation there is no tap mapping this position, so skip it.
for (int i = num_axes - 1; i >= 0; --i) {
kernel_index = d_im[i] - d_col_iter[i] * shared_stride[i];
if (kernel_index % shared_dilation[i]) {
skip = true;
break;
} else {
kernel_index /= shared_dilation[i];
final_offset += kernel_index * kernel_shape_prod;
kernel_shape_prod *= shared_kernel_shape[i];
}
}
if (!skip) {
// c_im is the channel here; prepend it, then append the spatial
// output coordinates to form the flat col offset.
final_offset += kernel_shape_prod * c_im;
for (int i = 0; i < num_axes; ++i) {
final_offset *= shared_col_shape[i + 1];
final_offset += d_col_iter[i];
}
val += data_col[final_offset];
}
skip = false;
incremented = false;
// Advance d_col_iter like an odometer within [d_col_start, d_col_end).
for (int i = num_axes - 1; i >= 0; --i) {
const int d_max = d_col_end[i];
if (d_col_iter[i] == d_max - 1) {
d_col_iter[i] = d_col_start[i];
} else { // d_col_iter[i] < d_max - 1
++d_col_iter[i];
incremented = true;
break; // for (int i = num_axes - 1; i >= 0; --i)
}
} // for (int i = num_axes - 1; i >= 0; --i)
} while (incremented);
data_im[index] = val;
} // CUDA_KERNEL_LOOP(index, n)
}
template <typename Dtype>
void col2im_nd_gpu(const Dtype* data_col, const int num_spatial_axes,
    const int im_size, const int* im_shape, const int* col_shape,
    const int* kernel_shape, const int* pad, const int* stride,
    const int* dilation, Dtype* data_im) {
  // N-D col2im launcher (one thread per image element). The kernel is
  // templated on the axis count, which must be a compile-time constant; the
  // macro below stamps out one switch case per supported count (1..10).
  // num_axes should be smaller than block size
  DCHECK_LT(num_spatial_axes, CAFFE_CUDA_NUM_THREADS);
#define COL2IM_ND_GPU_LAUNCH_CASE(AXES) \
  case AXES: \
    col2im_nd_gpu_kernel<Dtype, AXES> \
        <<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>( \
        im_size, data_col, im_shape, col_shape, \
        kernel_shape, pad, stride, dilation, data_im); \
    break
  switch (num_spatial_axes) {
  COL2IM_ND_GPU_LAUNCH_CASE(1);
  COL2IM_ND_GPU_LAUNCH_CASE(2);
  COL2IM_ND_GPU_LAUNCH_CASE(3);
  COL2IM_ND_GPU_LAUNCH_CASE(4);
  COL2IM_ND_GPU_LAUNCH_CASE(5);
  COL2IM_ND_GPU_LAUNCH_CASE(6);
  COL2IM_ND_GPU_LAUNCH_CASE(7);
  COL2IM_ND_GPU_LAUNCH_CASE(8);
  COL2IM_ND_GPU_LAUNCH_CASE(9);
  COL2IM_ND_GPU_LAUNCH_CASE(10);
  default:
    LOG(FATAL) << "col2im_nd_gpu does not support computation with "
               << num_spatial_axes << " spatial axes";
  }
#undef COL2IM_ND_GPU_LAUNCH_CASE
  CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void col2im_nd_gpu<float>(const float* data_col,
const int num_spatial_axes, const int im_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, float* data_im);
template void col2im_nd_gpu<double>(const double* data_col,
const int num_spatial_axes, const int im_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, double* data_im);
} // namespace caffe
|
e7586b6fb9283caa71a6b3bc2ed88d50c18f86c9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "reduction.h"
/*
Parallel sum reduction using shared memory
- takes log(n) steps for n input elements
- uses n threads
- only works for power-of-2 arrays
*/
// Block-level parallel sum reduction in dynamic shared memory using
// sequential addressing (no bank conflicts in the inner loop).
// Requires: power-of-two blockDim.x (see file comment) and a dynamic
// shared-memory allocation of blockDim.x * sizeof(float) at launch.
// Writes one partial sum per block to g_out[blockIdx.x].
__global__ void
reduction_kernel_2(float *g_out, float *g_in, unsigned int size)
{
unsigned int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
extern __shared__ float s_data[];
// Stage one input element per thread; out-of-range lanes contribute 0.
s_data[threadIdx.x] = (idx_x < size) ? g_in[idx_x] : 0.f;
__syncthreads();
// do reduction
// sequential addressing
// Halve the active range each step; the barrier is outside the branch so
// every thread in the block reaches it.
for (unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1)
{
if (threadIdx.x < stride)
s_data[threadIdx.x] += s_data[threadIdx.x + stride];
__syncthreads();
}
if (threadIdx.x == 0)
g_out[blockIdx.x] = s_data[0];
}
int reduction(float *g_outPtr, float *g_inPtr, int size, int n_threads)
{
int n_blocks = (size + n_threads - 1) / n_threads;
hipLaunchKernelGGL(( reduction_kernel_2), dim3(n_blocks), dim3(n_threads), n_threads * sizeof(float), 0 , g_outPtr, g_inPtr, size);
return n_blocks;
} | e7586b6fb9283caa71a6b3bc2ed88d50c18f86c9.cu | #include <stdio.h>
#include "reduction.h"
/*
Parallel sum reduction using shared memory
- takes log(n) steps for n input elements
- uses n threads
- only works for power-of-2 arrays
*/
// Block-level sum reduction in dynamic shared memory (sequential
// addressing). Requires a power-of-two block size and a dynamic
// shared-memory allocation of blockDim.x * sizeof(float); one partial sum
// is written per block to g_out[blockIdx.x].
__global__ void
reduction_kernel_2(float *g_out, float *g_in, unsigned int size)
{
    extern __shared__ float partial[];
    const unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int lid = threadIdx.x;
    // Stage one element per thread; out-of-range lanes contribute zero.
    partial[lid] = (gid < size) ? g_in[gid] : 0.f;
    __syncthreads();
    // Halve the active range each step; the barrier stays outside the branch
    // so every thread in the block reaches it.
    for (unsigned int active = blockDim.x >> 1; active > 0u; active >>= 1)
    {
        if (lid < active)
            partial[lid] += partial[lid + active];
        __syncthreads();
    }
    if (lid == 0)
        g_out[blockIdx.x] = partial[0];
}
int reduction(float *g_outPtr, float *g_inPtr, int size, int n_threads)
{
int n_blocks = (size + n_threads - 1) / n_threads;
reduction_kernel_2<<< n_blocks, n_threads, n_threads * sizeof(float), 0 >>>(g_outPtr, g_inPtr, size);
return n_blocks;
} |
520ec349b6251fda78ab2bdb91597db97797f78e.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <iostream>
#include <stdio.h>
using namespace std;
#define cudaCheck(error) \
if (error != hipSuccess) { \
printf("Fatal error: %s at %s:%d\n", \
hipGetErrorString(error), \
__FILE__, __LINE__); \
exit(1); \
}
// Level-synchronous BFS expansion step (vertex-parallel, CSR graph).
// Each thread owns one vertex; if that vertex sits on the current frontier
// (distance == level) it relaxes all its neighbors to level + 1.
// NOTE(review): writes to distance[adj] and *improvement are racy but
// benign for BFS -- every racing writer stores the same value.
__global__ void cudawbfs(int *distance, unsigned int *row_ptr, int *col_ind, int nov, int *improvement, int level)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
int localImprovement = 0;
if(tid < nov && distance[tid] == level) {
// Scan this vertex's adjacency list in CSR layout.
for(int e = row_ptr[tid]; e < row_ptr[tid + 1]; e++){
int adj = col_ind[e];
// Unvisited vertices are marked with a negative distance.
if(distance[adj] < 0){
distance[adj] = level + 1;
localImprovement = 1;
}
}
}
// Signal the host that at least one vertex was discovered this level.
if(localImprovement) {
(*improvement) = localImprovement;
}
}
void wbfs(unsigned int * row_ptr, int * col_ind, int * distance, int nov, int * d_distance, unsigned int * d_row_ptr, int * d_col_ind){
    // Level-synchronous, vertex-parallel BFS driver.
    //   row_ptr/col_ind : CSR graph on the host (row_ptr has nov+1 entries)
    //   distance        : in/out host array, pre-initialized by the caller
    //                     (unvisited vertices negative); filled with BFS levels
    //   nov             : number of vertices
    // NOTE(review): d_distance/d_row_ptr/d_col_ind are received by value, so
    // the device buffers allocated below never reach the caller -- they are
    // purely local here; confirm the signature is intentional.
    // Fixes vs. the original: d_improvement, the dead d_nov/d_level
    // allocations, the heap-allocated improvement flag, and both timing
    // events were leaked; all HIP calls are now checked.
    size_t size_of_rowptr = (nov + 1) * sizeof(int);
    size_t size_of_colind = row_ptr[nov] * sizeof(int);
    int *d_improvement;
    // Device allocations (the unused d_nov/d_level of the original removed).
    cudaCheck(hipMalloc((void**) &d_improvement, sizeof(int)));
    cudaCheck(hipMalloc((void**) &d_row_ptr, size_of_rowptr));
    cudaCheck(hipMalloc((void**) &d_distance, size_of_rowptr));
    cudaCheck(hipMalloc((void**) &d_col_ind, size_of_colind));
    // Upload graph and initial distances.
    cudaCheck(hipMemcpy(d_distance, distance, size_of_rowptr, hipMemcpyHostToDevice));
    cudaCheck(hipMemcpy(d_row_ptr, row_ptr, size_of_rowptr, hipMemcpyHostToDevice));
    cudaCheck(hipMemcpy(d_col_ind, col_ind, size_of_colind, hipMemcpyHostToDevice));
    // Timing via HIP events.
    hipEvent_t start;
    hipEvent_t stop;
    cudaCheck(hipEventCreate(&start));
    cudaCheck(hipEventCreate(&stop));
    cudaCheck(hipEventRecord(start, 0));
    // Expand one frontier per iteration until a level discovers nothing new.
    int improvement;
    int level = 1;
    do {
        improvement = 0;
        cudaCheck(hipMemcpy(d_improvement, &improvement, sizeof(int), hipMemcpyHostToDevice));
        hipLaunchKernelGGL(( cudawbfs), dim3((nov + 1023) / 1024), dim3(1024), 0, 0, d_distance, d_row_ptr, d_col_ind, nov, d_improvement, level);
        cudaCheck(hipMemcpy(&improvement, d_improvement, sizeof(int), hipMemcpyDeviceToHost));
        level++;
    } while (improvement == 1);
    cudaCheck(hipEventRecord(stop, 0));
    cudaCheck(hipEventSynchronize(stop));
    float elapsed;
    cudaCheck(hipEventElapsedTime(&elapsed, start, stop));
    // Copy the computed levels back to the caller's array.
    cudaCheck(hipMemcpy(distance, d_distance, size_of_rowptr, hipMemcpyDeviceToHost));
    // Release every resource acquired in this function.
    cudaCheck(hipFree(d_improvement));
    cudaCheck(hipFree(d_row_ptr));
    cudaCheck(hipFree(d_distance));
    cudaCheck(hipFree(d_col_ind));
    cudaCheck(hipEventDestroy(start));
    cudaCheck(hipEventDestroy(stop));
    printf("GPU WBFS time: %f s\n", elapsed / 1000);
}
// One frontier-queue BFS step: each thread expands one queued vertex and
// appends newly discovered vertices to nextQueue, bumping *nextSize
// atomically to reserve a slot.
__global__ void cudaqbfs(int *distance, unsigned int *row_ptr, int *col_ind, int *queue, int *nextQueue, int size, int *nextSize, int level) {
int index, u, v, tid = threadIdx.x + blockDim.x * blockIdx.x;
if(tid < size) {
u = queue[tid];
for(int e = row_ptr[u]; e < row_ptr[u + 1]; e++) {
v = col_ind[e];
// NOTE(review): the -1 test and the store are not atomic, so two
// threads can both pass the test and enqueue v twice; the final
// distance value is still level + 1 either way.
if (distance[v] == -1) {
distance[v] = level + 1;
index = atomicAdd(nextSize, 1);
nextQueue[index] = v;
}
}
}
}
// Frontier-queue BFS driver. `distance` must already contain 0 for the
// source, 1 for its neighbours and -1 for unvisited vertices; on return it
// holds the BFS level of every reached vertex.
void qbfs(unsigned int *row_ptr, int *col_ind, int *distance, int nov, int source) {
    // Seed the first frontier with the source's pre-labelled neighbours.
    int srcNeigh = row_ptr[source + 1] - row_ptr[source];
    int *srcArr = new int[srcNeigh];
    int index = 0;
    for (int i = row_ptr[source]; i < row_ptr[source + 1]; i++) {
        if (distance[col_ind[i]] == 1) {
            srcArr[index++] = col_ind[i];
        }
    }
    int size_of_rowptr = (nov + 1) * sizeof(int);
    int size_of_colind = row_ptr[nov] * sizeof(int);
    unsigned int *d_row_ptr;
    int *d_col_ind, *d_distance, *d_queue, *d_nextQueue, *d_nextSize;
    cudaCheck(hipMalloc((void**) &d_row_ptr, size_of_rowptr));
    cudaCheck(hipMalloc((void**) &d_col_ind, size_of_colind));
    cudaCheck(hipMalloc((void**) &d_distance, size_of_rowptr));
    cudaCheck(hipMalloc((void**) &d_queue, size_of_rowptr));
    cudaCheck(hipMalloc((void**) &d_nextQueue, size_of_rowptr));
    cudaCheck(hipMalloc((void**) &d_nextSize, sizeof(int)));
    cudaCheck(hipMemcpy(d_distance, distance, size_of_rowptr, hipMemcpyHostToDevice));
    cudaCheck(hipMemcpy(d_row_ptr, row_ptr, size_of_rowptr, hipMemcpyHostToDevice));
    cudaCheck(hipMemcpy(d_col_ind, col_ind, size_of_colind, hipMemcpyHostToDevice));
    cudaCheck(hipMemcpy(d_queue, srcArr, srcNeigh * sizeof(int), hipMemcpyHostToDevice));
    hipEvent_t start;
    hipEvent_t stop;
    cudaCheck(hipEventCreate(&start));
    cudaCheck(hipEventCreate(&stop));
    cudaCheck(hipEventRecord(start, 0));
    int size = srcNeigh;
    int nextSize = 0; // stack variable instead of the original leaked heap int
    int level = 1;
    // `while` (rather than the original do/while) also guards against a
    // zero-block launch when the source has no labelled neighbours.
    while (size > 0) {
        cudaCheck(hipMemcpy(d_nextSize, &nextSize, sizeof(int), hipMemcpyHostToDevice));
        hipLaunchKernelGGL(( cudaqbfs), dim3((size + 1023) / 1024), dim3(1024), 0, 0, d_distance, d_row_ptr, d_col_ind, d_queue, d_nextQueue, size, d_nextSize, level);
        cudaCheck(hipMemcpy(&nextSize, d_nextSize, sizeof(int), hipMemcpyDeviceToHost));
        level++;
        size = nextSize;
        nextSize = 0;
        swap(d_queue, d_nextQueue); // next frontier becomes the current one
    }
    cudaCheck(hipEventRecord(stop, 0));
    cudaCheck(hipEventSynchronize(stop));
    float elapsed;
    cudaCheck(hipEventElapsedTime(&elapsed, start, stop));
    cudaCheck(hipMemcpy(distance, d_distance, size_of_rowptr, hipMemcpyDeviceToHost));
    // The original leaked srcArr, the heap nextSize counter and both events.
    delete[] srcArr;
    cudaCheck(hipFree(d_row_ptr));
    cudaCheck(hipFree(d_col_ind));
    cudaCheck(hipFree(d_distance));
    cudaCheck(hipFree(d_queue));
    cudaCheck(hipFree(d_nextQueue));
    cudaCheck(hipFree(d_nextSize));
    cudaCheck(hipEventDestroy(start));
    cudaCheck(hipEventDestroy(stop));
    printf("GPU QBFS time: %f s\n", elapsed / 1000);
}
// Top-down vertex-parallel BFS step for the hybrid driver. Besides labelling
// unvisited neighbours it accumulates -distance[adj] into *mf (unvisited
// vertices carry negative distance values), which the host uses as the
// frontier-size heuristic for direction switching.
__global__ void cudatdwbfs(int *distance, unsigned int *row_ptr, int *col_ind, int nov, int level, int *mf) {
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if(tid < nov && distance[tid] == level) {
for(int e = row_ptr[tid]; e < row_ptr[tid + 1]; e++) {
int adj = col_ind[e];
// NOTE(review): check-then-set on distance[adj] is racy; a vertex
// reachable from two frontier vertices in the same level can be
// counted twice in *mf.
if(distance[adj] < 0) {
atomicAdd(mf, -distance[adj]);
distance[adj] = level + 1;
}
}
}
}
// Top-down frontier-queue BFS step for the (currently commented-out)
// queue-based hybrid loop: expands queued vertices, reserves nextQueue slots
// atomically, and accumulates -distance[v] into the *mf heuristic.
__global__ void cudatdqbfs(int *distance, unsigned int *row_ptr, int *col_ind, int *queue, int *nextQueue, int size, int *nextSize, int level, int *mf) {
int index, u, v, tid = threadIdx.x + blockDim.x * blockIdx.x;
if(tid < size) {
u = queue[tid];
for(int e = row_ptr[u]; e < row_ptr[u + 1]; e++) {
v = col_ind[e];
// NOTE(review): non-atomic check-then-set; duplicate enqueues and
// double-counted *mf contributions are possible within one level.
if (distance[v] < 0) {
index = atomicAdd(nextSize, 1);
atomicAdd(mf, -distance[v]);
distance[v] = level + 1;
nextQueue[index] = v;
}
}
}
}
// Bottom-up BFS step: each *unvisited* vertex scans its in-neighbours (the
// inverse CSR arrays) and adopts level + 1 as soon as one parent is on the
// current frontier; `break` stops at the first parent found. Contributes
// -distance[tid] to the *mf heuristic when the vertex is claimed.
__global__ void cudabuwbfs(int *distance, unsigned int *row_ptr_inv, int *col_ind_inv, int nov, int level, int *mf) {
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if(tid < nov && distance[tid] < 0) {
for(int e = row_ptr_inv[tid]; e < row_ptr_inv[tid + 1]; e++) {
int adj = col_ind_inv[e];
if(distance[adj] == level) {
atomicAdd(mf, -distance[tid]);
distance[tid] = level + 1;
break;
}
}
}
}
// Bottom-up BFS step for the (currently commented-out) queue-based hybrid
// loop: identical to cudabuwbfs but additionally counts newly claimed
// vertices in *nextSize so the host can size the next frontier.
__global__ void cudabuqbfs(int *distance, unsigned int *row_ptr_inv, int *col_ind_inv, int nov, int level, int *nextSize, int *mf) {
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if(tid < nov && distance[tid] < 0) {
for(int e = row_ptr_inv[tid]; e < row_ptr_inv[tid + 1]; e++) {
int adj = col_ind_inv[e];
if(distance[adj] == level) {
atomicAdd(mf, -distance[tid]);
atomicAdd(nextSize, 1);
distance[tid] = level + 1;
break;
}
}
}
}
// Direction-optimizing (hybrid) BFS driver: each level runs either a
// bottom-up step over the inverse graph (large frontier, mf > mu / alpha)
// or a top-down step (small frontier). mf approximates edges incident to
// the frontier, mu is the total edge count, alpha the switch threshold.
// `distance` must hold 0 for the source, 1 for its neighbours and negative
// values for unvisited vertices; it is returned with the final BFS levels.
void hybrid(unsigned int *row_ptr, unsigned int *row_ptr_inv, int *col_ind, int *col_ind_inv, int *distance, int nov, int source, double alpha) { // int init_mf?
    int size_of_rowptr = (nov + 1) * sizeof(int);
    int size_of_colind = row_ptr[nov] * sizeof(int);
    unsigned int *d_row_ptr, *d_row_ptr_inv;
    int *d_col_ind, *d_col_ind_inv, *d_distance, *d_mf;
    cudaCheck(hipMalloc((void**) &d_row_ptr, size_of_rowptr));
    cudaCheck(hipMalloc((void**) &d_row_ptr_inv, size_of_rowptr));
    cudaCheck(hipMalloc((void**) &d_col_ind, size_of_colind));
    cudaCheck(hipMalloc((void**) &d_col_ind_inv, size_of_colind));
    cudaCheck(hipMalloc((void**) &d_distance, size_of_rowptr));
    cudaCheck(hipMalloc((void**) &d_mf, sizeof(int)));
    cudaCheck(hipMemcpy(d_distance, distance, size_of_rowptr, hipMemcpyHostToDevice));
    cudaCheck(hipMemcpy(d_row_ptr, row_ptr, size_of_rowptr, hipMemcpyHostToDevice));
    cudaCheck(hipMemcpy(d_row_ptr_inv, row_ptr_inv, size_of_rowptr, hipMemcpyHostToDevice));
    cudaCheck(hipMemcpy(d_col_ind, col_ind, size_of_colind, hipMemcpyHostToDevice));
    cudaCheck(hipMemcpy(d_col_ind_inv, col_ind_inv, size_of_colind, hipMemcpyHostToDevice));
    // Seed queue buffers. They are only consumed by the experimental
    // queue-based hybrid variant (kernels cudatdqbfs/cudabuqbfs), but are
    // kept allocated so that variant can be re-enabled without reworking
    // the setup code.
    int srcNeigh = row_ptr[source + 1] - row_ptr[source];
    int *srcArr = new int[srcNeigh];
    int index = 0;
    for (int i = row_ptr[source]; i < row_ptr[source + 1]; i++) {
        if (distance[col_ind[i]] == 1) {
            srcArr[index++] = col_ind[i];
        }
    }
    int *d_queue, *d_nextQueue, *d_nextSize;
    cudaCheck(hipMalloc((void**) &d_queue, size_of_rowptr));
    cudaCheck(hipMalloc((void**) &d_nextQueue, size_of_rowptr));
    cudaCheck(hipMalloc((void**) &d_nextSize, sizeof(int)));
    cudaCheck(hipMemcpy(d_queue, srcArr, srcNeigh * sizeof(int), hipMemcpyHostToDevice));
    int mf = row_ptr[source + 1] - row_ptr[source]; // number of traversed edges
    int mu = row_ptr[nov]; // total number of edges
    int prev_mf = -1;
    int level = 1;
    hipEvent_t start;
    hipEvent_t stop;
    cudaCheck(hipEventCreate(&start));
    cudaCheck(hipEventCreate(&stop));
    cudaCheck(hipEventRecord(start, 0));
    // Iterate until a whole level discovers no new work (mf stops growing).
    while (mf != prev_mf) {
        prev_mf = mf;
        cudaCheck(hipMemcpy(d_mf, &mf, sizeof(int), hipMemcpyHostToDevice));
        if (mf > mu / alpha) {
            hipLaunchKernelGGL(( cudabuwbfs), dim3((nov + 1023) / 1024), dim3(1024), 0, 0, d_distance, d_row_ptr_inv, d_col_ind_inv, nov, level, d_mf);
        }
        else {
            hipLaunchKernelGGL(( cudatdwbfs), dim3((nov + 1023) / 1024), dim3(1024), 0, 0, d_distance, d_row_ptr, d_col_ind, nov, level, d_mf);
        }
        cudaCheck(hipMemcpy(&mf, d_mf, sizeof(int), hipMemcpyDeviceToHost));
        level++;
    }
    cudaCheck(hipEventRecord(stop, 0));
    cudaCheck(hipEventSynchronize(stop));
    float elapsed;
    cudaCheck(hipEventElapsedTime(&elapsed, start, stop));
    cudaCheck(hipMemcpy(distance, d_distance, size_of_rowptr, hipMemcpyDeviceToHost));
    // The original leaked srcArr, an entirely unused heap `improvement`
    // int, a heap nextSize counter and both timing events.
    delete[] srcArr;
    cudaCheck(hipFree(d_row_ptr));
    cudaCheck(hipFree(d_row_ptr_inv));
    cudaCheck(hipFree(d_col_ind));
    cudaCheck(hipFree(d_col_ind_inv));
    cudaCheck(hipFree(d_distance));
    cudaCheck(hipFree(d_mf));
    cudaCheck(hipFree(d_queue));
    cudaCheck(hipFree(d_nextQueue));
    cudaCheck(hipFree(d_nextSize));
    cudaCheck(hipEventDestroy(start));
    cudaCheck(hipEventDestroy(stop));
    printf("GPU Hybrid time: %f s\n", elapsed / 1000);
} | 520ec349b6251fda78ab2bdb91597db97797f78e.cu | #include <cuda.h>
#include <iostream>
#include <stdio.h>
using namespace std;
// Check the result of a CUDA runtime call and abort with file/line context on
// failure. The do { } while (0) wrapper makes the expansion a single
// statement (safe inside unbraced if/else), and the argument is evaluated
// exactly once -- the original macro expanded `error` twice, re-executing
// the runtime call inside cudaGetErrorString on the failure path.
#define cudaCheck(error) \
do { \
cudaError_t cudaCheck_err_ = (error); \
if (cudaCheck_err_ != cudaSuccess) { \
printf("Fatal error: %s at %s:%d\n", \
cudaGetErrorString(cudaCheck_err_), \
__FILE__, __LINE__); \
exit(1); \
} \
} while (0)
// One level-synchronous BFS expansion step, one thread per vertex.
// Threads whose vertex is on the current frontier (distance == level) label
// every still-unvisited neighbour (distance < 0) with level + 1 and signal
// the host, via *improvement, that another level is needed.
__global__ void cudawbfs(int *distance, unsigned int *row_ptr, int *col_ind, int nov, int *improvement, int level)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
int localImprovement = 0;
if(tid < nov && distance[tid] == level) {
for(int e = row_ptr[tid]; e < row_ptr[tid + 1]; e++){
int adj = col_ind[e];
if(distance[adj] < 0){
distance[adj] = level + 1;
localImprovement = 1;
}
}
}
// Non-atomic store is acceptable here: every writer stores the same value
// (1), and the flag only tells the host to run one more level.
if(localImprovement) {
(*improvement) = localImprovement;
}
}
// Vertex-parallel ("workload") BFS driver: runs cudawbfs level by level until
// no vertex is newly labelled. `distance` is in/out (negative == unvisited);
// row_ptr/col_ind describe the graph in CSR form; nov = number of vertices.
// The d_* parameters are overwritten internally (they are passed by value,
// so callers' pointers are never updated) -- kept for interface stability.
void wbfs(unsigned int * row_ptr, int * col_ind, int * distance, int nov, int * d_distance, unsigned int * d_row_ptr, int * d_col_ind){
    // Byte sizes of the CSR arrays (int and unsigned int have equal size).
    int size_of_rowptr = (nov + 1) * sizeof(int);
    int size_of_colind = row_ptr[nov] * sizeof(int);
    // Device flag: did any vertex get labelled during this level?
    int *d_improvement;
    // The original also allocated d_nov and d_level, which were never read
    // by the kernel and never freed -- dropped here.
    cudaCheck(cudaMalloc((void**) &d_improvement, sizeof(int)));
    cudaCheck(cudaMalloc((void**) &d_row_ptr, size_of_rowptr));
    cudaCheck(cudaMalloc((void**) &d_distance, size_of_rowptr));
    cudaCheck(cudaMalloc((void**) &d_col_ind, size_of_colind));
    cudaCheck(cudaMemcpy(d_distance, distance, size_of_rowptr, cudaMemcpyHostToDevice));
    cudaCheck(cudaMemcpy(d_row_ptr, row_ptr, size_of_rowptr, cudaMemcpyHostToDevice));
    cudaCheck(cudaMemcpy(d_col_ind, col_ind, size_of_colind, cudaMemcpyHostToDevice));
    cudaEvent_t start;
    cudaEvent_t stop;
    cudaCheck(cudaEventCreate(&start));
    cudaCheck(cudaEventCreate(&stop));
    cudaCheck(cudaEventRecord(start, 0));
    int improvement = 0; // stack variable instead of the original leaked heap int
    int level = 1;
    do{
        improvement = 0;
        cudaCheck(cudaMemcpy(d_improvement, &improvement, sizeof(int), cudaMemcpyHostToDevice));
        cudawbfs<<<(nov + 1023) / 1024, 1024>>>(d_distance, d_row_ptr, d_col_ind, nov, d_improvement, level);
        cudaCheck(cudaMemcpy(&improvement, d_improvement, sizeof(int), cudaMemcpyDeviceToHost));
        level++;
    } while(improvement == 1);
    cudaCheck(cudaEventRecord(stop, 0));
    cudaCheck(cudaEventSynchronize(stop));
    float elapsed;
    cudaCheck(cudaEventElapsedTime(&elapsed, start, stop));
    // Copy the final BFS levels back to the host.
    cudaCheck(cudaMemcpy(distance, d_distance, size_of_rowptr, cudaMemcpyDeviceToHost));
    // Release everything (the original leaked d_improvement, d_nov, d_level,
    // both events and the heap improvement counter).
    cudaCheck(cudaFree(d_improvement));
    cudaCheck(cudaFree(d_row_ptr));
    cudaCheck(cudaFree(d_distance));
    cudaCheck(cudaFree(d_col_ind));
    cudaCheck(cudaEventDestroy(start));
    cudaCheck(cudaEventDestroy(stop));
    printf("GPU WBFS time: %f s\n", elapsed / 1000);
}
// One frontier-queue BFS step: each thread expands one queued vertex and
// appends newly discovered vertices to nextQueue, bumping *nextSize
// atomically to reserve a slot.
__global__ void cudaqbfs(int *distance, unsigned int *row_ptr, int *col_ind, int *queue, int *nextQueue, int size, int *nextSize, int level) {
int index, u, v, tid = threadIdx.x + blockDim.x * blockIdx.x;
if(tid < size) {
u = queue[tid];
for(int e = row_ptr[u]; e < row_ptr[u + 1]; e++) {
v = col_ind[e];
// NOTE(review): the -1 test and the store are not atomic, so two
// threads can both pass the test and enqueue v twice; the final
// distance value is still level + 1 either way.
if (distance[v] == -1) {
distance[v] = level + 1;
index = atomicAdd(nextSize, 1);
nextQueue[index] = v;
}
}
}
}
// Frontier-queue BFS driver. `distance` must already contain 0 for the
// source, 1 for its neighbours and -1 for unvisited vertices; on return it
// holds the BFS level of every reached vertex.
void qbfs(unsigned int *row_ptr, int *col_ind, int *distance, int nov, int source) {
    // Seed the first frontier with the source's pre-labelled neighbours.
    int srcNeigh = row_ptr[source + 1] - row_ptr[source];
    int *srcArr = new int[srcNeigh];
    int index = 0;
    for (int i = row_ptr[source]; i < row_ptr[source + 1]; i++) {
        if (distance[col_ind[i]] == 1) {
            srcArr[index++] = col_ind[i];
        }
    }
    int size_of_rowptr = (nov + 1) * sizeof(int);
    int size_of_colind = row_ptr[nov] * sizeof(int);
    unsigned int *d_row_ptr;
    int *d_col_ind, *d_distance, *d_queue, *d_nextQueue, *d_nextSize;
    cudaCheck(cudaMalloc((void**) &d_row_ptr, size_of_rowptr));
    cudaCheck(cudaMalloc((void**) &d_col_ind, size_of_colind));
    cudaCheck(cudaMalloc((void**) &d_distance, size_of_rowptr));
    cudaCheck(cudaMalloc((void**) &d_queue, size_of_rowptr));
    cudaCheck(cudaMalloc((void**) &d_nextQueue, size_of_rowptr));
    cudaCheck(cudaMalloc((void**) &d_nextSize, sizeof(int)));
    cudaCheck(cudaMemcpy(d_distance, distance, size_of_rowptr, cudaMemcpyHostToDevice));
    cudaCheck(cudaMemcpy(d_row_ptr, row_ptr, size_of_rowptr, cudaMemcpyHostToDevice));
    cudaCheck(cudaMemcpy(d_col_ind, col_ind, size_of_colind, cudaMemcpyHostToDevice));
    cudaCheck(cudaMemcpy(d_queue, srcArr, srcNeigh * sizeof(int), cudaMemcpyHostToDevice));
    cudaEvent_t start;
    cudaEvent_t stop;
    cudaCheck(cudaEventCreate(&start));
    cudaCheck(cudaEventCreate(&stop));
    cudaCheck(cudaEventRecord(start, 0));
    int size = srcNeigh;
    int nextSize = 0; // stack variable instead of the original leaked heap int
    int level = 1;
    // `while` (rather than the original do/while) also guards against a
    // zero-block launch when the source has no labelled neighbours.
    while (size > 0) {
        cudaCheck(cudaMemcpy(d_nextSize, &nextSize, sizeof(int), cudaMemcpyHostToDevice));
        cudaqbfs<<<(size + 1023) / 1024, 1024>>>(d_distance, d_row_ptr, d_col_ind, d_queue, d_nextQueue, size, d_nextSize, level);
        cudaCheck(cudaMemcpy(&nextSize, d_nextSize, sizeof(int), cudaMemcpyDeviceToHost));
        level++;
        size = nextSize;
        nextSize = 0;
        swap(d_queue, d_nextQueue); // next frontier becomes the current one
    }
    cudaCheck(cudaEventRecord(stop, 0));
    cudaCheck(cudaEventSynchronize(stop));
    float elapsed;
    cudaCheck(cudaEventElapsedTime(&elapsed, start, stop));
    cudaCheck(cudaMemcpy(distance, d_distance, size_of_rowptr, cudaMemcpyDeviceToHost));
    // The original leaked srcArr, the heap nextSize counter and both events.
    delete[] srcArr;
    cudaCheck(cudaFree(d_row_ptr));
    cudaCheck(cudaFree(d_col_ind));
    cudaCheck(cudaFree(d_distance));
    cudaCheck(cudaFree(d_queue));
    cudaCheck(cudaFree(d_nextQueue));
    cudaCheck(cudaFree(d_nextSize));
    cudaCheck(cudaEventDestroy(start));
    cudaCheck(cudaEventDestroy(stop));
    printf("GPU QBFS time: %f s\n", elapsed / 1000);
}
// Top-down vertex-parallel BFS step for the hybrid driver. Besides labelling
// unvisited neighbours it accumulates -distance[adj] into *mf (unvisited
// vertices carry negative distance values), which the host uses as the
// frontier-size heuristic for direction switching.
__global__ void cudatdwbfs(int *distance, unsigned int *row_ptr, int *col_ind, int nov, int level, int *mf) {
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if(tid < nov && distance[tid] == level) {
for(int e = row_ptr[tid]; e < row_ptr[tid + 1]; e++) {
int adj = col_ind[e];
// NOTE(review): check-then-set on distance[adj] is racy; a vertex
// reachable from two frontier vertices in the same level can be
// counted twice in *mf.
if(distance[adj] < 0) {
atomicAdd(mf, -distance[adj]);
distance[adj] = level + 1;
}
}
}
}
// Top-down frontier-queue BFS step for the (currently commented-out)
// queue-based hybrid loop: expands queued vertices, reserves nextQueue slots
// atomically, and accumulates -distance[v] into the *mf heuristic.
__global__ void cudatdqbfs(int *distance, unsigned int *row_ptr, int *col_ind, int *queue, int *nextQueue, int size, int *nextSize, int level, int *mf) {
int index, u, v, tid = threadIdx.x + blockDim.x * blockIdx.x;
if(tid < size) {
u = queue[tid];
for(int e = row_ptr[u]; e < row_ptr[u + 1]; e++) {
v = col_ind[e];
// NOTE(review): non-atomic check-then-set; duplicate enqueues and
// double-counted *mf contributions are possible within one level.
if (distance[v] < 0) {
index = atomicAdd(nextSize, 1);
atomicAdd(mf, -distance[v]);
distance[v] = level + 1;
nextQueue[index] = v;
}
}
}
}
// Bottom-up BFS step: each *unvisited* vertex scans its in-neighbours (the
// inverse CSR arrays) and adopts level + 1 as soon as one parent is on the
// current frontier; `break` stops at the first parent found. Contributes
// -distance[tid] to the *mf heuristic when the vertex is claimed.
__global__ void cudabuwbfs(int *distance, unsigned int *row_ptr_inv, int *col_ind_inv, int nov, int level, int *mf) {
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if(tid < nov && distance[tid] < 0) {
for(int e = row_ptr_inv[tid]; e < row_ptr_inv[tid + 1]; e++) {
int adj = col_ind_inv[e];
if(distance[adj] == level) {
atomicAdd(mf, -distance[tid]);
distance[tid] = level + 1;
break;
}
}
}
}
// Bottom-up BFS step for the (currently commented-out) queue-based hybrid
// loop: identical to cudabuwbfs but additionally counts newly claimed
// vertices in *nextSize so the host can size the next frontier.
__global__ void cudabuqbfs(int *distance, unsigned int *row_ptr_inv, int *col_ind_inv, int nov, int level, int *nextSize, int *mf) {
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if(tid < nov && distance[tid] < 0) {
for(int e = row_ptr_inv[tid]; e < row_ptr_inv[tid + 1]; e++) {
int adj = col_ind_inv[e];
if(distance[adj] == level) {
atomicAdd(mf, -distance[tid]);
atomicAdd(nextSize, 1);
distance[tid] = level + 1;
break;
}
}
}
}
// Direction-optimizing (hybrid) BFS driver: each level runs either a
// bottom-up step over the inverse graph (large frontier, mf > mu / alpha)
// or a top-down step (small frontier). `distance` must hold 0 for the
// source, 1 for its neighbours and negative values for unvisited vertices.
void hybrid(unsigned int *row_ptr, unsigned int *row_ptr_inv, int *col_ind, int *col_ind_inv, int *distance, int nov, int source, double alpha) { // int init_mf?
int size_of_rowptr = (nov + 1) * sizeof(int);
int size_of_colind = row_ptr[nov] * sizeof(int);
// NOTE(review): `improvement` is never used and never deleted (leak).
int *improvement = new int;
unsigned int *d_row_ptr, *d_row_ptr_inv;
int *d_col_ind, *d_col_ind_inv, *d_distance, *d_mf;
cudaCheck(cudaMalloc((void**) &d_row_ptr, size_of_rowptr));
cudaCheck(cudaMalloc((void**) &d_row_ptr_inv, size_of_rowptr));
cudaCheck(cudaMalloc((void**) &d_col_ind, size_of_colind));
cudaCheck(cudaMalloc((void**) &d_col_ind_inv, size_of_colind));
cudaCheck(cudaMalloc((void**) &d_distance, size_of_rowptr));
cudaCheck(cudaMalloc((void**) &d_mf, sizeof(int)));
cudaCheck(cudaMemcpy(d_distance, distance, size_of_rowptr, cudaMemcpyHostToDevice));
cudaCheck(cudaMemcpy(d_row_ptr, row_ptr, size_of_rowptr, cudaMemcpyHostToDevice));
cudaCheck(cudaMemcpy(d_row_ptr_inv, row_ptr_inv, size_of_rowptr, cudaMemcpyHostToDevice));
cudaCheck(cudaMemcpy(d_col_ind, col_ind, size_of_colind, cudaMemcpyHostToDevice));
cudaCheck(cudaMemcpy(d_col_ind_inv, col_ind_inv, size_of_colind, cudaMemcpyHostToDevice));
// Seed the frontier queue with the source's pre-labelled neighbours; the
// queue buffers are only consumed by the commented-out queue-based loop
// below. NOTE(review): srcArr is never deleted (leak).
int srcNeigh = row_ptr[source + 1] - row_ptr[source];
int *srcArr = new int[srcNeigh];
int index = 0;
for (int i = row_ptr[source]; i < row_ptr[source + 1]; i++) {
if (distance[col_ind[i]] == 1) {
srcArr[index++] = col_ind[i];
}
}
int *d_queue, *d_nextQueue, *d_nextSize;
cudaCheck(cudaMalloc((void**) &d_queue, size_of_rowptr));
cudaCheck(cudaMalloc((void**) &d_nextQueue, size_of_rowptr));
cudaCheck(cudaMalloc((void**) &d_nextSize, sizeof(int)));
//cudaCheck(cudaMemcpy(d_queue, &source, sizeof(int), cudaMemcpyHostToDevice));
cudaCheck(cudaMemcpy(d_queue, srcArr, srcNeigh * sizeof(int), cudaMemcpyHostToDevice));
int mf = row_ptr[source + 1] - row_ptr[source]; // number of traversed edges
int mu = row_ptr[nov]; // total number of edges
int prev_mf = -1;
int level = 1;
int size = srcNeigh;
// NOTE(review): nextSize is never deleted (leak) and only used by the
// commented-out loop below.
int *nextSize = new int;
*nextSize = 0;
cudaEvent_t start;
cudaEvent_t stop;
cudaCheck(cudaEventCreate(&start));
cudaCheck(cudaEventCreate(&stop));
cudaCheck(cudaEventRecord(start, 0));
// Iterate until a whole level discovers no new work (mf stops growing).
while (mf != prev_mf) {
prev_mf = mf;
if (mf > mu / alpha) {
cudaCheck(cudaMemcpy(d_mf, &mf, sizeof(int), cudaMemcpyHostToDevice));
cudabuwbfs<<<(nov + 1023) / 1024, 1024>>>(d_distance, d_row_ptr_inv, d_col_ind_inv, nov, level, d_mf);
cudaCheck(cudaMemcpy(&mf, d_mf, sizeof(int), cudaMemcpyDeviceToHost));
}
else {
cudaCheck(cudaMemcpy(d_mf, &mf, sizeof(int), cudaMemcpyHostToDevice));
cudatdwbfs<<<(nov + 1023) / 1024, 1024>>>(d_distance, d_row_ptr, d_col_ind, nov, level, d_mf);
cudaCheck(cudaMemcpy(&mf, d_mf, sizeof(int), cudaMemcpyDeviceToHost));
}
level++;
}
/*while (mf != prev_mf) {
prev_mf = mf;
if (mf > mu / alpha) {
cudaCheck(cudaMemcpy(d_mf, &mf, sizeof(int), cudaMemcpyHostToDevice));
cudaCheck(cudaMemcpy(d_nextSize, nextSize, sizeof(int), cudaMemcpyHostToDevice));
cudabuqbfs<<<(nov + 1023) / 1024, 1024>>>(d_distance, d_row_ptr_inv, d_col_ind_inv, nov, level, d_nextSize, d_mf);
cudaCheck(cudaMemcpy(&mf, d_mf, sizeof(int), cudaMemcpyDeviceToHost));
cudaCheck(cudaMemcpy(nextSize, d_nextSize, sizeof(int), cudaMemcpyDeviceToHost));
}
else {
cudaCheck(cudaMemcpy(d_mf, &mf, sizeof(int), cudaMemcpyHostToDevice));
cudaCheck(cudaMemcpy(d_nextSize, nextSize, sizeof(int), cudaMemcpyHostToDevice));
cudatdqbfs<<<(size + 1023) / 1024, 1024>>>(d_distance, d_row_ptr, d_col_ind, d_queue, d_nextQueue, size, d_nextSize, level, d_mf);
cudaCheck(cudaMemcpy(&mf, d_mf, sizeof(int), cudaMemcpyDeviceToHost));
cudaCheck(cudaMemcpy(nextSize, d_nextSize, sizeof(int), cudaMemcpyDeviceToHost));
}
level++;
size = *nextSize;
*nextSize = 0;
swap(d_queue, d_nextQueue);
}*/
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsed;
cudaEventElapsedTime(&elapsed, start, stop);
cudaCheck(cudaMemcpy(distance, d_distance, size_of_rowptr, cudaMemcpyDeviceToHost));
// NOTE(review): start/stop events are never destroyed (leak).
cudaCheck(cudaFree(d_row_ptr));
cudaCheck(cudaFree(d_row_ptr_inv));
cudaCheck(cudaFree(d_col_ind));
cudaCheck(cudaFree(d_col_ind_inv));
cudaCheck(cudaFree(d_distance));
cudaCheck(cudaFree(d_mf));
cudaCheck(cudaFree(d_queue));
cudaCheck(cudaFree(d_nextQueue));
cudaCheck(cudaFree(d_nextSize));
printf("GPU Hybrid time: %f s\n", elapsed / 1000);
} |
93ca640f47a25bda4d75b915ed8962d64d3664c7.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <math.h>
#include <iostream>
#include <fstream>
using namespace std;
#include "./inc/domain.h"
#include "./inc/ray.h"
#include "arc.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <cuda_gl_interop.h>
#include "./arc_kernels.cu"
// GLOBAL VARIABLES FOR DISPLAY
int numThreads1, numThreads2;
int elements;
// Error handling macro
/*#define CUDA_CHECK(call) \
if((call) != hipSuccess) { \
hipError_t err = hipGetLastError(); \
cerr << "CUDA error calling \""#call"\", code is " << err << endl; \
exit(-1);}*/
// Wrap every HIP runtime call: aborts (by default) with file/line context on
// failure. The do { } while (0) wrapper keeps the expansion a single
// statement, so `if (x) CUDA_CHECK(y); else ...` parses correctly -- the
// original `{ ... }` form broke unbraced if/else.
#define CUDA_CHECK(ans) do { gpuAssert((ans), __FILE__, __LINE__); } while (0)
// Report a failed HIP call; exits with the error code unless abort == false.
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// Perform calculations to produce Flux arrays
// DensArray = Array of densities in cm^-3
// x_NArray = Array of neutral fractions
// particles = Array of source objects
// FluxArray = Array of total ionizations, per baryon per second /1Mpc
// EArray = Array of energies per baryon
// numParts = Number of source objects
// L, a = Length of the side of the box and scale factor
// Ray-tracing radiative-transfer driver for one domain. All *_dev parameters
// are device pointers; RayBuf/PartInfo/ndBuf are host-side buffers used to
// hand rays between domains. dt[0] returns the kernel-phase wall time (s).
// PartInfo[3] selects the mode: 1 = continue rays from the inter-domain
// buffer, 0 = start fresh rays from the next batch of sources.
void rad( float* DensArray_dev, float* x_NArray_dev, const source* source_dev,
float* FluxArray_dev, float* dEArray_dev, Ray* RayBuf,
int* PartInfo, int* ndBuf, float L, float a,
float *dt, float* nfSback, Domain domain)
{
// NOTE(review): dim is never used in this function.
int dim = domain.get_dim();
// JET
// Seed host RNG used for the jet pixel draw below; PartInfo[0] is the
// source count (see the source_host allocation further down).
srand(PartInfo[0]);
// JET
/*cout << "Ray class test area! Stand clear!" << endl;
Ray *ray;
int temp = 2;
ray = new Ray[temp];
int X, Y;
(ray+1)->set_pix(123456,8);
(ray+1)->get_pix(&X, &Y);
cout << "size = " << X << "\tsize = " << Y << endl;
cout << "size = " << sizeof(ray) << "\tsize = " << sizeof(ray[0]) << endl;
(ray+1)->tau[0]+=1;
cout << "tau = " << (ray+1)->tau[0] << endl;
cout << "Ray class test area! Stand clear!" << endl;*/
/*int nDevices;
hipGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++)
{
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
}*/
// Timing events: A->B = setup/copies, B->C = trace/split loop, C->D = copy-back.
hipEvent_t timeA, timeB, timeC, timeD;
CUDA_CHECK( hipEventCreate(&timeA) );
CUDA_CHECK( hipEventCreate(&timeB) );
CUDA_CHECK( hipEventCreate(&timeC) );
CUDA_CHECK( hipEventCreate(&timeD) );
//size_t free0, free1, free2, total;
//CUDA_CHECK( hipMemGetInfo(&free0, &total) );
//CUDA_CHECK( hipMemGetInfo(&free1, &total) );
// Allocate memory and copy arrays to the device
CUDA_CHECK( hipDeviceSynchronize() );
CUDA_CHECK( hipEventRecord( timeA, 0 ) );
size_t sizeR = sizeof(RayBuf[0]);
// Per-neighbour (8 directions) counts and ray storage for outgoing rays.
int* NumBuf_dev;
CUDA_CHECK( hipMalloc((void **)&NumBuf_dev, 8*sizeof(int)) );
Ray* RayBuf_dev;
CUDA_CHECK( hipMalloc((void **)&RayBuf_dev, 8*NUM_BUF*sizeR) );
// Frequency-binned background photon counters.
float size3 = FREQ_BIN_NUM*sizeof(float);
float* nfSback_dev;
CUDA_CHECK( hipMalloc((void **)&nfSback_dev, size3));
CUDA_CHECK( hipMemcpy(nfSback_dev, nfSback, size3, hipMemcpyHostToDevice) );
CUDA_CHECK( hipEventRecord( timeB, 0 ) );
// Ray data on the GPU host
// NOTE(review): if PartInfo[3] is neither 0 nor 1, nRays0 and RayDat are
// used uninitialized below -- confirm callers only pass 0 or 1.
int nRays0, nRays;
Ray *RayDat;
// Pointers for the ray data
Ray *RayDat0_dev;
Ray *RayDat_dev;
// Tracing rays from the buffer
if(PartInfo[3] == 1)
{
// Compact the eight per-neighbour host buffers (spaced NUM_BUF apart)
// into one contiguous array before uploading.
nRays0 = 0;
for(int i=0; i<8; i++)
{
nRays0 += ndBuf[i];
}
nRays = nRays0;
RayDat = new Ray[nRays];
int d_dat = 0;
int d_buf = 0;
for(int i=0; i<8; i++)
{
memcpy( RayDat + d_dat,
RayBuf + d_buf,
ndBuf[i]*sizeR);
// dev rays are listed together
d_dat += ndBuf[i];
// host rays are spaced by NUM_BUF
d_buf += NUM_BUF;
}
CUDA_CHECK( hipMalloc((void **)&RayDat0_dev, nRays0*sizeR) );
CUDA_CHECK( hipMemcpy( RayDat0_dev,
RayDat,
nRays0*sizeR,
hipMemcpyHostToDevice) );
/*int m = 0;
for(int j=0; j<3; j++)
{
for(int i=0; i<ndBuf[j]; i++)
{
int n = j*NUM_BUF + i;
printf("%d\t%d\t%f\n", domain.get_id(), RayDat[m].get_dom(), RayDat[m].R);
m++;
}
}
return;*/
//printf("Buf Domain %d has %d rays.\n", domain.get_id(), nRays);
}
// Reset buffer counts
memset(ndBuf, 0, 8*sizeof(int));
CUDA_CHECK( hipMemcpy( NumBuf_dev, ndBuf,
8*sizeof(int), hipMemcpyHostToDevice) );
if(PartInfo[3] == 0) // Starting rays:
{
// Produce the initial ray arrays:
// HEALPix-style pixelisation: 12 * Nside^2 rays per source at order 6.
int ord = 6;
int Nside = pow(2, ord);
int nPix = 12*Nside*Nside;
int partNumAdd = MIN( PartInfo[1], 1);
/*int partNumAdd;
if(10 > partNumAdd)
partNumAdd = 10;
else
partNumAdd = PartInfo[1];*/
nRays0 = partNumAdd*nPix;
nRays = nRays0;
RayDat = new Ray[nRays];
// After initializing a set of rays, we advance along the particle array
//printf("A %d\t%d\t%d\t%d\n", domain.get_id(), PartInfo[1], PartInfo[2], partNumAdd);
// Sources are located on the device. We need them on the host to initialize.
source * source_host = new source[PartInfo[0]];
CUDA_CHECK( hipMemcpy( source_host, source_dev, PartInfo[0]*sizeof(source), hipMemcpyDeviceToHost) );
int index = 0;
for(int i=0; i<partNumAdd; i++)
{
// jetOn == 0: emit the full sphere of nPix rays; otherwise emit a
// randomly chosen contiguous block of Nside^2 pixels ("jet" mode).
int jetOn = 0;
int jetOnNum = Nside*Nside;
int jetOnPix = rand() % (nPix/jetOnNum);
if(jetOn == 0)
{
for(int j=0; j<nPix; j++)
{
//int index = i*nPix + j;
RayDat[index].set_dom(domain.get_id());
RayDat[index].set_part(PartInfo[2]);
RayDat[index].set_pix(j, ord);
float position[3], direction[3]={0};
position[0] = source_host[PartInfo[2]].x;
position[1] = source_host[PartInfo[2]].y;
position[2] = source_host[PartInfo[2]].z;
RayDat[index].set_position(position, 0.0, direction);
index++;
}
}
else
{
for(int j=0; j<jetOnNum; j++)
{
int pix = jetOnPix*jetOnNum + j;
RayDat[index].set_dom(domain.get_id());
RayDat[index].set_part(PartInfo[2]);
RayDat[index].set_pix(pix, ord);
float position[3], direction[3]={0};
position[0] = source_host[PartInfo[2]].x;
position[1] = source_host[PartInfo[2]].y;
position[2] = source_host[PartInfo[2]].z;
RayDat[index].set_position(position, 0.0, direction);
index++;
}
}
// Consume one source: fewer remaining, advance the cursor.
PartInfo[1] -= 1;
PartInfo[2] += 1;
}
delete[] source_host;
//printf("B %d\t%d\t%d\n", PartInfo[1], PartInfo[2], partNumAdd);
CUDA_CHECK( hipMalloc((void **)&RayDat0_dev, nRays0*sizeR) );
CUDA_CHECK( hipMemcpy( RayDat0_dev,
RayDat,
nRays0*sizeR,
hipMemcpyHostToDevice) );
}
delete[] RayDat;
RayDat = 0;
// Number of active rays:
int *nRays_dev;
CUDA_CHECK( hipMalloc((void **)&nRays_dev, sizeof(int)) );
int maxloop = 0;
// Kernel loop
// Alternate trace and split passes until no ray survives. Each pass the
// surviving rays are split 4-for-1 into a freshly allocated array, and
// rays leaving the domain are parked in RayBuf_dev/NumBuf_dev.
for(;nRays0>0;)
{
//printf("Domain: %d\tnRays0 = %d\n", domain.get_id(), nRays0);
// Set CUDA kernel variables
dim3 threadsPB(16, 16, 1);
// int gridx = 65535;
// int gridy = (nInit/256)/65535 + 1;
int grid = (int) sqrt(nRays0/256) + 1;
dim3 blocksPG(grid, grid, 1);
CUDA_CHECK( hipMemcpy(nRays_dev, &nRays0, sizeof(int), hipMemcpyHostToDevice) );
//CUDA_CHECK( hipMemGetInfo(&free2, &total) );
//if(free2 < free1)
// free1 = free2;
// Execute kernel
CUDA_CHECK( hipGetLastError() );
hipLaunchKernelGGL(( rayTraceKernel), dim3(blocksPG),dim3(threadsPB), 0, 0, DensArray_dev, x_NArray_dev,
source_dev, FluxArray_dev, dEArray_dev,
RayDat0_dev, nRays_dev, nRays0,
L, a, nfSback_dev, domain);
CUDA_CHECK( hipPeekAtLastError() );
CUDA_CHECK( hipDeviceSynchronize() );
// Get the number of continuing arrays
CUDA_CHECK( hipMemcpy(&nRays, nRays_dev, sizeof(int), hipMemcpyDeviceToHost) );
CUDA_CHECK( hipMemset(nRays_dev, 0, sizeof(int)) );
// printf("-> %d\t%f\n", nSplit, temp);
// Create the new RayDat array
//printf("Node %d nRays0 = %d nRays = %d\n", domain.get_id(), nRays0, nRays);
// Each surviving ray may split into up to 4 children.
nRays *= 4;
CUDA_CHECK( hipMalloc((void **)&RayDat_dev, nRays*sizeR) );
// Split rays into new array
CUDA_CHECK( hipDeviceSynchronize() );
CUDA_CHECK( hipPeekAtLastError() );
//printf("%d\t%d\t%d\t%d\n", domain.get_id(), ndBuf[0], ndBuf[1], ndBuf[2]);
hipLaunchKernelGGL(( raySplitKernel), dim3(blocksPG),dim3(threadsPB), 0, 0, RayDat0_dev, RayDat_dev,
nRays_dev, nRays0,
RayBuf_dev, NumBuf_dev,
source_dev, domain);
CUDA_CHECK( hipPeekAtLastError() );
CUDA_CHECK( hipDeviceSynchronize() );
// Get the number of continuing arrays (post-split)
CUDA_CHECK( hipMemcpy(&nRays, nRays_dev, sizeof(int), hipMemcpyDeviceToHost) );
CUDA_CHECK( hipMemset(nRays_dev, 0, sizeof(int)) );
// Free old array
CUDA_CHECK( hipFree(RayDat0_dev) );
// Handle pointer referencing
RayDat0_dev = RayDat_dev;
RayDat_dev = 0;
// Reset counter
nRays0 = nRays;
// NOTE(review): maxloop counts iterations but is never read afterwards.
maxloop++;
//printf("%d\t%d\t%d\t%d\n", domain.get_id(), PartInfo[0], PartInfo[1], PartInfo[2]);
}
// Free unecessary array(s)
CUDA_CHECK( hipDeviceSynchronize() );
CUDA_CHECK( hipFree(nRays_dev) );
CUDA_CHECK( hipFree(RayDat0_dev) );
// CUDA_CHECK( hipFree(RayDat_dev) ); //CHECKXXX
CUDA_CHECK( hipEventRecord( timeC, 0 ) );
/*float f0 = (uint)free0/1048576.0;
float f1 = (uint)free1/1048576.0;
float t1 = (uint)total/1048576.0;
printf("Memory:\tStart: %f\tMax: %f\tTotal:%f\n", f0, f1, t1);*/
// Return outgoing-ray counts and rays to the host-side exchange buffers.
CUDA_CHECK( hipMemcpy( ndBuf, NumBuf_dev,
8*sizeof(int), hipMemcpyDeviceToHost) );
CUDA_CHECK( hipMemcpy( RayBuf, RayBuf_dev,
8*NUM_BUF*sizeR, hipMemcpyDeviceToHost) );
// Unified Memory
/*memcpy(FluxArray, FluxArray_dev, size0*SPECIES);
memcpy(dEArray, dEArray_dev, size0);*/
CUDA_CHECK( hipMemcpy(nfSback, nfSback_dev, size3, hipMemcpyDeviceToHost) );
CUDA_CHECK( hipFree(NumBuf_dev) );
CUDA_CHECK( hipFree(RayBuf_dev) );
CUDA_CHECK( hipFree(nfSback_dev) );
CUDA_CHECK( hipDeviceSynchronize() );
CUDA_CHECK( hipEventRecord( timeD, 0 ) );
float time0, time1, time2;
CUDA_CHECK( hipDeviceSynchronize() );
CUDA_CHECK( hipEventElapsedTime( &time0, timeA, timeB ) );
CUDA_CHECK( hipDeviceSynchronize() );
CUDA_CHECK( hipEventElapsedTime( &time1, timeB, timeC ) );
CUDA_CHECK( hipDeviceSynchronize() );
CUDA_CHECK( hipEventElapsedTime( &time2, timeC, timeD ) );
CUDA_CHECK( hipEventDestroy(timeA) );
CUDA_CHECK( hipEventDestroy(timeB) );
CUDA_CHECK( hipEventDestroy(timeC) );
CUDA_CHECK( hipEventDestroy(timeD) );
//printf("Ray Trace (node %d):\t%f\t%f\t%f\n", domain.get_id(), time0/1000, time1/1000, time2/1000);
// Report the trace/split phase duration in seconds.
dt[0] = time1/1000;
}
// Integrate neutral fractions differential equations
// DensArray = Array of densities in cm^-3
// x_NArray = Array of neutral fractions
// FluxArray = Array of total ionizations, per baryon per second /1Mpc
// EArray = Array of energies per baryon
// background = Ionizing background for each channel
// dt = Time step
// fErr = Error number
// a = Scale factor
void ion( float* DensArray_dev, float* x_NArray_dev, float* FluxArray_dev,
float* EArray, float* dEArray_dev, const float* background, float* dt, float* fErr, float a, Domain domain)
{
*fErr = 0.0;
//long elements = DIMX*DIMY*DIMZ;
int dim = domain.get_dim();
long elements = dim*dim*dim;
dim3 numThreads1(16,16,1);
dim3 numBlocks1;
/*numBlocks1.x = DIMX/16;
numBlocks1.y = DIMY/16;
numBlocks1.z = DIMZ;*/
numBlocks1.x = dim/16;
numBlocks1.y = dim/16;
numBlocks1.z = dim;
size_t size2 = elements*sizeof(float);
hipExtent size = make_hipExtent(DIMX, DIMY, DIMZ);
hipEvent_t timeA, timeB, timeC, timeD;
CUDA_CHECK( hipEventCreate(&timeA) );
CUDA_CHECK( hipEventCreate(&timeB) );
CUDA_CHECK( hipEventCreate(&timeC) );
CUDA_CHECK( hipEventCreate(&timeD) );
CUDA_CHECK( hipEventRecord( timeA, 0 ) );
// 2. Copy data to GPU on each node
float* EArray_dev;
float* back_dev;
CUDA_CHECK( hipMalloc((void **)&EArray_dev, size2) );
CUDA_CHECK( hipMalloc((void **)&back_dev, 2*SPECIES*sizeof(float)) );
CUDA_CHECK( hipMemcpy(EArray_dev, EArray, size2, hipMemcpyHostToDevice) );
CUDA_CHECK( hipMemcpy(back_dev, background, 2*SPECIES*sizeof(float), hipMemcpyHostToDevice) );
float* err_dev;
CUDA_CHECK( hipMalloc((void **)&err_dev, sizeof(float)) );
CUDA_CHECK( hipMemcpy(err_dev, fErr, sizeof(float), hipMemcpyHostToDevice) );
CUDA_CHECK( hipEventRecord( timeB, 0 ) );
CUDA_CHECK( hipGetLastError() );
hipLaunchKernelGGL(( ionization), dim3(numBlocks1), dim3(numThreads1), 0, 0, *dt, err_dev, DensArray_dev, x_NArray_dev, FluxArray_dev, EArray_dev, dEArray_dev, back_dev, dim, a);
CUDA_CHECK( hipPeekAtLastError() );
CUDA_CHECK( hipDeviceSynchronize() );
CUDA_CHECK( hipEventRecord( timeC, 0 ) );
CUDA_CHECK( hipDeviceSynchronize() );
CUDA_CHECK( hipMemcpy(fErr, err_dev, sizeof(float), hipMemcpyDeviceToHost) );
CUDA_CHECK( hipMemcpy(EArray, EArray_dev, size2, hipMemcpyDeviceToHost) );
CUDA_CHECK( hipEventRecord( timeD, 0 ) );
CUDA_CHECK( hipFree(err_dev) );
CUDA_CHECK( hipFree(EArray_dev) );
CUDA_CHECK( hipFree(back_dev) );
float time0, time1, time2;
CUDA_CHECK( hipEventElapsedTime( &time0, timeA, timeB ) );
CUDA_CHECK( hipEventElapsedTime( &time1, timeB, timeC ) );
CUDA_CHECK( hipEventElapsedTime( &time2, timeC, timeD ) );
CUDA_CHECK( hipEventDestroy(timeA) );
CUDA_CHECK( hipEventDestroy(timeB) );
CUDA_CHECK( hipEventDestroy(timeC) );
CUDA_CHECK( hipEventDestroy(timeD) );
//printf("Ionization (node %d):\t%f\t%f\t%f\n", domain.get_id(), time0/1000, time1/1000, time2/1000);
}
void dt_H( float* rate, float* DensArray_dev, float* x_NArray_dev,
float* FluxArray_dev, float* EArray,
const float* background, const float L, const float a, Domain domain)
{
//long elements = DIMX*DIMY*DIMZ;
int dim = domain.get_dim();
long elements = dim*dim*dim;
dim3 numThreads1(16,16,1);
dim3 numBlocks1;
/*numBlocks1.x = DIMX/16;
numBlocks1.y = DIMY/16;
numBlocks1.z = DIMZ;*/
numBlocks1.x = dim/16;
numBlocks1.y = dim/16;
numBlocks1.z = dim;
size_t size2 = elements*sizeof(float);
hipExtent size = make_hipExtent(DIMX, DIMY, DIMZ);
float* EArray_dev;
float* back_dev;
// FILTER
float* dtFilter = new float[elements];
float* dtFilter_dev;
CUDA_CHECK( hipMalloc((void **)&dtFilter_dev, size2) );
CUDA_CHECK( hipMemcpy(dtFilter_dev, dtFilter, size2, hipMemcpyHostToDevice) );
CUDA_CHECK( hipMalloc((void **)&EArray_dev, size2) );
CUDA_CHECK( hipMalloc((void **)&back_dev, 2*SPECIES*sizeof(float)) );
CUDA_CHECK( hipMemcpy(EArray_dev, EArray, size2, hipMemcpyHostToDevice) );
CUDA_CHECK( hipMemcpy(back_dev, background, 2*SPECIES*sizeof(float), hipMemcpyHostToDevice) );
//rate[0] = 0.0;
float* rate_dev;
CUDA_CHECK( hipMalloc((void **)&rate_dev, 2*sizeof(float)) );
CUDA_CHECK( hipMemcpy(rate_dev, rate, 2*sizeof(float), hipMemcpyHostToDevice) );
CUDA_CHECK( hipGetLastError() );
hipLaunchKernelGGL(( timestep), dim3(numBlocks1), dim3(numThreads1), 0, 0, rate_dev, dtFilter_dev, DensArray_dev, x_NArray_dev,
FluxArray_dev, EArray_dev, back_dev, dim, L, a);
CUDA_CHECK( hipPeekAtLastError() );
CUDA_CHECK( hipDeviceSynchronize() );
CUDA_CHECK( hipDeviceSynchronize() );
CUDA_CHECK( hipMemcpy(rate, rate_dev, 2*sizeof(float), hipMemcpyDeviceToHost) );
CUDA_CHECK( hipFree(EArray_dev) );
CUDA_CHECK( hipFree(back_dev) );
CUDA_CHECK( hipFree(rate_dev) );
// FILTER
CUDA_CHECK( hipMemcpy(dtFilter, dtFilter_dev, size2, hipMemcpyDeviceToHost) );
/*float max = 1.e-20;
float buffer[DIMX];
for(int i=0; i<DIMX; i++)
{
for(int j=0; j<DIMX; j++)
{
for(int k=1; k<DIMX-1; k++)
{
//cout << i << "\t" << j << "\t" << k << endl;
int ind = i + DIMX*j + DIMX*DIMY*k;
int dn = DIMX*DIMY;
buffer[k] = 0.5*dtFilter[ind];
buffer[k] += 0.25*dtFilter[ind+dn];
buffer[k] += 0.25*dtFilter[ind-dn];
}
for(int k=1; k<DIMX-1; k++)
{
int ind = i + DIMX*j + DIMX*DIMY*k;
dtFilter[ind] = buffer[k];
}
}
}
for(int i=0; i<DIMX; i++)
{
for(int k=0; k<DIMX; k++)
{
for(int j=1; j<DIMX-1; j++)
{
int ind = i + DIMX*j + DIMX*DIMY*k;
int dn = DIMX;
buffer[j] = 0.5*dtFilter[ind];
buffer[j] += 0.25*dtFilter[ind+dn];
buffer[j] += 0.25*dtFilter[ind-dn];
}
for(int j=1; j<DIMX-1; j++)
{
int ind = i + DIMX*j + DIMX*DIMY*k;
dtFilter[ind] = buffer[j];
}
}
}
for(int j=1; j<DIMX-1; j++)
{
for(int k=1; k<DIMX-1; k++)
{
for(int i=1; i<DIMX-1; i++)
{
int ind = i + DIMX*j + DIMX*DIMY*k;
int dn = 1;
buffer[i] = 0.5*dtFilter[ind];
buffer[i] += 0.25*dtFilter[ind+dn];
buffer[i] += 0.25*dtFilter[ind-dn];
}
for(int i=1; i<DIMX-1; i++)
{
int ind = i + DIMX*j + DIMX*DIMY*k;
dtFilter[ind] = buffer[i];
max = MAX(max, dtFilter[ind]);
}
}
}
cout << "Unfiltered = " << 0.05/3.154e13/rate[0] << endl;
cout << "Filtered = " << 0.05/3.154e13/max << endl;
rate[0] = max;*/
CUDA_CHECK( hipFree(dtFilter_dev) );
delete[] dtFilter;
}
| 93ca640f47a25bda4d75b915ed8962d64d3664c7.cu | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <math.h>
#include <iostream>
#include <fstream>
using namespace std;
#include "./inc/domain.h"
#include "./inc/ray.h"
#include "arc.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
#include "./arc_kernels.cu"
// GLOBAL VARIABLES FOR DISPLAY
int numThreads1, numThreads2;
int elements;
// Error handling macro
/*#define CUDA_CHECK(call) \
if((call) != cudaSuccess) { \
cudaError_t err = cudaGetLastError(); \
cerr << "CUDA error calling \""#call"\", code is " << err << endl; \
exit(-1);}*/
#define CUDA_CHECK(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// Perform calculations to produce Flux arrays
// DensArray = Array of densities in cm^-3
// x_NArray = Array of neutral fractions
// particles = Array of source objects
// FluxArray = Array of total ionizations, per baryon per second /1Mpc
// EArray = Array of energies per baryon
// numParts = Number of source objects
// L, a = Length of the side of the box and scale factor
void rad( float* DensArray_dev, float* x_NArray_dev, const source* source_dev,
float* FluxArray_dev, float* dEArray_dev, Ray* RayBuf,
int* PartInfo, int* ndBuf, float L, float a,
float *dt, float* nfSback, Domain domain)
{
int dim = domain.get_dim();
// JET
srand(PartInfo[0]);
// JET
/*cout << "Ray class test area! Stand clear!" << endl;
Ray *ray;
int temp = 2;
ray = new Ray[temp];
int X, Y;
(ray+1)->set_pix(123456,8);
(ray+1)->get_pix(&X, &Y);
cout << "size = " << X << "\tsize = " << Y << endl;
cout << "size = " << sizeof(ray) << "\tsize = " << sizeof(ray[0]) << endl;
(ray+1)->tau[0]+=1;
cout << "tau = " << (ray+1)->tau[0] << endl;
cout << "Ray class test area! Stand clear!" << endl;*/
/*int nDevices;
cudaGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++)
{
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
}*/
cudaEvent_t timeA, timeB, timeC, timeD;
CUDA_CHECK( cudaEventCreate(&timeA) );
CUDA_CHECK( cudaEventCreate(&timeB) );
CUDA_CHECK( cudaEventCreate(&timeC) );
CUDA_CHECK( cudaEventCreate(&timeD) );
//size_t free0, free1, free2, total;
//CUDA_CHECK( cudaMemGetInfo(&free0, &total) );
//CUDA_CHECK( cudaMemGetInfo(&free1, &total) );
// Allocate memory and copy arrays to the device
CUDA_CHECK( cudaThreadSynchronize() );
CUDA_CHECK( cudaEventRecord( timeA, 0 ) );
size_t sizeR = sizeof(RayBuf[0]);
int* NumBuf_dev;
CUDA_CHECK( cudaMalloc((void **)&NumBuf_dev, 8*sizeof(int)) );
Ray* RayBuf_dev;
CUDA_CHECK( cudaMalloc((void **)&RayBuf_dev, 8*NUM_BUF*sizeR) );
float size3 = FREQ_BIN_NUM*sizeof(float);
float* nfSback_dev;
CUDA_CHECK( cudaMalloc((void **)&nfSback_dev, size3));
CUDA_CHECK( cudaMemcpy(nfSback_dev, nfSback, size3, cudaMemcpyHostToDevice) );
CUDA_CHECK( cudaEventRecord( timeB, 0 ) );
// Ray data on the GPU host
int nRays0, nRays;
Ray *RayDat;
// Pointers for the ray data
Ray *RayDat0_dev;
Ray *RayDat_dev;
// Tracing rays from the buffer
if(PartInfo[3] == 1)
{
nRays0 = 0;
for(int i=0; i<8; i++)
{
nRays0 += ndBuf[i];
}
nRays = nRays0;
RayDat = new Ray[nRays];
int d_dat = 0;
int d_buf = 0;
for(int i=0; i<8; i++)
{
memcpy( RayDat + d_dat,
RayBuf + d_buf,
ndBuf[i]*sizeR);
// dev rays are listed together
d_dat += ndBuf[i];
// host rays are spaced by NUM_BUF
d_buf += NUM_BUF;
}
CUDA_CHECK( cudaMalloc((void **)&RayDat0_dev, nRays0*sizeR) );
CUDA_CHECK( cudaMemcpy( RayDat0_dev,
RayDat,
nRays0*sizeR,
cudaMemcpyHostToDevice) );
/*int m = 0;
for(int j=0; j<3; j++)
{
for(int i=0; i<ndBuf[j]; i++)
{
int n = j*NUM_BUF + i;
printf("%d\t%d\t%f\n", domain.get_id(), RayDat[m].get_dom(), RayDat[m].R);
m++;
}
}
return;*/
//printf("Buf Domain %d has %d rays.\n", domain.get_id(), nRays);
}
// Reset buffer counts
memset(ndBuf, 0, 8*sizeof(int));
CUDA_CHECK( cudaMemcpy( NumBuf_dev, ndBuf,
8*sizeof(int), cudaMemcpyHostToDevice) );
if(PartInfo[3] == 0) // Starting rays:
{
// Produce the initial ray arrays:
int ord = 6;
int Nside = pow(2, ord);
int nPix = 12*Nside*Nside;
int partNumAdd = MIN( PartInfo[1], 1);
/*int partNumAdd;
if(10 > partNumAdd)
partNumAdd = 10;
else
partNumAdd = PartInfo[1];*/
nRays0 = partNumAdd*nPix;
nRays = nRays0;
RayDat = new Ray[nRays];
// After initializing a set of rays, we advance along the particle array
//printf("A %d\t%d\t%d\t%d\n", domain.get_id(), PartInfo[1], PartInfo[2], partNumAdd);
// Sources are located on the device. We need them on the host to initialize.
source * source_host = new source[PartInfo[0]];
CUDA_CHECK( cudaMemcpy( source_host, source_dev, PartInfo[0]*sizeof(source), cudaMemcpyDeviceToHost) );
int index = 0;
for(int i=0; i<partNumAdd; i++)
{
int jetOn = 0;
int jetOnNum = Nside*Nside;
int jetOnPix = rand() % (nPix/jetOnNum);
if(jetOn == 0)
{
for(int j=0; j<nPix; j++)
{
//int index = i*nPix + j;
RayDat[index].set_dom(domain.get_id());
RayDat[index].set_part(PartInfo[2]);
RayDat[index].set_pix(j, ord);
float position[3], direction[3]={0};
position[0] = source_host[PartInfo[2]].x;
position[1] = source_host[PartInfo[2]].y;
position[2] = source_host[PartInfo[2]].z;
RayDat[index].set_position(position, 0.0, direction);
index++;
}
}
else
{
for(int j=0; j<jetOnNum; j++)
{
int pix = jetOnPix*jetOnNum + j;
RayDat[index].set_dom(domain.get_id());
RayDat[index].set_part(PartInfo[2]);
RayDat[index].set_pix(pix, ord);
float position[3], direction[3]={0};
position[0] = source_host[PartInfo[2]].x;
position[1] = source_host[PartInfo[2]].y;
position[2] = source_host[PartInfo[2]].z;
RayDat[index].set_position(position, 0.0, direction);
index++;
}
}
PartInfo[1] -= 1;
PartInfo[2] += 1;
}
delete[] source_host;
//printf("B %d\t%d\t%d\n", PartInfo[1], PartInfo[2], partNumAdd);
CUDA_CHECK( cudaMalloc((void **)&RayDat0_dev, nRays0*sizeR) );
CUDA_CHECK( cudaMemcpy( RayDat0_dev,
RayDat,
nRays0*sizeR,
cudaMemcpyHostToDevice) );
}
delete[] RayDat;
RayDat = 0;
// Number of active rays:
int *nRays_dev;
CUDA_CHECK( cudaMalloc((void **)&nRays_dev, sizeof(int)) );
int maxloop = 0;
// Kernel loop
for(;nRays0>0;)
{
//printf("Domain: %d\tnRays0 = %d\n", domain.get_id(), nRays0);
// Set CUDA kernel variables
dim3 threadsPB(16, 16, 1);
// int gridx = 65535;
// int gridy = (nInit/256)/65535 + 1;
int grid = (int) sqrt(nRays0/256) + 1;
dim3 blocksPG(grid, grid, 1);
CUDA_CHECK( cudaMemcpy(nRays_dev, &nRays0, sizeof(int), cudaMemcpyHostToDevice) );
//CUDA_CHECK( cudaMemGetInfo(&free2, &total) );
//if(free2 < free1)
// free1 = free2;
// Execute kernel
CUDA_CHECK( cudaGetLastError() );
rayTraceKernel<<<blocksPG,threadsPB>>>( DensArray_dev, x_NArray_dev,
source_dev, FluxArray_dev, dEArray_dev,
RayDat0_dev, nRays_dev, nRays0,
L, a, nfSback_dev, domain);
CUDA_CHECK( cudaPeekAtLastError() );
CUDA_CHECK( cudaThreadSynchronize() );
// Get the number of continuing arrays
CUDA_CHECK( cudaMemcpy(&nRays, nRays_dev, sizeof(int), cudaMemcpyDeviceToHost) );
CUDA_CHECK( cudaMemset(nRays_dev, 0, sizeof(int)) );
// printf("-> %d\t%f\n", nSplit, temp);
// Create the new RayDat array
//printf("Node %d nRays0 = %d nRays = %d\n", domain.get_id(), nRays0, nRays);
nRays *= 4;
CUDA_CHECK( cudaMalloc((void **)&RayDat_dev, nRays*sizeR) );
// Split rays into new array
CUDA_CHECK( cudaThreadSynchronize() );
CUDA_CHECK( cudaPeekAtLastError() );
//printf("%d\t%d\t%d\t%d\n", domain.get_id(), ndBuf[0], ndBuf[1], ndBuf[2]);
raySplitKernel<<<blocksPG,threadsPB>>>( RayDat0_dev, RayDat_dev,
nRays_dev, nRays0,
RayBuf_dev, NumBuf_dev,
source_dev, domain);
CUDA_CHECK( cudaPeekAtLastError() );
CUDA_CHECK( cudaThreadSynchronize() );
// Get the number of continuing arrays (post-split)
CUDA_CHECK( cudaMemcpy(&nRays, nRays_dev, sizeof(int), cudaMemcpyDeviceToHost) );
CUDA_CHECK( cudaMemset(nRays_dev, 0, sizeof(int)) );
// Free old array
CUDA_CHECK( cudaFree(RayDat0_dev) );
// Handle pointer referencing
RayDat0_dev = RayDat_dev;
RayDat_dev = 0;
// Reset counter
nRays0 = nRays;
maxloop++;
//printf("%d\t%d\t%d\t%d\n", domain.get_id(), PartInfo[0], PartInfo[1], PartInfo[2]);
}
// Free unecessary array(s)
CUDA_CHECK( cudaThreadSynchronize() );
CUDA_CHECK( cudaFree(nRays_dev) );
CUDA_CHECK( cudaFree(RayDat0_dev) );
// CUDA_CHECK( cudaFree(RayDat_dev) ); //CHECKXXX
CUDA_CHECK( cudaEventRecord( timeC, 0 ) );
/*float f0 = (uint)free0/1048576.0;
float f1 = (uint)free1/1048576.0;
float t1 = (uint)total/1048576.0;
printf("Memory:\tStart: %f\tMax: %f\tTotal:%f\n", f0, f1, t1);*/
CUDA_CHECK( cudaMemcpy( ndBuf, NumBuf_dev,
8*sizeof(int), cudaMemcpyDeviceToHost) );
CUDA_CHECK( cudaMemcpy( RayBuf, RayBuf_dev,
8*NUM_BUF*sizeR, cudaMemcpyDeviceToHost) );
// Unified Memory
/*memcpy(FluxArray, FluxArray_dev, size0*SPECIES);
memcpy(dEArray, dEArray_dev, size0);*/
CUDA_CHECK( cudaMemcpy(nfSback, nfSback_dev, size3, cudaMemcpyDeviceToHost) );
CUDA_CHECK( cudaFree(NumBuf_dev) );
CUDA_CHECK( cudaFree(RayBuf_dev) );
CUDA_CHECK( cudaFree(nfSback_dev) );
CUDA_CHECK( cudaThreadSynchronize() );
CUDA_CHECK( cudaEventRecord( timeD, 0 ) );
float time0, time1, time2;
CUDA_CHECK( cudaThreadSynchronize() );
CUDA_CHECK( cudaEventElapsedTime( &time0, timeA, timeB ) );
CUDA_CHECK( cudaThreadSynchronize() );
CUDA_CHECK( cudaEventElapsedTime( &time1, timeB, timeC ) );
CUDA_CHECK( cudaThreadSynchronize() );
CUDA_CHECK( cudaEventElapsedTime( &time2, timeC, timeD ) );
CUDA_CHECK( cudaEventDestroy(timeA) );
CUDA_CHECK( cudaEventDestroy(timeB) );
CUDA_CHECK( cudaEventDestroy(timeC) );
CUDA_CHECK( cudaEventDestroy(timeD) );
//printf("Ray Trace (node %d):\t%f\t%f\t%f\n", domain.get_id(), time0/1000, time1/1000, time2/1000);
dt[0] = time1/1000;
}
// Integrate neutral fractions differential equations
// DensArray = Array of densities in cm^-3
// x_NArray = Array of neutral fractions
// FluxArray = Array of total ionizations, per baryon per second /1Mpc
// EArray = Array of energies per baryon
// background = Ionizing background for each channel
// dt = Time step
// fErr = Error number
// a = Scale factor
void ion( float* DensArray_dev, float* x_NArray_dev, float* FluxArray_dev,
float* EArray, float* dEArray_dev, const float* background, float* dt, float* fErr, float a, Domain domain)
{
*fErr = 0.0;
//long elements = DIMX*DIMY*DIMZ;
int dim = domain.get_dim();
long elements = dim*dim*dim;
dim3 numThreads1(16,16,1);
dim3 numBlocks1;
/*numBlocks1.x = DIMX/16;
numBlocks1.y = DIMY/16;
numBlocks1.z = DIMZ;*/
numBlocks1.x = dim/16;
numBlocks1.y = dim/16;
numBlocks1.z = dim;
size_t size2 = elements*sizeof(float);
cudaExtent size = make_cudaExtent(DIMX, DIMY, DIMZ);
cudaEvent_t timeA, timeB, timeC, timeD;
CUDA_CHECK( cudaEventCreate(&timeA) );
CUDA_CHECK( cudaEventCreate(&timeB) );
CUDA_CHECK( cudaEventCreate(&timeC) );
CUDA_CHECK( cudaEventCreate(&timeD) );
CUDA_CHECK( cudaEventRecord( timeA, 0 ) );
// 2. Copy data to GPU on each node
float* EArray_dev;
float* back_dev;
CUDA_CHECK( cudaMalloc((void **)&EArray_dev, size2) );
CUDA_CHECK( cudaMalloc((void **)&back_dev, 2*SPECIES*sizeof(float)) );
CUDA_CHECK( cudaMemcpy(EArray_dev, EArray, size2, cudaMemcpyHostToDevice) );
CUDA_CHECK( cudaMemcpy(back_dev, background, 2*SPECIES*sizeof(float), cudaMemcpyHostToDevice) );
float* err_dev;
CUDA_CHECK( cudaMalloc((void **)&err_dev, sizeof(float)) );
CUDA_CHECK( cudaMemcpy(err_dev, fErr, sizeof(float), cudaMemcpyHostToDevice) );
CUDA_CHECK( cudaEventRecord( timeB, 0 ) );
CUDA_CHECK( cudaGetLastError() );
ionization<<<numBlocks1, numThreads1>>>(*dt, err_dev, DensArray_dev, x_NArray_dev, FluxArray_dev, EArray_dev, dEArray_dev, back_dev, dim, a);
CUDA_CHECK( cudaPeekAtLastError() );
CUDA_CHECK( cudaThreadSynchronize() );
CUDA_CHECK( cudaEventRecord( timeC, 0 ) );
CUDA_CHECK( cudaThreadSynchronize() );
CUDA_CHECK( cudaMemcpy(fErr, err_dev, sizeof(float), cudaMemcpyDeviceToHost) );
CUDA_CHECK( cudaMemcpy(EArray, EArray_dev, size2, cudaMemcpyDeviceToHost) );
CUDA_CHECK( cudaEventRecord( timeD, 0 ) );
CUDA_CHECK( cudaFree(err_dev) );
CUDA_CHECK( cudaFree(EArray_dev) );
CUDA_CHECK( cudaFree(back_dev) );
float time0, time1, time2;
CUDA_CHECK( cudaEventElapsedTime( &time0, timeA, timeB ) );
CUDA_CHECK( cudaEventElapsedTime( &time1, timeB, timeC ) );
CUDA_CHECK( cudaEventElapsedTime( &time2, timeC, timeD ) );
CUDA_CHECK( cudaEventDestroy(timeA) );
CUDA_CHECK( cudaEventDestroy(timeB) );
CUDA_CHECK( cudaEventDestroy(timeC) );
CUDA_CHECK( cudaEventDestroy(timeD) );
//printf("Ionization (node %d):\t%f\t%f\t%f\n", domain.get_id(), time0/1000, time1/1000, time2/1000);
}
void dt_H( float* rate, float* DensArray_dev, float* x_NArray_dev,
float* FluxArray_dev, float* EArray,
const float* background, const float L, const float a, Domain domain)
{
//long elements = DIMX*DIMY*DIMZ;
int dim = domain.get_dim();
long elements = dim*dim*dim;
dim3 numThreads1(16,16,1);
dim3 numBlocks1;
/*numBlocks1.x = DIMX/16;
numBlocks1.y = DIMY/16;
numBlocks1.z = DIMZ;*/
numBlocks1.x = dim/16;
numBlocks1.y = dim/16;
numBlocks1.z = dim;
size_t size2 = elements*sizeof(float);
cudaExtent size = make_cudaExtent(DIMX, DIMY, DIMZ);
float* EArray_dev;
float* back_dev;
// FILTER
float* dtFilter = new float[elements];
float* dtFilter_dev;
CUDA_CHECK( cudaMalloc((void **)&dtFilter_dev, size2) );
CUDA_CHECK( cudaMemcpy(dtFilter_dev, dtFilter, size2, cudaMemcpyHostToDevice) );
CUDA_CHECK( cudaMalloc((void **)&EArray_dev, size2) );
CUDA_CHECK( cudaMalloc((void **)&back_dev, 2*SPECIES*sizeof(float)) );
CUDA_CHECK( cudaMemcpy(EArray_dev, EArray, size2, cudaMemcpyHostToDevice) );
CUDA_CHECK( cudaMemcpy(back_dev, background, 2*SPECIES*sizeof(float), cudaMemcpyHostToDevice) );
//rate[0] = 0.0;
float* rate_dev;
CUDA_CHECK( cudaMalloc((void **)&rate_dev, 2*sizeof(float)) );
CUDA_CHECK( cudaMemcpy(rate_dev, rate, 2*sizeof(float), cudaMemcpyHostToDevice) );
CUDA_CHECK( cudaGetLastError() );
timestep<<<numBlocks1, numThreads1>>>( rate_dev, dtFilter_dev, DensArray_dev, x_NArray_dev,
FluxArray_dev, EArray_dev, back_dev, dim, L, a);
CUDA_CHECK( cudaPeekAtLastError() );
CUDA_CHECK( cudaThreadSynchronize() );
CUDA_CHECK( cudaThreadSynchronize() );
CUDA_CHECK( cudaMemcpy(rate, rate_dev, 2*sizeof(float), cudaMemcpyDeviceToHost) );
CUDA_CHECK( cudaFree(EArray_dev) );
CUDA_CHECK( cudaFree(back_dev) );
CUDA_CHECK( cudaFree(rate_dev) );
// FILTER
CUDA_CHECK( cudaMemcpy(dtFilter, dtFilter_dev, size2, cudaMemcpyDeviceToHost) );
/*float max = 1.e-20;
float buffer[DIMX];
for(int i=0; i<DIMX; i++)
{
for(int j=0; j<DIMX; j++)
{
for(int k=1; k<DIMX-1; k++)
{
//cout << i << "\t" << j << "\t" << k << endl;
int ind = i + DIMX*j + DIMX*DIMY*k;
int dn = DIMX*DIMY;
buffer[k] = 0.5*dtFilter[ind];
buffer[k] += 0.25*dtFilter[ind+dn];
buffer[k] += 0.25*dtFilter[ind-dn];
}
for(int k=1; k<DIMX-1; k++)
{
int ind = i + DIMX*j + DIMX*DIMY*k;
dtFilter[ind] = buffer[k];
}
}
}
for(int i=0; i<DIMX; i++)
{
for(int k=0; k<DIMX; k++)
{
for(int j=1; j<DIMX-1; j++)
{
int ind = i + DIMX*j + DIMX*DIMY*k;
int dn = DIMX;
buffer[j] = 0.5*dtFilter[ind];
buffer[j] += 0.25*dtFilter[ind+dn];
buffer[j] += 0.25*dtFilter[ind-dn];
}
for(int j=1; j<DIMX-1; j++)
{
int ind = i + DIMX*j + DIMX*DIMY*k;
dtFilter[ind] = buffer[j];
}
}
}
for(int j=1; j<DIMX-1; j++)
{
for(int k=1; k<DIMX-1; k++)
{
for(int i=1; i<DIMX-1; i++)
{
int ind = i + DIMX*j + DIMX*DIMY*k;
int dn = 1;
buffer[i] = 0.5*dtFilter[ind];
buffer[i] += 0.25*dtFilter[ind+dn];
buffer[i] += 0.25*dtFilter[ind-dn];
}
for(int i=1; i<DIMX-1; i++)
{
int ind = i + DIMX*j + DIMX*DIMY*k;
dtFilter[ind] = buffer[i];
max = MAX(max, dtFilter[ind]);
}
}
}
cout << "Unfiltered = " << 0.05/3.154e13/rate[0] << endl;
cout << "Filtered = " << 0.05/3.154e13/max << endl;
rate[0] = max;*/
CUDA_CHECK( cudaFree(dtFilter_dev) );
delete[] dtFilter;
}
|
23bc7ada211f4299a12f8b920be8947e61c362a4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <time.h>
#include <stdio.h>
#include <limits.h>
#include <stdlib.h>
void run_test();
void printArr(int *arr);
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
printf("GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__inline__ __device__
float warpReduceSum(double val) {
for (int offset = warpSize/2; offset > 0; offset /= 2)
val += __shfl_down(val, offset);
return val;
}
__inline__ __device__
float blockReduceSum(double val) {
static __shared__ double shared[32]; // Shared mem for 32 partial sums
int lane = threadIdx.x % warpSize;
int wid = threadIdx.x / warpSize;
val = warpReduceSum(val); // Each warp performs partial reduction
if (lane==0) shared[wid]=val; // Write reduced value to shared memory
__syncthreads(); // Wait for all partial reductions
//read from shared memory only if that warp existed
val = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : 0;
if (wid==0) val = warpReduceSum(val); //Final reduce within first warp
return val;
}
__global__ void reduction(double *in, double *out, int N, int s1, int s2, int splane, int dim1, int dim2, int planeCount, int noEls)
{
double sum =0;
int cur_plane;
int area = dim1 * dim2;
int start = blockIdx.x * noEls * blockDim.x + threadIdx.x;
//int gridStride = splane * gridDim.x;
int gridStride = splane * gridDim.x;
//relative index and coordinates calculation
int target = 0;
target = (start%dim1) * s1;
target += ((start/ dim1) % dim2) * s2;
target += ((start/area)) * splane;
/*int tempDiv = area;
/*
for(int dimIter=0; dimIter<noDims; dimIter++)
{
if(dimIter != noDims-1)
{
dCoord = start / tempDiv % dimSizes[dimIter];
}
}
*/
int counter = 0;
int quarter = dim2 / noEls;
quarter = quarter *s2;
for(int i = target;
counter < planeCount/gridDim.x;
i += gridStride)
{
sum = 0;
//float sum =0;
//calculate the first target index
/*
target = (i%dim1) * s1;
target += ((i/ dim1) % dim2) * s2;
target += ((i/area)) * splane;
*/
//printf("Test: tid= %d target= %d target2= %d \n\n", i, target, target + (dim2/2 * s2));
for(int iter=0; iter < noEls; iter++)
{
sum += in[i + iter*quarter];
}
//__syncthreads();
/*
sum = in[gridStride*counter + target] + in[gridStride*counter + target + quarter] +
in[gridStride*counter + target + 2*quarter] + in[gridStride*counter+target+ 3*quarter];
*/
//sum += in[i] + in[i+blockDim.x];
sum = blockReduceSum(sum);
if(threadIdx.x == 0)
out[counter*gridDim.x + blockIdx.x] = sum;
counter++;
//sum += in[i] + in[i + blockDim.x];
}
}
void run_test(int noEls, int noOfBlocks, int r1, int r2, int rplane, int dimen1, int dimen2, int dimen3, int dimen4)
{
const int dim_len = 4;
//dimension sizes
int dims[dim_len] = {dimen1,dimen2,dimen3, dimen4};
//dimensions to reduce
int rdims[2] = {r1,r2}; //x and y
int strides[dim_len];
strides[0] = 1;
//total number of elements
int N = dims[0];
for(int i=1; i<dim_len; i++){
strides[i] = dims[i-1] * strides[i-1];
//update N
N *= dims[i];
}
printf("Number of elements: %d\n\n", N);
printArr(strides);
//Allocate memory for in and out in host and fill in
double *in, *out, *d_in, *d_out;
//int *d_strides, *d_rdims, *d_dims;
in = (double*)malloc(N*sizeof(double));
int planeCount = N/(dims[rdims[0]]*dims[rdims[1]]);//dims[rplane] * dims[3];//8192;//131072;
/*
printf("Dimz: %d\n\n", dims[rplane]);
printf("Dimt: %d\n\n", dims[3]);
printf("PlaneCount: %d\n\n", planeCount);
*/
out = (double*)malloc(planeCount*sizeof(double));
srand(time(NULL));
for(int i=0; i<N;i++)
{
if((i/128)%2048 == 2047)
{
in[i] = double(i)/1000; //(float)rand() / (float)RAND_MAX;//
}
else
{
in[i] = double(i)/1000;
}
}
/*
for(int i=0;i<N;i++)
{
printf("%.1f ", in[i]);
}
*/
printf("\n\n");
//Allocate memory for in and out on device
hipMalloc(&d_in, N*sizeof(double));
hipMalloc(&d_out, planeCount*sizeof(double));
//Event variables
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
//Transfer host data to device
gpuErrchk(hipMemcpy(d_in, in, N*sizeof(double), hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(d_out, out, planeCount*sizeof(double), hipMemcpyHostToDevice));
int s1 = strides[rdims[0]];
int s2 = strides[rdims[1]];
int splane = strides[rplane];
int dim1 = dims[rdims[0]];
int dim2 = dims[rdims[1]];
//int noElems = noEls;
//Record kernel
int noMeasures = 10; //number of measurements to take
hipEventRecord(start);
for(int mesIter=0; mesIter<noMeasures;mesIter++)
{
hipLaunchKernelGGL(( reduction), dim3(noOfBlocks),dim3(((dim1*dim2)/noEls)), 0, 0, d_in,d_out, N, s1, s2, splane, dim1, dim2, planeCount, noEls);
}
hipEventRecord(stop);
hipEventSynchronize(stop);
float ms = 0;
hipEventElapsedTime(&ms,start,stop);
gpuErrchk(hipMemcpy(out, d_out, planeCount*sizeof(double), hipMemcpyDeviceToHost));
ms = ms/noMeasures;
double total = (N)*8;
double ebw = total/(ms*1e6);
printf("EBW: %f\n", ebw);
//Check errors
printf("Strides: %d %d ", s1,s2);
printf("Plane Stride: %d\n ", splane);
printf("Plane Count: %d\n", planeCount);
for(int i=0;i<4;i++)
{
printf("%.3f %d ", out[i], i);
}
double sizeOut = sizeof(out);
double sizeD = sizeof(double);
int lengthOut = sizeOut/sizeD;
printf("Length: %d\n", lengthOut);
printf("%.3f", out[2047/*131071*/]);
for(int i=0;i<16;i++)
{
if(out[i] == 0)
{
printf("Incorrect : %d\n", i );
}
}
printf("\n");
hipFree(d_in);
hipFree(d_out);
}
int main(int argc, char *argv[])
{
{
int noEls = 8;
int noOfBlocks = 512;
int r1 = 0;
int r2 = 1;
int rplane = 2;
int dim1 = 32;
int dim2 = 32;
int dim3 = 16;
int dim4 = 4096;
if(argc > 1)
{
noEls = atoi(argv[1]);
noOfBlocks = atoi(argv[2]);
r1 = atoi(argv[3]);
r2 = atoi(argv[4]);
rplane = atoi(argv[5]);
dim1 = atoi(argv[6]);
dim2 = atoi(argv[7]);
dim3 = atoi(argv[8]);
dim4 = atoi(argv[9]);
}
run_test(noEls, noOfBlocks, r1, r2, rplane, dim1, dim2, dim3, dim4);
}
return 0;
}
void printArr(int *arr)
{
int i;
printf("Stride values in order: ");
for(i=0;i<=sizeof(arr)/sizeof(int);i++)
{
printf("%d ", arr[i]);
}
printf("\n\n");
}
| 23bc7ada211f4299a12f8b920be8947e61c362a4.cu |
#include <time.h>
#include <stdio.h>
#include <limits.h>
#include <stdlib.h>
void run_test();
void printArr(int *arr);
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
printf("GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__inline__ __device__
float warpReduceSum(double val) {
for (int offset = warpSize/2; offset > 0; offset /= 2)
val += __shfl_down(val, offset);
return val;
}
__inline__ __device__
float blockReduceSum(double val) {
static __shared__ double shared[32]; // Shared mem for 32 partial sums
int lane = threadIdx.x % warpSize;
int wid = threadIdx.x / warpSize;
val = warpReduceSum(val); // Each warp performs partial reduction
if (lane==0) shared[wid]=val; // Write reduced value to shared memory
__syncthreads(); // Wait for all partial reductions
//read from shared memory only if that warp existed
val = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : 0;
if (wid==0) val = warpReduceSum(val); //Final reduce within first warp
return val;
}
__global__ void reduction(double *in, double *out, int N, int s1, int s2, int splane, int dim1, int dim2, int planeCount, int noEls)
{
double sum =0;
int cur_plane;
int area = dim1 * dim2;
int start = blockIdx.x * noEls * blockDim.x + threadIdx.x;
//int gridStride = splane * gridDim.x;
int gridStride = splane * gridDim.x;
//relative index and coordinates calculation
int target = 0;
target = (start%dim1) * s1;
target += ((start/ dim1) % dim2) * s2;
target += ((start/area)) * splane;
/*int tempDiv = area;
/*
for(int dimIter=0; dimIter<noDims; dimIter++)
{
if(dimIter != noDims-1)
{
dCoord = start / tempDiv % dimSizes[dimIter];
}
}
*/
int counter = 0;
int quarter = dim2 / noEls;
quarter = quarter *s2;
for(int i = target;
counter < planeCount/gridDim.x;
i += gridStride)
{
sum = 0;
//float sum =0;
//calculate the first target index
/*
target = (i%dim1) * s1;
target += ((i/ dim1) % dim2) * s2;
target += ((i/area)) * splane;
*/
//printf("Test: tid= %d target= %d target2= %d \n\n", i, target, target + (dim2/2 * s2));
for(int iter=0; iter < noEls; iter++)
{
sum += in[i + iter*quarter];
}
//__syncthreads();
/*
sum = in[gridStride*counter + target] + in[gridStride*counter + target + quarter] +
in[gridStride*counter + target + 2*quarter] + in[gridStride*counter+target+ 3*quarter];
*/
//sum += in[i] + in[i+blockDim.x];
sum = blockReduceSum(sum);
if(threadIdx.x == 0)
out[counter*gridDim.x + blockIdx.x] = sum;
counter++;
//sum += in[i] + in[i + blockDim.x];
}
}
// Runs one timed GPU reduction benchmark.
//
// Reduces dimensions r1 and r2 of a dense 4-D array of shape
// (dimen1, dimen2, dimen3, dimen4), timing `noMeasures` launches of the
// `reduction` kernel with CUDA events and reporting effective bandwidth.
//
// noEls      - number of elements each thread accumulates serially
// noOfBlocks - grid size used for the reduction kernel
// r1, r2     - indices (0-3) of the two dimensions to reduce
// rplane     - index of the "plane" dimension (its stride is passed through)
// dimen1..4  - sizes of the four dimensions
void run_test(int noEls, int noOfBlocks, int r1, int r2, int rplane, int dimen1, int dimen2, int dimen3, int dimen4)
{
    const int dim_len = 4;
    // Dimension sizes and the pair of dimensions to reduce.
    int dims[dim_len] = {dimen1, dimen2, dimen3, dimen4};
    int rdims[2] = {r1, r2};
    // Fortran-style strides: strides[i] = product of dims[0..i-1].
    int strides[dim_len];
    strides[0] = 1;
    int N = dims[0];  // total number of elements
    for (int i = 1; i < dim_len; i++) {
        strides[i] = dims[i - 1] * strides[i - 1];
        N *= dims[i];
    }
    printf("Number of elements: %d\n\n", N);
    printArr(strides);
    // Host and device buffers; one output value per reduced plane.
    double *in, *out, *d_in, *d_out;
    in = (double *)malloc(N * sizeof(double));
    int planeCount = N / (dims[rdims[0]] * dims[rdims[1]]);
    out = (double *)malloc(planeCount * sizeof(double));
    if (in == NULL || out == NULL) {
        printf("Host allocation failed\n");
        free(in);
        free(out);
        return;
    }
    // Deterministic fill.  (The original's if/else wrote the same value on
    // both branches, so it collapses to a single assignment.)
    for (int i = 0; i < N; i++) {
        in[i] = double(i) / 1000;
    }
    // Zero the output so the initial host-to-device copy is defined
    // (the original copied an uninitialized buffer).
    for (int i = 0; i < planeCount; i++) {
        out[i] = 0.0;
    }
    printf("\n\n");
    cudaMalloc(&d_in, N * sizeof(double));
    cudaMalloc(&d_out, planeCount * sizeof(double));
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    gpuErrchk(cudaMemcpy(d_in, in, N * sizeof(double), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_out, out, planeCount * sizeof(double), cudaMemcpyHostToDevice));
    int s1 = strides[rdims[0]];
    int s2 = strides[rdims[1]];
    int splane = strides[rplane];
    int dim1 = dims[rdims[0]];
    int dim2 = dims[rdims[1]];
    // Time noMeasures identical launches and average the elapsed time.
    int noMeasures = 10;
    cudaEventRecord(start);
    for (int mesIter = 0; mesIter < noMeasures; mesIter++) {
        reduction<<<noOfBlocks, ((dim1 * dim2) / noEls)>>>(d_in, d_out, N, s1, s2, splane, dim1, dim2, planeCount, noEls);
    }
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float ms = 0;
    cudaEventElapsedTime(&ms, start, stop);
    gpuErrchk(cudaMemcpy(out, d_out, planeCount * sizeof(double), cudaMemcpyDeviceToHost));
    ms = ms / noMeasures;
    // Effective bandwidth in GB/s: input bytes read once / averaged time.
    double total = (double)N * 8;
    double ebw = total / (ms * 1e6);
    printf("EBW: %f\n", ebw);
    printf("Strides: %d %d ", s1, s2);
    printf("Plane Stride: %d\n ", splane);
    printf("Plane Count: %d\n", planeCount);
    // Guard all result inspection by planeCount (the original read out[2047]
    // and out[0..3] unconditionally, past the end for small plane counts).
    for (int i = 0; i < 4 && i < planeCount; i++) {
        printf("%.3f %d ", out[i], i);
    }
    // BUG FIX: sizeof(out) is the size of a pointer, not of the allocation;
    // the output length is planeCount by construction.
    int lengthOut = planeCount;
    printf("Length: %d\n", lengthOut);
    if (planeCount > 2047) {
        printf("%.3f", out[2047]);
    }
    for (int i = 0; i < 16 && i < planeCount; i++) {
        if (out[i] == 0) {
            printf("Incorrect : %d\n", i);
        }
    }
    printf("\n");
    // Release everything (the original leaked the host buffers and events).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_in);
    cudaFree(d_out);
    free(in);
    free(out);
}
// Entry point: parse optional benchmark parameters and run one test.
//
// Usage: prog [noEls noOfBlocks r1 r2 rplane dim1 dim2 dim3 dim4]
// All nine values must be supplied together; otherwise defaults apply.
int main(int argc, char *argv[])
{
    // Defaults: reduce the first two (32-element) dimensions of a
    // 32 x 32 x 16 x 4096 array with 512 blocks, 8 elements per thread.
    int noEls = 8;
    int noOfBlocks = 512;
    int r1 = 0;
    int r2 = 1;
    int rplane = 2;
    int dim1 = 32;
    int dim2 = 32;
    int dim3 = 16;
    int dim4 = 4096;
    // BUG FIX: the original checked only argc > 1 and then read
    // argv[2]..argv[9], reading past the argument vector whenever fewer
    // than nine arguments were given.
    if (argc > 9) {
        noEls = atoi(argv[1]);
        noOfBlocks = atoi(argv[2]);
        r1 = atoi(argv[3]);
        r2 = atoi(argv[4]);
        rplane = atoi(argv[5]);
        dim1 = atoi(argv[6]);
        dim2 = atoi(argv[7]);
        dim3 = atoi(argv[8]);
        dim4 = atoi(argv[9]);
    } else if (argc > 1) {
        printf("Expected 9 arguments, got %d; using defaults.\n", argc - 1);
    }
    run_test(noEls, noOfBlocks, r1, r2, rplane, dim1, dim2, dim3, dim4);
    return 0;
}
// Prints the stride values computed by run_test.
//
// BUG FIX: the original used sizeof(arr)/sizeof(int), but `arr` is a
// pointer parameter, so that expression is sizeof(int*)/sizeof(int) — a
// platform constant that never reflected the array length — and the `<=`
// bound read one element past even that.  Every caller passes run_test's
// 4-entry stride array, so the length is fixed to match its dim_len.
void printArr(int *arr)
{
    const int dim_len = 4;  // must match run_test's dim_len
    printf("Stride values in order: ");
    for (int i = 0; i < dim_len; i++) {
        printf("%d ", arr[i]);
    }
    printf("\n\n");
}
|
77aa476b5e31364429ebe81dcdc1dc75c24b2a5f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <float.h>
#include "kernel.hip"
// Builds a 2-D latitude/longitude histogram of a point dataset on the GPU
// (HIP version).
//
// The input file is read in chunks spread round-robin over `stream_number`
// HIP streams; each stream copies its chunk to the device, runs
// histogram_kernel, and copies its partial histogram back.  The partials
// are accumulated on the host into histogram_final and written to out.txt.
//
// Usage:
//   prog                                  cemetery.csv, 64x64 bins
//   prog <file>
//   prog <file> d                         debug: echo the first 20 lines
//   prog <file> <zoom>                    2^zoom x 2^zoom bins
//   prog <file> <rows> <cols>
//   prog <file> <rows> <cols> <streams> <chunk>
int main(int argc, char* argv[])
{
    Timer timer;
    startTime(&timer);
    int line_count = 0;
    int histo_row_count = 64;
    int histo_col_count = 64;
    int zoom_level = 6;
    int stream_number = 3;        // number of concurrent HIP streams
    int chunk_size = 25000;       // rows per stream per batch
    int line_limit = 1000000000;  // hard cap on processed rows
    FILE *file;
    hipError_t cuda_ret;
    const char* filename;
    const char* output_file = "out.txt";
    int debug = 0;
    if (argc == 1) {
        filename = "cemetery.csv";
    } else if (argc == 2) {
        filename = argv[1];
    } else if (argc == 3) {
        if (strcmp(argv[2], "d") == 0) {
            filename = argv[1];
            debug = 1;
        } else {
            filename = argv[1];
            zoom_level = atoi(argv[2]);
            histo_row_count = (1 << zoom_level);
            histo_col_count = (1 << zoom_level);
        }
    } else if (argc == 4) {
        filename = argv[1];
        histo_row_count = atoi(argv[2]);
        histo_col_count = atoi(argv[3]);
    } else if (argc == 6) {
        filename = argv[1];
        histo_row_count = atoi(argv[2]);
        histo_col_count = atoi(argv[3]);
        stream_number = atoi(argv[4]);
        chunk_size = atoi(argv[5]);
    }
    int histo_size = histo_row_count * histo_col_count;
    hipStream_t stream[stream_number];
    for (int i = 0; i < stream_number; i++) {
        cuda_ret = hipStreamCreateWithFlags(&stream[i], hipStreamNonBlocking);
        if (cuda_ret != hipSuccess) FATAL("Stream %d couldn't be created:%d", i, cuda_ret);
    }
    // Per-stream pinned host staging buffers and device buffers.
    point* points[stream_number];
    point* points_d[stream_number];
    unsigned int *histogram[stream_number];
    unsigned int *histogram_d[stream_number];
    for (int i = 0; i < stream_number; i++) {
        cuda_ret = hipHostMalloc((void **) &points[i], sizeof(point) * chunk_size, hipHostMallocDefault);
        if (cuda_ret != hipSuccess) FATAL("Chunck %d couldn't be allocated:%d", i, cuda_ret);
        cuda_ret = hipMalloc((void **) &points_d[i], sizeof(point) * chunk_size);
        if (cuda_ret != hipSuccess) FATAL("Chunck %d couldn't be allocated at device:%d", i, cuda_ret);
        cuda_ret = hipHostMalloc((void **) &histogram[i], sizeof(unsigned int) * histo_size, hipHostMallocDefault);
        if (cuda_ret != hipSuccess) FATAL("Histogram %d couldn't be allocated:%d", i, cuda_ret);
        cuda_ret = hipMalloc((void **) &histogram_d[i], sizeof(unsigned int) * histo_size);
        if (cuda_ret != hipSuccess) FATAL("Histogram %d couldn't be allocated at device:%d", i, cuda_ret);
        cuda_ret = hipMemset(histogram_d[i], 0, histo_size * sizeof(unsigned int));
        if (cuda_ret != hipSuccess) FATAL("Unable to set device histogram %d", i);
    }
    hipDeviceSynchronize();
    int BLOCK_SIZE = 512;
    int MAX_GRID_SIZE = 12;
    int GRID_SIZE = ((chunk_size - 1) / BLOCK_SIZE) + 1;
    GRID_SIZE = GRID_SIZE > MAX_GRID_SIZE ? MAX_GRID_SIZE : GRID_SIZE;
    dim3 DimGrid(GRID_SIZE, 1, 1);
    dim3 DimBlock(BLOCK_SIZE, 1, 1);
    printf("\nLoading file... %s", filename);
    file = fopen(filename, "r");
    if (file == NULL) {
        perror("fopen()");
        return 1;  // BUG FIX: the original continued and passed NULL to fscanf.
    }
    if (debug == 1) {
        // Echo the first 20 lines of the input, then quit.
        char *line = NULL;
        size_t len = 0;
        ssize_t read;
        int count = 0;
        while ((read = getline(&line, &len, file)) != -1) {
            printf("%s", line);
            if (count == 20) break;
            count++;
        }
        free(line);  // getline() allocates the buffer
        fclose(file);
        return 0;
    }
    // World-coordinate bounds, used both for clamping and for binning.
    float lat_max = 90.0f, lat_min = -90.0f, lon_max = 180.0f, lon_min = -180.0f;
    int end = 0;
    int chunk_counter = 0;  // rows buffered so far in each stream's chunk
    int test = 0;
    unsigned int *histogram_final = (unsigned int*)malloc(histo_size * sizeof(unsigned int));
    if (histogram_final == NULL) FATAL("Host histogram couldn't be allocated");
    memset(histogram_final, 0, histo_size * sizeof(unsigned int));
    while (1) {
        // Read one row for every stream (round-robin distribution).
        for (int i = 0; i < stream_number; i++) {
            float lat, lon = lat = 0.0f;
            // BUG FIX: the original had an unbalanced ')' on this condition
            // and did not compile.
            if (strcmp(filename, "cemetery.csv") == 0) {
                // pattern for cemetery dataset
                test = fscanf(file, "%f,%f\n", &lon, &lat);
            } else {
                // pattern for all_nodes dataset
                test = fscanf(file, "%*s\t%f\t%f\t%*s\n", &lat, &lon);
            }
            if (test == EOF) {
                end = 1;
                break;
            } else if (test != 2) {
                i--;  // malformed row: retry this slot with the next line
                continue;
            }
            // Clamp outliers to world bounds instead of dropping the row.
            if (lat > lat_max) lat = lat_max;
            if (lon > lon_max) lon = lon_max;
            if (lat < lat_min) lat = lat_min;
            if (lon < lon_min) lon = lon_min;
            points[i][chunk_counter].lat = lat;
            points[i][chunk_counter].lon = lon;
            line_count++;
            if (line_limit != 0 && line_limit == line_count) {
                end = 1;
                break;
            }
        }
        chunk_counter++;
        if (chunk_counter == chunk_size || (end == 1 && chunk_counter != 1) || line_limit == line_count) {
            // NOTE(review): on the final partial batch every stream is given
            // the same sent_size (line_count % chunk_size), exactly as the
            // original did — confirm this matches the row distribution above.
            int sent_size = chunk_size;
            if (line_count % chunk_size != 0) sent_size = line_count % chunk_size;
            for (int i = 0; i < stream_number; i++) {
                // BUG FIX: the original copied sent_size*sizeof(point*) bytes
                // (pointer size) instead of sent_size point records.
                hipMemcpyAsync(points_d[i], points[i], sent_size * sizeof(point), hipMemcpyHostToDevice, stream[i]);
            }
            for (int i = 0; i < stream_number; i++) {
                hipLaunchKernelGGL(( histogram_kernel), dim3(DimGrid), dim3(DimBlock), histo_size * sizeof(unsigned int), stream[i],
                    points_d[i],
                    histogram_d[i],
                    sent_size,
                    histo_row_count,
                    histo_col_count,
                    lat_max,
                    lat_min,
                    lon_max,
                    lon_min);
            }
            for (int i = 0; i < stream_number; i++) {
                hipMemcpyAsync(histogram[i], histogram_d[i], histo_size * sizeof(unsigned int), hipMemcpyDeviceToHost, stream[i]);
            }
            for (int i = 0; i < stream_number; i++) {
                hipStreamSynchronize(stream[i]);
            }
            // Accumulate the per-stream partial histograms on the host.
            for (int i = 0; i < stream_number; i++) {
                for (int j = 0; j < histo_size; j++) {
                    histogram_final[j] += histogram[i][j];
                }
            }
            chunk_counter = 0;
            for (int i = 0; i < stream_number; i++) {
                cuda_ret = hipMemset(histogram_d[i], 0, histo_size * sizeof(unsigned int));
                if (cuda_ret != hipSuccess) FATAL("Unable to set device histogram %d", i);
            }
        }
        if (end == 1) {
            break;
        }
    }
    fclose(file);
    FILE *out_file = fopen(output_file, "w");
    printf("Done!");
    if (out_file != NULL) {
        for (int i = 0; i < histo_row_count; i++) {
            for (int j = 0; j < histo_col_count; j++) {
                fprintf(out_file, "%d\t%d\t%d\n", i, j, histogram_final[i * histo_col_count + j]);
                printf("\n%d\t%d\t%d", i, j, histogram_final[i * histo_col_count + j]);
            }
        }
        fclose(out_file);  // BUG FIX: the original never closed out_file
    } else {
        perror("fopen()");
    }
    // BUG FIX: the original passed the pointer arrays themselves to
    // hipHostFree/hipFree, leaking every per-stream allocation and
    // never freeing histogram_d at all.
    for (int i = 0; i < stream_number; i++) {
        hipHostFree(points[i]);
        hipHostFree(histogram[i]);
        hipFree(points_d[i]);
        hipFree(histogram_d[i]);
        hipStreamDestroy(stream[i]);
    }
    free(histogram_final);
    return 0;
}
| 77aa476b5e31364429ebe81dcdc1dc75c24b2a5f.cu | #include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <float.h>
#include "kernel.cu"
// Builds a 2-D latitude/longitude histogram of a point dataset on the GPU
// (CUDA version).
//
// The input file is read in chunks spread round-robin over `stream_number`
// CUDA streams; each stream copies its chunk to the device, runs
// histogram_kernel, and copies its partial histogram back.  The partials
// are accumulated on the host into histogram_final and written to out.txt.
//
// Usage:
//   prog                                  cemetery.csv, 64x64 bins
//   prog <file>
//   prog <file> d                         debug: echo the first 20 lines
//   prog <file> <zoom>                    2^zoom x 2^zoom bins
//   prog <file> <rows> <cols>
//   prog <file> <rows> <cols> <streams> <chunk>
int main(int argc, char* argv[])
{
    Timer timer;
    startTime(&timer);
    int line_count = 0;
    int histo_row_count = 64;
    int histo_col_count = 64;
    int zoom_level = 6;
    int stream_number = 3;        // number of concurrent CUDA streams
    int chunk_size = 25000;       // rows per stream per batch
    int line_limit = 1000000000;  // hard cap on processed rows
    FILE *file;
    cudaError_t cuda_ret;
    const char* filename;
    const char* output_file = "out.txt";
    int debug = 0;
    if (argc == 1) {
        filename = "cemetery.csv";
    } else if (argc == 2) {
        filename = argv[1];
    } else if (argc == 3) {
        if (strcmp(argv[2], "d") == 0) {
            filename = argv[1];
            debug = 1;
        } else {
            filename = argv[1];
            zoom_level = atoi(argv[2]);
            histo_row_count = (1 << zoom_level);
            histo_col_count = (1 << zoom_level);
        }
    } else if (argc == 4) {
        filename = argv[1];
        histo_row_count = atoi(argv[2]);
        histo_col_count = atoi(argv[3]);
    } else if (argc == 6) {
        filename = argv[1];
        histo_row_count = atoi(argv[2]);
        histo_col_count = atoi(argv[3]);
        stream_number = atoi(argv[4]);
        chunk_size = atoi(argv[5]);
    }
    int histo_size = histo_row_count * histo_col_count;
    cudaStream_t stream[stream_number];
    for (int i = 0; i < stream_number; i++) {
        cuda_ret = cudaStreamCreateWithFlags(&stream[i], cudaStreamNonBlocking);
        if (cuda_ret != cudaSuccess) FATAL("Stream %d couldn't be created:%d", i, cuda_ret);
    }
    // Per-stream pinned host staging buffers and device buffers.
    point* points[stream_number];
    point* points_d[stream_number];
    unsigned int *histogram[stream_number];
    unsigned int *histogram_d[stream_number];
    for (int i = 0; i < stream_number; i++) {
        cuda_ret = cudaHostAlloc((void **) &points[i], sizeof(point) * chunk_size, cudaHostAllocDefault);
        if (cuda_ret != cudaSuccess) FATAL("Chunck %d couldn't be allocated:%d", i, cuda_ret);
        cuda_ret = cudaMalloc((void **) &points_d[i], sizeof(point) * chunk_size);
        if (cuda_ret != cudaSuccess) FATAL("Chunck %d couldn't be allocated at device:%d", i, cuda_ret);
        cuda_ret = cudaHostAlloc((void **) &histogram[i], sizeof(unsigned int) * histo_size, cudaHostAllocDefault);
        if (cuda_ret != cudaSuccess) FATAL("Histogram %d couldn't be allocated:%d", i, cuda_ret);
        cuda_ret = cudaMalloc((void **) &histogram_d[i], sizeof(unsigned int) * histo_size);
        if (cuda_ret != cudaSuccess) FATAL("Histogram %d couldn't be allocated at device:%d", i, cuda_ret);
        cuda_ret = cudaMemset(histogram_d[i], 0, histo_size * sizeof(unsigned int));
        if (cuda_ret != cudaSuccess) FATAL("Unable to set device histogram %d", i);
    }
    cudaDeviceSynchronize();
    int BLOCK_SIZE = 512;
    int MAX_GRID_SIZE = 12;
    int GRID_SIZE = ((chunk_size - 1) / BLOCK_SIZE) + 1;
    GRID_SIZE = GRID_SIZE > MAX_GRID_SIZE ? MAX_GRID_SIZE : GRID_SIZE;
    dim3 DimGrid(GRID_SIZE, 1, 1);
    dim3 DimBlock(BLOCK_SIZE, 1, 1);
    printf("\nLoading file... %s", filename);
    file = fopen(filename, "r");
    if (file == NULL) {
        perror("fopen()");
        return 1;  // BUG FIX: the original continued and passed NULL to fscanf.
    }
    if (debug == 1) {
        // Echo the first 20 lines of the input, then quit.
        char *line = NULL;
        size_t len = 0;
        ssize_t read;
        int count = 0;
        while ((read = getline(&line, &len, file)) != -1) {
            printf("%s", line);
            if (count == 20) break;
            count++;
        }
        free(line);  // getline() allocates the buffer
        fclose(file);
        return 0;
    }
    // World-coordinate bounds, used both for clamping and for binning.
    float lat_max = 90.0f, lat_min = -90.0f, lon_max = 180.0f, lon_min = -180.0f;
    int end = 0;
    int chunk_counter = 0;  // rows buffered so far in each stream's chunk
    int test = 0;
    unsigned int *histogram_final = (unsigned int*)malloc(histo_size * sizeof(unsigned int));
    if (histogram_final == NULL) FATAL("Host histogram couldn't be allocated");
    memset(histogram_final, 0, histo_size * sizeof(unsigned int));
    while (1) {
        // Read one row for every stream (round-robin distribution).
        for (int i = 0; i < stream_number; i++) {
            float lat, lon = lat = 0.0f;
            // BUG FIX: the original had an unbalanced ')' on this condition
            // and did not compile.
            if (strcmp(filename, "cemetery.csv") == 0) {
                // pattern for cemetery dataset
                test = fscanf(file, "%f,%f\n", &lon, &lat);
            } else {
                // pattern for all_nodes dataset
                test = fscanf(file, "%*s\t%f\t%f\t%*s\n", &lat, &lon);
            }
            if (test == EOF) {
                end = 1;
                break;
            } else if (test != 2) {
                i--;  // malformed row: retry this slot with the next line
                continue;
            }
            // Clamp outliers to world bounds instead of dropping the row.
            if (lat > lat_max) lat = lat_max;
            if (lon > lon_max) lon = lon_max;
            if (lat < lat_min) lat = lat_min;
            if (lon < lon_min) lon = lon_min;
            points[i][chunk_counter].lat = lat;
            points[i][chunk_counter].lon = lon;
            line_count++;
            if (line_limit != 0 && line_limit == line_count) {
                end = 1;
                break;
            }
        }
        chunk_counter++;
        if (chunk_counter == chunk_size || (end == 1 && chunk_counter != 1) || line_limit == line_count) {
            // NOTE(review): on the final partial batch every stream is given
            // the same sent_size (line_count % chunk_size), exactly as the
            // original did — confirm this matches the row distribution above.
            int sent_size = chunk_size;
            if (line_count % chunk_size != 0) sent_size = line_count % chunk_size;
            for (int i = 0; i < stream_number; i++) {
                // BUG FIX: the original copied sent_size*sizeof(point*) bytes
                // (pointer size) instead of sent_size point records.
                cudaMemcpyAsync(points_d[i], points[i], sent_size * sizeof(point), cudaMemcpyHostToDevice, stream[i]);
            }
            for (int i = 0; i < stream_number; i++) {
                histogram_kernel<<<DimGrid, DimBlock, histo_size * sizeof(unsigned int), stream[i]>>>(
                    points_d[i],
                    histogram_d[i],
                    sent_size,
                    histo_row_count,
                    histo_col_count,
                    lat_max,
                    lat_min,
                    lon_max,
                    lon_min);
            }
            for (int i = 0; i < stream_number; i++) {
                cudaMemcpyAsync(histogram[i], histogram_d[i], histo_size * sizeof(unsigned int), cudaMemcpyDeviceToHost, stream[i]);
            }
            for (int i = 0; i < stream_number; i++) {
                cudaStreamSynchronize(stream[i]);
            }
            // Accumulate the per-stream partial histograms on the host.
            for (int i = 0; i < stream_number; i++) {
                for (int j = 0; j < histo_size; j++) {
                    histogram_final[j] += histogram[i][j];
                }
            }
            chunk_counter = 0;
            for (int i = 0; i < stream_number; i++) {
                cuda_ret = cudaMemset(histogram_d[i], 0, histo_size * sizeof(unsigned int));
                if (cuda_ret != cudaSuccess) FATAL("Unable to set device histogram %d", i);
            }
        }
        if (end == 1) {
            break;
        }
    }
    fclose(file);
    FILE *out_file = fopen(output_file, "w");
    printf("Done!");
    if (out_file != NULL) {
        for (int i = 0; i < histo_row_count; i++) {
            for (int j = 0; j < histo_col_count; j++) {
                fprintf(out_file, "%d\t%d\t%d\n", i, j, histogram_final[i * histo_col_count + j]);
                printf("\n%d\t%d\t%d", i, j, histogram_final[i * histo_col_count + j]);
            }
        }
        fclose(out_file);  // BUG FIX: the original never closed out_file
    } else {
        perror("fopen()");
    }
    // BUG FIX: the original passed the pointer arrays themselves to
    // cudaFreeHost/cudaFree, leaking every per-stream allocation and
    // never freeing histogram_d at all.
    for (int i = 0; i < stream_number; i++) {
        cudaFreeHost(points[i]);
        cudaFreeHost(histogram[i]);
        cudaFree(points_d[i]);
        cudaFree(histogram_d[i]);
        cudaStreamDestroy(stream[i]);
    }
    free(histogram_final);
    return 0;
}
|
7d34c1cc778ee4efc0d37998792370dce2a9c2da.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/pooling_layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
// Max-pooling forward kernel: one thread per pooled output element (NCHW).
// Writes the window maximum to top_data and the argmax — flattened as
// h*width+w within the channel plane — to `mask` (int) when mask != NULL,
// otherwise to `top_mask` (Dtype).
template <typename Dtype>
__global__ void MaxPoolForward(const int nthreads,
const Dtype* const bottom_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
Dtype* const top_data, int* mask, Dtype* top_mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
// Decompose the flat output index into (n, c, ph, pw).
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
// Input window in image coordinates, clipped to the unpadded image.
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
const int hend = min(hstart + kernel_h, height);
const int wend = min(wstart + kernel_w, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
Dtype maxval = -FLT_MAX;
int maxidx = -1;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (bottom_slice[h * width + w] > maxval) {
maxidx = h * width + w;
maxval = bottom_slice[maxidx];
}
}
}
top_data[index] = maxval;
// Record the argmax for the backward pass.
if (mask) {
mask[index] = maxidx;
} else {
top_mask[index] = maxidx;
}
}
}
// Average-pooling forward kernel: one thread per pooled output element.
// Note pool_size is computed over the *padded* window before the window is
// clipped to the image, so padding counts toward the divisor.
template <typename Dtype>
__global__ void AvePoolForward(const int nthreads,
const Dtype* const bottom_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// Decompose the flat output index into (n, c, ph, pw).
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
// Divisor includes padded positions (computed before clipping).
const int pool_size = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
Dtype aveval = 0;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
aveval += bottom_slice[h * width + w];
}
}
top_data[index] = aveval / pool_size;
}
}
// Stochastic-pooling forward kernel (training): each thread samples one
// input from its window with probability proportional to the activation.
// On entry rand_idx[index] holds a pre-generated uniform sample; it is
// overwritten with the flat index of the chosen input (used by backward).
template <typename Dtype>
__global__ void StoPoolForwardTrain(const int nthreads,
const Dtype* const bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* const rand_idx, Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
const int hstart = ph * stride_h;
const int hend = min(hstart + kernel_h, height);
const int wstart = pw * stride_w;
const int wend = min(wstart + kernel_w, width);
Dtype cumsum = 0.;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
// First pass: get sum
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_slice[h * width + w];
}
}
// Threshold the running sum at a uniform fraction of the total.
const float thres = rand_idx[index] * cumsum;
// Second pass: get value, and set index.
cumsum = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_slice[h * width + w];
if (cumsum >= thres) {
rand_idx[index] = ((n * channels + c) * height + h) * width + w;
top_data[index] = bottom_slice[h * width + w];
return;
}
}
}
}
}
// Stochastic-pooling forward kernel (testing): deterministic expectation of
// the training-time sampling — outputs sum(x^2)/sum(x), the activation-
// weighted mean of the window, or 0 when the window sum is not positive.
template <typename Dtype>
__global__ void StoPoolForwardTest(const int nthreads,
const Dtype* const bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
const int hstart = ph * stride_h;
const int hend = min(hstart + kernel_h, height);
const int wstart = pw * stride_w;
const int wend = min(wstart + kernel_w, width);
// We set cumsum to be 0 to avoid divide-by-zero problems
Dtype cumsum = 0.;
Dtype cumvalues = 0.;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
// First pass: get sum
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_slice[h * width + w];
cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];
}
}
top_data[index] = (cumsum > 0.) ? cumvalues / cumsum : 0.;
}
}
// GPU forward pass: dispatch to the kernel matching the configured pooling
// method.  MAX additionally records an argmax mask — in top[1] when the
// layer has a second top blob, otherwise in the internal max_idx_ buffer.
// STOCHASTIC samples during TRAIN (using freshly generated uniform numbers)
// and uses the deterministic expectation during TEST.
template <typename Dtype>
void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int count = top[0]->count();
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
int* mask = NULL;
Dtype* top_mask = NULL;
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask) {
top_mask = top[1]->mutable_gpu_data();
} else {
mask = max_idx_.mutable_gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MaxPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data,
mask, top_mask);
break;
case PoolingParameter_PoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( AvePoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data);
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
if (this->phase_ == TRAIN) {
// We need to create the random index as well.
// One uniform sample in [0,1) per output element, consumed by the kernel.
caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1),
rand_idx_.mutable_gpu_data());
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( StoPoolForwardTrain<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_,
rand_idx_.mutable_gpu_data(), top_data);
} else {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( StoPoolForwardTest<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, top_data);
}
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
}
// Max-pooling backward kernel: one thread per *bottom* element.  Each
// thread scans every pooled window that could contain its position and
// accumulates the top gradient wherever the recorded argmax (from the
// int mask or the Dtype top_mask) matches that position.
template <typename Dtype>
__global__ void MaxPoolBackward(const int nthreads, const Dtype* const top_diff,
const int* const mask, const Dtype* const top_mask, const int num,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, const int kernel_h,
const int kernel_w, const int stride_h, const int stride_w, const int pad_h,
const int pad_w, Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
const int w = index % width;
const int h = (index / width) % height;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
// Range of pooled windows whose receptive field covers (h, w).
const int phstart =
(h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;
const int phend = min((h + pad_h) / stride_h + 1, pooled_height);
const int pwstart =
(w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;
const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);
Dtype gradient = 0;
const int offset = (n * channels + c) * pooled_height * pooled_width;
const Dtype* const top_diff_slice = top_diff + offset;
if (mask) {
const int* const mask_slice = mask + offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (mask_slice[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff_slice[ph * pooled_width + pw];
}
}
}
} else {
const Dtype* const top_mask_slice = top_mask + offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff_slice[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] = gradient;
}
}
// Average-pooling backward kernel: one thread per bottom element.  Each
// contributing window adds top_diff / pool_size, where pool_size is
// recomputed over the padded window exactly as in the forward kernel.
template <typename Dtype>
__global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w,
Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
// (h, w) are shifted into padded coordinates to simplify the bounds.
const int w = index % width + pad_w;
const int h = (index / width) % height + pad_h;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
const int phend = min(h / stride_h + 1, pooled_height);
const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
const int pwend = min(w / stride_w + 1, pooled_width);
Dtype gradient = 0;
const Dtype* const top_diff_slice =
top_diff + (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
int pool_size = (hend - hstart) * (wend - wstart);
gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;
}
}
bottom_diff[index] = gradient;
}
}
// Stochastic-pooling backward kernel: one thread per bottom element.  The
// gradient flows only from windows whose sampled position (stored by the
// training forward pass in rand_idx as a flat bottom index) equals this
// element's own flat index.
template <typename Dtype>
__global__ void StoPoolBackward(const int nthreads,
const Dtype* const rand_idx, const Dtype* const top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
const int w = index % width;
const int h = (index / width) % height;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
const int phend = min(h / stride_h + 1, pooled_height);
const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
const int pwend = min(w / stride_w + 1, pooled_width);
Dtype gradient = 0;
const Dtype* const rand_idx_slice =
rand_idx + (n * channels + c) * pooled_height * pooled_width;
const Dtype* const top_diff_slice =
top_diff + (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// Route the gradient only where this element was the sampled one.
gradient += top_diff_slice[ph * pooled_width + pw] *
(index == static_cast<int>(rand_idx_slice[ph * pooled_width + pw]));
}
}
bottom_diff[index] = gradient;
}
}
// GPU backward pass: zero bottom_diff, then accumulate the top gradient
// with the kernel matching the pooling method used in the forward pass.
// MAX reads the argmax mask written by Forward_gpu (top[1] or max_idx_);
// STOCHASTIC reads the sampled indices stored in rand_idx_.
template <typename Dtype>
void PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
// Clear the gradient buffer before the kernels accumulate into it.
caffe_gpu_set(count, Dtype(0.), bottom_diff);
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
const int* mask = NULL;
const Dtype* top_mask = NULL;
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask) {
top_mask = top[1]->gpu_data();
} else {
mask = max_idx_.gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MaxPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, mask, top_mask, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_,
kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_,
bottom_diff);
break;
case PoolingParameter_PoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( AvePoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff);
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( StoPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, rand_idx_.gpu_data(), top_diff,
top[0]->num(), channels_, height_, width_, pooled_height_,
pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_,
bottom_diff);
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(PoolingLayer);
} // namespace caffe
| 7d34c1cc778ee4efc0d37998792370dce2a9c2da.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/pooling_layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
// Max-pooling forward kernel: one thread per pooled output element (NCHW).
// Writes the window maximum to top_data and the argmax — flattened as
// h*width+w within the channel plane — to `mask` (int) when mask != NULL,
// otherwise to `top_mask` (Dtype).
template <typename Dtype>
__global__ void MaxPoolForward(const int nthreads,
const Dtype* const bottom_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
Dtype* const top_data, int* mask, Dtype* top_mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
// Decompose the flat output index into (n, c, ph, pw).
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
// Input window in image coordinates, clipped to the unpadded image.
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
const int hend = min(hstart + kernel_h, height);
const int wend = min(wstart + kernel_w, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
Dtype maxval = -FLT_MAX;
int maxidx = -1;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (bottom_slice[h * width + w] > maxval) {
maxidx = h * width + w;
maxval = bottom_slice[maxidx];
}
}
}
top_data[index] = maxval;
// Record the argmax for the backward pass.
if (mask) {
mask[index] = maxidx;
} else {
top_mask[index] = maxidx;
}
}
}
// Average-pooling forward kernel: one thread per pooled output element.
// Note pool_size is computed over the *padded* window before the window is
// clipped to the image, so padding counts toward the divisor.
template <typename Dtype>
__global__ void AvePoolForward(const int nthreads,
const Dtype* const bottom_data, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
Dtype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// Decompose the flat output index into (n, c, ph, pw).
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
// Divisor includes padded positions (computed before clipping).
const int pool_size = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
Dtype aveval = 0;
const Dtype* const bottom_slice =
bottom_data + (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
aveval += bottom_slice[h * width + w];
}
}
top_data[index] = aveval / pool_size;
}
}
// Stochastic-pooling forward (training): samples one input from each window
// with probability proportional to its value. On entry rand_idx holds a
// uniform [0,1) draw per output element; on exit it holds the flat index of
// the element that was chosen (used by StoPoolBackward).
template <typename Dtype>
__global__ void StoPoolForwardTrain(const int nthreads,
    const Dtype* const bottom_data,
    const int num, const int channels, const int height,
    const int width, const int pooled_height, const int pooled_width,
    const int kernel_h, const int kernel_w, const int stride_h,
    const int stride_w, Dtype* const rand_idx, Dtype* const top_data) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int pw = index % pooled_width;
    const int ph = (index / pooled_width) % pooled_height;
    const int c = (index / pooled_width / pooled_height) % channels;
    const int n = index / pooled_width / pooled_height / channels;
    const int hstart = ph * stride_h;
    const int hend = min(hstart + kernel_h, height);
    const int wstart = pw * stride_w;
    const int wend = min(wstart + kernel_w, width);
    Dtype cumsum = 0.;
    const Dtype* const bottom_slice =
        bottom_data + (n * channels + c) * height * width;
    // First pass: get sum
    for (int h = hstart; h < hend; ++h) {
      for (int w = wstart; w < wend; ++w) {
        cumsum += bottom_slice[h * width + w];
      }
    }
    // Threshold inside the cumulative distribution of the window.
    const float thres = rand_idx[index] * cumsum;
    // Second pass: get value, and set index.
    cumsum = 0;
    for (int h = hstart; h < hend; ++h) {
      for (int w = wstart; w < wend; ++w) {
        cumsum += bottom_slice[h * width + w];
        if (cumsum >= thres) {
          rand_idx[index] = ((n * channels + c) * height + h) * width + w;
          top_data[index] = bottom_slice[h * width + w];
          return;
        }
      }
    }
  }
}
// Stochastic-pooling forward (inference): outputs the probability-weighted
// average sum(x^2)/sum(x) over the window — the expected value of the
// training-time sampling.
template <typename Dtype>
__global__ void StoPoolForwardTest(const int nthreads,
    const Dtype* const bottom_data,
    const int num, const int channels, const int height,
    const int width, const int pooled_height, const int pooled_width,
    const int kernel_h, const int kernel_w, const int stride_h,
    const int stride_w, Dtype* const top_data) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int pw = index % pooled_width;
    const int ph = (index / pooled_width) % pooled_height;
    const int c = (index / pooled_width / pooled_height) % channels;
    const int n = index / pooled_width / pooled_height / channels;
    const int hstart = ph * stride_h;
    const int hend = min(hstart + kernel_h, height);
    const int wstart = pw * stride_w;
    const int wend = min(wstart + kernel_w, width);
    // We set cumsum to be 0 to avoid divide-by-zero problems
    Dtype cumsum = 0.;
    Dtype cumvalues = 0.;
    const Dtype* const bottom_slice =
        bottom_data + (n * channels + c) * height * width;
    // First pass: get sum
    for (int h = hstart; h < hend; ++h) {
      for (int w = wstart; w < wend; ++w) {
        cumsum += bottom_slice[h * width + w];
        cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w];
      }
    }
    top_data[index] = (cumsum > 0.) ? cumvalues / cumsum : 0.;
  }
}
// Dispatches the forward pass to the kernel for the configured pooling
// method. For MAX pooling the argmax goes to top[1] (when present) or to
// the internal max_idx_ buffer; STOCHASTIC pooling samples during TRAIN
// and uses the probability-weighted average otherwise.
template <typename Dtype>
void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  int count = top[0]->count();
  // We'll output the mask to top[1] if it's of size >1.
  const bool use_top_mask = top.size() > 1;
  int* mask = NULL;
  Dtype* top_mask = NULL;
  switch (this->layer_param_.pooling_param().pool()) {
  case PoolingParameter_PoolMethod_MAX:
    if (use_top_mask) {
      top_mask = top[1]->mutable_gpu_data();
    } else {
      mask = max_idx_.mutable_gpu_data();
    }
    // One thread per pooled output element.
    // NOLINT_NEXT_LINE(whitespace/operators)
    MaxPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
        count, bottom_data, bottom[0]->num(), channels_,
        height_, width_, pooled_height_, pooled_width_, kernel_h_,
        kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data,
        mask, top_mask);
    break;
  case PoolingParameter_PoolMethod_AVE:
    // NOLINT_NEXT_LINE(whitespace/operators)
    AvePoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
        count, bottom_data, bottom[0]->num(), channels_,
        height_, width_, pooled_height_, pooled_width_, kernel_h_,
        kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data);
    break;
  case PoolingParameter_PoolMethod_STOCHASTIC:
    if (this->phase_ == TRAIN) {
      // We need to create the random index as well.
      caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1),
                            rand_idx_.mutable_gpu_data());
      // NOLINT_NEXT_LINE(whitespace/operators)
      StoPoolForwardTrain<Dtype><<<CAFFE_GET_BLOCKS(count),
                                   CAFFE_CUDA_NUM_THREADS>>>(
          count, bottom_data, bottom[0]->num(), channels_,
          height_, width_, pooled_height_, pooled_width_, kernel_h_,
          kernel_w_, stride_h_, stride_w_,
          rand_idx_.mutable_gpu_data(), top_data);
    } else {
      // NOLINT_NEXT_LINE(whitespace/operators)
      StoPoolForwardTest<Dtype><<<CAFFE_GET_BLOCKS(count),
                                  CAFFE_CUDA_NUM_THREADS>>>(
          count, bottom_data, bottom[0]->num(), channels_,
          height_, width_, pooled_height_, pooled_width_, kernel_h_,
          kernel_w_, stride_h_, stride_w_, top_data);
    }
    break;
  default:
    LOG(FATAL) << "Unknown pooling method.";
  }
  CUDA_POST_KERNEL_CHECK;
}
// Max-pooling backward: one thread per *bottom* element. Each thread scans
// every pooled window that could contain its element and accumulates
// top_diff where the recorded argmax (mask or top_mask) points at this
// element — gather formulation, no atomics on bottom_diff.
template <typename Dtype>
__global__ void MaxPoolBackward(const int nthreads, const Dtype* const top_diff,
    const int* const mask, const Dtype* const top_mask, const int num,
    const int channels, const int height, const int width,
    const int pooled_height, const int pooled_width, const int kernel_h,
    const int kernel_w, const int stride_h, const int stride_w, const int pad_h,
    const int pad_w, Dtype* const bottom_diff) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // find out the local index
    // find out the local offset
    const int w = index % width;
    const int h = (index / width) % height;
    const int c = (index / width / height) % channels;
    const int n = index / width / height / channels;
    // Range of pooled outputs whose windows overlap element (h, w).
    const int phstart =
        (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;
    const int phend = min((h + pad_h) / stride_h + 1, pooled_height);
    const int pwstart =
        (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;
    const int pwend = min((w + pad_w) / stride_w + 1, pooled_width);
    Dtype gradient = 0;
    const int offset = (n * channels + c) * pooled_height * pooled_width;
    const Dtype* const top_diff_slice = top_diff + offset;
    if (mask) {
      const int* const mask_slice = mask + offset;
      for (int ph = phstart; ph < phend; ++ph) {
        for (int pw = pwstart; pw < pwend; ++pw) {
          if (mask_slice[ph * pooled_width + pw] == h * width + w) {
            gradient += top_diff_slice[ph * pooled_width + pw];
          }
        }
      }
    } else {
      const Dtype* const top_mask_slice = top_mask + offset;
      for (int ph = phstart; ph < phend; ++ph) {
        for (int pw = pwstart; pw < pwend; ++pw) {
          if (top_mask_slice[ph * pooled_width + pw] == h * width + w) {
            gradient += top_diff_slice[ph * pooled_width + pw];
          }
        }
      }
    }
    bottom_diff[index] = gradient;
  }
}
// Average-pooling backward: one thread per bottom element; each overlapping
// top gradient is divided evenly across its (padding-inclusive) window,
// mirroring the forward divisor.
template <typename Dtype>
__global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff,
    const int num, const int channels, const int height,
    const int width, const int pooled_height, const int pooled_width,
    const int kernel_h, const int kernel_w, const int stride_h,
    const int stride_w, const int pad_h, const int pad_w,
    Dtype* const bottom_diff) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // find out the local index
    // find out the local offset (in padded coordinates)
    const int w = index % width + pad_w;
    const int h = (index / width) % height + pad_h;
    const int c = (index / width / height) % channels;
    const int n = index / width / height / channels;
    const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
    const int phend = min(h / stride_h + 1, pooled_height);
    const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
    const int pwend = min(w / stride_w + 1, pooled_width);
    Dtype gradient = 0;
    const Dtype* const top_diff_slice =
        top_diff + (n * channels + c) * pooled_height * pooled_width;
    for (int ph = phstart; ph < phend; ++ph) {
      for (int pw = pwstart; pw < pwend; ++pw) {
        // figure out the pooling size
        int hstart = ph * stride_h - pad_h;
        int wstart = pw * stride_w - pad_w;
        int hend = min(hstart + kernel_h, height + pad_h);
        int wend = min(wstart + kernel_w, width + pad_w);
        int pool_size = (hend - hstart) * (wend - wstart);
        gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;
      }
    }
    bottom_diff[index] = gradient;
  }
}
// Stochastic-pooling backward: each top gradient flows only to the input
// element whose flat index was stored in rand_idx during the forward pass.
template <typename Dtype>
__global__ void StoPoolBackward(const int nthreads,
    const Dtype* const rand_idx, const Dtype* const top_diff,
    const int num, const int channels, const int height,
    const int width, const int pooled_height, const int pooled_width,
    const int kernel_h, const int kernel_w, const int stride_h,
    const int stride_w, Dtype* const bottom_diff) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // find out the local index
    // find out the local offset
    const int w = index % width;
    const int h = (index / width) % height;
    const int c = (index / width / height) % channels;
    const int n = index / width / height / channels;
    const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
    const int phend = min(h / stride_h + 1, pooled_height);
    const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
    const int pwend = min(w / stride_w + 1, pooled_width);
    Dtype gradient = 0;
    const Dtype* const rand_idx_slice =
        rand_idx + (n * channels + c) * pooled_height * pooled_width;
    const Dtype* const top_diff_slice =
        top_diff + (n * channels + c) * pooled_height * pooled_width;
    for (int ph = phstart; ph < phend; ++ph) {
      for (int pw = pwstart; pw < pwend; ++pw) {
        // Boolean factor selects only the window(s) that sampled `index`.
        gradient += top_diff_slice[ph * pooled_width + pw] *
            (index == static_cast<int>(rand_idx_slice[ph * pooled_width + pw]));
      }
    }
    bottom_diff[index] = gradient;
  }
}
// Dispatches the backward pass; bottom_diff is zeroed first and each kernel
// then writes the full gradient, one thread per bottom element.
template <typename Dtype>
void PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (!propagate_down[0]) {
    return;
  }
  const Dtype* top_diff = top[0]->gpu_diff();
  Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
  const int count = bottom[0]->count();
  caffe_gpu_set(count, Dtype(0.), bottom_diff);
  // We'll output the mask to top[1] if it's of size >1.
  const bool use_top_mask = top.size() > 1;
  const int* mask = NULL;
  const Dtype* top_mask = NULL;
  switch (this->layer_param_.pooling_param().pool()) {
  case PoolingParameter_PoolMethod_MAX:
    if (use_top_mask) {
      top_mask = top[1]->gpu_data();
    } else {
      mask = max_idx_.gpu_data();
    }
    // NOLINT_NEXT_LINE(whitespace/operators)
    MaxPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
        count, top_diff, mask, top_mask, top[0]->num(), channels_,
        height_, width_, pooled_height_, pooled_width_,
        kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_,
        bottom_diff);
    break;
  case PoolingParameter_PoolMethod_AVE:
    // NOLINT_NEXT_LINE(whitespace/operators)
    AvePoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
        count, top_diff, top[0]->num(), channels_,
        height_, width_, pooled_height_, pooled_width_, kernel_h_,
        kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff);
    break;
  case PoolingParameter_PoolMethod_STOCHASTIC:
    // rand_idx_ holds the forward pass's sampled indices.
    // NOLINT_NEXT_LINE(whitespace/operators)
    StoPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
        count, rand_idx_.gpu_data(), top_diff,
        top[0]->num(), channels_, height_, width_, pooled_height_,
        pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_,
        bottom_diff);
    break;
  default:
    LOG(FATAL) << "Unknown pooling method.";
  }
  CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(PoolingLayer);
} // namespace caffe
|
fa5c8aee4da1f0384b0acb04ea893878d6f3b5a3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
hipError_t bitonicWithCuda(int *a, unsigned int size);
// One bitonic compare/exchange pass, launched with `core` (= size/2)
// threads in a single block. Each thread orders one (leftIndex, rightIndex)
// pair at compare distance 2^index; `nodes` is the total element count,
// `sets` the number of independent compare groups in this pass, and
// `parentIndex` the current stage (used to alternate sort direction per
// bitonic run). When allDown == 1 every pair sorts ascending.
// NOTE(review): `0 ^ x` is just `x` — the XOR with 0 is a no-op.
__global__ void bitonicKernel(int* a, int nodes, int sets, int core, int index, int allDown, int parentIndex)
{
    int i = threadIdx.x;
    // Map the thread id to the left element of its pair within its group.
    int leftIndex = ((nodes / sets) * (i / (core / sets))) + (i % (core / sets));
    int rightIndex = leftIndex ^ (1 << (index));
    // Direction alternates per bitonic run of the parent stage (0 = ascending).
    int direction = ((i / (core / (nodes / 2 / (0 ^ (1 << (parentIndex))))))) % 2;
    if (allDown == 1) {
        direction = 0;
    }
    //printf("pre %d: %d; %d: %d; alldown: %d; direction: %d \n", leftIndex, a[leftIndex], rightIndex, a[rightIndex], allDown, direction);
    if (a[leftIndex] > a[rightIndex] && (direction == 0)) {
        int temp = a[leftIndex];
        a[leftIndex] = a[rightIndex];
        a[rightIndex] = temp;
    }
    else if (a[leftIndex] < a[rightIndex] && (direction == 1)) {
        int temp = a[leftIndex];
        a[leftIndex] = a[rightIndex];
        a[rightIndex] = temp;
    }
    //printf("post %d: %d; %d: %d; direction: %d \n", leftIndex, a[leftIndex], rightIndex, a[rightIndex], direction);
}
// Debug helper kernel: thread 0 of the block dumps the first eight
// elements of `a`, followed by a newline.
__global__ void print(int* a) {
    if (threadIdx.x != 0) {
        return;
    }
    int k = 0;
    while (k < 8) {
        printf("%d ", a[k]);
        ++k;
    }
    printf("\n");
}
// Sorts a fixed 32-element array with the GPU bitonic sort and prints the
// result. Returns 0 on success, 1 on any CUDA/HIP failure.
int main()
{
    const int arraySize = 32;
    int a[arraySize] = { 1,2,3,4,8,7,6,5,1,2,3,4,8,7,6,5,3,4,2,1,66,77,5,4,3,4,5,6,789,5,4,3 };
    // (removed an unused zero-initialized scratch array `c`)

    // Sort in place on the device.
    hipError_t cudaStatus = bitonicWithCuda(a, arraySize);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "addWithCuda failed!");
        return 1;
    }

    for (int i = 0; i < arraySize; i++) {
        std::cout << a[i] << " ";
    }

    // hipDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = hipDeviceReset();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipDeviceReset failed!");
        return 1;
    }

    return 0;
}
// Helper function: bitonic-sorts `a` (host array of `size` ints) in place
// on the GPU. `size` must be a power of two and at most twice the maximum
// threads per block, since every pass runs as one block of size/2 threads.
hipError_t bitonicWithCuda(int *a, unsigned int size)
{
    int *dev_a = 0;
    // Declared up front so the error `goto`s below never jump over an
    // initialization (ill-formed C++ in the original layout). The unused
    // dev_b/dev_c locals were removed.
    int index = 0;    // pass counter within a stage
    int allDown = 0;  // final-stage "force ascending" flag for the kernel
    hipError_t cudaStatus;

    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = hipSetDevice(0);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error;
    }

    cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMalloc failed!");
        goto Error;
    }

    // Copy the input vector from host memory to the GPU buffer.
    cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
        goto Error;
    }

    // Stage i builds bitonic runs of length 2^(i+1); within a stage the
    // passes run from compare distance 2^index down to 1. (The original's
    // `0 ^ (1 << i)` expressions reduce to `1 << i`.)
    for (int i = 0; (1 << i) < size; i++) {
        index = i;
        // NOTE(review): (1 << i) == size can never hold inside this loop, so
        // allDown stays 0 — same behavior as the original, where the check
        // also never fired.
        if ((1 << i) == size) {
            allDown = 1;
        }
        while (index > -1) {
            bitonicKernel << <1, size / 2 >> > (dev_a, size, size / 2 / (1 << index), size / 2, index, allDown, i);
            index--;

            // Check for any errors launching the kernel.
            cudaStatus = hipGetLastError();
            if (cudaStatus != hipSuccess) {
                fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
                goto Error;
            }

            // Passes within a stage depend on each other, so wait for each
            // one to finish (and surface async execution errors).
            cudaStatus = hipDeviceSynchronize();
            if (cudaStatus != hipSuccess) {
                fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
                goto Error;
            }
        }
    }

    // Copy the sorted vector back once after all passes (the original
    // copied after every single pass — redundant PCIe traffic).
    cudaStatus = hipMemcpy(a, dev_a, size * sizeof(int), hipMemcpyDeviceToHost);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
    }

Error:
    hipFree(dev_a);
    return cudaStatus;
}
| fa5c8aee4da1f0384b0acb04ea893878d6f3b5a3.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
cudaError_t bitonicWithCuda(int *a, unsigned int size);
// One bitonic compare/exchange pass, launched with `core` (= size/2)
// threads in a single block. Each thread orders one (leftIndex, rightIndex)
// pair at compare distance 2^index; `nodes` is the total element count,
// `sets` the number of independent compare groups in this pass, and
// `parentIndex` the current stage (used to alternate sort direction per
// bitonic run). When allDown == 1 every pair sorts ascending.
// NOTE(review): `0 ^ x` is just `x` — the XOR with 0 is a no-op.
__global__ void bitonicKernel(int* a, int nodes, int sets, int core, int index, int allDown, int parentIndex)
{
    int i = threadIdx.x;
    // Map the thread id to the left element of its pair within its group.
    int leftIndex = ((nodes / sets) * (i / (core / sets))) + (i % (core / sets));
    int rightIndex = leftIndex ^ (1 << (index));
    // Direction alternates per bitonic run of the parent stage (0 = ascending).
    int direction = ((i / (core / (nodes / 2 / (0 ^ (1 << (parentIndex))))))) % 2;
    if (allDown == 1) {
        direction = 0;
    }
    //printf("pre %d: %d; %d: %d; alldown: %d; direction: %d \n", leftIndex, a[leftIndex], rightIndex, a[rightIndex], allDown, direction);
    if (a[leftIndex] > a[rightIndex] && (direction == 0)) {
        int temp = a[leftIndex];
        a[leftIndex] = a[rightIndex];
        a[rightIndex] = temp;
    }
    else if (a[leftIndex] < a[rightIndex] && (direction == 1)) {
        int temp = a[leftIndex];
        a[leftIndex] = a[rightIndex];
        a[rightIndex] = temp;
    }
    //printf("post %d: %d; %d: %d; direction: %d \n", leftIndex, a[leftIndex], rightIndex, a[rightIndex], direction);
}
// Debug helper kernel: thread 0 of the block dumps the first eight
// elements of `a`, followed by a newline.
__global__ void print(int* a) {
    if (threadIdx.x != 0) {
        return;
    }
    int k = 0;
    while (k < 8) {
        printf("%d ", a[k]);
        ++k;
    }
    printf("\n");
}
// Sorts a fixed 32-element array with the GPU bitonic sort and prints the
// result. Returns 0 on success, 1 on any CUDA failure.
int main()
{
    const int arraySize = 32;
    int a[arraySize] = { 1,2,3,4,8,7,6,5,1,2,3,4,8,7,6,5,3,4,2,1,66,77,5,4,3,4,5,6,789,5,4,3 };
    // (removed an unused zero-initialized scratch array `c`)

    // Sort in place on the device.
    cudaError_t cudaStatus = bitonicWithCuda(a, arraySize);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addWithCuda failed!");
        return 1;
    }

    for (int i = 0; i < arraySize; i++) {
        std::cout << a[i] << " ";
    }

    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }

    return 0;
}
// Helper function: bitonic-sorts `a` (host array of `size` ints) in place
// on the GPU. `size` must be a power of two and at most twice the maximum
// threads per block, since every pass runs as one block of size/2 threads.
cudaError_t bitonicWithCuda(int *a, unsigned int size)
{
    int *dev_a = 0;
    // Declared up front so the error `goto`s below never jump over an
    // initialization (ill-formed C++ in the original layout). The unused
    // dev_b/dev_c locals were removed.
    int index = 0;    // pass counter within a stage
    int allDown = 0;  // final-stage "force ascending" flag for the kernel
    cudaError_t cudaStatus;

    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?");
        goto Error;
    }

    cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }

    // Copy the input vector from host memory to the GPU buffer.
    cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }

    // Stage i builds bitonic runs of length 2^(i+1); within a stage the
    // passes run from compare distance 2^index down to 1. (The original's
    // `0 ^ (1 << i)` expressions reduce to `1 << i`.)
    for (int i = 0; (1 << i) < size; i++) {
        index = i;
        // NOTE(review): (1 << i) == size can never hold inside this loop, so
        // allDown stays 0 — same behavior as the original, where the check
        // also never fired.
        if ((1 << i) == size) {
            allDown = 1;
        }
        while (index > -1) {
            bitonicKernel << <1, size / 2 >> > (dev_a, size, size / 2 / (1 << index), size / 2, index, allDown, i);
            index--;

            // Check for any errors launching the kernel.
            cudaStatus = cudaGetLastError();
            if (cudaStatus != cudaSuccess) {
                fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
                goto Error;
            }

            // Passes within a stage depend on each other, so wait for each
            // one to finish (and surface async execution errors).
            cudaStatus = cudaDeviceSynchronize();
            if (cudaStatus != cudaSuccess) {
                fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
                goto Error;
            }
        }
    }

    // Copy the sorted vector back once after all passes (the original
    // copied after every single pass — redundant PCIe traffic).
    cudaStatus = cudaMemcpy(a, dev_a, size * sizeof(int), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
    }

Error:
    cudaFree(dev_a);
    return cudaStatus;
}
|
2299a2aed61568ba7b04f69e8fd1366c8d51cd98.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <raft/random/rng.cuh>
#include <raft/stats/mean.cuh>
#include <stats/cov.cuh>
#include "test_utils.h"
namespace MLCommon {
namespace Stats {
// Parameters describing one covariance test case.
template <typename T>
struct CovInputs {
  T tolerance, mean, var;         // comparison tolerance and RNG moments
  int rows, cols;                 // data matrix shape
  bool sample, rowMajor, stable;  // flags forwarded to cov()
  unsigned long long int seed;    // RNG seed
};
// Pretty-prints a test case so gtest failure messages identify the failing
// parameters (the original discarded `dims` and printed nothing).
template <typename T>
::std::ostream &operator<<(::std::ostream &os, const CovInputs<T> &dims) {
  os << "{tol=" << dims.tolerance << " mean=" << dims.mean << " var="
     << dims.var << " rows=" << dims.rows << " cols=" << dims.cols
     << " sample=" << dims.sample << " rowMajor=" << dims.rowMajor
     << " stable=" << dims.stable << " seed=" << dims.seed << "}";
  return os;
}
// Typed gtest fixture: generates a normally distributed random matrix,
// computes its mean and covariance on the device, and also prepares a small
// 3x2 column-major case with a precomputed reference covariance.
template <typename T>
class CovTest : public ::testing::TestWithParam<CovInputs<T>> {
 protected:
  void SetUp() override {
    // NOTE(review): this local handle/stream pair shadows the uninitialized
    // `handle`/`stream` members declared below — confirm the members are
    // intentionally unused.
    raft::handle_t handle;
    hipStream_t stream = handle.get_stream();
    params = ::testing::TestWithParam<CovInputs<T>>::GetParam();
    params.tolerance *= 2;
    raft::random::Rng r(params.seed);
    int rows = params.rows, cols = params.cols;
    int len = rows * cols;
    T var = params.var;
    raft::allocate(data, len);
    raft::allocate(mean_act, cols);
    raft::allocate(cov_act, cols * cols);
    // Random data, then device-side mean and covariance under test.
    r.normal(data, len, params.mean, var, stream);
    raft::stats::mean(mean_act, data, cols, rows, params.sample,
                      params.rowMajor, stream);
    cov(handle, cov_act, data, mean_act, cols, rows, params.sample,
        params.rowMajor, params.stable, stream);
    // Small hand-checked column-major case: 3 rows x 2 cols.
    T data_h[6] = {1.0, 2.0, 5.0, 4.0, 2.0, 1.0};
    T cov_cm_ref_h[4] = {4.3333, -2.8333, -2.8333, 2.333};
    raft::allocate(data_cm, 6);
    raft::allocate(cov_cm, 4);
    raft::allocate(cov_cm_ref, 4);
    raft::allocate(mean_cm, 2);
    raft::update_device(data_cm, data_h, 6, stream);
    raft::update_device(cov_cm_ref, cov_cm_ref_h, 4, stream);
    raft::stats::mean(mean_cm, data_cm, 2, 3, true, false, stream);
    cov(handle, cov_cm, data_cm, mean_cm, 2, 3, true, false, true, stream);
  }

  void TearDown() override {
    CUDA_CHECK(hipFree(data));
    CUDA_CHECK(hipFree(mean_act));
    CUDA_CHECK(hipFree(cov_act));
    CUDA_CHECK(hipFree(data_cm));
    CUDA_CHECK(hipFree(cov_cm));
    CUDA_CHECK(hipFree(cov_cm_ref));
    CUDA_CHECK(hipFree(mean_cm));
  }

 protected:
  CovInputs<T> params;           // current test parameters
  T *data, *mean_act, *cov_act;  // random matrix, its mean, its covariance
  hipblasHandle_t handle;        // NOTE(review): never initialized or used
  hipStream_t stream;            // NOTE(review): never initialized or used
  T *data_cm, *cov_cm, *cov_cm_ref, *mean_cm;  // small column-major case
};
///@todo: add stable=false after it has been implemented
// Cases: {tolerance, mean, var, rows, cols, sample, rowMajor, stable, seed}.
const std::vector<CovInputs<float>> inputsf = {
  {0.03f, 1.f, 2.f, 32 * 1024, 32, true, false, true, 1234ULL},
  {0.03f, 1.f, 2.f, 32 * 1024, 64, true, false, true, 1234ULL},
  {0.03f, 1.f, 2.f, 32 * 1024, 128, true, false, true, 1234ULL},
  {0.03f, 1.f, 2.f, 32 * 1024, 256, true, false, true, 1234ULL},
  {0.03f, -1.f, 2.f, 32 * 1024, 32, false, false, true, 1234ULL},
  {0.03f, -1.f, 2.f, 32 * 1024, 64, false, false, true, 1234ULL},
  {0.03f, -1.f, 2.f, 32 * 1024, 128, false, false, true, 1234ULL},
  {0.03f, -1.f, 2.f, 32 * 1024, 256, false, false, true, 1234ULL},
  {0.03f, 1.f, 2.f, 32 * 1024, 32, true, true, true, 1234ULL},
  {0.03f, 1.f, 2.f, 32 * 1024, 64, true, true, true, 1234ULL},
  {0.03f, 1.f, 2.f, 32 * 1024, 128, true, true, true, 1234ULL},
  {0.03f, 1.f, 2.f, 32 * 1024, 256, true, true, true, 1234ULL},
  {0.03f, -1.f, 2.f, 32 * 1024, 32, false, true, true, 1234ULL},
  {0.03f, -1.f, 2.f, 32 * 1024, 64, false, true, true, 1234ULL},
  {0.03f, -1.f, 2.f, 32 * 1024, 128, false, true, true, 1234ULL},
  {0.03f, -1.f, 2.f, 32 * 1024, 256, false, true, true, 1234ULL}};

const std::vector<CovInputs<double>> inputsd = {
  {0.03, 1.0, 2.0, 32 * 1024, 32, true, false, true, 1234ULL},
  {0.03, 1.0, 2.0, 32 * 1024, 64, true, false, true, 1234ULL},
  {0.03, 1.0, 2.0, 32 * 1024, 128, true, false, true, 1234ULL},
  {0.03, 1.0, 2.0, 32 * 1024, 256, true, false, true, 1234ULL},
  {0.03, -1.0, 2.0, 32 * 1024, 32, false, false, true, 1234ULL},
  {0.03, -1.0, 2.0, 32 * 1024, 64, false, false, true, 1234ULL},
  {0.03, -1.0, 2.0, 32 * 1024, 128, false, false, true, 1234ULL},
  {0.03, -1.0, 2.0, 32 * 1024, 256, false, false, true, 1234ULL},
  {0.03, 1.0, 2.0, 32 * 1024, 32, true, true, true, 1234ULL},
  {0.03, 1.0, 2.0, 32 * 1024, 64, true, true, true, 1234ULL},
  {0.03, 1.0, 2.0, 32 * 1024, 128, true, true, true, 1234ULL},
  {0.03, 1.0, 2.0, 32 * 1024, 256, true, true, true, 1234ULL},
  {0.03, -1.0, 2.0, 32 * 1024, 32, false, true, true, 1234ULL},
  {0.03, -1.0, 2.0, 32 * 1024, 64, false, true, true, 1234ULL},
  {0.03, -1.0, 2.0, 32 * 1024, 128, false, true, true, 1234ULL},
  {0.03, -1.0, 2.0, 32 * 1024, 256, false, true, true, 1234ULL}};

typedef CovTest<float> CovTestF;
// Checks the covariance diagonal against var * var (var is the spread
// passed to the RNG — presumably the standard deviation; see raft docs).
TEST_P(CovTestF, Result) {
  ASSERT_TRUE(raft::diagonalMatch(
    params.var * params.var, cov_act, params.cols, params.cols,
    raft::CompareApprox<float>(params.tolerance)));
}

typedef CovTest<double> CovTestD;
TEST_P(CovTestD, Result) {
  ASSERT_TRUE(raft::diagonalMatch(
    params.var * params.var, cov_act, params.cols, params.cols,
    raft::CompareApprox<double>(params.tolerance)));
}

// Small 3x2 column-major case against the hand-computed reference.
typedef CovTest<float> CovTestSmallF;
TEST_P(CovTestSmallF, Result) {
  ASSERT_TRUE(raft::devArrMatch(cov_cm_ref, cov_cm, 2, 2,
                                raft::CompareApprox<float>(params.tolerance)));
}

typedef CovTest<double> CovTestSmallD;
TEST_P(CovTestSmallD, Result) {
  ASSERT_TRUE(raft::devArrMatch(cov_cm_ref, cov_cm, 2, 2,
                                raft::CompareApprox<double>(params.tolerance)));
}

INSTANTIATE_TEST_CASE_P(CovTests, CovTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(CovTests, CovTestD, ::testing::ValuesIn(inputsd));
INSTANTIATE_TEST_CASE_P(CovTests, CovTestSmallF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(CovTests, CovTestSmallD, ::testing::ValuesIn(inputsd));
} // end namespace Stats
} // end namespace MLCommon
| 2299a2aed61568ba7b04f69e8fd1366c8d51cd98.cu | /*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <raft/random/rng.cuh>
#include <raft/stats/mean.cuh>
#include <stats/cov.cuh>
#include "test_utils.h"
namespace MLCommon {
namespace Stats {
// Parameters describing one covariance test case.
template <typename T>
struct CovInputs {
  T tolerance, mean, var;         // comparison tolerance and RNG moments
  int rows, cols;                 // data matrix shape
  bool sample, rowMajor, stable;  // flags forwarded to cov()
  unsigned long long int seed;    // RNG seed
};
// Pretty-prints a test case so gtest failure messages identify the failing
// parameters (the original discarded `dims` and printed nothing).
template <typename T>
::std::ostream &operator<<(::std::ostream &os, const CovInputs<T> &dims) {
  os << "{tol=" << dims.tolerance << " mean=" << dims.mean << " var="
     << dims.var << " rows=" << dims.rows << " cols=" << dims.cols
     << " sample=" << dims.sample << " rowMajor=" << dims.rowMajor
     << " stable=" << dims.stable << " seed=" << dims.seed << "}";
  return os;
}
// Typed gtest fixture: generates a normally distributed random matrix,
// computes its mean and covariance on the device, and also prepares a small
// 3x2 column-major case with a precomputed reference covariance.
template <typename T>
class CovTest : public ::testing::TestWithParam<CovInputs<T>> {
 protected:
  void SetUp() override {
    // NOTE(review): this local handle/stream pair shadows the uninitialized
    // `handle`/`stream` members declared below — confirm the members are
    // intentionally unused.
    raft::handle_t handle;
    cudaStream_t stream = handle.get_stream();
    params = ::testing::TestWithParam<CovInputs<T>>::GetParam();
    params.tolerance *= 2;
    raft::random::Rng r(params.seed);
    int rows = params.rows, cols = params.cols;
    int len = rows * cols;
    T var = params.var;
    raft::allocate(data, len);
    raft::allocate(mean_act, cols);
    raft::allocate(cov_act, cols * cols);
    // Random data, then device-side mean and covariance under test.
    r.normal(data, len, params.mean, var, stream);
    raft::stats::mean(mean_act, data, cols, rows, params.sample,
                      params.rowMajor, stream);
    cov(handle, cov_act, data, mean_act, cols, rows, params.sample,
        params.rowMajor, params.stable, stream);
    // Small hand-checked column-major case: 3 rows x 2 cols.
    T data_h[6] = {1.0, 2.0, 5.0, 4.0, 2.0, 1.0};
    T cov_cm_ref_h[4] = {4.3333, -2.8333, -2.8333, 2.333};
    raft::allocate(data_cm, 6);
    raft::allocate(cov_cm, 4);
    raft::allocate(cov_cm_ref, 4);
    raft::allocate(mean_cm, 2);
    raft::update_device(data_cm, data_h, 6, stream);
    raft::update_device(cov_cm_ref, cov_cm_ref_h, 4, stream);
    raft::stats::mean(mean_cm, data_cm, 2, 3, true, false, stream);
    cov(handle, cov_cm, data_cm, mean_cm, 2, 3, true, false, true, stream);
  }

  void TearDown() override {
    CUDA_CHECK(cudaFree(data));
    CUDA_CHECK(cudaFree(mean_act));
    CUDA_CHECK(cudaFree(cov_act));
    CUDA_CHECK(cudaFree(data_cm));
    CUDA_CHECK(cudaFree(cov_cm));
    CUDA_CHECK(cudaFree(cov_cm_ref));
    CUDA_CHECK(cudaFree(mean_cm));
  }

 protected:
  CovInputs<T> params;           // current test parameters
  T *data, *mean_act, *cov_act;  // random matrix, its mean, its covariance
  cublasHandle_t handle;         // NOTE(review): never initialized or used
  cudaStream_t stream;           // NOTE(review): never initialized or used
  T *data_cm, *cov_cm, *cov_cm_ref, *mean_cm;  // small column-major case
};
///@todo: add stable=false after it has been implemented
// Cases: {tolerance, mean, var, rows, cols, sample, rowMajor, stable, seed}.
const std::vector<CovInputs<float>> inputsf = {
  {0.03f, 1.f, 2.f, 32 * 1024, 32, true, false, true, 1234ULL},
  {0.03f, 1.f, 2.f, 32 * 1024, 64, true, false, true, 1234ULL},
  {0.03f, 1.f, 2.f, 32 * 1024, 128, true, false, true, 1234ULL},
  {0.03f, 1.f, 2.f, 32 * 1024, 256, true, false, true, 1234ULL},
  {0.03f, -1.f, 2.f, 32 * 1024, 32, false, false, true, 1234ULL},
  {0.03f, -1.f, 2.f, 32 * 1024, 64, false, false, true, 1234ULL},
  {0.03f, -1.f, 2.f, 32 * 1024, 128, false, false, true, 1234ULL},
  {0.03f, -1.f, 2.f, 32 * 1024, 256, false, false, true, 1234ULL},
  {0.03f, 1.f, 2.f, 32 * 1024, 32, true, true, true, 1234ULL},
  {0.03f, 1.f, 2.f, 32 * 1024, 64, true, true, true, 1234ULL},
  {0.03f, 1.f, 2.f, 32 * 1024, 128, true, true, true, 1234ULL},
  {0.03f, 1.f, 2.f, 32 * 1024, 256, true, true, true, 1234ULL},
  {0.03f, -1.f, 2.f, 32 * 1024, 32, false, true, true, 1234ULL},
  {0.03f, -1.f, 2.f, 32 * 1024, 64, false, true, true, 1234ULL},
  {0.03f, -1.f, 2.f, 32 * 1024, 128, false, true, true, 1234ULL},
  {0.03f, -1.f, 2.f, 32 * 1024, 256, false, true, true, 1234ULL}};

const std::vector<CovInputs<double>> inputsd = {
  {0.03, 1.0, 2.0, 32 * 1024, 32, true, false, true, 1234ULL},
  {0.03, 1.0, 2.0, 32 * 1024, 64, true, false, true, 1234ULL},
  {0.03, 1.0, 2.0, 32 * 1024, 128, true, false, true, 1234ULL},
  {0.03, 1.0, 2.0, 32 * 1024, 256, true, false, true, 1234ULL},
  {0.03, -1.0, 2.0, 32 * 1024, 32, false, false, true, 1234ULL},
  {0.03, -1.0, 2.0, 32 * 1024, 64, false, false, true, 1234ULL},
  {0.03, -1.0, 2.0, 32 * 1024, 128, false, false, true, 1234ULL},
  {0.03, -1.0, 2.0, 32 * 1024, 256, false, false, true, 1234ULL},
  {0.03, 1.0, 2.0, 32 * 1024, 32, true, true, true, 1234ULL},
  {0.03, 1.0, 2.0, 32 * 1024, 64, true, true, true, 1234ULL},
  {0.03, 1.0, 2.0, 32 * 1024, 128, true, true, true, 1234ULL},
  {0.03, 1.0, 2.0, 32 * 1024, 256, true, true, true, 1234ULL},
  {0.03, -1.0, 2.0, 32 * 1024, 32, false, true, true, 1234ULL},
  {0.03, -1.0, 2.0, 32 * 1024, 64, false, true, true, 1234ULL},
  {0.03, -1.0, 2.0, 32 * 1024, 128, false, true, true, 1234ULL},
  {0.03, -1.0, 2.0, 32 * 1024, 256, false, true, true, 1234ULL}};

typedef CovTest<float> CovTestF;
// Checks the covariance diagonal against var * var (var is the spread
// passed to the RNG — presumably the standard deviation; see raft docs).
TEST_P(CovTestF, Result) {
  ASSERT_TRUE(raft::diagonalMatch(
    params.var * params.var, cov_act, params.cols, params.cols,
    raft::CompareApprox<float>(params.tolerance)));
}

typedef CovTest<double> CovTestD;
TEST_P(CovTestD, Result) {
  ASSERT_TRUE(raft::diagonalMatch(
    params.var * params.var, cov_act, params.cols, params.cols,
    raft::CompareApprox<double>(params.tolerance)));
}

// Small 3x2 column-major case against the hand-computed reference.
typedef CovTest<float> CovTestSmallF;
TEST_P(CovTestSmallF, Result) {
  ASSERT_TRUE(raft::devArrMatch(cov_cm_ref, cov_cm, 2, 2,
                                raft::CompareApprox<float>(params.tolerance)));
}

typedef CovTest<double> CovTestSmallD;
TEST_P(CovTestSmallD, Result) {
  ASSERT_TRUE(raft::devArrMatch(cov_cm_ref, cov_cm, 2, 2,
                                raft::CompareApprox<double>(params.tolerance)));
}

INSTANTIATE_TEST_CASE_P(CovTests, CovTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(CovTests, CovTestD, ::testing::ValuesIn(inputsd));
INSTANTIATE_TEST_CASE_P(CovTests, CovTestSmallF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(CovTests, CovTestSmallD, ::testing::ValuesIn(inputsd));
} // end namespace Stats
} // end namespace MLCommon
|
a24d71788dde7ffc739c7575ff3f0a4ea53c6460.hip | // !!! This is a file automatically generated by hipify!!!
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author raver119@gmail.com
// @author Yurii Shyrma (iuriish@yahoo.com)
//
#include <array/DataTypeUtils.h>
#include <exceptions/allocation_exception.h>
#include <exceptions/cuda_exception.h>
#include <execution/AffinityManager.h>
#include <memory/MemoryCounter.h>
#include <system/op_boilerplate.h>
#include "../DataBuffer.h"
namespace sd {
void DataBuffer::expand(const uint64_t size) {
if (size > _lenInBytes) {
// allocate new buffer
int8_t* newBuffer = nullptr;
int8_t* newSpecialBuffer = nullptr;
ALLOCATE_SPECIAL(newSpecialBuffer, _workspace, size, int8_t);
// copy data from existing buffer
if (_primaryBuffer != nullptr) {
// there's non-zero chance that primary buffer doesn't exist yet
ALLOCATE(newBuffer, _workspace, size, int8_t);
std::memcpy(newBuffer, _primaryBuffer, _lenInBytes);
if (_isOwnerPrimary) {
auto ipb = reinterpret_cast<int8_t*>(_primaryBuffer);
RELEASE(ipb, _workspace);
}
_primaryBuffer = newBuffer;
_isOwnerPrimary = true;
}
hipMemcpy(newSpecialBuffer, _specialBuffer, _lenInBytes, hipMemcpyDeviceToDevice);
if (_isOwnerSpecial) {
auto isb = reinterpret_cast<int8_t*>(_specialBuffer);
RELEASE_SPECIAL(isb, _workspace);
}
_specialBuffer = newSpecialBuffer;
_lenInBytes = size;
_isOwnerSpecial = true;
}
}
void DataBuffer::showBufferLimited() {
#if defined(DEBUG_VEDA_LOGS)
float* x = (float*)_primaryBuffer;
size_t size = getLenInBytes();
size = size > 80 ? 80 : 0;
sd_debug("cpu: %p\n", (void*)x);
for (int i = 0; i < size / sizeof(float); i++) sd_debug("%f, ", x[i]);
sd_debug("%s", "\n");
#endif
}
void DataBuffer::showCounters(const char* msg1, const char* msg2) {
#if defined(HAVE_VEDA) && defined(DEBUG_VEDA_LOGS)
sd_debug("%s %s || primary %p special %p :: wP: %d wS: %d rP: %d rS: %d\n", msg1, msg2, _primaryBuffer,
_specialBuffer, (int)_writePrimary.load(), (int)_writeSpecial.load(), (int)_readPrimary.load(),
(int)_readSpecial.load());
#endif
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::allocateSpecial() {
if (_specialBuffer == nullptr && getLenInBytes() > 0) {
auto deviceId = sd::AffinityManager::currentDeviceId();
if (_workspace == nullptr)
if (!sd::memory::MemoryCounter::getInstance().validate(getLenInBytes()))
throw sd::allocation_exception::build("Requested amount exceeds device limits",
sd::memory::MemoryCounter::getInstance().deviceLimit(deviceId),
getLenInBytes());
ALLOCATE_SPECIAL(_specialBuffer, _workspace, getLenInBytes(), int8_t);
_isOwnerSpecial = true;
if (_workspace == nullptr) {
sd::memory::MemoryCounter::getInstance().countIn(deviceId, getLenInBytes());
sd::memory::MemoryCounter::getInstance().countIn(sd::memory::MemoryType::DEVICE, getLenInBytes());
}
}
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::syncToPrimary(const LaunchContext* context, const bool forceSync) {
if (isPrimaryActual() && !forceSync) {
return;
}
allocatePrimary();
auto res = hipStreamSynchronize(*context->getCudaStream());
if (res != 0) throw cuda_exception::build("DataBuffer::syncToPrimary failed to to some previous kernel failre", res);
res = hipMemcpy(_primaryBuffer, _specialBuffer, getLenInBytes(), hipMemcpyDeviceToHost);
if (res != 0) throw cuda_exception::build("DataBuffer::syncToPrimary hipMemcpy failed", res);
readPrimary();
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::syncToSpecial(const bool forceSync) {
// in this case there's nothing to do here
if (_primaryBuffer == nullptr) return;
if (isSpecialActual() && !forceSync) {
return;
}
allocateSpecial();
auto res = hipMemcpy(_specialBuffer, _primaryBuffer, getLenInBytes(), hipMemcpyHostToDevice);
if (res != 0) throw cuda_exception::build("DataBuffer::syncToSpecial hipMemcpy failed", res);
readSpecial();
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::deleteSpecial() {
if (_isOwnerSpecial && _specialBuffer != nullptr && getLenInBytes() != 0) {
auto p = reinterpret_cast<int8_t*>(_specialBuffer);
RELEASE_SPECIAL(p, _workspace);
_specialBuffer = nullptr;
_isOwnerSpecial = false;
// count out towards DataBuffer device, only if we're not in workspace
if (_workspace == nullptr) {
sd::memory::MemoryCounter::getInstance().countOut(_deviceId, getLenInBytes());
sd::memory::MemoryCounter::getInstance().countOut(sd::memory::MemoryType::DEVICE, getLenInBytes());
}
}
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::setCountersToZero() {
_counter.store(0L);
_writePrimary.store(0L);
_writeSpecial.store(0L);
_readPrimary.store(0L);
_readSpecial.store(0L);
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::copyCounters(const DataBuffer& other) {
_counter.store(other._counter);
_writePrimary.store(other._readSpecial);
_writeSpecial.store(other._readPrimary);
_readPrimary.store(other._writeSpecial);
_readSpecial.store(other._writePrimary);
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::copyBufferFrom(const DataBuffer& other, size_t sizeToCopyinBytes, const sd::LongType offsetThis,
const sd::LongType offsetOther) { // copies only to special buffer
if (other._primaryBuffer == nullptr && other._specialBuffer == nullptr) return;
if (sizeToCopyinBytes == 0) sizeToCopyinBytes = other.getLenInBytes();
if (sizeToCopyinBytes == 0) return;
if (other.isPrimaryActual()) {
auto res = hipMemcpy(
static_cast<int8_t*>(_specialBuffer) + offsetThis * DataTypeUtils::sizeOfElement(_dataType),
static_cast<const int8_t*>(other._primaryBuffer) + offsetOther * DataTypeUtils::sizeOfElement(other._dataType),
sizeToCopyinBytes, hipMemcpyHostToDevice);
if (res != 0)
throw cuda_exception::build("DataBuffer::copyBufferFrom: cudaMemcpy_cudaMemcpyHostToDevice failed!", res);
other.readPrimary();
} else {
auto res = hipMemcpy(
static_cast<int8_t*>(_specialBuffer) + offsetThis * DataTypeUtils::sizeOfElement(_dataType),
static_cast<const int8_t*>(other._specialBuffer) + offsetOther * DataTypeUtils::sizeOfElement(other._dataType),
sizeToCopyinBytes, hipMemcpyDeviceToDevice);
if (res != 0)
throw cuda_exception::build("DataBuffer::copyBufferFrom: cudaMemcpy_cudaMemcpyDeviceToDevice failed!", res);
other.readSpecial();
}
writeSpecial();
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::copyBufferFromHost(const void* hostBuffer, size_t sizeToCopyinBytes, const sd::LongType offsetThis,
const sd::LongType offsetHostBuffer) { // copies only to special buffer
if (hostBuffer == nullptr) return;
if (sizeToCopyinBytes == 0) sizeToCopyinBytes = getLenInBytes();
if (sizeToCopyinBytes == 0) return;
auto res =
hipMemcpy(static_cast<int8_t*>(_specialBuffer) + offsetThis * DataTypeUtils::sizeOfElement(_dataType),
static_cast<const int8_t*>(hostBuffer) + offsetHostBuffer * DataTypeUtils::sizeOfElement(_dataType),
sizeToCopyinBytes, hipMemcpyHostToDevice);
if (res != 0)
throw cuda_exception::build("DataBuffer::copyBufferFromHost: cudaMemcpy_cudaMemcpyHostToDevice failed!", res);
writeSpecial();
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::setSpecial(void* special, const bool isOwnerSpecial) {
deleteSpecial();
_specialBuffer = special;
_isOwnerSpecial = isOwnerSpecial;
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::allocateBuffers(const bool allocBoth) { // always allocate special buffer only (cuda case)
allocateSpecial();
if (allocBoth) allocatePrimary();
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::setToZeroBuffers(const bool both) {
hipMemsetAsync(special(), 0, getLenInBytes(), *LaunchContext::defaultContext()->getCudaStream());
auto res = hipStreamSynchronize(*LaunchContext::defaultContext()->getCudaStream());
if (res != 0) throw cuda_exception::build("DataBuffer::setToZeroBuffers: streamSync failed!", res);
writeSpecial();
if (both) {
memset(primary(), 0, getLenInBytes());
readPrimary();
}
}
/////////////////////////
void DataBuffer::memcpy(const DataBuffer& dst, const DataBuffer& src) {
if (src._lenInBytes > dst._lenInBytes)
throw std::runtime_error("DataBuffer::memcpy: Source data buffer is larger than destination");
int res = 0;
if (src.isSpecialActual()) {
res = hipMemcpyAsync(dst._specialBuffer, src._specialBuffer, src.getLenInBytes(), hipMemcpyDeviceToDevice,
*LaunchContext::defaultContext()->getCudaStream());
} else if (src.isPrimaryActual()) {
res = hipMemcpyAsync(dst._specialBuffer, src._primaryBuffer, src.getLenInBytes(), hipMemcpyHostToDevice,
*LaunchContext::defaultContext()->getCudaStream());
}
if (res != 0) throw cuda_exception::build("DataBuffer::memcpy: hipMemcpyAsync failed!", res);
res = hipStreamSynchronize(*LaunchContext::defaultContext()->getCudaStream());
if (res != 0) throw cuda_exception::build("DataBuffer::memcpy: streamSync failed!", res);
dst.writeSpecial();
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::migrate() {
memory::Workspace* newWorkspace = nullptr;
void* newBuffer;
ALLOCATE_SPECIAL(newBuffer, newWorkspace, getLenInBytes(), int8_t);
auto res = hipMemcpy(newBuffer, _specialBuffer, getLenInBytes(), hipMemcpyDeviceToDevice);
if (res != 0) throw cuda_exception::build("DataBuffer::migrate: hipMemcpyAsync failed!", res);
if (_isOwnerSpecial) {
// now we're releasing original buffer
RELEASE_SPECIAL(_specialBuffer, _workspace);
}
_isOwnerSpecial = true;
_specialBuffer = newBuffer;
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::writePrimary() const { _writePrimary = ++_counter; }
void DataBuffer::writeSpecial() const { _writeSpecial = ++_counter; }
void DataBuffer::readPrimary() const { _readPrimary = ++_counter; }
void DataBuffer::readSpecial() const { _readSpecial = ++_counter; }
bool DataBuffer::isPrimaryActual() const {
return (_writePrimary.load() > _writeSpecial.load() || _readPrimary.load() > _writeSpecial.load());
}
bool DataBuffer::isSpecialActual() const {
return (_writeSpecial.load() > _writePrimary.load() || _readSpecial.load() > _writePrimary.load());
}
} // namespace sd
| a24d71788dde7ffc739c7575ff3f0a4ea53c6460.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author raver119@gmail.com
// @author Yurii Shyrma (iuriish@yahoo.com)
//
#include <array/DataTypeUtils.h>
#include <exceptions/allocation_exception.h>
#include <exceptions/cuda_exception.h>
#include <execution/AffinityManager.h>
#include <memory/MemoryCounter.h>
#include <system/op_boilerplate.h>
#include "../DataBuffer.h"
namespace sd {
void DataBuffer::expand(const uint64_t size) {
if (size > _lenInBytes) {
// allocate new buffer
int8_t* newBuffer = nullptr;
int8_t* newSpecialBuffer = nullptr;
ALLOCATE_SPECIAL(newSpecialBuffer, _workspace, size, int8_t);
// copy data from existing buffer
if (_primaryBuffer != nullptr) {
// there's non-zero chance that primary buffer doesn't exist yet
ALLOCATE(newBuffer, _workspace, size, int8_t);
std::memcpy(newBuffer, _primaryBuffer, _lenInBytes);
if (_isOwnerPrimary) {
auto ipb = reinterpret_cast<int8_t*>(_primaryBuffer);
RELEASE(ipb, _workspace);
}
_primaryBuffer = newBuffer;
_isOwnerPrimary = true;
}
cudaMemcpy(newSpecialBuffer, _specialBuffer, _lenInBytes, cudaMemcpyDeviceToDevice);
if (_isOwnerSpecial) {
auto isb = reinterpret_cast<int8_t*>(_specialBuffer);
RELEASE_SPECIAL(isb, _workspace);
}
_specialBuffer = newSpecialBuffer;
_lenInBytes = size;
_isOwnerSpecial = true;
}
}
void DataBuffer::showBufferLimited() {
#if defined(DEBUG_VEDA_LOGS)
float* x = (float*)_primaryBuffer;
size_t size = getLenInBytes();
size = size > 80 ? 80 : 0;
sd_debug("cpu: %p\n", (void*)x);
for (int i = 0; i < size / sizeof(float); i++) sd_debug("%f, ", x[i]);
sd_debug("%s", "\n");
#endif
}
void DataBuffer::showCounters(const char* msg1, const char* msg2) {
#if defined(HAVE_VEDA) && defined(DEBUG_VEDA_LOGS)
sd_debug("%s %s || primary %p special %p :: wP: %d wS: %d rP: %d rS: %d\n", msg1, msg2, _primaryBuffer,
_specialBuffer, (int)_writePrimary.load(), (int)_writeSpecial.load(), (int)_readPrimary.load(),
(int)_readSpecial.load());
#endif
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::allocateSpecial() {
if (_specialBuffer == nullptr && getLenInBytes() > 0) {
auto deviceId = sd::AffinityManager::currentDeviceId();
if (_workspace == nullptr)
if (!sd::memory::MemoryCounter::getInstance().validate(getLenInBytes()))
throw sd::allocation_exception::build("Requested amount exceeds device limits",
sd::memory::MemoryCounter::getInstance().deviceLimit(deviceId),
getLenInBytes());
ALLOCATE_SPECIAL(_specialBuffer, _workspace, getLenInBytes(), int8_t);
_isOwnerSpecial = true;
if (_workspace == nullptr) {
sd::memory::MemoryCounter::getInstance().countIn(deviceId, getLenInBytes());
sd::memory::MemoryCounter::getInstance().countIn(sd::memory::MemoryType::DEVICE, getLenInBytes());
}
}
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::syncToPrimary(const LaunchContext* context, const bool forceSync) {
if (isPrimaryActual() && !forceSync) {
return;
}
allocatePrimary();
auto res = cudaStreamSynchronize(*context->getCudaStream());
if (res != 0) throw cuda_exception::build("DataBuffer::syncToPrimary failed to to some previous kernel failre", res);
res = cudaMemcpy(_primaryBuffer, _specialBuffer, getLenInBytes(), cudaMemcpyDeviceToHost);
if (res != 0) throw cuda_exception::build("DataBuffer::syncToPrimary cudaMemcpy failed", res);
readPrimary();
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::syncToSpecial(const bool forceSync) {
// in this case there's nothing to do here
if (_primaryBuffer == nullptr) return;
if (isSpecialActual() && !forceSync) {
return;
}
allocateSpecial();
auto res = cudaMemcpy(_specialBuffer, _primaryBuffer, getLenInBytes(), cudaMemcpyHostToDevice);
if (res != 0) throw cuda_exception::build("DataBuffer::syncToSpecial cudaMemcpy failed", res);
readSpecial();
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::deleteSpecial() {
if (_isOwnerSpecial && _specialBuffer != nullptr && getLenInBytes() != 0) {
auto p = reinterpret_cast<int8_t*>(_specialBuffer);
RELEASE_SPECIAL(p, _workspace);
_specialBuffer = nullptr;
_isOwnerSpecial = false;
// count out towards DataBuffer device, only if we're not in workspace
if (_workspace == nullptr) {
sd::memory::MemoryCounter::getInstance().countOut(_deviceId, getLenInBytes());
sd::memory::MemoryCounter::getInstance().countOut(sd::memory::MemoryType::DEVICE, getLenInBytes());
}
}
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::setCountersToZero() {
_counter.store(0L);
_writePrimary.store(0L);
_writeSpecial.store(0L);
_readPrimary.store(0L);
_readSpecial.store(0L);
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::copyCounters(const DataBuffer& other) {
_counter.store(other._counter);
_writePrimary.store(other._readSpecial);
_writeSpecial.store(other._readPrimary);
_readPrimary.store(other._writeSpecial);
_readSpecial.store(other._writePrimary);
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::copyBufferFrom(const DataBuffer& other, size_t sizeToCopyinBytes, const sd::LongType offsetThis,
const sd::LongType offsetOther) { // copies only to special buffer
if (other._primaryBuffer == nullptr && other._specialBuffer == nullptr) return;
if (sizeToCopyinBytes == 0) sizeToCopyinBytes = other.getLenInBytes();
if (sizeToCopyinBytes == 0) return;
if (other.isPrimaryActual()) {
auto res = cudaMemcpy(
static_cast<int8_t*>(_specialBuffer) + offsetThis * DataTypeUtils::sizeOfElement(_dataType),
static_cast<const int8_t*>(other._primaryBuffer) + offsetOther * DataTypeUtils::sizeOfElement(other._dataType),
sizeToCopyinBytes, cudaMemcpyHostToDevice);
if (res != 0)
throw cuda_exception::build("DataBuffer::copyBufferFrom: cudaMemcpy_cudaMemcpyHostToDevice failed!", res);
other.readPrimary();
} else {
auto res = cudaMemcpy(
static_cast<int8_t*>(_specialBuffer) + offsetThis * DataTypeUtils::sizeOfElement(_dataType),
static_cast<const int8_t*>(other._specialBuffer) + offsetOther * DataTypeUtils::sizeOfElement(other._dataType),
sizeToCopyinBytes, cudaMemcpyDeviceToDevice);
if (res != 0)
throw cuda_exception::build("DataBuffer::copyBufferFrom: cudaMemcpy_cudaMemcpyDeviceToDevice failed!", res);
other.readSpecial();
}
writeSpecial();
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::copyBufferFromHost(const void* hostBuffer, size_t sizeToCopyinBytes, const sd::LongType offsetThis,
const sd::LongType offsetHostBuffer) { // copies only to special buffer
if (hostBuffer == nullptr) return;
if (sizeToCopyinBytes == 0) sizeToCopyinBytes = getLenInBytes();
if (sizeToCopyinBytes == 0) return;
auto res =
cudaMemcpy(static_cast<int8_t*>(_specialBuffer) + offsetThis * DataTypeUtils::sizeOfElement(_dataType),
static_cast<const int8_t*>(hostBuffer) + offsetHostBuffer * DataTypeUtils::sizeOfElement(_dataType),
sizeToCopyinBytes, cudaMemcpyHostToDevice);
if (res != 0)
throw cuda_exception::build("DataBuffer::copyBufferFromHost: cudaMemcpy_cudaMemcpyHostToDevice failed!", res);
writeSpecial();
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::setSpecial(void* special, const bool isOwnerSpecial) {
deleteSpecial();
_specialBuffer = special;
_isOwnerSpecial = isOwnerSpecial;
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::allocateBuffers(const bool allocBoth) { // always allocate special buffer only (cuda case)
allocateSpecial();
if (allocBoth) allocatePrimary();
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::setToZeroBuffers(const bool both) {
cudaMemsetAsync(special(), 0, getLenInBytes(), *LaunchContext::defaultContext()->getCudaStream());
auto res = cudaStreamSynchronize(*LaunchContext::defaultContext()->getCudaStream());
if (res != 0) throw cuda_exception::build("DataBuffer::setToZeroBuffers: streamSync failed!", res);
writeSpecial();
if (both) {
memset(primary(), 0, getLenInBytes());
readPrimary();
}
}
/////////////////////////
void DataBuffer::memcpy(const DataBuffer& dst, const DataBuffer& src) {
if (src._lenInBytes > dst._lenInBytes)
throw std::runtime_error("DataBuffer::memcpy: Source data buffer is larger than destination");
int res = 0;
if (src.isSpecialActual()) {
res = cudaMemcpyAsync(dst._specialBuffer, src._specialBuffer, src.getLenInBytes(), cudaMemcpyDeviceToDevice,
*LaunchContext::defaultContext()->getCudaStream());
} else if (src.isPrimaryActual()) {
res = cudaMemcpyAsync(dst._specialBuffer, src._primaryBuffer, src.getLenInBytes(), cudaMemcpyHostToDevice,
*LaunchContext::defaultContext()->getCudaStream());
}
if (res != 0) throw cuda_exception::build("DataBuffer::memcpy: cudaMemcpyAsync failed!", res);
res = cudaStreamSynchronize(*LaunchContext::defaultContext()->getCudaStream());
if (res != 0) throw cuda_exception::build("DataBuffer::memcpy: streamSync failed!", res);
dst.writeSpecial();
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::migrate() {
memory::Workspace* newWorkspace = nullptr;
void* newBuffer;
ALLOCATE_SPECIAL(newBuffer, newWorkspace, getLenInBytes(), int8_t);
auto res = cudaMemcpy(newBuffer, _specialBuffer, getLenInBytes(), cudaMemcpyDeviceToDevice);
if (res != 0) throw cuda_exception::build("DataBuffer::migrate: cudaMemcpyAsync failed!", res);
if (_isOwnerSpecial) {
// now we're releasing original buffer
RELEASE_SPECIAL(_specialBuffer, _workspace);
}
_isOwnerSpecial = true;
_specialBuffer = newBuffer;
}
////////////////////////////////////////////////////////////////////////
void DataBuffer::writePrimary() const { _writePrimary = ++_counter; }
void DataBuffer::writeSpecial() const { _writeSpecial = ++_counter; }
void DataBuffer::readPrimary() const { _readPrimary = ++_counter; }
void DataBuffer::readSpecial() const { _readSpecial = ++_counter; }
bool DataBuffer::isPrimaryActual() const {
return (_writePrimary.load() > _writeSpecial.load() || _readPrimary.load() > _writeSpecial.load());
}
bool DataBuffer::isSpecialActual() const {
return (_writeSpecial.load() > _writePrimary.load() || _readSpecial.load() > _writePrimary.load());
}
} // namespace sd
|
1eebdcfc41ea18a24c6b078e0859b1667734ea81.hip | // !!! This is a file automatically generated by hipify!!!
#include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#include<sys/time.h>
#include<string.h>
#include<assert.h>
void safe_call(hipError_t ret, int line)
{
if(ret!=hipSuccess)
{
printf("Error at line %d : %s\n",line,hipGetErrorString(ret));
exit(-1);
}
}
void fill_mat(double *arr, int len)
{
int i;
for(i=0;i<len;i++)
arr[i] = drand48();
}
int main(int argc, char **argv)
{
int SIZE, MODE, i; // 0=pageable 1=pinned
char memmode[10], tempmode[10];
if(argc<2 || argc>3)
{
printf("Syntax : exec -<memory mode> <size>\n");
exit(-1);
}
else if(argc==2)
{
MODE = 0;
SIZE = atoi(argv[1]);
}
else if(argc==3)
{
strcpy(tempmode,argv[1]);
i=0;
while(tempmode[i]=='-') { i++; }
if(i==0)
{
printf("Syntax : exec -<memory mode> <size>\n");
exit(-1);
}
strcpy(memmode,&tempmode[i]);
if(strcmp(memmode,"pinned") == 0)
MODE = 1;
else if(strcmp(memmode,"pageable") == 0)
MODE = 0;
else
{
printf("Memory modes pinned and pageable only\n");
exit(-1);
}
SIZE = atoi(argv[2]);
}
double *h_A, *h_B;
double *d_A, *d_B;
hipEvent_t start, stop;
double time, bandwidth;
float diff;
double time_start, time_end;
struct timeval tv;
struct timezone tz;
safe_call(hipEventCreate(&start),__LINE__);
safe_call(hipEventCreate(&stop),__LINE__);
if(MODE==0) //if memory mode = pageable
{
h_A = (double *) malloc(SIZE*sizeof(double));
h_B = (double *) malloc(SIZE*sizeof(double));
if(h_A==NULL || h_B==NULL)
{
printf("Error : host memory allocation\n");
exit(-1);
}
safe_call(hipMalloc((void **)&d_A, SIZE*sizeof(double)),__LINE__);
safe_call(hipMalloc((void **)&d_B, SIZE*sizeof(double)),__LINE__);
fill_mat(h_A,SIZE);
printf("Pageable Memory\n");
gettimeofday(&tv, &tz);
time_start = (double)tv.tv_sec + (double)tv.tv_usec / 1000000.0;
memcpy((void *)h_B, (void *)h_A, SIZE*sizeof(double));
gettimeofday(&tv, &tz);
time_end = (double)tv.tv_sec + (double)tv.tv_usec / 1000000.0;
bandwidth = ( SIZE * sizeof(double) * 2.0 ) / ( 1024 * 1024 * 1024 * ( time_end - time_start ) ) ;
printf("CPU Memcpy H2H Bandwidth = %f GB/s\n",bandwidth);
safe_call(hipEventRecord(start, 0),__LINE__);
safe_call(hipMemcpy((void *)d_A, (void *)h_A, SIZE*sizeof(double), hipMemcpyHostToDevice),__LINE__);
safe_call(hipEventRecord(stop, 0),__LINE__);
safe_call(hipEventSynchronize(stop),__LINE__);
safe_call(hipEventElapsedTime(&diff,start,stop),__LINE__);
time = diff*1.0e-3;
bandwidth = ( SIZE * sizeof(double) * 2.0 ) / ( 1024 * 1024 * 1024 * time ) ;
printf("CUDA Memcpy H2D Bandwidth = %f GB/s\n",bandwidth);
safe_call(hipEventRecord(start, 0),__LINE__);
safe_call(hipMemcpy((void *)d_B, (void *)d_A, SIZE*sizeof(double), hipMemcpyDeviceToDevice),__LINE__);
safe_call(hipEventRecord(stop, 0),__LINE__);
safe_call(hipEventSynchronize(stop),__LINE__);
safe_call(hipEventElapsedTime(&diff,start,stop),__LINE__);
time = diff*1.0e-3;
bandwidth = ( SIZE * sizeof(double) * 2.0 ) / ( 1024 * 1024 * 1024 * time ) ;
printf("CUDA Memcpy D2D Bandwidth = %f GB/s\n",bandwidth);
safe_call(hipEventRecord(start, 0),__LINE__);
safe_call(hipMemcpy((void *)h_B, (void *)d_B, SIZE*sizeof(double), hipMemcpyDeviceToHost),__LINE__);
safe_call(hipEventRecord(stop, 0),__LINE__);
safe_call(hipEventSynchronize(stop),__LINE__);
safe_call(hipEventElapsedTime(&diff,start,stop),__LINE__);
time = diff*1.0e-3;
bandwidth = ( SIZE * sizeof(double) * 2.0 ) / ( 1024 * 1024 * 1024 * time ) ;
printf("CUDA Memcpy D2H Bandwidth = %f GB/s\n",bandwidth);
for(i=0;i<SIZE;i++)
assert(h_A[i]==h_B[i]);
safe_call(hipFree(d_A),__LINE__);
safe_call(hipFree(d_B),__LINE__);
free(h_A);
free(h_B);
}
else //if memory mode = pinned
{
safe_call(hipHostMalloc((void **)&h_A, SIZE*sizeof(double)),__LINE__);
safe_call(hipHostMalloc((void **)&h_B, SIZE*sizeof(double)),__LINE__);
safe_call(hipMalloc((void **)&d_A, SIZE*sizeof(double)),__LINE__);
safe_call(hipMalloc((void **)&d_B, SIZE*sizeof(double)),__LINE__);
fill_mat(h_A,SIZE);
printf("Pinned Memory\n");
gettimeofday(&tv, &tz);
time_start = (double)tv.tv_sec + (double)tv.tv_usec / 1000000.0;
memcpy((void *)h_B, (void *)h_A, SIZE*sizeof(double));
gettimeofday(&tv, &tz);
time_end = (double)tv.tv_sec + (double)tv.tv_usec / 1000000.0;
bandwidth = ( SIZE * sizeof(double) * 2.0 ) / ( 1024 * 1024 * 1024 * ( time_end - time_start ) ) ;
printf("CPU Memcpy H2H Bandwidth = %f GB/s\n",bandwidth);
safe_call(hipEventRecord(start, 0),__LINE__);
safe_call(hipMemcpyAsync((void *)d_A, (void *)h_A, SIZE*sizeof(double), hipMemcpyHostToDevice, 0),__LINE__);
safe_call(hipEventRecord(stop, 0),__LINE__);
safe_call(hipEventSynchronize(stop),__LINE__);
safe_call(hipEventElapsedTime(&diff,start,stop),__LINE__);
time = diff*1.0e-3;
bandwidth = ( SIZE * sizeof(double) * 2.0 ) / ( 1024 * 1024 * 1024 * time ) ;
printf("CUDA Memcpy H2D Bandwidth = %f GB/s\n",bandwidth);
safe_call(hipEventRecord(start, 0),__LINE__);
safe_call(hipMemcpyAsync((void *)d_B, (void *)d_A, SIZE*sizeof(double), hipMemcpyDeviceToDevice, 0),__LINE__);
safe_call(hipEventRecord(stop, 0),__LINE__);
safe_call(hipEventSynchronize(stop),__LINE__);
safe_call(hipEventElapsedTime(&diff,start,stop),__LINE__);
time = diff*1.0e-3;
bandwidth = ( SIZE * sizeof(double) * 2.0 ) / ( 1024 * 1024 * 1024 * time ) ;
printf("CUDA Memcpy D2D Bandwidth = %f GB/s\n",bandwidth);
safe_call(hipEventRecord(start, 0),__LINE__);
safe_call(hipMemcpyAsync((void *)h_B, (void *)d_B, SIZE*sizeof(double), hipMemcpyDeviceToHost, 0),__LINE__);
safe_call(hipEventRecord(stop, 0),__LINE__);
safe_call(hipEventSynchronize(stop),__LINE__);
safe_call(hipEventElapsedTime(&diff,start,stop),__LINE__);
time = diff*1.0e-3;
bandwidth = ( SIZE * sizeof(double) * 2.0 ) / ( 1024 * 1024 * 1024 * time ) ;
printf("CUDA Memcpy D2H Bandwidth = %f GB/s\n",bandwidth);
for(i=0;i<SIZE;i++)
assert(h_A[i]==h_B[i]);
safe_call(hipFree(d_A),__LINE__);
safe_call(hipFree(d_B),__LINE__);
safe_call(hipHostFree(h_A),__LINE__);
safe_call(hipHostFree(h_B),__LINE__);
}
safe_call(hipEventDestroy(start),__LINE__);
safe_call(hipEventDestroy(stop),__LINE__);
return 0;
}
| 1eebdcfc41ea18a24c6b078e0859b1667734ea81.cu | #include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#include<sys/time.h>
#include<string.h>
#include<assert.h>
void safe_call(cudaError_t ret, int line)
{
if(ret!=cudaSuccess)
{
printf("Error at line %d : %s\n",line,cudaGetErrorString(ret));
exit(-1);
}
}
void fill_mat(double *arr, int len)
{
int i;
for(i=0;i<len;i++)
arr[i] = drand48();
}
int main(int argc, char **argv)
{
int SIZE, MODE, i; // 0=pageable 1=pinned
char memmode[10], tempmode[10];
if(argc<2 || argc>3)
{
printf("Syntax : exec -<memory mode> <size>\n");
exit(-1);
}
else if(argc==2)
{
MODE = 0;
SIZE = atoi(argv[1]);
}
else if(argc==3)
{
strcpy(tempmode,argv[1]);
i=0;
while(tempmode[i]=='-') { i++; }
if(i==0)
{
printf("Syntax : exec -<memory mode> <size>\n");
exit(-1);
}
strcpy(memmode,&tempmode[i]);
if(strcmp(memmode,"pinned") == 0)
MODE = 1;
else if(strcmp(memmode,"pageable") == 0)
MODE = 0;
else
{
printf("Memory modes pinned and pageable only\n");
exit(-1);
}
SIZE = atoi(argv[2]);
}
double *h_A, *h_B;
double *d_A, *d_B;
cudaEvent_t start, stop;
double time, bandwidth;
float diff;
double time_start, time_end;
struct timeval tv;
struct timezone tz;
safe_call(cudaEventCreate(&start),__LINE__);
safe_call(cudaEventCreate(&stop),__LINE__);
if(MODE==0) //if memory mode = pageable
{
h_A = (double *) malloc(SIZE*sizeof(double));
h_B = (double *) malloc(SIZE*sizeof(double));
if(h_A==NULL || h_B==NULL)
{
printf("Error : host memory allocation\n");
exit(-1);
}
safe_call(cudaMalloc((void **)&d_A, SIZE*sizeof(double)),__LINE__);
safe_call(cudaMalloc((void **)&d_B, SIZE*sizeof(double)),__LINE__);
fill_mat(h_A,SIZE);
printf("Pageable Memory\n");
gettimeofday(&tv, &tz);
time_start = (double)tv.tv_sec + (double)tv.tv_usec / 1000000.0;
memcpy((void *)h_B, (void *)h_A, SIZE*sizeof(double));
gettimeofday(&tv, &tz);
time_end = (double)tv.tv_sec + (double)tv.tv_usec / 1000000.0;
bandwidth = ( SIZE * sizeof(double) * 2.0 ) / ( 1024 * 1024 * 1024 * ( time_end - time_start ) ) ;
printf("CPU Memcpy H2H Bandwidth = %f GB/s\n",bandwidth);
safe_call(cudaEventRecord(start, 0),__LINE__);
safe_call(cudaMemcpy((void *)d_A, (void *)h_A, SIZE*sizeof(double), cudaMemcpyHostToDevice),__LINE__);
safe_call(cudaEventRecord(stop, 0),__LINE__);
safe_call(cudaEventSynchronize(stop),__LINE__);
safe_call(cudaEventElapsedTime(&diff,start,stop),__LINE__);
time = diff*1.0e-3;
bandwidth = ( SIZE * sizeof(double) * 2.0 ) / ( 1024 * 1024 * 1024 * time ) ;
printf("CUDA Memcpy H2D Bandwidth = %f GB/s\n",bandwidth);
safe_call(cudaEventRecord(start, 0),__LINE__);
safe_call(cudaMemcpy((void *)d_B, (void *)d_A, SIZE*sizeof(double), cudaMemcpyDeviceToDevice),__LINE__);
safe_call(cudaEventRecord(stop, 0),__LINE__);
safe_call(cudaEventSynchronize(stop),__LINE__);
safe_call(cudaEventElapsedTime(&diff,start,stop),__LINE__);
time = diff*1.0e-3;
bandwidth = ( SIZE * sizeof(double) * 2.0 ) / ( 1024 * 1024 * 1024 * time ) ;
printf("CUDA Memcpy D2D Bandwidth = %f GB/s\n",bandwidth);
safe_call(cudaEventRecord(start, 0),__LINE__);
safe_call(cudaMemcpy((void *)h_B, (void *)d_B, SIZE*sizeof(double), cudaMemcpyDeviceToHost),__LINE__);
safe_call(cudaEventRecord(stop, 0),__LINE__);
safe_call(cudaEventSynchronize(stop),__LINE__);
safe_call(cudaEventElapsedTime(&diff,start,stop),__LINE__);
time = diff*1.0e-3;
bandwidth = ( SIZE * sizeof(double) * 2.0 ) / ( 1024 * 1024 * 1024 * time ) ;
printf("CUDA Memcpy D2H Bandwidth = %f GB/s\n",bandwidth);
for(i=0;i<SIZE;i++)
assert(h_A[i]==h_B[i]);
safe_call(cudaFree(d_A),__LINE__);
safe_call(cudaFree(d_B),__LINE__);
free(h_A);
free(h_B);
}
else //if memory mode = pinned
{
safe_call(cudaMallocHost((void **)&h_A, SIZE*sizeof(double)),__LINE__);
safe_call(cudaMallocHost((void **)&h_B, SIZE*sizeof(double)),__LINE__);
safe_call(cudaMalloc((void **)&d_A, SIZE*sizeof(double)),__LINE__);
safe_call(cudaMalloc((void **)&d_B, SIZE*sizeof(double)),__LINE__);
fill_mat(h_A,SIZE);
printf("Pinned Memory\n");
gettimeofday(&tv, &tz);
time_start = (double)tv.tv_sec + (double)tv.tv_usec / 1000000.0;
memcpy((void *)h_B, (void *)h_A, SIZE*sizeof(double));
gettimeofday(&tv, &tz);
time_end = (double)tv.tv_sec + (double)tv.tv_usec / 1000000.0;
bandwidth = ( SIZE * sizeof(double) * 2.0 ) / ( 1024 * 1024 * 1024 * ( time_end - time_start ) ) ;
printf("CPU Memcpy H2H Bandwidth = %f GB/s\n",bandwidth);
safe_call(cudaEventRecord(start, 0),__LINE__);
safe_call(cudaMemcpyAsync((void *)d_A, (void *)h_A, SIZE*sizeof(double), cudaMemcpyHostToDevice, 0),__LINE__);
safe_call(cudaEventRecord(stop, 0),__LINE__);
safe_call(cudaEventSynchronize(stop),__LINE__);
safe_call(cudaEventElapsedTime(&diff,start,stop),__LINE__);
time = diff*1.0e-3;
bandwidth = ( SIZE * sizeof(double) * 2.0 ) / ( 1024 * 1024 * 1024 * time ) ;
printf("CUDA Memcpy H2D Bandwidth = %f GB/s\n",bandwidth);
safe_call(cudaEventRecord(start, 0),__LINE__);
safe_call(cudaMemcpyAsync((void *)d_B, (void *)d_A, SIZE*sizeof(double), cudaMemcpyDeviceToDevice, 0),__LINE__);
safe_call(cudaEventRecord(stop, 0),__LINE__);
safe_call(cudaEventSynchronize(stop),__LINE__);
safe_call(cudaEventElapsedTime(&diff,start,stop),__LINE__);
time = diff*1.0e-3;
bandwidth = ( SIZE * sizeof(double) * 2.0 ) / ( 1024 * 1024 * 1024 * time ) ;
printf("CUDA Memcpy D2D Bandwidth = %f GB/s\n",bandwidth);
safe_call(cudaEventRecord(start, 0),__LINE__);
safe_call(cudaMemcpyAsync((void *)h_B, (void *)d_B, SIZE*sizeof(double), cudaMemcpyDeviceToHost, 0),__LINE__);
safe_call(cudaEventRecord(stop, 0),__LINE__);
safe_call(cudaEventSynchronize(stop),__LINE__);
safe_call(cudaEventElapsedTime(&diff,start,stop),__LINE__);
time = diff*1.0e-3;
bandwidth = ( SIZE * sizeof(double) * 2.0 ) / ( 1024 * 1024 * 1024 * time ) ;
printf("CUDA Memcpy D2H Bandwidth = %f GB/s\n",bandwidth);
for(i=0;i<SIZE;i++)
assert(h_A[i]==h_B[i]);
safe_call(cudaFree(d_A),__LINE__);
safe_call(cudaFree(d_B),__LINE__);
safe_call(cudaFreeHost(h_A),__LINE__);
safe_call(cudaFreeHost(h_B),__LINE__);
}
safe_call(cudaEventDestroy(start),__LINE__);
safe_call(cudaEventDestroy(stop),__LINE__);
return 0;
}
|
9c85d9e15060c5afd1a61ae22732eb8c03a5ea0b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2020, American University of Beirut
// See LICENSE.txt for copyright license
#include "graph.h"
#include <assert.h>
#include <stdio.h>
// Allocate a host-side COO graph for numNodes nodes with room for
// `capacity` edges; the edge list starts out empty (numEdges == 0).
// Caller releases the result with freeCOOGraph().
struct COOGraph* createEmptyCOO(unsigned int numNodes, unsigned int capacity) {
    struct COOGraph* result = (COOGraph*) malloc(sizeof(COOGraph));
    result->numNodes = numNodes;
    result->numEdges = 0;
    result->capacity = capacity;
    const size_t edgeBytes = capacity*sizeof(unsigned int);
    result->srcIdx = (unsigned int*) malloc(edgeBytes);
    result->dstIdx = (unsigned int*) malloc(edgeBytes);
    return result;
}
// Allocate a device-resident COO graph (both the struct and its edge arrays
// live in device memory) and return the device pointer to the struct.
// NOTE(review): hipMalloc/hipMemcpy return codes are not checked anywhere in
// this file, so allocation failures would go unnoticed — consider a check macro.
struct COOGraph* createEmptyCOOOnDevice(unsigned int numNodes, unsigned int capacity) {
    // Build a host-side "shadow" of the struct first, filling in the
    // device array pointers as they are allocated...
    struct COOGraph g_shd;
    g_shd.numNodes = numNodes;
    g_shd.numEdges = 0;
    g_shd.capacity = capacity;
    hipMalloc((void**) &g_shd.srcIdx, g_shd.capacity*sizeof(unsigned int));
    hipMalloc((void**) &g_shd.dstIdx, g_shd.capacity*sizeof(unsigned int));
    // ...then copy the completed shadow into a device-side struct.
    struct COOGraph* g_d;
    hipMalloc((void**) &g_d, sizeof(COOGraph));
    hipMemcpy(g_d, &g_shd, sizeof(COOGraph), hipMemcpyHostToDevice);
    return g_d;
}
// Load a COO graph from a whitespace-separated edge-list file where each
// record is "dst src weight" with 1-based node ids (the weight is read and
// discarded). Ids are converted to 0-based; self-edges abort via assert.
// The edge arrays grow geometrically as needed. Caller frees the result
// with freeCOOGraph().
struct COOGraph* createCOOFromFile(const char* fileName) {
    // Allocate
    struct COOGraph* cooGraph = (COOGraph*) malloc(sizeof(COOGraph));
    cooGraph->capacity = 1 << 20;
    cooGraph->srcIdx = (unsigned int*) malloc(cooGraph->capacity*sizeof(unsigned int));
    cooGraph->dstIdx = (unsigned int*) malloc(cooGraph->capacity*sizeof(unsigned int));
    // Read edges
    FILE* fp = fopen(fileName, "r");
    assert(fp != NULL);
    unsigned int numNodes = 0;
    unsigned int numEdges = 0;
    unsigned int src, dst, x;
    while(fscanf(fp, "%u", &dst) == 1) {
        // BUGFIX: the fscanf calls must live outside assert() — with NDEBUG
        // defined, assert expands to nothing and the reads would be skipped
        // entirely, desynchronizing the parse.
        int readSrc = fscanf(fp, "%u", &src);
        assert(readSrc == 1);
        (void) readSrc;
        int readWeight = fscanf(fp, "%u", &x);
        assert(readWeight == 1);
        (void) readWeight;
        assert(src != dst && "Edges from a vertex to itself are not allowed!");
        if(numEdges == cooGraph->capacity) {
            // Grow geometrically to keep amortized insertion cost O(1)
            cooGraph->capacity = 2*cooGraph->capacity;
            cooGraph->srcIdx = (unsigned int*) realloc(cooGraph->srcIdx, cooGraph->capacity*sizeof(unsigned int));
            cooGraph->dstIdx = (unsigned int*) realloc(cooGraph->dstIdx, cooGraph->capacity*sizeof(unsigned int));
        }
        cooGraph->srcIdx[numEdges] = src - 1;   // convert to 0-based
        cooGraph->dstIdx[numEdges] = dst - 1;
        ++numEdges;
        if(src > numNodes) {
            numNodes = src;
        }
        if(dst > numNodes) {
            numNodes = dst;
        }
    }
    // Update counts
    cooGraph->numNodes = numNodes;
    cooGraph->numEdges = numEdges;
    fclose(fp);
    return cooGraph;
}
// Release a host COO graph allocated by createEmptyCOO or createCOOFromFile.
void freeCOOGraph(struct COOGraph* cooGraph) {
    free(cooGraph->dstIdx);
    free(cooGraph->srcIdx);
    free(cooGraph);
}
// Free a device COO graph: fetch the struct back to the host to recover the
// device array pointers, then free the arrays and the struct itself.
void freeCOOGraphOnDevice(struct COOGraph* g_d) {
    struct COOGraph g_shd;
    hipMemcpy(&g_shd, g_d, sizeof(COOGraph), hipMemcpyDeviceToHost);
    hipDeviceSynchronize();
    hipFree(g_shd.srcIdx);
    hipFree(g_shd.dstIdx);
    hipFree(g_d);
}
// Write a COO graph to a file as tab-separated "dst src 1" lines with
// 1-based node ids — the same layout createCOOFromFile reads back.
void writeCOOGraphToFile(COOGraph* cooGraph, const char* fileName) {
    FILE* fp = fopen(fileName, "w");
    // BUGFIX: guard against fopen failure (consistent with the fopen check
    // in createCOOFromFile); otherwise fprintf dereferences NULL.
    assert(fp != NULL);
    for(unsigned int e = 0; e < cooGraph->numEdges; ++e) {
        fprintf(fp, "%u\t", cooGraph->dstIdx[e] + 1);  // back to 1-based
        fprintf(fp, "%u\t", cooGraph->srcIdx[e] + 1);
        fprintf(fp, "%u\n", 1);                        // unit weight
    }
    fclose(fp);
}
// Copy a host COO graph's edge data into an already-allocated device COO
// graph. The device graph must match in node count and have enough capacity.
void copyCOOToDevice(struct COOGraph* g, struct COOGraph* g_d) {
    // Fetch the device struct ("shadow") to learn its device array pointers.
    struct COOGraph g_shd;
    hipMemcpy(&g_shd, g_d, sizeof(COOGraph), hipMemcpyDeviceToHost);
    hipDeviceSynchronize();
    assert(g_shd.numNodes == g->numNodes);
    assert(g_shd.capacity >= g->numEdges);
    // Write the edge count directly into the device struct, then the arrays.
    hipMemcpy(&g_d->numEdges, &g->numEdges, sizeof(unsigned int), hipMemcpyHostToDevice);
    hipMemcpy(g_shd.srcIdx, g->srcIdx, g->numEdges*sizeof(unsigned int), hipMemcpyHostToDevice);
    hipMemcpy(g_shd.dstIdx, g->dstIdx, g->numEdges*sizeof(unsigned int), hipMemcpyHostToDevice);
}
// Copy a device COO graph's edge data back into a host COO graph. The host
// graph must match in node count and have enough capacity.
void copyCOOFromDevice(struct COOGraph* g_d, struct COOGraph* g) {
    // Fetch the device struct ("shadow") for its edge count and pointers.
    struct COOGraph g_shd;
    hipMemcpy(&g_shd, g_d, sizeof(COOGraph), hipMemcpyDeviceToHost);
    hipDeviceSynchronize();
    assert(g->numNodes == g_shd.numNodes);
    assert(g->capacity >= g_shd.numEdges);
    g->numEdges = g_shd.numEdges;
    hipMemcpy(g->srcIdx, g_shd.srcIdx, g->numEdges*sizeof(unsigned int), hipMemcpyDeviceToHost);
    hipMemcpy(g->dstIdx, g_shd.dstIdx, g->numEdges*sizeof(unsigned int), hipMemcpyDeviceToHost);
    hipDeviceSynchronize();
}
// In-place quicksort of key[start..end] (inclusive bounds), permuting data[]
// in lockstep so each data element stays paired with its key.
// NOTE(review): the indices are unsigned, so `right` wraps to UINT_MAX when
// partitioning pushes it below zero; the (end - start + 1) > 1 guard then
// happens to terminate the recursion via wraparound arithmetic. Verify
// carefully before changing any index types here.
void quicksort(unsigned int *key, unsigned int* data, unsigned int start, unsigned int end) {
    if((end - start + 1) > 1) {
        // Partition around the last element as pivot
        unsigned int left = start, right = end;
        unsigned int pivot = key[right];
        while(left <= right) {
            while(key[left] < pivot) {
                left = left + 1;
            }
            while(key[right] > pivot) {
                right = right - 1;
            }
            if(left <= right) {
                // Swap both the keys and their companion data
                unsigned int tmpKey = key[left]; key[left] = key[right]; key[right] = tmpKey;
                unsigned int tmpData = data[left]; data[left] = data[right]; data[right] = tmpData;
                left = left + 1;
                right = right - 1;
            }
        }
        quicksort(key, data, start, right);
        quicksort(key, data, left, end);
    }
}
// Relabel the graph's nodes in ascending out-degree order. On return,
// new2old[i] holds the original id of the node now labeled i (new2old must
// have room for numNodes entries), and all edges have been rewritten to use
// the new labels. Undo with unsort().
void sortByDegree(COOGraph* graph, unsigned int* new2old) {
    // Initialize permutation vector
    for(unsigned int i = 0; i < graph->numNodes; ++i) {
        new2old[i] = i;
    }
    // Find degree of each node
    unsigned int* degree = (unsigned int*) calloc(graph->numNodes, sizeof(unsigned int));
    for(unsigned int e = 0; e < graph->numEdges; ++e) {
        degree[graph->srcIdx[e]]++;
    }
    // Sort nodes by degree (new2old carried along as companion data)
    quicksort(degree, new2old, 0, graph->numNodes - 1);
    // Find inverse permutation
    unsigned int* old2new = (unsigned int*) malloc(graph->numNodes*sizeof(unsigned int));
    for(unsigned int newIdx = 0; newIdx < graph->numNodes; ++newIdx) {
        unsigned int oldIdx = new2old[newIdx];
        old2new[oldIdx] = newIdx;
    }
    // Update edges
    for(unsigned int e = 0; e < graph->numEdges; ++e) {
        graph->srcIdx[e] = old2new[graph->srcIdx[e]];
        graph->dstIdx[e] = old2new[graph->dstIdx[e]];
    }
    // Free intermediate data
    free(degree);
    free(old2new);
}
// Map every edge endpoint back to its original node id, inverting the
// relabeling performed by sortByDegree().
void unsort(COOGraph* graph, unsigned int* new2old) {
    unsigned int edgeCount = graph->numEdges;
    for(unsigned int i = 0; i < edgeCount; ++i) {
        graph->srcIdx[i] = new2old[graph->srcIdx[i]];
        graph->dstIdx[i] = new2old[graph->dstIdx[i]];
    }
}
// Convert an undirected edge list (each edge stored in both orientations)
// into a directed one by keeping only the edges with src < dst, so each
// undirected edge appears exactly once in gdirected.
void undirected2directedCOO(struct COOGraph* gundirected, struct COOGraph* gdirected) {
    assert(gdirected->numNodes == gundirected->numNodes);
    gdirected->numEdges = 0;
    unsigned int inputCount = gundirected->numEdges;
    for(unsigned int i = 0; i < inputCount; ++i) {
        unsigned int u = gundirected->srcIdx[i];
        unsigned int v = gundirected->dstIdx[i];
        if(u >= v) {
            continue;  // skip the duplicate orientation (and any self-edge)
        }
        unsigned int slot = gdirected->numEdges++;
        assert(slot < gdirected->capacity);
        gdirected->srcIdx[slot] = u;
        gdirected->dstIdx[slot] = v;
    }
}
// One thread per input edge: keep only edges with src < dst, appending them
// to gdirected via an atomic bump of its edge count. Output order is
// nondeterministic. Launched by undirected2directedCOOOnDevice with
// 1024-thread blocks; out-of-range threads exit at the bounds check.
__global__ void undirected2directedCOO_kernel(struct COOGraph* gundirected, struct COOGraph* gdirected) {
    unsigned int e = blockIdx.x*blockDim.x + threadIdx.x;
    if(e < gundirected->numEdges) {
        unsigned int src = gundirected->srcIdx[e];
        unsigned int dst = gundirected->dstIdx[e];
        if(src < dst) {
            // Atomically claim an output slot
            unsigned int eout = atomicAdd(&gdirected->numEdges, 1);
            gdirected->srcIdx[eout] = src;
            gdirected->dstIdx[eout] = dst;
        }
    }
}
// Device-side counterpart of undirected2directedCOO: resets the output edge
// count and launches one thread per input edge. The kernel launch is
// asynchronous; callers must synchronize before reading results.
void undirected2directedCOOOnDevice(struct COOGraph* gundirected_d, struct COOGraph* gdirected_d) {
    // Copy shadows from device
    COOGraph gundirected_shd;
    COOGraph gdirected_shd;
    hipMemcpy(&gundirected_shd, gundirected_d, sizeof(COOGraph), hipMemcpyDeviceToHost);
    hipMemcpy(&gdirected_shd, gdirected_d, sizeof(COOGraph), hipMemcpyDeviceToHost);
    hipDeviceSynchronize();
    // Convert
    assert(gdirected_shd.numNodes == gundirected_shd.numNodes);
    assert(gdirected_shd.capacity >= gundirected_shd.numEdges/2);
    hipMemset(&gdirected_d->numEdges, 0, sizeof(unsigned int));
    hipLaunchKernelGGL(( undirected2directedCOO_kernel) , dim3((gundirected_shd.numEdges + 1024 - 1)/1024), dim3(1024) , 0, 0, gundirected_d, gdirected_d);
}
// Append the reverse of every non-self edge in place, so the edge list ends
// up storing both orientations of each edge (undirected representation).
// Requires capacity for up to twice the current edge count.
void directed2undirectedCOO(struct COOGraph* g) {
    unsigned int originalCount = g->numEdges;
    for(unsigned int i = 0; i < originalCount; ++i) {
        unsigned int u = g->srcIdx[i];
        unsigned int v = g->dstIdx[i];
        if(u == v) {
            continue;  // self-edges have no distinct reverse
        }
        unsigned int slot = g->numEdges++;
        assert(slot < g->capacity);
        g->srcIdx[slot] = v;
        g->dstIdx[slot] = u;
    }
}
// One thread per original (directed) edge: append the reverse of each
// non-self edge, claiming an output slot atomically. numDirectedEdges is
// passed in because g->numEdges grows during the kernel.
__global__ void directed2undirectedCOO_kernel(struct COOGraph* g, unsigned int numDirectedEdges) {
    unsigned int e = blockIdx.x*blockDim.x + threadIdx.x;
    if(e < numDirectedEdges) {
        unsigned int src = g->srcIdx[e];
        unsigned int dst = g->dstIdx[e];
        if(src != dst) {
            unsigned int eout = atomicAdd(&g->numEdges, 1);
            g->srcIdx[eout] = dst;
            g->dstIdx[eout] = src;
        }
    }
}
// Device-side counterpart of directed2undirectedCOO: launches one thread per
// existing edge to append its reverse. Launch is asynchronous.
void directed2undirectedCOOOnDevice(struct COOGraph* g_d) {
    // Copy shadow from device
    COOGraph g_shd;
    hipMemcpy(&g_shd, g_d, sizeof(COOGraph), hipMemcpyDeviceToHost);
    hipDeviceSynchronize();
    // Convert (worst case doubles the edge count)
    assert(g_shd.capacity >= 2*g_shd.numEdges);
    hipLaunchKernelGGL(( directed2undirectedCOO_kernel) , dim3((g_shd.numEdges + 1024 - 1)/1024), dim3(1024) , 0, 0, g_d, g_shd.numEdges);
}
// Key-only overload of quicksort: sorts key[start..end] (inclusive) in place.
// NOTE(review): same unsigned-wraparound caveat as the two-array overload —
// `right` can wrap to UINT_MAX, and termination relies on the
// (end - start + 1) > 1 guard wrapping consistently. Callers pass
// end = start - 1 for empty ranges, which also depends on this behavior.
void quicksort(unsigned int *key, unsigned int start, unsigned int end) {
    if((end - start + 1) > 1) {
        // Partition around the last element as pivot
        unsigned int left = start, right = end;
        unsigned int pivot = key[right];
        while(left <= right) {
            while(key[left] < pivot) {
                left = left + 1;
            }
            while(key[right] > pivot) {
                right = right - 1;
            }
            if(left <= right) {
                unsigned int tmpKey = key[left]; key[left] = key[right]; key[right] = tmpKey;
                left = left + 1;
                right = right - 1;
            }
        }
        quicksort(key, start, right);
        quicksort(key, left, end);
    }
}
// Allocate a host COO+CSR graph: CSR-style row pointers (srcPtr, one entry
// per node plus a terminator) alongside COO-style srcIdx/dstIdx edge arrays
// of the requested capacity. The edge list starts out empty.
struct COOCSRGraph* createEmptyCOOCSR(unsigned int numNodes, unsigned int capacity) {
    struct COOCSRGraph* result = (COOCSRGraph*) malloc(sizeof(COOCSRGraph));
    result->numNodes = numNodes;
    result->numEdges = 0;
    result->capacity = capacity;
    result->srcPtr = (unsigned int*) malloc((numNodes + 1)*sizeof(unsigned int));
    const size_t edgeBytes = capacity*sizeof(unsigned int);
    result->srcIdx = (unsigned int*) malloc(edgeBytes);
    result->dstIdx = (unsigned int*) malloc(edgeBytes);
    return result;
}
// Allocate a device-resident COO+CSR graph and return the device pointer to
// the struct. Follows the same shadow-struct pattern as createEmptyCOOOnDevice.
struct COOCSRGraph* createEmptyCOOCSROnDevice(unsigned int numNodes, unsigned int capacity) {
    struct COOCSRGraph g_shd;
    g_shd.numNodes = numNodes;
    g_shd.numEdges = 0;
    g_shd.capacity = capacity;
    hipMalloc((void**) &g_shd.srcPtr, (g_shd.numNodes + 1)*sizeof(unsigned int));
    hipMalloc((void**) &g_shd.srcIdx, g_shd.capacity*sizeof(unsigned int));
    hipMalloc((void**) &g_shd.dstIdx, g_shd.capacity*sizeof(unsigned int));
    struct COOCSRGraph* g_d;
    hipMalloc((void**) &g_d, sizeof(COOCSRGraph));
    hipMemcpy(g_d, &g_shd, sizeof(COOCSRGraph), hipMemcpyHostToDevice);
    return g_d;
}
// Release a host COO+CSR graph allocated by createEmptyCOOCSR.
void freeCOOCSRGraph(struct COOCSRGraph* graph) {
    free(graph->dstIdx);
    free(graph->srcIdx);
    free(graph->srcPtr);
    free(graph);
}
// Free a device COO+CSR graph: fetch the struct to recover the device array
// pointers, then free the arrays and the struct itself.
void freeCOOCSRGraphOnDevice(struct COOCSRGraph* g_d) {
    struct COOCSRGraph g_shd;
    hipMemcpy(&g_shd, g_d, sizeof(COOCSRGraph), hipMemcpyDeviceToHost);
    hipDeviceSynchronize();
    hipFree(g_shd.srcPtr);
    hipFree(g_shd.srcIdx);
    hipFree(g_shd.dstIdx);
    hipFree(g_d);
}
// Copy a host COO+CSR graph's contents into an already-allocated device
// COO+CSR graph (edge count, row pointers, and edge arrays).
void copyCOOCSRToDevice(struct COOCSRGraph* g, struct COOCSRGraph* g_d) {
    // Fetch the device struct ("shadow") to learn its device array pointers.
    struct COOCSRGraph g_shd;
    hipMemcpy(&g_shd, g_d, sizeof(COOCSRGraph), hipMemcpyDeviceToHost);
    hipDeviceSynchronize();
    assert(g_shd.numNodes == g->numNodes);
    assert(g_shd.capacity >= g->numEdges);
    hipMemcpy(&g_d->numEdges, &g->numEdges, sizeof(unsigned int), hipMemcpyHostToDevice);
    hipMemcpy(g_shd.srcPtr, g->srcPtr, (g->numNodes + 1)*sizeof(unsigned int), hipMemcpyHostToDevice);
    hipMemcpy(g_shd.srcIdx, g->srcIdx, g->numEdges*sizeof(unsigned int), hipMemcpyHostToDevice);
    hipMemcpy(g_shd.dstIdx, g->dstIdx, g->numEdges*sizeof(unsigned int), hipMemcpyHostToDevice);
}
// Copy a device COO+CSR graph's contents back into a host COO+CSR graph
// (edge count, row pointers, and edge arrays).
void copyCOOCSRFromDevice(struct COOCSRGraph* g_d, struct COOCSRGraph* g) {
    struct COOCSRGraph g_shd;
    hipMemcpy(&g_shd, g_d, sizeof(COOCSRGraph), hipMemcpyDeviceToHost);
    hipDeviceSynchronize();
    assert(g->numNodes == g_shd.numNodes);
    assert(g->capacity >= g_shd.numEdges);
    g->numEdges = g_shd.numEdges;
    hipMemcpy(g->srcPtr, g_shd.srcPtr, (g->numNodes + 1)*sizeof(unsigned int), hipMemcpyDeviceToHost);
    hipMemcpy(g->srcIdx, g_shd.srcIdx, g->numEdges*sizeof(unsigned int), hipMemcpyDeviceToHost);
    hipMemcpy(g->dstIdx, g_shd.dstIdx, g->numEdges*sizeof(unsigned int), hipMemcpyDeviceToHost);
}
// Build a COO+CSR graph from a COO graph on the host: histogram out-degrees,
// exclusive-prefix-sum them into row pointers, bin the edges, then sort each
// node's neighbor list. Uses the classic shifted-histogram trick: counts are
// accumulated at srcPtr[src + 1] so that the binning pass, which increments
// the same slots while placing edges, leaves srcPtr correctly shifted into
// final row-pointer position.
void coo2coocsr(struct COOGraph* cooGraph, struct COOCSRGraph* graph) {
    // Initialize
    unsigned int numNodes = cooGraph->numNodes;
    assert(graph->numNodes == numNodes);
    unsigned int numEdges = cooGraph->numEdges;
    assert(graph->capacity >= numEdges);
    graph->numEdges = numEdges;
    // Histogram
    // NOTE: (src + 1) used instead of (src) because it will get shifted by the binning operation
    memset(graph->srcPtr, 0, (numNodes + 1)*sizeof(unsigned int));
    for(unsigned int e = 0; e < numEdges; ++e) {
        unsigned int src = cooGraph->srcIdx[e];
        graph->srcPtr[src + 1]++;
    }
    // Prefix sum (exclusive scan over the shifted counts)
    unsigned int sum = 0;
    for(unsigned int src = 0; src < numNodes; ++src) {
        unsigned int val = graph->srcPtr[src + 1];
        graph->srcPtr[src + 1] = sum;
        sum += val;
    }
    // Binning: each placement bumps srcPtr[src + 1], so after this loop
    // srcPtr[src + 1] equals the end offset of node src's edge range.
    for(unsigned int e = 0; e < numEdges; ++e) {
        unsigned int src = cooGraph->srcIdx[e];
        unsigned int j = graph->srcPtr[src + 1]++;
        graph->srcIdx[j] = src;
        graph->dstIdx[j] = cooGraph->dstIdx[e];
    }
    // Sort outgoing edges of each source node
    for(unsigned int src = 0; src < numNodes; ++src) {
        unsigned int start = graph->srcPtr[src];
        unsigned int end = graph->srcPtr[src + 1] - 1;
        quicksort(graph->dstIdx, start, end); // NOTE: No need to sort srcIdx because they are all the same
    }
}
#include <thrust/execution_policy.h>
#include <thrust/scan.h>
// One thread per element: atomically count occurrences of each value in
// data[0..N) into bins[]. bins must be pre-zeroed and large enough to index
// by any value in data.
__global__ void histogram_kernel(unsigned int* data, unsigned int* bins, unsigned int N) {
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    if(i < N) {
        unsigned int val = data[i];
        atomicAdd(&bins[val], 1);
    }
}
// One thread per COO edge: scatter the edge into its source node's CSR bin,
// atomically bumping the shifted row pointer (srcPtr[src + 1]) to claim a
// slot. Intra-bin order is nondeterministic; a later sort pass fixes it.
__global__ void binning_kernel(COOGraph* cooGraph, COOCSRGraph* graph) {
    unsigned int e = blockIdx.x*blockDim.x + threadIdx.x;
    if(e < cooGraph->numEdges) {
        unsigned int src = cooGraph->srcIdx[e];
        unsigned int j = atomicAdd(&graph->srcPtr[src + 1], 1);
        graph->srcIdx[j] = src;
        graph->dstIdx[j] = cooGraph->dstIdx[e];
    }
}
// Device-side COO → COO+CSR conversion: histogram and binning run as kernels,
// the prefix sum runs via Thrust, and the per-node neighbor sort is done on
// the CPU (dstIdx is round-tripped through host memory for it).
void coo2coocsrOnDevice(struct COOGraph* cooGraph_d, struct COOCSRGraph* graph_d) {
    // Copy shadows from device
    COOGraph cooGraph_shd;
    COOCSRGraph graph_shd;
    hipMemcpy(&cooGraph_shd, cooGraph_d, sizeof(COOGraph), hipMemcpyDeviceToHost);
    hipMemcpy(&graph_shd, graph_d, sizeof(COOCSRGraph), hipMemcpyDeviceToHost);
    hipDeviceSynchronize();
    // Initialize
    unsigned int numNodes = cooGraph_shd.numNodes;
    assert(graph_shd.numNodes == numNodes);
    unsigned int numEdges = cooGraph_shd.numEdges;
    assert(graph_shd.capacity >= numEdges);
    hipMemcpy(&graph_d->numEdges, &cooGraph_shd.numEdges, sizeof(unsigned int), hipMemcpyHostToDevice);
    // Histogram
    // NOTE: (srcPtr + 1) used instead of (srcPtr) because it will get shifted by the binning operation
    hipMemset(graph_shd.srcPtr, 0, (numNodes + 1)*sizeof(unsigned int));
    hipLaunchKernelGGL(( histogram_kernel) , dim3((numEdges + 1024 - 1)/1024), dim3(1024) , 0, 0, cooGraph_shd.srcIdx, graph_shd.srcPtr + 1, numEdges);
    // Prefix sum
    thrust::exclusive_scan(thrust::device, graph_shd.srcPtr + 1, graph_shd.srcPtr + numNodes + 1, graph_shd.srcPtr + 1);
    // Binning
    hipLaunchKernelGGL(( binning_kernel) , dim3((numEdges + 1024 - 1)/1024), dim3(1024) , 0, 0, cooGraph_d, graph_d);
    // Sort outgoing edges of each source node (on CPU)
    // TODO: Implement sorting on GPU
    unsigned int* srcPtr = (unsigned int*) malloc((numNodes + 1)*sizeof(unsigned int));
    unsigned int* dstIdx = (unsigned int*) malloc(numEdges*sizeof(unsigned int));
    hipMemcpy(srcPtr, graph_shd.srcPtr, (numNodes + 1)*sizeof(unsigned int), hipMemcpyDeviceToHost);
    hipMemcpy(dstIdx, graph_shd.dstIdx, numEdges*sizeof(unsigned int), hipMemcpyDeviceToHost);
    hipDeviceSynchronize();
    for(unsigned int src = 0; src < numNodes; ++src) {
        unsigned int start = srcPtr[src];
        unsigned int end = srcPtr[src + 1] - 1;
        quicksort(dstIdx, start, end); // NOTE: No need to sort srcIdx because they are all the same
    }
    hipMemcpy(graph_shd.dstIdx, dstIdx, numEdges*sizeof(unsigned int), hipMemcpyHostToDevice);
    free(srcPtr);
    free(dstIdx);
}
// Compact away edges whose destination has been marked DELETED, then rebuild
// the CSR row pointers from the surviving edges. Relative edge order within
// each node is preserved by the stable compaction, so no re-sort is needed.
void removeCOOCSRDeletedEdges(struct COOCSRGraph* g) {
    // Compact edges (stable, in place)
    unsigned int oldNumEdges = g->numEdges;
    g->numEdges = 0;
    for(unsigned int e = 0; e < oldNumEdges; ++e) {
        if(g->dstIdx[e] != DELETED) {
            g->srcIdx[g->numEdges] = g->srcIdx[e];
            g->dstIdx[g->numEdges] = g->dstIdx[e];
            g->numEdges++;
        }
    }
    // Histogram of surviving out-degrees
    memset(g->srcPtr, 0, (g->numNodes + 1)*sizeof(unsigned int));
    for(unsigned int e = 0; e < g->numEdges; ++e) {
        unsigned int src = g->srcIdx[e];
        g->srcPtr[src]++;
    }
    // Prefix sum (exclusive scan) to get final row pointers
    unsigned int sum = 0;
    for(unsigned int src = 0; src < g->numNodes; ++src) {
        unsigned int val = g->srcPtr[src];
        g->srcPtr[src] = sum;
        sum += val;
    }
    g->srcPtr[g->numNodes] = sum;  // terminator equals total edge count
}
// One thread per edge: propagate the DELETED marker from dstIdx to srcIdx so
// both arrays can be compacted with the same predicate afterwards.
__global__ void mark_deleted_srcs_kernel(COOCSRGraph* g) {
    unsigned int e = blockIdx.x*blockDim.x + threadIdx.x;
    if(e < g->numEdges) {
        if(g->dstIdx[e] == DELETED) {
            g->srcIdx[e] = DELETED;
        }
    }
}
// Device-side counterpart of removeCOOCSRDeletedEdges: mark sources of
// deleted edges, compact both edge arrays with thrust::remove (stable, so
// src/dst stay aligned), then rebuild the row pointers with a histogram
// kernel and a Thrust exclusive scan.
void removeCOOCSRDeletedEdgesOnDevice(struct COOCSRGraph* g_d) {
    // Copy shadow
    COOCSRGraph g_shd;
    hipMemcpy(&g_shd, g_d, sizeof(COOCSRGraph), hipMemcpyDeviceToHost);
    hipDeviceSynchronize();
    // Mark deleted sources
    hipLaunchKernelGGL(( mark_deleted_srcs_kernel) , dim3((g_shd.numEdges + 1024 - 1)/1024), dim3(1024) , 0, 0, g_d);
    // Compact edges
    unsigned int* endSrcIdx = thrust::remove(thrust::device, g_shd.srcIdx, g_shd.srcIdx + g_shd.numEdges, DELETED);
    unsigned int* endDstIdx = thrust::remove(thrust::device, g_shd.dstIdx, g_shd.dstIdx + g_shd.numEdges, DELETED);
    assert(endSrcIdx - g_shd.srcIdx == endDstIdx - g_shd.dstIdx);
    g_shd.numEdges = endSrcIdx - g_shd.srcIdx;
    hipMemcpy(&g_d->numEdges, &g_shd.numEdges, sizeof(unsigned int), hipMemcpyHostToDevice);
    // Histogram
    unsigned int numNodes = g_shd.numNodes;
    unsigned int numEdges = g_shd.numEdges;
    hipMemset(g_shd.srcPtr, 0, (numNodes + 1)*sizeof(unsigned int));
    hipLaunchKernelGGL(( histogram_kernel) , dim3((numEdges + 1024 - 1)/1024), dim3(1024) , 0, 0, g_shd.srcIdx, g_shd.srcPtr, numEdges);
    // Prefix sum
    thrust::exclusive_scan(thrust::device, g_shd.srcPtr, g_shd.srcPtr + numNodes + 1, g_shd.srcPtr);
}
// Export the edge arrays of a COO+CSR graph into a plain COO graph
// (the CSR row pointers are simply dropped).
void coocsr2coo(struct COOCSRGraph* in, struct COOGraph* out) {
    assert(out->numNodes == in->numNodes);
    assert(out->capacity >= in->numEdges);
    unsigned int edgeCount = in->numEdges;
    out->numEdges = edgeCount;
    const size_t edgeBytes = edgeCount*sizeof(unsigned int);
    memcpy(out->srcIdx, in->srcIdx, edgeBytes);
    memcpy(out->dstIdx, in->dstIdx, edgeBytes);
}
// Device-side coocsr2coo: copy the edge arrays between two device-resident
// graphs. NOTE(review): the array copies use hipMemcpyHostToDevice although
// both pointers are device pointers — presumably device-to-device was
// intended; verify against the runtime's handling of mismatched kinds.
void coocsr2cooOnDevice(struct COOCSRGraph* in_d, struct COOGraph* out_d) {
    // Copy shadows from device
    COOCSRGraph in_shd;
    COOGraph out_shd;
    hipMemcpy(&in_shd, in_d, sizeof(COOCSRGraph), hipMemcpyDeviceToHost);
    hipMemcpy(&out_shd, out_d, sizeof(COOGraph), hipMemcpyDeviceToHost);
    hipDeviceSynchronize();
    // Convert
    assert(out_shd.numNodes == in_shd.numNodes);
    assert(out_shd.capacity >= in_shd.numEdges);
    hipMemcpy(&out_d->numEdges, &in_shd.numEdges, sizeof(unsigned int), hipMemcpyHostToDevice);
    hipMemcpy(out_shd.srcIdx, in_shd.srcIdx, in_shd.numEdges*sizeof(unsigned int), hipMemcpyHostToDevice);
    hipMemcpy(out_shd.dstIdx, in_shd.dstIdx, in_shd.numEdges*sizeof(unsigned int), hipMemcpyHostToDevice);
}
// Allocate a host tiled COO+CSR graph. The adjacency matrix is divided into
// tilesPerDim x tilesPerDim tiles; tileSrcPtr holds one row-pointer segment
// per (tile, row-within-tile) pair plus a terminator.
struct TiledCOOCSRGraph* createEmptyTiledCOOCSR(unsigned int numNodes, unsigned int tilesPerDim, unsigned int capacity) {
    struct TiledCOOCSRGraph* result = (TiledCOOCSRGraph*) malloc(sizeof(TiledCOOCSRGraph));
    result->numNodes = numNodes;
    result->numEdges = 0;
    result->capacity = capacity;
    result->tilesPerDim = tilesPerDim;
    result->tileSize = (numNodes + tilesPerDim - 1)/tilesPerDim;  // ceil division
    const unsigned int numTileSrcPtrs = tilesPerDim*tilesPerDim*result->tileSize + 1;
    result->tileSrcPtr = (unsigned int*) malloc(numTileSrcPtrs*sizeof(unsigned int));
    const size_t edgeBytes = capacity*sizeof(unsigned int);
    result->srcIdx = (unsigned int*) malloc(edgeBytes);
    result->dstIdx = (unsigned int*) malloc(edgeBytes);
    return result;
}
// Allocate a device-resident tiled COO+CSR graph and return the device
// pointer to the struct (shadow-struct pattern, as in the other *OnDevice
// constructors).
struct TiledCOOCSRGraph* createEmptyTiledCOOCSROnDevice(unsigned int numNodes, unsigned int tilesPerDim, unsigned int capacity) {
    struct TiledCOOCSRGraph g_shd;
    g_shd.numNodes = numNodes;
    g_shd.numEdges = 0;
    g_shd.capacity = capacity;
    g_shd.tilesPerDim = tilesPerDim;
    g_shd.tileSize = (numNodes + tilesPerDim - 1)/tilesPerDim;  // ceil division
    unsigned int numTileSrcPtrs = tilesPerDim*tilesPerDim*g_shd.tileSize + 1;
    hipMalloc((void**) &g_shd.tileSrcPtr, numTileSrcPtrs*sizeof(unsigned int));
    hipMalloc((void**) &g_shd.srcIdx, g_shd.capacity*sizeof(unsigned int));
    hipMalloc((void**) &g_shd.dstIdx, g_shd.capacity*sizeof(unsigned int));
    struct TiledCOOCSRGraph* g_d;
    hipMalloc((void**) &g_d, sizeof(TiledCOOCSRGraph));
    hipMemcpy(g_d, &g_shd, sizeof(TiledCOOCSRGraph), hipMemcpyHostToDevice);
    return g_d;
}
// Release a host tiled COO+CSR graph allocated by createEmptyTiledCOOCSR.
void freeTiledCOOCSRGraph(struct TiledCOOCSRGraph* graph) {
    free(graph->dstIdx);
    free(graph->srcIdx);
    free(graph->tileSrcPtr);
    free(graph);
}
// Free a device tiled COO+CSR graph: fetch the struct to recover the device
// array pointers, then free the arrays and the struct itself.
void freeTiledCOOCSRGraphOnDevice(struct TiledCOOCSRGraph* g_d) {
    struct TiledCOOCSRGraph g_shd;
    hipMemcpy(&g_shd, g_d, sizeof(TiledCOOCSRGraph), hipMemcpyDeviceToHost);
    hipDeviceSynchronize();
    hipFree(g_shd.tileSrcPtr);
    hipFree(g_shd.srcIdx);
    hipFree(g_shd.dstIdx);
    hipFree(g_d);
}
// Copy a host tiled COO+CSR graph's contents into an already-allocated
// device graph; tiling parameters of the two graphs must match exactly.
void copyTiledCOOCSRToDevice(struct TiledCOOCSRGraph* g, struct TiledCOOCSRGraph* g_d) {
    struct TiledCOOCSRGraph g_shd;
    hipMemcpy(&g_shd, g_d, sizeof(TiledCOOCSRGraph), hipMemcpyDeviceToHost);
    hipDeviceSynchronize();
    assert(g_shd.numNodes == g->numNodes);
    assert(g_shd.capacity >= g->numEdges);
    assert(g_shd.tilesPerDim == g->tilesPerDim);
    assert(g_shd.tileSize == g->tileSize);
    unsigned int numTileSrcPtrs = g->tilesPerDim*g->tilesPerDim*g->tileSize + 1;
    hipMemcpy(&g_d->numEdges, &g->numEdges, sizeof(unsigned int), hipMemcpyHostToDevice);
    hipMemcpy(g_shd.tileSrcPtr, g->tileSrcPtr, numTileSrcPtrs*sizeof(unsigned int), hipMemcpyHostToDevice);
    hipMemcpy(g_shd.srcIdx, g->srcIdx, g->numEdges*sizeof(unsigned int), hipMemcpyHostToDevice);
    hipMemcpy(g_shd.dstIdx, g->dstIdx, g->numEdges*sizeof(unsigned int), hipMemcpyHostToDevice);
}
// Copy a device tiled COO+CSR graph's contents back into a host graph;
// tiling parameters of the two graphs must match exactly.
void copyTiledCOOCSRFromDevice(struct TiledCOOCSRGraph* g_d, struct TiledCOOCSRGraph* g) {
    struct TiledCOOCSRGraph g_shd;
    hipMemcpy(&g_shd, g_d, sizeof(TiledCOOCSRGraph), hipMemcpyDeviceToHost);
    hipDeviceSynchronize();
    assert(g->numNodes == g_shd.numNodes);
    assert(g->capacity >= g_shd.numEdges);
    assert(g->tilesPerDim == g_shd.tilesPerDim);
    assert(g->tileSize == g_shd.tileSize);
    g->numEdges = g_shd.numEdges;
    unsigned int numTileSrcPtrs = g->tilesPerDim*g->tilesPerDim*g->tileSize + 1;
    hipMemcpy(g->tileSrcPtr, g_shd.tileSrcPtr, numTileSrcPtrs*sizeof(unsigned int), hipMemcpyDeviceToHost);
    hipMemcpy(g->srcIdx, g_shd.srcIdx, g->numEdges*sizeof(unsigned int), hipMemcpyDeviceToHost);
    hipMemcpy(g->dstIdx, g_shd.dstIdx, g->numEdges*sizeof(unsigned int), hipMemcpyDeviceToHost);
}
// Build a tiled COO+CSR graph from a COO graph on the host. Each edge is
// assigned a bin keyed by (source tile, destination tile, row within tile);
// histogram + exclusive prefix sum + binning follow the same shifted-pointer
// trick as coo2coocsr, then each bin's destinations are sorted.
void coo2tiledcoocsr(struct COOGraph* cooGraph, struct TiledCOOCSRGraph* graph) {
    // Initialize
    unsigned int numNodes = cooGraph->numNodes;
    assert(graph->numNodes == numNodes);
    unsigned int numEdges = cooGraph->numEdges;
    assert(graph->capacity >= numEdges);
    graph->numEdges = numEdges;
    // Histogram
    // NOTE: (tileSrc + 1) used instead of (tileSrc) because it will get shifted by the binning operation
    unsigned int tileSize = graph->tileSize;
    unsigned int tilesPerDim = graph->tilesPerDim;
    unsigned int numTileSrcPtrs = tilesPerDim*tilesPerDim*tileSize;
    memset(graph->tileSrcPtr, 0, (numTileSrcPtrs + 1)*sizeof(unsigned int));
    for(unsigned int e = 0; e < numEdges; ++e) {
        unsigned int src = cooGraph->srcIdx[e];
        unsigned int dst = cooGraph->dstIdx[e];
        unsigned int srcTile = src/tileSize;
        unsigned int dstTile = dst/tileSize;
        // Bin id: tiles in row-major order, then the row within the tile
        unsigned int tileSrc = (srcTile*tilesPerDim + dstTile)*tileSize + src%tileSize;
        graph->tileSrcPtr[tileSrc + 1]++;
    }
    // Prefix sum (exclusive scan over the shifted counts)
    unsigned int sum = 0;
    for(unsigned int tileSrc = 0; tileSrc < numTileSrcPtrs; ++tileSrc) {
        unsigned int val = graph->tileSrcPtr[tileSrc + 1];
        graph->tileSrcPtr[tileSrc + 1] = sum;
        sum += val;
    }
    // Binning: each placement bumps tileSrcPtr[tileSrc + 1] into final position
    for(unsigned int e = 0; e < numEdges; ++e) {
        unsigned int src = cooGraph->srcIdx[e];
        unsigned int dst = cooGraph->dstIdx[e];
        unsigned int srcTile = src/tileSize;
        unsigned int dstTile = dst/tileSize;
        unsigned int tileSrc = (srcTile*tilesPerDim + dstTile)*tileSize + src%tileSize;
        unsigned int j = graph->tileSrcPtr[tileSrc + 1]++;
        graph->srcIdx[j] = src;
        graph->dstIdx[j] = cooGraph->dstIdx[e];
    }
    // Sort outgoing edges of each source node
    for(unsigned int tileSrc = 0; tileSrc < numTileSrcPtrs; ++tileSrc) {
        unsigned int start = graph->tileSrcPtr[tileSrc];
        unsigned int end = graph->tileSrcPtr[tileSrc + 1] - 1;
        quicksort(graph->dstIdx, start, end); // NOTE: No need to sort srcIdx because they are all the same
    }
}
// One thread per COO edge: atomically count the edge into its tiled bin's
// shifted row pointer (tileSrcPtr[tileSrc + 1]); see coo2tiledcoocsr for the
// bin-id layout. tileSrcPtr must be pre-zeroed.
__global__ void histogram_tiled_kernel(COOGraph* cooGraph, TiledCOOCSRGraph* graph) {
    unsigned int e = blockIdx.x*blockDim.x + threadIdx.x;
    if(e < cooGraph->numEdges) {
        unsigned int src = cooGraph->srcIdx[e];
        unsigned int dst = cooGraph->dstIdx[e];
        unsigned int tileSize = graph->tileSize;
        unsigned int tilesPerDim = graph->tilesPerDim;
        unsigned int tileSrc = (src/tileSize*tilesPerDim + dst/tileSize)*tileSize + src%tileSize;
        atomicAdd(&graph->tileSrcPtr[tileSrc + 1], 1);
    }
}
// Tiled overload of binning_kernel: one thread per COO edge, scattering the
// edge into its tiled bin by atomically bumping the shifted pointer.
// Intra-bin order is nondeterministic; the host sorts bins afterwards.
__global__ void binning_kernel(COOGraph* cooGraph, TiledCOOCSRGraph* graph) {
    unsigned int e = blockIdx.x*blockDim.x + threadIdx.x;
    if(e < cooGraph->numEdges) {
        unsigned int src = cooGraph->srcIdx[e];
        unsigned int dst = cooGraph->dstIdx[e];
        unsigned int tileSize = graph->tileSize;
        unsigned int tilesPerDim = graph->tilesPerDim;
        unsigned int tileSrc = (src/tileSize*tilesPerDim + dst/tileSize)*tileSize + src%tileSize;
        unsigned int j = atomicAdd(&graph->tileSrcPtr[tileSrc + 1], 1);
        graph->srcIdx[j] = src;
        graph->dstIdx[j] = cooGraph->dstIdx[e];
    }
}
// Device-side COO → tiled COO+CSR conversion: histogram and binning run as
// kernels, the prefix sum via Thrust, and the per-bin destination sort is
// done on the CPU (dstIdx is round-tripped through host memory).
void coo2tiledcoocsrOnDevice(struct COOGraph* cooGraph_d, struct TiledCOOCSRGraph* graph_d) {
    // Copy shadows from device
    COOGraph cooGraph_shd;
    TiledCOOCSRGraph graph_shd;
    hipMemcpy(&cooGraph_shd, cooGraph_d, sizeof(COOGraph), hipMemcpyDeviceToHost);
    hipMemcpy(&graph_shd, graph_d, sizeof(TiledCOOCSRGraph), hipMemcpyDeviceToHost);
    hipDeviceSynchronize();
    // Initialize
    unsigned int numNodes = cooGraph_shd.numNodes;
    assert(graph_shd.numNodes == numNodes);
    unsigned int numEdges = cooGraph_shd.numEdges;
    assert(graph_shd.capacity >= numEdges);
    hipMemcpy(&graph_d->numEdges, &cooGraph_shd.numEdges, sizeof(unsigned int), hipMemcpyHostToDevice);
    // Histogram
    // NOTE: (tileSrc + 1) used instead of (tileSrc) because it will get shifted by the binning operation
    unsigned int tilesPerDim = graph_shd.tilesPerDim;
    unsigned int numTileSrcPtrs = tilesPerDim*tilesPerDim*graph_shd.tileSize;
    hipMemset(graph_shd.tileSrcPtr, 0, (numTileSrcPtrs + 1)*sizeof(unsigned int));
    hipLaunchKernelGGL(( histogram_tiled_kernel) , dim3((numEdges + 1024 - 1)/1024), dim3(1024) , 0, 0, cooGraph_d, graph_d);
    // Prefix sum
    thrust::exclusive_scan(thrust::device, graph_shd.tileSrcPtr + 1, graph_shd.tileSrcPtr + numTileSrcPtrs + 1, graph_shd.tileSrcPtr + 1);
    // Binning
    hipLaunchKernelGGL(( binning_kernel) , dim3((numEdges + 1024 - 1)/1024), dim3(1024) , 0, 0, cooGraph_d, graph_d);
    // Sort outgoing edges of each source node (on CPU)
    // TODO: Implement sorting on GPU
    unsigned int* tileSrcPtr = (unsigned int*) malloc((numTileSrcPtrs + 1)*sizeof(unsigned int));
    unsigned int* dstIdx = (unsigned int*) malloc(numEdges*sizeof(unsigned int));
    hipMemcpy(tileSrcPtr, graph_shd.tileSrcPtr, (numTileSrcPtrs + 1)*sizeof(unsigned int), hipMemcpyDeviceToHost);
    hipMemcpy(dstIdx, graph_shd.dstIdx, numEdges*sizeof(unsigned int), hipMemcpyDeviceToHost);
    hipDeviceSynchronize();
    for(unsigned int tileSrc = 0; tileSrc < numTileSrcPtrs; ++tileSrc) {
        unsigned int start = tileSrcPtr[tileSrc];
        unsigned int end = tileSrcPtr[tileSrc + 1] - 1;
        quicksort(dstIdx, start, end); // NOTE: No need to sort srcIdx because they are all the same
    }
    hipMemcpy(graph_shd.dstIdx, dstIdx, numEdges*sizeof(unsigned int), hipMemcpyHostToDevice);
    free(tileSrcPtr);
    free(dstIdx);
}
// Compact away edges whose destination is marked DELETED, then rebuild the
// tiled row pointers from the survivors. The stable compaction preserves
// relative edge order within each bin, so no re-sort is needed.
void removeTiledCOOCSRDeletedEdges(struct TiledCOOCSRGraph* g) {
    // Compact edges (stable, in place)
    unsigned int oldNumEdges = g->numEdges;
    g->numEdges = 0;
    for(unsigned int e = 0; e < oldNumEdges; ++e) {
        if(g->dstIdx[e] != DELETED) {
            g->srcIdx[g->numEdges] = g->srcIdx[e];
            g->dstIdx[g->numEdges] = g->dstIdx[e];
            g->numEdges++;
        }
    }
    // Histogram of surviving edges per tiled bin
    unsigned int tileSize = g->tileSize;
    unsigned int tilesPerDim = g->tilesPerDim;
    unsigned int numTileSrcPtrs = tilesPerDim*tilesPerDim*tileSize;
    memset(g->tileSrcPtr, 0, (numTileSrcPtrs + 1)*sizeof(unsigned int));
    for(unsigned int e = 0; e < g->numEdges; ++e) {
        unsigned int src = g->srcIdx[e];
        unsigned int dst = g->dstIdx[e];
        unsigned int srcTile = src/tileSize;
        unsigned int dstTile = dst/tileSize;
        unsigned int tileSrc = (srcTile*tilesPerDim + dstTile)*tileSize + src%tileSize;
        g->tileSrcPtr[tileSrc]++;
    }
    // Prefix sum (exclusive scan) to get final bin pointers
    unsigned int sum = 0;
    for(unsigned int tileSrc = 0; tileSrc < numTileSrcPtrs; ++tileSrc) {
        unsigned int val = g->tileSrcPtr[tileSrc];
        g->tileSrcPtr[tileSrc] = sum;
        sum += val;
    }
    g->tileSrcPtr[numTileSrcPtrs] = sum;  // terminator equals total edge count
}
// One thread per surviving edge: count it into its unshifted tiled bin
// (tileSrcPtr[tileSrc], no +1 — the following exclusive scan produces the
// final pointers directly, no binning pass follows).
__global__ void histogram_tiled_remove_kernel(TiledCOOCSRGraph* g) {
    unsigned int e = blockIdx.x*blockDim.x + threadIdx.x;
    if(e < g->numEdges) {
        unsigned int src = g->srcIdx[e];
        unsigned int dst = g->dstIdx[e];
        unsigned int tileSize = g->tileSize;
        unsigned int tilesPerDim = g->tilesPerDim;
        unsigned int tileSrc = (src/tileSize*tilesPerDim + dst/tileSize)*tileSize + src%tileSize;
        atomicAdd(&g->tileSrcPtr[tileSrc], 1);
    }
}
// One thread per edge: propagate the DELETED marker from dstIdx to srcIdx so
// both arrays can be compacted with the same predicate afterwards.
__global__ void mark_deleted_srcs_tiled_kernel(TiledCOOCSRGraph* g) {
    unsigned int e = blockIdx.x*blockDim.x + threadIdx.x;
    if(e < g->numEdges) {
        if(g->dstIdx[e] == DELETED) {
            g->srcIdx[e] = DELETED;
        }
    }
}
// Device-side counterpart of removeTiledCOOCSRDeletedEdges: mark sources of
// deleted edges, compact both edge arrays with thrust::remove (stable, so
// src/dst stay aligned), then rebuild the tiled row pointers with a
// histogram kernel and a Thrust exclusive scan.
void removeTiledCOOCSRDeletedEdgesOnDevice(struct TiledCOOCSRGraph* g_d) {
    // Copy shadow
    TiledCOOCSRGraph g_shd;
    hipMemcpy(&g_shd, g_d, sizeof(TiledCOOCSRGraph), hipMemcpyDeviceToHost);
    hipDeviceSynchronize();
    // Mark deleted sources
    hipLaunchKernelGGL(( mark_deleted_srcs_tiled_kernel) , dim3((g_shd.numEdges + 1024 - 1)/1024), dim3(1024) , 0, 0, g_d);
    // Compact edges
    unsigned int* endSrcIdx = thrust::remove(thrust::device, g_shd.srcIdx, g_shd.srcIdx + g_shd.numEdges, DELETED);
    unsigned int* endDstIdx = thrust::remove(thrust::device, g_shd.dstIdx, g_shd.dstIdx + g_shd.numEdges, DELETED);
    assert(endSrcIdx - g_shd.srcIdx == endDstIdx - g_shd.dstIdx);
    g_shd.numEdges = endSrcIdx - g_shd.srcIdx;
    hipMemcpy(&g_d->numEdges, &g_shd.numEdges, sizeof(unsigned int), hipMemcpyHostToDevice);
    // Histogram
    unsigned int numEdges = g_shd.numEdges;
    unsigned int tileSize = g_shd.tileSize;
    unsigned int tilesPerDim = g_shd.tilesPerDim;
    unsigned int numTileSrcPtrs = tilesPerDim*tilesPerDim*tileSize;
    hipMemset(g_shd.tileSrcPtr, 0, (numTileSrcPtrs + 1)*sizeof(unsigned int));
    hipLaunchKernelGGL(( histogram_tiled_remove_kernel) , dim3((numEdges + 1024 - 1)/1024), dim3(1024) , 0, 0, g_d);
    // Prefix sum
    thrust::exclusive_scan(thrust::device, g_shd.tileSrcPtr, g_shd.tileSrcPtr + numTileSrcPtrs + 1, g_shd.tileSrcPtr);
}
// Export the edge arrays of a tiled COO+CSR graph into a plain COO graph
// (the tiled row pointers are simply dropped).
void tiledcoocsr2coo(struct TiledCOOCSRGraph* in, struct COOGraph* out) {
    assert(out->numNodes == in->numNodes);
    assert(out->capacity >= in->numEdges);
    unsigned int edgeCount = in->numEdges;
    out->numEdges = edgeCount;
    const size_t edgeBytes = edgeCount*sizeof(unsigned int);
    memcpy(out->srcIdx, in->srcIdx, edgeBytes);
    memcpy(out->dstIdx, in->dstIdx, edgeBytes);
}
// Device-side tiledcoocsr2coo: copy the edge arrays between two
// device-resident graphs. NOTE(review): the array copies use
// hipMemcpyHostToDevice although both pointers are device pointers —
// presumably device-to-device was intended; verify.
void tiledcoocsr2cooOnDevice(struct TiledCOOCSRGraph* in_d, struct COOGraph* out_d) {
    // Copy shadows from device
    TiledCOOCSRGraph in_shd;
    COOGraph out_shd;
    hipMemcpy(&in_shd, in_d, sizeof(TiledCOOCSRGraph), hipMemcpyDeviceToHost);
    hipMemcpy(&out_shd, out_d, sizeof(COOGraph), hipMemcpyDeviceToHost);
    hipDeviceSynchronize();
    // Convert
    assert(out_shd.numNodes == in_shd.numNodes);
    assert(out_shd.capacity >= in_shd.numEdges);
    hipMemcpy(&out_d->numEdges, &in_shd.numEdges, sizeof(unsigned int), hipMemcpyHostToDevice);
    hipMemcpy(out_shd.srcIdx, in_shd.srcIdx, in_shd.numEdges*sizeof(unsigned int), hipMemcpyHostToDevice);
    hipMemcpy(out_shd.dstIdx, in_shd.dstIdx, in_shd.numEdges*sizeof(unsigned int), hipMemcpyHostToDevice);
}
| 9c85d9e15060c5afd1a61ae22732eb8c03a5ea0b.cu |
// Copyright (c) 2020, American University of Beirut
// See LICENSE.txt for copyright license
#include "graph.h"
#include <assert.h>
#include <stdio.h>
// Allocate a host-side COO graph for numNodes nodes with room for
// `capacity` edges; the edge list starts out empty (numEdges == 0).
// Caller releases the result with freeCOOGraph().
struct COOGraph* createEmptyCOO(unsigned int numNodes, unsigned int capacity) {
    struct COOGraph* result = (COOGraph*) malloc(sizeof(COOGraph));
    result->numNodes = numNodes;
    result->numEdges = 0;
    result->capacity = capacity;
    const size_t edgeBytes = capacity*sizeof(unsigned int);
    result->srcIdx = (unsigned int*) malloc(edgeBytes);
    result->dstIdx = (unsigned int*) malloc(edgeBytes);
    return result;
}
// Allocate a device-resident COO graph (both the struct and its edge arrays
// live in device memory) and return the device pointer to the struct.
// NOTE(review): cudaMalloc/cudaMemcpy return codes are not checked anywhere
// in this file, so allocation failures would go unnoticed.
struct COOGraph* createEmptyCOOOnDevice(unsigned int numNodes, unsigned int capacity) {
    // Build a host-side "shadow" of the struct first, filling in the
    // device array pointers as they are allocated...
    struct COOGraph g_shd;
    g_shd.numNodes = numNodes;
    g_shd.numEdges = 0;
    g_shd.capacity = capacity;
    cudaMalloc((void**) &g_shd.srcIdx, g_shd.capacity*sizeof(unsigned int));
    cudaMalloc((void**) &g_shd.dstIdx, g_shd.capacity*sizeof(unsigned int));
    // ...then copy the completed shadow into a device-side struct.
    struct COOGraph* g_d;
    cudaMalloc((void**) &g_d, sizeof(COOGraph));
    cudaMemcpy(g_d, &g_shd, sizeof(COOGraph), cudaMemcpyHostToDevice);
    return g_d;
}
// Read a graph from a whitespace-separated text file of "<dst> <src> <weight>"
// triples with 1-based vertex ids (the weight is read and discarded).
// Returns a newly allocated COO graph; edge arrays grow by doubling.
struct COOGraph* createCOOFromFile(const char* fileName) {
// Allocate
struct COOGraph* cooGraph = (COOGraph*) malloc(sizeof(COOGraph));
cooGraph->capacity = 1 << 20;
cooGraph->srcIdx = (unsigned int*) malloc(cooGraph->capacity*sizeof(unsigned int));
cooGraph->dstIdx = (unsigned int*) malloc(cooGraph->capacity*sizeof(unsigned int));
// Read edges
FILE* fp = fopen(fileName, "r");
assert(fp != NULL);
unsigned int numNodes = 0;
unsigned int numEdges = 0;
unsigned int src, dst, x;
while(fscanf(fp, "%u", &dst) == 1) {
// BUG FIX: the follow-up reads were previously wrapped in assert(fscanf(...)),
// which (a) disappears under NDEBUG, silently dropping the reads, and
// (b) treats fscanf's EOF return (-1) as success. Read unconditionally
// and validate the match count explicitly instead.
int matched = fscanf(fp, "%u", &src);
assert(matched == 1);
matched = fscanf(fp, "%u", &x);
assert(matched == 1);
(void) matched; // keep NDEBUG builds warning-free
(void) x; // edge weight is ignored
assert(src != dst && "Edges from a vertex to itself are not allowed!");
if(numEdges == cooGraph->capacity) {
// Grow both edge arrays by doubling.
cooGraph->capacity = 2*cooGraph->capacity;
cooGraph->srcIdx = (unsigned int*) realloc(cooGraph->srcIdx, cooGraph->capacity*sizeof(unsigned int));
cooGraph->dstIdx = (unsigned int*) realloc(cooGraph->dstIdx, cooGraph->capacity*sizeof(unsigned int));
}
// Convert 1-based file ids to 0-based internal ids.
cooGraph->srcIdx[numEdges] = src - 1;
cooGraph->dstIdx[numEdges] = dst - 1;
++numEdges;
// Track the largest 1-based id seen; it equals the node count.
if(src > numNodes) {
numNodes = src;
}
if(dst > numNodes) {
numNodes = dst;
}
}
// Update counts
cooGraph->numNodes = numNodes;
cooGraph->numEdges = numEdges;
fclose(fp);
return cooGraph;
}
// Release a host-side COO graph and its edge arrays.
void freeCOOGraph(struct COOGraph* cooGraph) {
free(cooGraph->srcIdx);
free(cooGraph->dstIdx);
free(cooGraph);
}
// Release a device-side COO graph: fetch the shadow struct first to recover
// the embedded device buffer pointers, then free the buffers and the struct.
void freeCOOGraphOnDevice(struct COOGraph* g_d) {
struct COOGraph g_shd;
cudaMemcpy(&g_shd, g_d, sizeof(COOGraph), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
cudaFree(g_shd.srcIdx);
cudaFree(g_shd.dstIdx);
cudaFree(g_d);
}
// Write the edge list as "<dst>\t<src>\t<weight>" lines with 1-based ids,
// the same format read by createCOOFromFile; every weight is emitted as 1.
void writeCOOGraphToFile(COOGraph* cooGraph, const char* fileName) {
FILE* fp = fopen(fileName, "w");
// BUG FIX: fopen can fail (bad path, permissions); previously a NULL fp
// would be passed straight to fprintf. Check it, consistent with
// createCOOFromFile's handling of its input file.
assert(fp != NULL);
for(unsigned int e = 0; e < cooGraph->numEdges; ++e) {
fprintf(fp, "%u\t", cooGraph->dstIdx[e] + 1);
fprintf(fp, "%u\t", cooGraph->srcIdx[e] + 1);
fprintf(fp, "%u\n", 1);
}
fclose(fp);
}
// Copy a host COO graph's edges into an existing device COO graph.
// The device struct's shadow is fetched first to obtain its buffer pointers
// and to validate that the destination has matching node count and capacity.
void copyCOOToDevice(struct COOGraph* g, struct COOGraph* g_d) {
struct COOGraph g_shd;
cudaMemcpy(&g_shd, g_d, sizeof(COOGraph), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
assert(g_shd.numNodes == g->numNodes);
assert(g_shd.capacity >= g->numEdges);
// Patch numEdges directly inside the device-resident struct.
cudaMemcpy(&g_d->numEdges, &g->numEdges, sizeof(unsigned int), cudaMemcpyHostToDevice);
cudaMemcpy(g_shd.srcIdx, g->srcIdx, g->numEdges*sizeof(unsigned int), cudaMemcpyHostToDevice);
cudaMemcpy(g_shd.dstIdx, g->dstIdx, g->numEdges*sizeof(unsigned int), cudaMemcpyHostToDevice);
}
// Copy a device COO graph's edges back into an existing host COO graph.
void copyCOOFromDevice(struct COOGraph* g_d, struct COOGraph* g) {
struct COOGraph g_shd;
cudaMemcpy(&g_shd, g_d, sizeof(COOGraph), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
assert(g->numNodes == g_shd.numNodes);
assert(g->capacity >= g_shd.numEdges);
g->numEdges = g_shd.numEdges;
cudaMemcpy(g->srcIdx, g_shd.srcIdx, g->numEdges*sizeof(unsigned int), cudaMemcpyDeviceToHost);
cudaMemcpy(g->dstIdx, g_shd.dstIdx, g->numEdges*sizeof(unsigned int), cudaMemcpyDeviceToHost);
// Make sure the host buffers are fully populated before the caller reads them.
cudaDeviceSynchronize();
}
// In-place recursive quicksort of key[start..end] (inclusive bounds) that
// permutes `data` in lockstep with `key`, keeping key[i]/data[i] paired.
// Indices are unsigned: the (end - start + 1) > 1 guard also rejects the
// empty range produced when end has wrapped to start - 1.
void quicksort(unsigned int *key, unsigned int* data, unsigned int start, unsigned int end) {
    if((end - start + 1) <= 1) return;
    unsigned int lo = start;
    unsigned int hi = end;
    const unsigned int pivot = key[end];   // last element as pivot
    while(lo <= hi) {
        while(key[lo] < pivot) ++lo;
        while(key[hi] > pivot) --hi;
        if(lo <= hi) {
            // Swap the keys and their companion data entries together.
            unsigned int k = key[lo];  key[lo]  = key[hi];  key[hi]  = k;
            unsigned int d = data[lo]; data[lo] = data[hi]; data[hi] = d;
            ++lo;
            --hi;
        }
    }
    quicksort(key, data, start, hi);
    quicksort(key, data, lo, end);
}
// Relabel vertices in ascending order of out-degree. On return, new2old[n]
// holds the original id of the vertex now labeled n, and every edge endpoint
// in the graph has been rewritten to the new labels. Use unsort() with the
// same new2old array to undo the relabeling.
void sortByDegree(COOGraph* graph, unsigned int* new2old) {
// Initialize permutation vector (identity to start)
for(unsigned int i = 0; i < graph->numNodes; ++i) {
new2old[i] = i;
}
// Find degree of each node (out-degree: count of edges leaving src)
unsigned int* degree = (unsigned int*) calloc(graph->numNodes, sizeof(unsigned int));
for(unsigned int e = 0; e < graph->numEdges; ++e) {
degree[graph->srcIdx[e]]++;
}
// Sort nodes by degree; degree is the key, new2old is permuted in lockstep
quicksort(degree, new2old, 0, graph->numNodes - 1);
// Find inverse permutation (old id -> new id) for rewriting endpoints
unsigned int* old2new = (unsigned int*) malloc(graph->numNodes*sizeof(unsigned int));
for(unsigned int newIdx = 0; newIdx < graph->numNodes; ++newIdx) {
unsigned int oldIdx = new2old[newIdx];
old2new[oldIdx] = newIdx;
}
// Update edges
for(unsigned int e = 0; e < graph->numEdges; ++e) {
graph->srcIdx[e] = old2new[graph->srcIdx[e]];
graph->dstIdx[e] = old2new[graph->dstIdx[e]];
}
// Free intermediate data
free(degree);
free(old2new);
}
// Undo sortByDegree's relabeling: map every endpoint back to its original
// vertex id through the new->old permutation.
void unsort(COOGraph* graph, unsigned int* new2old) {
    for(unsigned int e = 0; e < graph->numEdges; ++e) {
        unsigned int s = graph->srcIdx[e];
        unsigned int d = graph->dstIdx[e];
        graph->srcIdx[e] = new2old[s];
        graph->dstIdx[e] = new2old[d];
    }
}
// Keep exactly one orientation (src < dst) of each undirected edge,
// writing the surviving edges into gdirected. Host-side, deterministic.
void undirected2directedCOO(struct COOGraph* gundirected, struct COOGraph* gdirected) {
    assert(gundirected->numNodes == gdirected->numNodes);
    unsigned int outCount = 0;
    for(unsigned int e = 0; e < gundirected->numEdges; ++e) {
        unsigned int src = gundirected->srcIdx[e];
        unsigned int dst = gundirected->dstIdx[e];
        if(src >= dst) continue;   // drop the reverse orientation (and any self-loop)
        assert(outCount < gdirected->capacity);
        gdirected->srcIdx[outCount] = src;
        gdirected->dstIdx[outCount] = dst;
        ++outCount;
    }
    gdirected->numEdges = outCount;
}
// One thread per undirected edge: keep only the (src < dst) orientation,
// claiming an output slot via an atomic counter. Output order is therefore
// nondeterministic, unlike the host version.
__global__ void undirected2directedCOO_kernel(struct COOGraph* gundirected, struct COOGraph* gdirected) {
unsigned int e = blockIdx.x*blockDim.x + threadIdx.x;
if(e < gundirected->numEdges) {
unsigned int src = gundirected->srcIdx[e];
unsigned int dst = gundirected->dstIdx[e];
if(src < dst) {
unsigned int eout = atomicAdd(&gdirected->numEdges, 1);
// NOTE(review): no capacity check here (device asserts avoided); the
// launcher's host-side assert on the shadow struct is the only guard.
gdirected->srcIdx[eout] = src;
gdirected->dstIdx[eout] = dst;
}
}
}
// Device-side counterpart of undirected2directedCOO: validates via host
// shadows, zeroes the output edge counter in device memory, and launches
// one thread per input edge (1024 threads per block).
void undirected2directedCOOOnDevice(struct COOGraph* gundirected_d, struct COOGraph* gdirected_d) {
// Copy shadows from device
COOGraph gundirected_shd;
COOGraph gdirected_shd;
cudaMemcpy(&gundirected_shd, gundirected_d, sizeof(COOGraph), cudaMemcpyDeviceToHost);
cudaMemcpy(&gdirected_shd, gdirected_d, sizeof(COOGraph), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// Convert
assert(gdirected_shd.numNodes == gundirected_shd.numNodes);
// Half the undirected edges survive (one orientation each), hence numEdges/2.
assert(gdirected_shd.capacity >= gundirected_shd.numEdges/2);
// Zero the device-resident numEdges field; the kernel atomically grows it.
cudaMemset(&gdirected_d->numEdges, 0, sizeof(unsigned int));
undirected2directedCOO_kernel <<< (gundirected_shd.numEdges + 1024 - 1)/1024, 1024 >>> (gundirected_d, gdirected_d);
}
// Append the reverse of every existing edge (skipping self-loops) so the
// edge list represents an undirected graph. Grows g->numEdges in place;
// the caller must have reserved roughly double the capacity.
void directed2undirectedCOO(struct COOGraph* g) {
    const unsigned int originalCount = g->numEdges;
    for(unsigned int e = 0; e < originalCount; ++e) {
        unsigned int s = g->srcIdx[e];
        unsigned int d = g->dstIdx[e];
        if(s == d) continue;   // a self-loop has no distinct reverse
        unsigned int slot = g->numEdges++;
        assert(slot < g->capacity);
        g->srcIdx[slot] = d;
        g->dstIdx[slot] = s;
    }
}
// One thread per ORIGINAL edge appends that edge's reverse (unless it is a
// self-loop), claiming an append slot atomically. numDirectedEdges is the
// pre-launch edge count, so every claimed slot lands past the original edges
// and the reads at index e are never overwritten by concurrent appends.
__global__ void directed2undirectedCOO_kernel(struct COOGraph* g, unsigned int numDirectedEdges) {
unsigned int e = blockIdx.x*blockDim.x + threadIdx.x;
if(e < numDirectedEdges) {
unsigned int src = g->srcIdx[e];
unsigned int dst = g->dstIdx[e];
if(src != dst) {
unsigned int eout = atomicAdd(&g->numEdges, 1);
g->srcIdx[eout] = dst;
g->dstIdx[eout] = src;
}
}
}
// Device-side counterpart of directed2undirectedCOO: fetches the shadow to
// validate capacity and to freeze the pre-append edge count passed to the
// kernel (one thread per original edge, 1024 threads per block).
void directed2undirectedCOOOnDevice(struct COOGraph* g_d) {
// Copy shadow from device
COOGraph g_shd;
cudaMemcpy(&g_shd, g_d, sizeof(COOGraph), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// Convert
assert(g_shd.capacity >= 2*g_shd.numEdges);
directed2undirectedCOO_kernel <<< (g_shd.numEdges + 1024 - 1)/1024, 1024 >>> (g_d, g_shd.numEdges);
}
// In-place recursive quicksort of key[start..end] (inclusive bounds).
// Indices are unsigned: the (end - start + 1) <= 1 guard also rejects the
// empty range produced when end has wrapped around to start - 1.
void quicksort(unsigned int *key, unsigned int start, unsigned int end) {
    if((end - start + 1) <= 1) return;
    unsigned int lo = start;
    unsigned int hi = end;
    const unsigned int pivot = key[end];   // last element as pivot
    while(lo <= hi) {
        while(key[lo] < pivot) ++lo;
        while(key[hi] > pivot) --hi;
        if(lo <= hi) {
            unsigned int tmp = key[lo];
            key[lo] = key[hi];
            key[hi] = tmp;
            ++lo;
            --hi;
        }
    }
    quicksort(key, start, hi);
    quicksort(key, lo, end);
}
// Allocate a host-side COO/CSR hybrid graph: a CSR-style srcPtr offset
// array of numNodes+1 entries plus COO-style srcIdx/dstIdx edge arrays
// with room for `capacity` edges. Free with freeCOOCSRGraph.
struct COOCSRGraph* createEmptyCOOCSR(unsigned int numNodes, unsigned int capacity) {
    struct COOCSRGraph* g = (COOCSRGraph*) malloc(sizeof(COOCSRGraph));
    g->numNodes = numNodes;
    g->numEdges = 0;
    g->capacity = capacity;
    const size_t edgeBytes = (size_t) capacity * sizeof(unsigned int);
    g->srcPtr = (unsigned int*) malloc(((size_t) numNodes + 1) * sizeof(unsigned int));
    g->srcIdx = (unsigned int*) malloc(edgeBytes);
    g->dstIdx = (unsigned int*) malloc(edgeBytes);
    return g;
}
// Allocate an empty COO/CSR graph in device memory via the same shadow-struct
// pattern as createEmptyCOOOnDevice; the returned pointer is a DEVICE pointer
// and must be released with freeCOOCSRGraphOnDevice.
struct COOCSRGraph* createEmptyCOOCSROnDevice(unsigned int numNodes, unsigned int capacity) {
struct COOCSRGraph g_shd;
g_shd.numNodes = numNodes;
g_shd.numEdges = 0;
g_shd.capacity = capacity;
cudaMalloc((void**) &g_shd.srcPtr, (g_shd.numNodes + 1)*sizeof(unsigned int));
cudaMalloc((void**) &g_shd.srcIdx, g_shd.capacity*sizeof(unsigned int));
cudaMalloc((void**) &g_shd.dstIdx, g_shd.capacity*sizeof(unsigned int));
struct COOCSRGraph* g_d;
cudaMalloc((void**) &g_d, sizeof(COOCSRGraph));
cudaMemcpy(g_d, &g_shd, sizeof(COOCSRGraph), cudaMemcpyHostToDevice);
return g_d;
}
// Release a host-side COO/CSR graph and all three of its arrays.
void freeCOOCSRGraph(struct COOCSRGraph* graph) {
free(graph->srcPtr);
free(graph->srcIdx);
free(graph->dstIdx);
free(graph);
}
// Release a device-side COO/CSR graph: fetch the shadow struct to recover the
// embedded device buffer pointers, then free the buffers and the struct.
void freeCOOCSRGraphOnDevice(struct COOCSRGraph* g_d) {
struct COOCSRGraph g_shd;
cudaMemcpy(&g_shd, g_d, sizeof(COOCSRGraph), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
cudaFree(g_shd.srcPtr);
cudaFree(g_shd.srcIdx);
cudaFree(g_shd.dstIdx);
cudaFree(g_d);
}
// Copy a host COO/CSR graph (offsets plus edges) into an existing device
// COO/CSR graph, after validating sizes against the device struct's shadow.
void copyCOOCSRToDevice(struct COOCSRGraph* g, struct COOCSRGraph* g_d) {
struct COOCSRGraph g_shd;
cudaMemcpy(&g_shd, g_d, sizeof(COOCSRGraph), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
assert(g_shd.numNodes == g->numNodes);
assert(g_shd.capacity >= g->numEdges);
// Patch numEdges directly inside the device-resident struct.
cudaMemcpy(&g_d->numEdges, &g->numEdges, sizeof(unsigned int), cudaMemcpyHostToDevice);
cudaMemcpy(g_shd.srcPtr, g->srcPtr, (g->numNodes + 1)*sizeof(unsigned int), cudaMemcpyHostToDevice);
cudaMemcpy(g_shd.srcIdx, g->srcIdx, g->numEdges*sizeof(unsigned int), cudaMemcpyHostToDevice);
cudaMemcpy(g_shd.dstIdx, g->dstIdx, g->numEdges*sizeof(unsigned int), cudaMemcpyHostToDevice);
}
// Copy a device COO/CSR graph (offsets plus edges) back into an existing
// host COO/CSR graph.
// NOTE(review): unlike copyCOOFromDevice there is no trailing
// cudaDeviceSynchronize() here; cudaMemcpy D2H blocks the host, so this is
// presumably fine — confirm if streams are ever introduced.
void copyCOOCSRFromDevice(struct COOCSRGraph* g_d, struct COOCSRGraph* g) {
struct COOCSRGraph g_shd;
cudaMemcpy(&g_shd, g_d, sizeof(COOCSRGraph), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
assert(g->numNodes == g_shd.numNodes);
assert(g->capacity >= g_shd.numEdges);
g->numEdges = g_shd.numEdges;
cudaMemcpy(g->srcPtr, g_shd.srcPtr, (g->numNodes + 1)*sizeof(unsigned int), cudaMemcpyDeviceToHost);
cudaMemcpy(g->srcIdx, g_shd.srcIdx, g->numEdges*sizeof(unsigned int), cudaMemcpyDeviceToHost);
cudaMemcpy(g->dstIdx, g_shd.dstIdx, g->numEdges*sizeof(unsigned int), cudaMemcpyDeviceToHost);
}
// Build a COO/CSR hybrid graph from a COO edge list on the host:
// histogram per-source edge counts, exclusive-scan them into offsets, then
// scatter (bin) each edge into its source's segment and sort each segment's
// destinations. The counts/offsets are kept at srcPtr[src + 1] so that the
// binning pass, which post-increments that slot as an insertion cursor,
// leaves srcPtr as a correctly shifted offset array (srcPtr[0] stays 0 from
// the memset, and after binning srcPtr[src + 1] equals the end of src's
// segment, i.e. the start of segment src + 1).
void coo2coocsr(struct COOGraph* cooGraph, struct COOCSRGraph* graph) {
// Initialize
unsigned int numNodes = cooGraph->numNodes;
assert(graph->numNodes == numNodes);
unsigned int numEdges = cooGraph->numEdges;
assert(graph->capacity >= numEdges);
graph->numEdges = numEdges;
// Histogram
// NOTE: (src + 1) used instead of (src) because it will get shifted by the binning operation
memset(graph->srcPtr, 0, (numNodes + 1)*sizeof(unsigned int));
for(unsigned int e = 0; e < numEdges; ++e) {
unsigned int src = cooGraph->srcIdx[e];
graph->srcPtr[src + 1]++;
}
// Prefix sum (exclusive scan over the shifted counts)
unsigned int sum = 0;
for(unsigned int src = 0; src < numNodes; ++src) {
unsigned int val = graph->srcPtr[src + 1];
graph->srcPtr[src + 1] = sum;
sum += val;
}
// Binning: srcPtr[src + 1] doubles as this segment's insertion cursor
for(unsigned int e = 0; e < numEdges; ++e) {
unsigned int src = cooGraph->srcIdx[e];
unsigned int j = graph->srcPtr[src + 1]++;
graph->srcIdx[j] = src;
graph->dstIdx[j] = cooGraph->dstIdx[e];
}
// Sort outgoing edges of each source node
for(unsigned int src = 0; src < numNodes; ++src) {
unsigned int start = graph->srcPtr[src];
unsigned int end = graph->srcPtr[src + 1] - 1;
quicksort(graph->dstIdx, start, end); // NOTE: No need to sort srcIdx because they are all the same
}
}
#include <thrust/execution_policy.h>
#include <thrust/scan.h>
// One thread per element: read data[i] and atomically bump the bin indexed
// by that value. The caller must size `bins` to cover the largest value in
// `data`; contention is handled entirely by atomics.
__global__ void histogram_kernel(unsigned int* data, unsigned int* bins, unsigned int N) {
    unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if(idx >= N) return;
    atomicAdd(&bins[data[idx]], 1);
}
// One thread per COO edge: scatter the edge into its source's segment of the
// COO/CSR arrays. srcPtr[src + 1] serves as the segment's insertion cursor
// (claimed atomically), mirroring the shifted-offset scheme of the host
// coo2coocsr. Within a segment, edge order is nondeterministic; the launcher
// sorts each segment afterwards.
__global__ void binning_kernel(COOGraph* cooGraph, COOCSRGraph* graph) {
unsigned int e = blockIdx.x*blockDim.x + threadIdx.x;
if(e < cooGraph->numEdges) {
unsigned int src = cooGraph->srcIdx[e];
unsigned int j = atomicAdd(&graph->srcPtr[src + 1], 1);
graph->srcIdx[j] = src;
graph->dstIdx[j] = cooGraph->dstIdx[e];
}
}
// Device-side COO -> COO/CSR conversion: histogram and binning run as
// kernels, the offset scan runs via Thrust, and the per-segment destination
// sort is done on the host (round-tripping srcPtr/dstIdx) until a GPU sort
// is implemented. All work is issued on the default stream, which serializes
// the kernels, the Thrust scan, and the copies.
void coo2coocsrOnDevice(struct COOGraph* cooGraph_d, struct COOCSRGraph* graph_d) {
// Copy shadows from device
COOGraph cooGraph_shd;
COOCSRGraph graph_shd;
cudaMemcpy(&cooGraph_shd, cooGraph_d, sizeof(COOGraph), cudaMemcpyDeviceToHost);
cudaMemcpy(&graph_shd, graph_d, sizeof(COOCSRGraph), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// Initialize
unsigned int numNodes = cooGraph_shd.numNodes;
assert(graph_shd.numNodes == numNodes);
unsigned int numEdges = cooGraph_shd.numEdges;
assert(graph_shd.capacity >= numEdges);
cudaMemcpy(&graph_d->numEdges, &cooGraph_shd.numEdges, sizeof(unsigned int), cudaMemcpyHostToDevice);
// Histogram
// NOTE: (srcPtr + 1) used instead of (srcPtr) because it will get shifted by the binning operation
cudaMemset(graph_shd.srcPtr, 0, (numNodes + 1)*sizeof(unsigned int));
histogram_kernel <<< (numEdges + 1024 - 1)/1024, 1024 >>> (cooGraph_shd.srcIdx, graph_shd.srcPtr + 1, numEdges);
// Prefix sum over the shifted counts; srcPtr[0] stays 0 from the memset
thrust::exclusive_scan(thrust::device, graph_shd.srcPtr + 1, graph_shd.srcPtr + numNodes + 1, graph_shd.srcPtr + 1);
// Binning
binning_kernel <<< (numEdges + 1024 - 1)/1024, 1024 >>> (cooGraph_d, graph_d);
// Sort outgoing edges of each source node (on CPU)
// TODO: Implement sorting on GPU
unsigned int* srcPtr = (unsigned int*) malloc((numNodes + 1)*sizeof(unsigned int));
unsigned int* dstIdx = (unsigned int*) malloc(numEdges*sizeof(unsigned int));
cudaMemcpy(srcPtr, graph_shd.srcPtr, (numNodes + 1)*sizeof(unsigned int), cudaMemcpyDeviceToHost);
cudaMemcpy(dstIdx, graph_shd.dstIdx, numEdges*sizeof(unsigned int), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
for(unsigned int src = 0; src < numNodes; ++src) {
unsigned int start = srcPtr[src];
unsigned int end = srcPtr[src + 1] - 1;
quicksort(dstIdx, start, end); // NOTE: No need to sort srcIdx because they are all the same
}
cudaMemcpy(graph_shd.dstIdx, dstIdx, numEdges*sizeof(unsigned int), cudaMemcpyHostToDevice);
free(srcPtr);
free(dstIdx);
}
// Compact away edges whose destination is the DELETED sentinel, then rebuild
// the CSR offsets (histogram + exclusive scan) for the surviving edges.
// Host-side, in place; relies on the surviving edges staying grouped and
// sorted by source, which compaction preserves.
void removeCOOCSRDeletedEdges(struct COOCSRGraph* g) {
// Compact edges (stable: keeps relative order of survivors)
unsigned int oldNumEdges = g->numEdges;
g->numEdges = 0;
for(unsigned int e = 0; e < oldNumEdges; ++e) {
if(g->dstIdx[e] != DELETED) {
g->srcIdx[g->numEdges] = g->srcIdx[e];
g->dstIdx[g->numEdges] = g->dstIdx[e];
g->numEdges++;
}
}
// Histogram (unshifted this time: counts at srcPtr[src])
memset(g->srcPtr, 0, (g->numNodes + 1)*sizeof(unsigned int));
for(unsigned int e = 0; e < g->numEdges; ++e) {
unsigned int src = g->srcIdx[e];
g->srcPtr[src]++;
}
// Prefix sum (exclusive scan in place)
unsigned int sum = 0;
for(unsigned int src = 0; src < g->numNodes; ++src) {
unsigned int val = g->srcPtr[src];
g->srcPtr[src] = sum;
sum += val;
}
// Final sentinel: total surviving edge count
g->srcPtr[g->numNodes] = sum;
}
// One thread per edge: propagate the DELETED sentinel from dstIdx to srcIdx
// so that a later compaction of both arrays removes the same positions.
__global__ void mark_deleted_srcs_kernel(COOCSRGraph* g) {
    unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if(idx >= g->numEdges) return;
    if(g->dstIdx[idx] == DELETED) {
        g->srcIdx[idx] = DELETED;
    }
}
// Device-side edge compaction: mark deleted positions in srcIdx, compact both
// arrays with thrust::remove, then rebuild the CSR offsets on the device.
// The two removes stay paired because thrust::remove is stable and the mark
// kernel put DELETED at the same positions in both arrays. Everything runs
// on the default stream, so kernel -> remove -> scan ordering is implicit.
void removeCOOCSRDeletedEdgesOnDevice(struct COOCSRGraph* g_d) {
// Copy shadow
COOCSRGraph g_shd;
cudaMemcpy(&g_shd, g_d, sizeof(COOCSRGraph), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// Mark deleted sources
mark_deleted_srcs_kernel <<< (g_shd.numEdges + 1024 - 1)/1024, 1024 >>> (g_d);
// Compact edges
unsigned int* endSrcIdx = thrust::remove(thrust::device, g_shd.srcIdx, g_shd.srcIdx + g_shd.numEdges, DELETED);
unsigned int* endDstIdx = thrust::remove(thrust::device, g_shd.dstIdx, g_shd.dstIdx + g_shd.numEdges, DELETED);
assert(endSrcIdx - g_shd.srcIdx == endDstIdx - g_shd.dstIdx);
g_shd.numEdges = endSrcIdx - g_shd.srcIdx;
// Push the new edge count back into the device-resident struct.
cudaMemcpy(&g_d->numEdges, &g_shd.numEdges, sizeof(unsigned int), cudaMemcpyHostToDevice);
// Histogram (unshifted: counts at srcPtr[src])
unsigned int numNodes = g_shd.numNodes;
unsigned int numEdges = g_shd.numEdges;
cudaMemset(g_shd.srcPtr, 0, (numNodes + 1)*sizeof(unsigned int));
histogram_kernel <<< (numEdges + 1024 - 1)/1024, 1024 >>> (g_shd.srcIdx, g_shd.srcPtr, numEdges);
// Prefix sum: in-place exclusive scan over numNodes + 1 slots; the trailing
// zero slot receives the total edge count, completing the offsets.
thrust::exclusive_scan(thrust::device, g_shd.srcPtr, g_shd.srcPtr + numNodes + 1, g_shd.srcPtr);
}
// Flatten a COO/CSR hybrid back to plain COO form on the host; the hybrid's
// srcIdx/dstIdx arrays already form a valid COO edge list, so the conversion
// is a pair of bulk copies (the CSR offsets are simply dropped).
void coocsr2coo(struct COOCSRGraph* in, struct COOGraph* out) {
    assert(in->numNodes == out->numNodes);
    assert(in->numEdges <= out->capacity);
    const size_t edgeBytes = in->numEdges * sizeof(unsigned int);
    out->numEdges = in->numEdges;
    memcpy(out->srcIdx, in->srcIdx, edgeBytes);
    memcpy(out->dstIdx, in->dstIdx, edgeBytes);
}
// Convert a device-resident COO/CSR graph into a device-resident COO graph.
// Shadow copies of both structs are pulled to the host to validate sizes and
// recover the embedded device buffer pointers; the edge arrays themselves
// are then copied device-to-device.
void coocsr2cooOnDevice(struct COOCSRGraph* in_d, struct COOGraph* out_d) {
// Copy shadows from device
COOCSRGraph in_shd;
COOGraph out_shd;
cudaMemcpy(&in_shd, in_d, sizeof(COOCSRGraph), cudaMemcpyDeviceToHost);
cudaMemcpy(&out_shd, out_d, sizeof(COOGraph), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// Convert
assert(out_shd.numNodes == in_shd.numNodes);
assert(out_shd.capacity >= in_shd.numEdges);
// numEdges lives in the host-side shadow, so host-to-device is correct here.
cudaMemcpy(&out_d->numEdges, &in_shd.numEdges, sizeof(unsigned int), cudaMemcpyHostToDevice);
// BUG FIX: srcIdx/dstIdx in both shadow structs are DEVICE pointers, so
// these copies are device-to-device; they were previously issued with
// cudaMemcpyHostToDevice, which mislabels the source memory space.
cudaMemcpy(out_shd.srcIdx, in_shd.srcIdx, in_shd.numEdges*sizeof(unsigned int), cudaMemcpyDeviceToDevice);
cudaMemcpy(out_shd.dstIdx, in_shd.dstIdx, in_shd.numEdges*sizeof(unsigned int), cudaMemcpyDeviceToDevice);
}
// Allocate a host-side tiled COO/CSR graph. The adjacency matrix is split
// into tilesPerDim x tilesPerDim tiles of tileSize rows each; tileSrcPtr
// holds one offset per (tile, row-within-tile) pair plus a final sentinel,
// i.e. tilesPerDim^2 * tileSize + 1 entries. Free with freeTiledCOOCSRGraph.
struct TiledCOOCSRGraph* createEmptyTiledCOOCSR(unsigned int numNodes, unsigned int tilesPerDim, unsigned int capacity) {
struct TiledCOOCSRGraph* graph = (TiledCOOCSRGraph*) malloc(sizeof(TiledCOOCSRGraph));
graph->numNodes = numNodes;
graph->numEdges = 0;
graph->capacity = capacity;
graph->tilesPerDim = tilesPerDim;
// ceil-divide so every node falls in some tile
graph->tileSize = (numNodes + tilesPerDim - 1)/tilesPerDim;
unsigned int numTileSrcPtrs = tilesPerDim*tilesPerDim*graph->tileSize + 1;
graph->tileSrcPtr = (unsigned int*) malloc(numTileSrcPtrs*sizeof(unsigned int));
graph->srcIdx = (unsigned int*) malloc(graph->capacity*sizeof(unsigned int));
graph->dstIdx = (unsigned int*) malloc(graph->capacity*sizeof(unsigned int));
return graph;
}
// Device-memory counterpart of createEmptyTiledCOOCSR, using the shadow-struct
// pattern; the returned pointer is a DEVICE pointer and must be released with
// freeTiledCOOCSRGraphOnDevice.
struct TiledCOOCSRGraph* createEmptyTiledCOOCSROnDevice(unsigned int numNodes, unsigned int tilesPerDim, unsigned int capacity) {
struct TiledCOOCSRGraph g_shd;
g_shd.numNodes = numNodes;
g_shd.numEdges = 0;
g_shd.capacity = capacity;
g_shd.tilesPerDim = tilesPerDim;
// ceil-divide so every node falls in some tile
g_shd.tileSize = (numNodes + tilesPerDim - 1)/tilesPerDim;
unsigned int numTileSrcPtrs = tilesPerDim*tilesPerDim*g_shd.tileSize + 1;
cudaMalloc((void**) &g_shd.tileSrcPtr, numTileSrcPtrs*sizeof(unsigned int));
cudaMalloc((void**) &g_shd.srcIdx, g_shd.capacity*sizeof(unsigned int));
cudaMalloc((void**) &g_shd.dstIdx, g_shd.capacity*sizeof(unsigned int));
struct TiledCOOCSRGraph* g_d;
cudaMalloc((void**) &g_d, sizeof(TiledCOOCSRGraph));
cudaMemcpy(g_d, &g_shd, sizeof(TiledCOOCSRGraph), cudaMemcpyHostToDevice);
return g_d;
}
// Release a host-side tiled COO/CSR graph and all three of its arrays.
void freeTiledCOOCSRGraph(struct TiledCOOCSRGraph* graph) {
free(graph->tileSrcPtr);
free(graph->srcIdx);
free(graph->dstIdx);
free(graph);
}
// Release a device-side tiled COO/CSR graph: fetch the shadow struct to
// recover the embedded device buffer pointers, then free buffers and struct.
void freeTiledCOOCSRGraphOnDevice(struct TiledCOOCSRGraph* g_d) {
struct TiledCOOCSRGraph g_shd;
cudaMemcpy(&g_shd, g_d, sizeof(TiledCOOCSRGraph), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
cudaFree(g_shd.tileSrcPtr);
cudaFree(g_shd.srcIdx);
cudaFree(g_shd.dstIdx);
cudaFree(g_d);
}
// Copy a host tiled COO/CSR graph into an existing device one after checking
// that node count, capacity, and tiling geometry all match the destination.
void copyTiledCOOCSRToDevice(struct TiledCOOCSRGraph* g, struct TiledCOOCSRGraph* g_d) {
struct TiledCOOCSRGraph g_shd;
cudaMemcpy(&g_shd, g_d, sizeof(TiledCOOCSRGraph), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
assert(g_shd.numNodes == g->numNodes);
assert(g_shd.capacity >= g->numEdges);
assert(g_shd.tilesPerDim == g->tilesPerDim);
assert(g_shd.tileSize == g->tileSize);
unsigned int numTileSrcPtrs = g->tilesPerDim*g->tilesPerDim*g->tileSize + 1;
// Patch numEdges directly inside the device-resident struct.
cudaMemcpy(&g_d->numEdges, &g->numEdges, sizeof(unsigned int), cudaMemcpyHostToDevice);
cudaMemcpy(g_shd.tileSrcPtr, g->tileSrcPtr, numTileSrcPtrs*sizeof(unsigned int), cudaMemcpyHostToDevice);
cudaMemcpy(g_shd.srcIdx, g->srcIdx, g->numEdges*sizeof(unsigned int), cudaMemcpyHostToDevice);
cudaMemcpy(g_shd.dstIdx, g->dstIdx, g->numEdges*sizeof(unsigned int), cudaMemcpyHostToDevice);
}
// Copy a device tiled COO/CSR graph back into an existing host one after
// checking that node count, capacity, and tiling geometry all match.
void copyTiledCOOCSRFromDevice(struct TiledCOOCSRGraph* g_d, struct TiledCOOCSRGraph* g) {
struct TiledCOOCSRGraph g_shd;
cudaMemcpy(&g_shd, g_d, sizeof(TiledCOOCSRGraph), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
assert(g->numNodes == g_shd.numNodes);
assert(g->capacity >= g_shd.numEdges);
assert(g->tilesPerDim == g_shd.tilesPerDim);
assert(g->tileSize == g_shd.tileSize);
g->numEdges = g_shd.numEdges;
unsigned int numTileSrcPtrs = g->tilesPerDim*g->tilesPerDim*g->tileSize + 1;
cudaMemcpy(g->tileSrcPtr, g_shd.tileSrcPtr, numTileSrcPtrs*sizeof(unsigned int), cudaMemcpyDeviceToHost);
cudaMemcpy(g->srcIdx, g_shd.srcIdx, g->numEdges*sizeof(unsigned int), cudaMemcpyDeviceToHost);
cudaMemcpy(g->dstIdx, g_shd.dstIdx, g->numEdges*sizeof(unsigned int), cudaMemcpyDeviceToHost);
}
// Build a tiled COO/CSR graph from a COO edge list on the host. Each edge is
// assigned a bin keyed by (source tile, destination tile, row within tile):
// tileSrc = (srcTile*tilesPerDim + dstTile)*tileSize + src%tileSize.
// The pipeline mirrors coo2coocsr — histogram, exclusive scan, binning, then
// a per-bin destination sort — with the same shifted-offset trick: counts and
// cursors live at tileSrcPtr[tileSrc + 1] so the binning pass leaves a valid
// offset array (tileSrcPtr[0] stays 0 from the memset).
void coo2tiledcoocsr(struct COOGraph* cooGraph, struct TiledCOOCSRGraph* graph) {
// Initialize
unsigned int numNodes = cooGraph->numNodes;
assert(graph->numNodes == numNodes);
unsigned int numEdges = cooGraph->numEdges;
assert(graph->capacity >= numEdges);
graph->numEdges = numEdges;
// Histogram
// NOTE: (tileSrc + 1) used instead of (tileSrc) because it will get shifted by the binning operation
unsigned int tileSize = graph->tileSize;
unsigned int tilesPerDim = graph->tilesPerDim;
unsigned int numTileSrcPtrs = tilesPerDim*tilesPerDim*tileSize;
memset(graph->tileSrcPtr, 0, (numTileSrcPtrs + 1)*sizeof(unsigned int));
for(unsigned int e = 0; e < numEdges; ++e) {
unsigned int src = cooGraph->srcIdx[e];
unsigned int dst = cooGraph->dstIdx[e];
unsigned int srcTile = src/tileSize;
unsigned int dstTile = dst/tileSize;
unsigned int tileSrc = (srcTile*tilesPerDim + dstTile)*tileSize + src%tileSize;
graph->tileSrcPtr[tileSrc + 1]++;
}
// Prefix sum (exclusive scan over the shifted counts)
unsigned int sum = 0;
for(unsigned int tileSrc = 0; tileSrc < numTileSrcPtrs; ++tileSrc) {
unsigned int val = graph->tileSrcPtr[tileSrc + 1];
graph->tileSrcPtr[tileSrc + 1] = sum;
sum += val;
}
// Binning: tileSrcPtr[tileSrc + 1] doubles as this bin's insertion cursor
for(unsigned int e = 0; e < numEdges; ++e) {
unsigned int src = cooGraph->srcIdx[e];
unsigned int dst = cooGraph->dstIdx[e];
unsigned int srcTile = src/tileSize;
unsigned int dstTile = dst/tileSize;
unsigned int tileSrc = (srcTile*tilesPerDim + dstTile)*tileSize + src%tileSize;
unsigned int j = graph->tileSrcPtr[tileSrc + 1]++;
graph->srcIdx[j] = src;
graph->dstIdx[j] = cooGraph->dstIdx[e];
}
// Sort outgoing edges of each source node
for(unsigned int tileSrc = 0; tileSrc < numTileSrcPtrs; ++tileSrc) {
unsigned int start = graph->tileSrcPtr[tileSrc];
unsigned int end = graph->tileSrcPtr[tileSrc + 1] - 1;
quicksort(graph->dstIdx, start, end); // NOTE: No need to sort srcIdx because they are all the same
}
}
// One thread per COO edge: compute the edge's tiled bin key (source tile,
// destination tile, row within tile) and atomically bump the shifted count
// slot tileSrcPtr[tileSrc + 1], matching the host coo2tiledcoocsr histogram.
__global__ void histogram_tiled_kernel(COOGraph* cooGraph, TiledCOOCSRGraph* graph) {
unsigned int e = blockIdx.x*blockDim.x + threadIdx.x;
if(e < cooGraph->numEdges) {
unsigned int src = cooGraph->srcIdx[e];
unsigned int dst = cooGraph->dstIdx[e];
unsigned int tileSize = graph->tileSize;
unsigned int tilesPerDim = graph->tilesPerDim;
unsigned int tileSrc = (src/tileSize*tilesPerDim + dst/tileSize)*tileSize + src%tileSize;
atomicAdd(&graph->tileSrcPtr[tileSrc + 1], 1);
}
}
// One thread per COO edge: scatter the edge into its tiled bin, using the
// shifted slot tileSrcPtr[tileSrc + 1] as an atomically-claimed insertion
// cursor. Within a bin, edge order is nondeterministic; the launcher sorts
// each bin's destinations afterwards.
__global__ void binning_kernel(COOGraph* cooGraph, TiledCOOCSRGraph* graph) {
unsigned int e = blockIdx.x*blockDim.x + threadIdx.x;
if(e < cooGraph->numEdges) {
unsigned int src = cooGraph->srcIdx[e];
unsigned int dst = cooGraph->dstIdx[e];
unsigned int tileSize = graph->tileSize;
unsigned int tilesPerDim = graph->tilesPerDim;
unsigned int tileSrc = (src/tileSize*tilesPerDim + dst/tileSize)*tileSize + src%tileSize;
unsigned int j = atomicAdd(&graph->tileSrcPtr[tileSrc + 1], 1);
graph->srcIdx[j] = src;
graph->dstIdx[j] = cooGraph->dstIdx[e];
}
}
// Device-side COO -> tiled COO/CSR conversion: histogram and binning run as
// kernels, the offset scan runs via Thrust, and the per-bin destination sort
// is done on the host (round-tripping tileSrcPtr/dstIdx) until a GPU sort is
// implemented. All work is issued on the default stream, which serializes
// the kernels, the Thrust scan, and the copies.
void coo2tiledcoocsrOnDevice(struct COOGraph* cooGraph_d, struct TiledCOOCSRGraph* graph_d) {
// Copy shadows from device
COOGraph cooGraph_shd;
TiledCOOCSRGraph graph_shd;
cudaMemcpy(&cooGraph_shd, cooGraph_d, sizeof(COOGraph), cudaMemcpyDeviceToHost);
cudaMemcpy(&graph_shd, graph_d, sizeof(TiledCOOCSRGraph), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// Initialize
unsigned int numNodes = cooGraph_shd.numNodes;
assert(graph_shd.numNodes == numNodes);
unsigned int numEdges = cooGraph_shd.numEdges;
assert(graph_shd.capacity >= numEdges);
cudaMemcpy(&graph_d->numEdges, &cooGraph_shd.numEdges, sizeof(unsigned int), cudaMemcpyHostToDevice);
// Histogram
// NOTE: (tileSrc + 1) used instead of (tileSrc) because it will get shifted by the binning operation
unsigned int tilesPerDim = graph_shd.tilesPerDim;
unsigned int numTileSrcPtrs = tilesPerDim*tilesPerDim*graph_shd.tileSize;
cudaMemset(graph_shd.tileSrcPtr, 0, (numTileSrcPtrs + 1)*sizeof(unsigned int));
histogram_tiled_kernel <<< (numEdges + 1024 - 1)/1024, 1024 >>> (cooGraph_d, graph_d);
// Prefix sum over the shifted counts; tileSrcPtr[0] stays 0 from the memset
thrust::exclusive_scan(thrust::device, graph_shd.tileSrcPtr + 1, graph_shd.tileSrcPtr + numTileSrcPtrs + 1, graph_shd.tileSrcPtr + 1);
// Binning
binning_kernel <<< (numEdges + 1024 - 1)/1024, 1024 >>> (cooGraph_d, graph_d);
// Sort outgoing edges of each source node (on CPU)
// TODO: Implement sorting on GPU
unsigned int* tileSrcPtr = (unsigned int*) malloc((numTileSrcPtrs + 1)*sizeof(unsigned int));
unsigned int* dstIdx = (unsigned int*) malloc(numEdges*sizeof(unsigned int));
cudaMemcpy(tileSrcPtr, graph_shd.tileSrcPtr, (numTileSrcPtrs + 1)*sizeof(unsigned int), cudaMemcpyDeviceToHost);
cudaMemcpy(dstIdx, graph_shd.dstIdx, numEdges*sizeof(unsigned int), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
for(unsigned int tileSrc = 0; tileSrc < numTileSrcPtrs; ++tileSrc) {
unsigned int start = tileSrcPtr[tileSrc];
unsigned int end = tileSrcPtr[tileSrc + 1] - 1;
quicksort(dstIdx, start, end); // NOTE: No need to sort srcIdx because they are all the same
}
cudaMemcpy(graph_shd.dstIdx, dstIdx, numEdges*sizeof(unsigned int), cudaMemcpyHostToDevice);
free(tileSrcPtr);
free(dstIdx);
}
// Compact away edges whose destination is the DELETED sentinel, then rebuild
// the tiled offsets (histogram over tiled bin keys + exclusive scan) for the
// surviving edges. Host-side, in place.
void removeTiledCOOCSRDeletedEdges(struct TiledCOOCSRGraph* g) {
// Compact edges (stable: keeps relative order of survivors)
unsigned int oldNumEdges = g->numEdges;
g->numEdges = 0;
for(unsigned int e = 0; e < oldNumEdges; ++e) {
if(g->dstIdx[e] != DELETED) {
g->srcIdx[g->numEdges] = g->srcIdx[e];
g->dstIdx[g->numEdges] = g->dstIdx[e];
g->numEdges++;
}
}
// Histogram (unshifted this time: counts at tileSrcPtr[tileSrc])
unsigned int tileSize = g->tileSize;
unsigned int tilesPerDim = g->tilesPerDim;
unsigned int numTileSrcPtrs = tilesPerDim*tilesPerDim*tileSize;
memset(g->tileSrcPtr, 0, (numTileSrcPtrs + 1)*sizeof(unsigned int));
for(unsigned int e = 0; e < g->numEdges; ++e) {
unsigned int src = g->srcIdx[e];
unsigned int dst = g->dstIdx[e];
unsigned int srcTile = src/tileSize;
unsigned int dstTile = dst/tileSize;
unsigned int tileSrc = (srcTile*tilesPerDim + dstTile)*tileSize + src%tileSize;
g->tileSrcPtr[tileSrc]++;
}
// Prefix sum (exclusive scan in place)
unsigned int sum = 0;
for(unsigned int tileSrc = 0; tileSrc < numTileSrcPtrs; ++tileSrc) {
unsigned int val = g->tileSrcPtr[tileSrc];
g->tileSrcPtr[tileSrc] = sum;
sum += val;
}
// Final sentinel: total surviving edge count
g->tileSrcPtr[numTileSrcPtrs] = sum;
}
// One thread per surviving edge: recompute the edge's tiled bin key and
// atomically bump the UNSHIFTED count slot tileSrcPtr[tileSrc], as used when
// rebuilding offsets after compaction (no binning pass follows here).
__global__ void histogram_tiled_remove_kernel(TiledCOOCSRGraph* g) {
unsigned int e = blockIdx.x*blockDim.x + threadIdx.x;
if(e < g->numEdges) {
unsigned int src = g->srcIdx[e];
unsigned int dst = g->dstIdx[e];
unsigned int tileSize = g->tileSize;
unsigned int tilesPerDim = g->tilesPerDim;
unsigned int tileSrc = (src/tileSize*tilesPerDim + dst/tileSize)*tileSize + src%tileSize;
atomicAdd(&g->tileSrcPtr[tileSrc], 1);
}
}
// One thread per edge: propagate the DELETED sentinel from dstIdx to srcIdx
// so that a later compaction of both arrays removes the same positions.
__global__ void mark_deleted_srcs_tiled_kernel(TiledCOOCSRGraph* g) {
    unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if(idx >= g->numEdges) return;
    if(g->dstIdx[idx] == DELETED) {
        g->srcIdx[idx] = DELETED;
    }
}
// Device-side tiled edge compaction: mark deleted positions in srcIdx,
// compact both arrays with thrust::remove, then rebuild the tiled offsets on
// the device. The two removes stay paired because thrust::remove is stable
// and the mark kernel put DELETED at identical positions in both arrays.
// Everything runs on the default stream, so ordering is implicit.
void removeTiledCOOCSRDeletedEdgesOnDevice(struct TiledCOOCSRGraph* g_d) {
// Copy shadow
TiledCOOCSRGraph g_shd;
cudaMemcpy(&g_shd, g_d, sizeof(TiledCOOCSRGraph), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// Mark deleted sources
mark_deleted_srcs_tiled_kernel <<< (g_shd.numEdges + 1024 - 1)/1024, 1024 >>> (g_d);
// Compact edges
unsigned int* endSrcIdx = thrust::remove(thrust::device, g_shd.srcIdx, g_shd.srcIdx + g_shd.numEdges, DELETED);
unsigned int* endDstIdx = thrust::remove(thrust::device, g_shd.dstIdx, g_shd.dstIdx + g_shd.numEdges, DELETED);
assert(endSrcIdx - g_shd.srcIdx == endDstIdx - g_shd.dstIdx);
g_shd.numEdges = endSrcIdx - g_shd.srcIdx;
// Push the new edge count back into the device-resident struct.
cudaMemcpy(&g_d->numEdges, &g_shd.numEdges, sizeof(unsigned int), cudaMemcpyHostToDevice);
// Histogram (unshifted: counts at tileSrcPtr[tileSrc])
unsigned int numEdges = g_shd.numEdges;
unsigned int tileSize = g_shd.tileSize;
unsigned int tilesPerDim = g_shd.tilesPerDim;
unsigned int numTileSrcPtrs = tilesPerDim*tilesPerDim*tileSize;
cudaMemset(g_shd.tileSrcPtr, 0, (numTileSrcPtrs + 1)*sizeof(unsigned int));
histogram_tiled_remove_kernel <<< (numEdges + 1024 - 1)/1024, 1024 >>> (g_d);
// Prefix sum: in-place exclusive scan over numTileSrcPtrs + 1 slots; the
// trailing zero slot receives the total edge count, completing the offsets.
thrust::exclusive_scan(thrust::device, g_shd.tileSrcPtr, g_shd.tileSrcPtr + numTileSrcPtrs + 1, g_shd.tileSrcPtr);
}
// Flatten a tiled COO/CSR graph back to plain COO form on the host.
// The tiled representation's srcIdx/dstIdx arrays already form a valid
// COO edge list, so the conversion is just a pair of bulk copies.
void tiledcoocsr2coo(struct TiledCOOCSRGraph* in, struct COOGraph* out) {
    assert(in->numNodes == out->numNodes);
    assert(in->numEdges <= out->capacity);
    const size_t edgeBytes = in->numEdges * sizeof(unsigned int);
    out->numEdges = in->numEdges;
    memcpy(out->srcIdx, in->srcIdx, edgeBytes);
    memcpy(out->dstIdx, in->dstIdx, edgeBytes);
}
// Convert a device-resident tiled COO/CSR graph into a device-resident COO
// graph. Shadow copies of both structs are pulled to the host to validate
// sizes and recover the embedded device buffer pointers; the edge arrays
// are then copied device-to-device.
void tiledcoocsr2cooOnDevice(struct TiledCOOCSRGraph* in_d, struct COOGraph* out_d) {
// Copy shadows from device
TiledCOOCSRGraph in_shd;
COOGraph out_shd;
cudaMemcpy(&in_shd, in_d, sizeof(TiledCOOCSRGraph), cudaMemcpyDeviceToHost);
cudaMemcpy(&out_shd, out_d, sizeof(COOGraph), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// Convert
assert(out_shd.numNodes == in_shd.numNodes);
assert(out_shd.capacity >= in_shd.numEdges);
// numEdges lives in the host-side shadow, so host-to-device is correct here.
cudaMemcpy(&out_d->numEdges, &in_shd.numEdges, sizeof(unsigned int), cudaMemcpyHostToDevice);
// BUG FIX: srcIdx/dstIdx in both shadow structs are DEVICE pointers, so
// these copies are device-to-device; they were previously issued with
// cudaMemcpyHostToDevice, which mislabels the source memory space.
cudaMemcpy(out_shd.srcIdx, in_shd.srcIdx, in_shd.numEdges*sizeof(unsigned int), cudaMemcpyDeviceToDevice);
cudaMemcpy(out_shd.dstIdx, in_shd.dstIdx, in_shd.numEdges*sizeof(unsigned int), cudaMemcpyDeviceToDevice);
}
|
d52d27f5c969849cd952905565b9221bcb7e3413.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../common/common.h"
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <omp.h>
/*
* The crypt application implements IDEA encryption and decryption of a single
* input file using the secret key provided.
*/
// Chunking size for IDEA, in bytes
#define CHUNK_SIZE 8
// Length of the encryption/decryption keys, in bytes
#define KEY_LENGTH 52
#define BLOCK_SIZE_IN_CHUNKS 1024000
// Length of the secret key, in bytes
#define USERKEY_LENGTH 8
#define BITS_PER_BYTE 8
// Per-device bookkeeping for the crypt driver: device-side plaintext and
// ciphertext buffers, an array of streams, and a block count.
// NOTE(review): field usage is defined by code outside this chunk — confirm
// nBlocks counts BLOCK_SIZE_IN_CHUNKS-sized blocks assigned to this device.
typedef struct _device_context
{
signed char *dPlain, *dCrypt;
hipStream_t *streams;
int nBlocks;
} device_context;
// Whether a run encrypts or decrypts the input.
typedef enum { ENCRYPT, DECRYPT } action;
// Expanded key (KEY_LENGTH = 52 ints) placed in constant memory, where all
// threads reading the same element hit the broadcast constant cache.
__constant__ int dkey[KEY_LENGTH];
/*
* doCrypt implements the core logic of IDEA. It iterates over the byte
* chunks stored in plainList and outputs their encrypted/decrypted form to the
* corresponding element in cryptList using the secret key provided.
*/
// Encrypt or decrypt one 8-byte chunk of `plain` into `crypt` using the
// 52-element expanded IDEA key (encryption vs decryption is determined solely
// by which expanded key is passed in). The chunk is unpacked into four
// little-endian 16-bit halves, run through the rounds, and repacked.
// All arithmetic is IDEA's: multiplication mod 0x10001 and addition mod 2^16.
__host__ __device__ void doCrypt(int chunk, signed char *plain,
signed char *crypt, int *key)
{
long x1, x2, x3, x4, t1, t2, ik, r;
// Unpack 8 bytes into four 16-bit words (low byte first).
x1 = (((unsigned int)plain[chunk * CHUNK_SIZE]) & 0xff);
x1 |= ((((unsigned int)plain[chunk * CHUNK_SIZE + 1]) & 0xff) <<
BITS_PER_BYTE);
x2 = (((unsigned int)plain[chunk * CHUNK_SIZE + 2]) & 0xff);
x2 |= ((((unsigned int)plain[chunk * CHUNK_SIZE + 3]) & 0xff) <<
BITS_PER_BYTE);
x3 = (((unsigned int)plain[chunk * CHUNK_SIZE + 4]) & 0xff);
x3 |= ((((unsigned int)plain[chunk * CHUNK_SIZE + 5]) & 0xff) <<
BITS_PER_BYTE);
x4 = (((unsigned int)plain[chunk * CHUNK_SIZE + 6]) & 0xff);
x4 |= ((((unsigned int)plain[chunk * CHUNK_SIZE + 7]) & 0xff) <<
BITS_PER_BYTE);
ik = 0;
// Round count; reuses CHUNK_SIZE, whose value (8) matches the number of rounds.
r = CHUNK_SIZE;
do
{
// One round: four keyed mul/add operations on the halves, then the
// MA (multiply-add) structure mixing x1^x3 and x2^x4, consuming six
// key words per round via the running index ik.
x1 = (int)((((long)x1 * key[ik++]) % 0x10001L) & 0xffff);
x2 = ((x2 + key[ik++]) & 0xffff);
x3 = ((x3 + key[ik++]) & 0xffff);
x4 = (int)((((long)x4 * key[ik++]) % 0x10001L) & 0xffff);
t2 = (x1 ^ x3);
t2 = (int)((((long)t2 * key[ik++]) % 0x10001L) & 0xffff);
t1 = ((t2 + (x2 ^ x4)) & 0xffff);
t1 = (int)((((long)t1 * key[ik++]) % 0x10001L) & 0xffff);
t2 = (t1 + t2 & 0xffff); // parsed as t1 + (t2 & 0xffff)? No: & binds looser, so (t1 + t2) & 0xffff
x1 = (x1 ^ t1);
x4 = (x4 ^ t2);
t2 = (t2 ^ x2);
x2 = (x3 ^ t1);
x3 = t2;
}
while(--r != 0);
// Output transformation: final four key words (note x2/x3 swap relative to
// the round order).
x1 = (int)((((long)x1 * key[ik++]) % 0x10001L) & 0xffff);
x3 = ((x3 + key[ik++]) & 0xffff);
x2 = ((x2 + key[ik++]) & 0xffff);
x4 = (int)((((long)x4 * key[ik++]) % 0x10001L) & 0xffff);
// Repack the four words as 8 bytes, low byte first, in x1,x3,x2,x4 order.
crypt[chunk * CHUNK_SIZE] = (signed char) x1;
crypt[chunk * CHUNK_SIZE + 1] = (signed char) ((unsigned long)x1 >>
BITS_PER_BYTE);
crypt[chunk * CHUNK_SIZE + 2] = (signed char) x3;
crypt[chunk * CHUNK_SIZE + 3] = (signed char) ((unsigned long)x3 >>
BITS_PER_BYTE);
crypt[chunk * CHUNK_SIZE + 4] = (signed char) x2;
crypt[chunk * CHUNK_SIZE + 5] = (signed char) ((unsigned long)x2 >>
BITS_PER_BYTE);
crypt[chunk * CHUNK_SIZE + 6] = (signed char) x4;
crypt[chunk * CHUNK_SIZE + 7] = (signed char) ((unsigned long)x4 >>
BITS_PER_BYTE);
}
/*
 * Device-side IDEA driver: a grid-stride loop over the nChunks 8-byte
 * chunks, using the subkeys previously copied into constant memory (dkey).
 * Works for any launch configuration that covers at least one thread.
 */
__global__ void d_encrypt_decrypt(signed char *plain, signed char *crypt, int nChunks)
{
    const int stride = blockDim.x * gridDim.x;
    for (int chunk = blockIdx.x * blockDim.x + threadIdx.x;
         chunk < nChunks;
         chunk += stride)
    {
        doCrypt(chunk, plain, crypt, dkey);
    }
}
/*
 * Host-side IDEA driver: processes every 8-byte chunk of the buffer in
 * parallel with OpenMP, using the expanded subkeys in `key`.
 */
static void h_encrypt_decrypt(signed char *plain, signed char *crypt, int *key,
                              int plainLength)
{
    int totalChunks = plainLength / CHUNK_SIZE;
    int chunk;
    #pragma omp parallel for firstprivate(totalChunks) private(chunk)
    for (chunk = 0; chunk < totalChunks; chunk++)
    {
        doCrypt(chunk, plain, crypt, key);
    }
}
/*
 * Allocate the per-device state used by encrypt_decrypt_driver: device
 * buffers sized for this device's slice of the text, plus one HIP stream
 * per BLOCK_SIZE_IN_CHUNKS-sized block so the copies and kernels of
 * different blocks can overlap.
 */
static void init_context(device_context *ctx, int plainLength)
{
    signed char *dPlain, *dCrypt;
    hipStream_t *streams;
    int nBlocks, b;
    // IDEA operates on whole 8-byte chunks only.
    if (plainLength % CHUNK_SIZE != 0)
    {
        fprintf(stderr, "Invalid encryption: length of plain must be an even "
                "multiple of %d but is %d\n", CHUNK_SIZE, plainLength);
        exit(-1);
    }
    CHECK(hipMalloc((void **)&dPlain, plainLength * sizeof(signed char)));
    CHECK(hipMalloc((void **)&dCrypt, plainLength * sizeof(signed char)));
    int nChunks = plainLength / CHUNK_SIZE;
    // Ceil-divide the chunks into blocks; one stream per block.
    nBlocks = (nChunks + BLOCK_SIZE_IN_CHUNKS - 1) / BLOCK_SIZE_IN_CHUNKS;
    streams = (hipStream_t *)malloc(sizeof(hipStream_t) * nBlocks);
    for (b = 0; b < nBlocks; b++)
    {
        CHECK(hipStreamCreate(streams + b));
    }
    ctx->dPlain = dPlain;
    ctx->dCrypt = dCrypt;
    ctx->streams = streams;
    ctx->nBlocks = nBlocks;
}
/*
 * Stream plainLength bytes through the IDEA kernel on the device whose
 * context is `ctx`.  The input is processed in BLOCK_SIZE_IN_CHUNKS-sized
 * blocks, each on its own stream, so the H2D copy, kernel, and D2H copy of
 * different blocks can overlap.  All work is asynchronous; the caller must
 * synchronize the device before reading `crypt`.
 */
static void encrypt_decrypt_driver(signed char *plain, signed char *crypt,
                                   int *key,
                                   int plainLength, int nThreadsPerBlock,
                                   device_context *ctx)
{
    int b;
    // BUGFIX: query the properties of the device this context belongs to
    // (the caller selects it with hipSetDevice), not always device 0.
    int dev;
    CHECK(hipGetDevice(&dev));
    hipDeviceProp_t info;
    CHECK(hipGetDeviceProperties(&info, dev));
    int nChunks = plainLength / CHUNK_SIZE;
    // One thread per chunk, clamped to the device's maximum grid width;
    // the kernel's grid-stride loop covers any remainder.
    int nThreadBlocks = (nChunks + nThreadsPerBlock - 1) / nThreadsPerBlock;
    if (nThreadBlocks > info.maxGridSize[0])
    {
        nThreadBlocks = info.maxGridSize[0];
    }
    // Publish the subkeys to constant memory before any kernel launches.
    CHECK(hipMemcpyToSymbolAsync(dkey, key, KEY_LENGTH * sizeof(int), 0,
                                 hipMemcpyHostToDevice, (ctx->streams)[0]));
    CHECK(hipStreamSynchronize((ctx->streams)[0]));
    for (b = 0; b < ctx->nBlocks; b++)
    {
        int blockOffset = b * BLOCK_SIZE_IN_CHUNKS * CHUNK_SIZE;
        int localChunks = BLOCK_SIZE_IN_CHUNKS;
        // The final block may be partial.
        if (b * BLOCK_SIZE_IN_CHUNKS + localChunks > nChunks)
        {
            localChunks = nChunks - b * BLOCK_SIZE_IN_CHUNKS;
        }
        CHECK(hipMemcpyAsync(ctx->dPlain + blockOffset, plain + blockOffset,
                             localChunks * CHUNK_SIZE * sizeof(signed char),
                             hipMemcpyHostToDevice, (ctx->streams)[b]));
        hipLaunchKernelGGL(( d_encrypt_decrypt), dim3(nThreadBlocks), dim3(nThreadsPerBlock), 0, (ctx->streams)[b] ,
            ctx->dPlain + blockOffset, ctx->dCrypt + blockOffset, localChunks);
        CHECK(hipMemcpyAsync(crypt + blockOffset, ctx->dCrypt + blockOffset,
                             localChunks * CHUNK_SIZE * sizeof(signed char),
                             hipMemcpyDeviceToHost, (ctx->streams)[b]));
    }
}
/*
 * Destroy every stream owned by the context, then release its device
 * buffers.
 */
static void cleanup_context(device_context *ctx)
{
    for (int i = 0; i < ctx->nBlocks; i++)
    {
        CHECK(hipStreamDestroy(ctx->streams[i]));
    }
    free(ctx->streams);
    CHECK(hipFree(ctx->dPlain));
    CHECK(hipFree(ctx->dCrypt));
}
/*
* Get the length of a file on disk.
*/
/*
 * Return the size in bytes of the file behind fp, leaving the file
 * position reset to the beginning so the caller can read from the start.
 */
static size_t getFileLength(FILE *fp)
{
    fseek(fp, 0L, SEEK_END);
    const size_t len = (size_t)ftell(fp);
    fseek(fp, 0L, SEEK_SET);
    return (len);
}
/*
* inv is used to generate the key used for decryption from the secret key.
*/
/*
 * Multiplicative inverse of x modulo 0x10001 (2^16 + 1), computed with the
 * extended Euclidean algorithm.  Assumes a non-negative 16-bit x; 0 and 1
 * are their own inverses.  Used to build the IDEA decryption key schedule.
 */
static int inv(int x)
{
    if (x <= 1)
        return (x);              // 0 and 1 are self-inverse
    int v = 0x10001 / x;         // running Bezout coefficient; x >= 2 so it fits 16 bits
    int b = 0x10001 % x;         // current remainder
    if (b == 1)
        return ((1 - v) & 0xffff);
    int a = x;                   // previous remainder
    int u = 1;                   // Bezout coefficient paired with a
    for (;;)
    {
        int q = a / b;
        a %= b;
        u += q * v;
        if (a == 1)
            return (u);
        q = b / a;
        b %= a;
        v += q * u;
        if (b == 1)
            break;
    }
    return ((1 - v) & 0xffff);
}
/*
* Generate the key to be used for encryption, based on the user key read from
* disk.
*/
/*
 * Expand the 128-bit user key (8 int16 words) into the 52 16-bit IDEA
 * encryption subkeys.  The first 8 subkeys are the user key itself; every
 * later subkey is stitched together from shifted pieces of two earlier
 * subkeys (the >>9 / <<7 combination effectively implements IDEA's key
 * rotation).  The array is allocated as pinned host memory so it can be
 * copied to the device asynchronously; the caller frees it with
 * hipHostFree.
 */
static int *generateEncryptKey(int16_t *userkey)
{
    int i, j;
    int *key;
    CHECK(hipHostMalloc(&key, KEY_LENGTH * sizeof(int)));
    memset(key, 0x00, sizeof(int) * KEY_LENGTH);
    // Subkeys 0..7 are the raw user key words, masked to 16 bits.
    for (i = 0; i < CHUNK_SIZE; i++)
    {
        key[i] = (userkey[i] & 0xffff);
    }
    // Remaining subkeys: which pair of earlier subkeys feeds each new one
    // depends on the position within the current group of 8.
    for (i = CHUNK_SIZE; i < KEY_LENGTH; i++)
    {
        j = i % CHUNK_SIZE;
        if (j < 6)
        {
            key[i] = ((key[i - 7] >> 9) | (key[i - 6] << 7))
                     & 0xffff;
            continue;
        }
        if (j == 6)
        {
            key[i] = ((key[i - 7] >> 9) | (key[i - 14] << 7))
                     & 0xffff;
            continue;
        }
        key[i] = ((key[i - 15] >> 9) | (key[i - 14] << 7))
                 & 0xffff;
    }
    return (key);
}
/*
* Generate the key to be used for decryption, based on the user key read from
* disk.
*/
/*
 * Derive the 52 IDEA decryption subkeys from the user key: the encryption
 * schedule (Z) is consumed forward while the decryption array is filled
 * backward, with each multiplicative subkey replaced by its inverse mod
 * 0x10001 (inv()) and each additive subkey negated mod 0x10000.  Allocated
 * as pinned host memory; the caller frees it with hipHostFree.
 */
static int *generateDecryptKey(int16_t *userkey)
{
    int *key;
    int i, j, k;
    int t1, t2, t3;
    CHECK(hipHostMalloc(&key, KEY_LENGTH * sizeof(int)));
    int *Z = generateEncryptKey(userkey);
    // The last decryption group (output transform) comes from the first
    // four encryption subkeys.
    t1 = inv(Z[0]);
    t2 = - Z[1] & 0xffff;
    t3 = - Z[2] & 0xffff;
    key[51] = inv(Z[3]);
    key[50] = t3;
    key[49] = t2;
    key[48] = t1;
    j = 47;
    k = 4;
    // Middle rounds; note the two negated additive keys are stored in
    // t2-then-t3 order here, unlike the final group below.
    for (i = 0; i < 7; i++)
    {
        t1 = Z[k++];
        key[j--] = Z[k++];
        key[j--] = t1;
        t1 = inv(Z[k++]);
        t2 = -Z[k++] & 0xffff;
        t3 = -Z[k++] & 0xffff;
        key[j--] = inv(Z[k++]);
        key[j--] = t2;
        key[j--] = t3;
        key[j--] = t1;
    }
    // Final group (becomes the first decryption round): t3 before t2.
    t1 = Z[k++];
    key[j--] = Z[k++];
    key[j--] = t1;
    t1 = inv(Z[k++]);
    t2 = -Z[k++] & 0xffff;
    t3 = -Z[k++] & 0xffff;
    key[j--] = inv(Z[k++]);
    key[j--] = t3;
    key[j--] = t2;
    key[j--] = t1;
    CHECK(hipHostFree(Z));
    return (key);
}
/*
 * Allocate pinned host buffers for the plaintext and ciphertext and read
 * the entire input file into *text.  Pinned memory lets the later
 * asynchronous device copies run without staging.
 */
void readInputData(FILE *in, size_t textLen, signed char **text, signed char **crypt)
{
    CHECK(hipHostMalloc(text, textLen * sizeof(signed char)));
    CHECK(hipHostMalloc(crypt, textLen * sizeof(signed char)));
    const size_t nRead = fread(*text, sizeof(signed char), textLen, in);
    if (nRead != textLen)
    {
        fprintf(stderr, "Failed reading text from input file\n");
        exit(1);
    }
}
/*
 * Release all host-side allocations: the user key (plain malloc) and the
 * pinned text/crypt buffers and expanded key (allocated with
 * hipHostMalloc).
 */
void cleanup(signed char *text, signed char *crypt, int *key, int16_t *userkey)
{
    free(userkey);
    CHECK(hipHostFree(key));
    CHECK(hipHostFree(text));
    CHECK(hipHostFree(crypt));
}
/*
* Initialize application state by reading inputs from the disk and
* pre-allocating memory. Hand off to encrypt_decrypt to perform the actualy
* encryption or decryption. Then, write the encrypted/decrypted results to
* disk.
*/
/*
 * Entry point: parse arguments, build the IDEA key schedule, read the
 * input, split the work between the CPU (OpenMP) and all visible GPUs
 * according to <cpu-percent>, then write the result and clean up.
 */
int main(int argc, char **argv)
{
    FILE *in, *out, *keyfile;
    signed char *text, *crypt;
    size_t textLen, keyFileLength;
    int16_t *userkey;
    int *key;
    action a;
    hipEvent_t startEvent, finishEvent;
    if (argc != 8)
    {
        printf("usage: %s <encrypt|decrypt> <file.in> <file.out> <key.file> "
               "<threads-per-block> <ncpus> <cpu-percent>\n", argv[0]);
        return (1);
    }
    // Are we encrypting or decrypting?
    if (strncmp(argv[1], "encrypt", 7) == 0)
    {
        a = ENCRYPT;
    }
    else if (strncmp(argv[1], "decrypt", 7) == 0)
    {
        a = DECRYPT;
    }
    else
    {
        fprintf(stderr, "The action specified ('%s') is not valid. Must be "
                "either 'encrypt' or 'decrypt'\n", argv[1]);
        return (1);
    }
    // Input file
    in = fopen(argv[2], "r");
    if (in == NULL)
    {
        fprintf(stderr, "Unable to open %s for reading\n", argv[2]);
        return (1);
    }
    // Output file
    out = fopen(argv[3], "w");
    if (out == NULL)
    {
        fprintf(stderr, "Unable to open %s for writing\n", argv[3]);
        return (1);
    }
    // Key file
    keyfile = fopen(argv[4], "r");
    if (keyfile == NULL)
    {
        fprintf(stderr, "Unable to open key file %s for reading\n", argv[4]);
        return (1);
    }
    int nThreadsPerBlock = atoi(argv[5]);
    int ncpus = atoi(argv[6]);
    // Fraction of the chunks to process on the CPU (0.0 .. 1.0).
    float cpu_percent = atof(argv[7]);
    omp_set_num_threads(ncpus);
    keyFileLength = getFileLength(keyfile);
    if (keyFileLength != sizeof(*userkey) * USERKEY_LENGTH)
    {
        fprintf(stderr, "Invalid user key file length %lu, must be %lu\n",
                keyFileLength, sizeof(*userkey) * USERKEY_LENGTH);
        return (1);
    }
    userkey = (int16_t *)malloc(sizeof(int16_t) * USERKEY_LENGTH);
    if (userkey == NULL)
    {
        fprintf(stderr, "Error allocating user key\n");
        return (1);
    }
    if (fread(userkey, sizeof(*userkey), USERKEY_LENGTH, keyfile) != USERKEY_LENGTH)
    {
        fprintf(stderr, "Error reading user key\n");
        return (1);
    }
    // Expand the user key into the 52 IDEA subkeys for the chosen direction.
    if (a == ENCRYPT)
    {
        key = generateEncryptKey(userkey);
    }
    else
    {
        key = generateDecryptKey(userkey);
    }
    textLen = getFileLength(in);
    if (textLen % CHUNK_SIZE != 0)
    {
        fprintf(stderr, "Invalid input file length %lu, must be evenly "
                "divisible by %d\n", textLen, CHUNK_SIZE);
        return (1);
    }
    readInputData(in, textLen, &text, &crypt);
    fclose(in);
    int nDevices;
    // NOTE(review): if hipGetDeviceCount fails with an error other than
    // hipErrorNoDevice, nDevices is left unset but the GPU path is still
    // taken -- confirm intended behavior.
    if (cpu_percent >= 1.0f || hipGetDeviceCount(&nDevices) == hipErrorNoDevice)
    {
        // If no devices are found, run all computation on the CPU using OpenMP.
        double overall_start = seconds();
        h_encrypt_decrypt(text, crypt, key, textLen);
        double overall_finish = seconds();
        double overall_ms = 1000.0 * (overall_finish - overall_start);
        // NOTE(review): the printed value is KB per millisecond although the
        // label says "KB/s".
        printf("Processed %d bytes in %.3f ms on CPU ( %.4f KB/s )\n",
               (int)textLen, overall_ms, ((float)textLen / overall_ms) / 1024.0f);
    }
    else
    {
        int d;
        // Split the chunks: the GPUs take the leading portion of the file,
        // the CPU the trailing cpu_percent share.
        int nTotalChunks = textLen / CHUNK_SIZE;
        int nCpuChunks = nTotalChunks * cpu_percent;
        if (nCpuChunks > nTotalChunks) nCpuChunks = nTotalChunks;
        int nGpuChunks = nTotalChunks - nCpuChunks;
        int gpuLen = nGpuChunks * CHUNK_SIZE;
        printf("Processing %d bytes on the GPUs, %d bytes on the CPU\n",
               gpuLen, (int)(textLen - gpuLen));
        int chunksPerGpu = (nGpuChunks + nDevices - 1) / nDevices;
        device_context *ctxs = (device_context *)malloc(nDevices * sizeof(device_context));
        // Allocate buffers and streams for each device's slice.
        for (d = 0; d < nDevices; d++)
        {
            CHECK(hipSetDevice(d));
            int start = d * chunksPerGpu * CHUNK_SIZE;
            int len = chunksPerGpu * CHUNK_SIZE;
            if (start + len > gpuLen)
            {
                len = gpuLen - start;
            }
            init_context(ctxs + d, len);
        }
        CHECK(hipEventCreate(&startEvent));
        CHECK(hipEventCreate(&finishEvent));
        /*
         * Iterate over each device, launching a subset of the total chunks at
         * a time.
         */
        double overall_start = seconds();
        CHECK(hipEventRecord(startEvent));
        for (d = 0; d < nDevices; d++)
        {
            CHECK(hipSetDevice(d));
            int start = d * chunksPerGpu * CHUNK_SIZE;
            int len = chunksPerGpu * CHUNK_SIZE;
            if (start + len > gpuLen)
            {
                len = gpuLen - start;
            }
            encrypt_decrypt_driver(text + start, crypt + start, key, len,
                                   nThreadsPerBlock, ctxs + d);
        }
        CHECK(hipEventRecord(finishEvent));
        // While the GPUs work asynchronously, process the CPU share here.
        double cpu_start = seconds();
        int cpuStart = gpuLen;
        h_encrypt_decrypt(text + cpuStart, crypt + cpuStart, key, textLen - cpuStart);
        double cpu_finish = seconds();
        double cpu_overall_ms = 1000.0 * (cpu_finish - cpu_start);
        // Wait for each device to finish its work.
        for (d = 0; d < nDevices; d++)
        {
            CHECK(hipSetDevice(d));
            CHECK(hipDeviceSynchronize());
        }
        double overall_finish = seconds();
        for (d = 0; d < nDevices; d++)
        {
            // Clean up any CUDA resource allocated for this device.
            CHECK(hipSetDevice(d));
            cleanup_context(ctxs + d);
        }
        float gpuElapsed;
        CHECK(hipEventElapsedTime(&gpuElapsed, startEvent, finishEvent));
        printf("Processed %d bytes in %.3f ms on GPUs ( %.4f KB/ms )\n",
               gpuLen, gpuElapsed, ((float)gpuLen / gpuElapsed) / 1024.0f);
        printf("Processed %d bytes in %.3f ms on CPU ( %.4f KB/ms )\n",
               (int)(textLen - cpuStart), cpu_overall_ms,
               ((float)(textLen - cpuStart) / cpu_overall_ms) / 1024.0f);
        // Display the aggregate performance of all devices.
        double overall_elapsed_ms = 1000.0 * (overall_finish - overall_start);
        printf("In total, processed %d bytes in %.3f ms on %d devices and the "
               "CPU\n", (int)textLen, overall_elapsed_ms, nDevices);
        printf("Aggregate bandwith = %f KB/ms\n",
               (float)(textLen / 1024) / overall_elapsed_ms);
        free(ctxs);
    }
    if (fwrite(crypt, sizeof(signed char), textLen, out) != textLen)
    {
        fprintf(stderr, "Failed writing crypt to %s\n", argv[3]);
        return (1);
    }
    fclose(out);
    cleanup(text, crypt, key, userkey);
    return (0);
}
| d52d27f5c969849cd952905565b9221bcb7e3413.cu | #include "../common/common.h"
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <omp.h>
/*
* The crypt application implements IDEA encryption and decryption of a single
* input file using the secret key provided.
*/
// Chunking size for IDEA, in bytes
#define CHUNK_SIZE 8
// Length of the encryption/decryption keys, in bytes
#define KEY_LENGTH 52
#define BLOCK_SIZE_IN_CHUNKS 1024000
// Length of the secret key, in bytes
#define USERKEY_LENGTH 8
#define BITS_PER_BYTE 8
typedef struct _device_context
{
signed char *dPlain, *dCrypt;
cudaStream_t *streams;
int nBlocks;
} device_context;
typedef enum { ENCRYPT, DECRYPT } action;
__constant__ int dkey[KEY_LENGTH];
/*
* doCrypt implements the core logic of IDEA. It iterates over the byte
* chunks stored in plainList and outputs their encrypted/decrypted form to the
* corresponding element in cryptList using the secret key provided.
*/
/*
 * Encrypt/decrypt one 8-byte chunk of `plain` into `crypt` using the 52
 * 16-bit IDEA subkeys in `key`.  Four 16-bit words x1..x4 are loaded
 * low-byte-first from the chunk, pushed through 8 rounds of IDEA mixing,
 * a final output transformation is applied, and the result is stored back
 * low-byte-first.
 */
__host__ __device__ void doCrypt(int chunk, signed char *plain,
                                 signed char *crypt, int *key)
{
    long x1, x2, x3, x4, t1, t2, ik, r;
    // Assemble the four 16-bit sub-blocks from the 8 input bytes
    // (low byte first within each 16-bit word).
    x1 = (((unsigned int)plain[chunk * CHUNK_SIZE]) & 0xff);
    x1 |= ((((unsigned int)plain[chunk * CHUNK_SIZE + 1]) & 0xff) <<
           BITS_PER_BYTE);
    x2 = (((unsigned int)plain[chunk * CHUNK_SIZE + 2]) & 0xff);
    x2 |= ((((unsigned int)plain[chunk * CHUNK_SIZE + 3]) & 0xff) <<
           BITS_PER_BYTE);
    x3 = (((unsigned int)plain[chunk * CHUNK_SIZE + 4]) & 0xff);
    x3 |= ((((unsigned int)plain[chunk * CHUNK_SIZE + 5]) & 0xff) <<
           BITS_PER_BYTE);
    x4 = (((unsigned int)plain[chunk * CHUNK_SIZE + 6]) & 0xff);
    x4 |= ((((unsigned int)plain[chunk * CHUNK_SIZE + 7]) & 0xff) <<
           BITS_PER_BYTE);
    ik = 0;          // index of the next subkey to consume
    r = CHUNK_SIZE;  // round counter; CHUNK_SIZE (8) happens to equal the
                     // number of rounds performed here
    do
    {
        // Per round: multiply mod 0x10001 and add mod 0x10000 against the
        // next subkeys, then mix the four sub-blocks together.
        x1 = (int)((((long)x1 * key[ik++]) % 0x10001L) & 0xffff);
        x2 = ((x2 + key[ik++]) & 0xffff);
        x3 = ((x3 + key[ik++]) & 0xffff);
        x4 = (int)((((long)x4 * key[ik++]) % 0x10001L) & 0xffff);
        t2 = (x1 ^ x3);
        t2 = (int)((((long)t2 * key[ik++]) % 0x10001L) & 0xffff);
        t1 = ((t2 + (x2 ^ x4)) & 0xffff);
        t1 = (int)((((long)t1 * key[ik++]) % 0x10001L) & 0xffff);
        t2 = (t1 + t2 & 0xffff);  // parsed as (t1 + t2) & 0xffff
        x1 = (x1 ^ t1);
        x4 = (x4 ^ t2);
        t2 = (t2 ^ x2);
        x2 = (x3 ^ t1);
        x3 = t2;
    }
    while(--r != 0);
    // Final output transformation using the last four subkeys.
    x1 = (int)((((long)x1 * key[ik++]) % 0x10001L) & 0xffff);
    x3 = ((x3 + key[ik++]) & 0xffff);
    x2 = ((x2 + key[ik++]) & 0xffff);
    x4 = (int)((((long)x4 * key[ik++]) % 0x10001L) & 0xffff);
    // Store low-byte-first; x3 and x2 are written swapped, mirroring the
    // swap the round structure leaves in place.
    crypt[chunk * CHUNK_SIZE] = (signed char) x1;
    crypt[chunk * CHUNK_SIZE + 1] = (signed char) ((unsigned long)x1 >>
                                                   BITS_PER_BYTE);
    crypt[chunk * CHUNK_SIZE + 2] = (signed char) x3;
    crypt[chunk * CHUNK_SIZE + 3] = (signed char) ((unsigned long)x3 >>
                                                   BITS_PER_BYTE);
    crypt[chunk * CHUNK_SIZE + 4] = (signed char) x2;
    crypt[chunk * CHUNK_SIZE + 5] = (signed char) ((unsigned long)x2 >>
                                                   BITS_PER_BYTE);
    crypt[chunk * CHUNK_SIZE + 6] = (signed char) x4;
    crypt[chunk * CHUNK_SIZE + 7] = (signed char) ((unsigned long)x4 >>
                                                   BITS_PER_BYTE);
}
/*
 * Device-side IDEA driver: a grid-stride loop over the nChunks 8-byte
 * chunks, using the subkeys previously copied into constant memory (dkey).
 * Works for any launch configuration that covers at least one thread.
 */
__global__ void d_encrypt_decrypt(signed char *plain, signed char *crypt, int nChunks)
{
    const int stride = blockDim.x * gridDim.x;
    for (int chunk = blockIdx.x * blockDim.x + threadIdx.x;
         chunk < nChunks;
         chunk += stride)
    {
        doCrypt(chunk, plain, crypt, dkey);
    }
}
/*
 * Host-side IDEA driver: processes every 8-byte chunk of the buffer in
 * parallel with OpenMP, using the expanded subkeys in `key`.
 */
static void h_encrypt_decrypt(signed char *plain, signed char *crypt, int *key,
                              int plainLength)
{
    int totalChunks = plainLength / CHUNK_SIZE;
    int chunk;
    #pragma omp parallel for firstprivate(totalChunks) private(chunk)
    for (chunk = 0; chunk < totalChunks; chunk++)
    {
        doCrypt(chunk, plain, crypt, key);
    }
}
/*
 * Allocate the per-device state used by encrypt_decrypt_driver: device
 * buffers sized for this device's slice of the text, plus one CUDA stream
 * per BLOCK_SIZE_IN_CHUNKS-sized block so the copies and kernels of
 * different blocks can overlap.
 */
static void init_context(device_context *ctx, int plainLength)
{
    signed char *dPlain, *dCrypt;
    cudaStream_t *streams;
    int nBlocks, b;
    // IDEA operates on whole 8-byte chunks only.
    if (plainLength % CHUNK_SIZE != 0)
    {
        fprintf(stderr, "Invalid encryption: length of plain must be an even "
                "multiple of %d but is %d\n", CHUNK_SIZE, plainLength);
        exit(-1);
    }
    CHECK(cudaMalloc((void **)&dPlain, plainLength * sizeof(signed char)));
    CHECK(cudaMalloc((void **)&dCrypt, plainLength * sizeof(signed char)));
    int nChunks = plainLength / CHUNK_SIZE;
    // Ceil-divide the chunks into blocks; one stream per block.
    nBlocks = (nChunks + BLOCK_SIZE_IN_CHUNKS - 1) / BLOCK_SIZE_IN_CHUNKS;
    streams = (cudaStream_t *)malloc(sizeof(cudaStream_t) * nBlocks);
    for (b = 0; b < nBlocks; b++)
    {
        CHECK(cudaStreamCreate(streams + b));
    }
    ctx->dPlain = dPlain;
    ctx->dCrypt = dCrypt;
    ctx->streams = streams;
    ctx->nBlocks = nBlocks;
}
/*
 * Stream plainLength bytes through the IDEA kernel on the device whose
 * context is `ctx`.  The input is processed in BLOCK_SIZE_IN_CHUNKS-sized
 * blocks, each on its own stream, so the H2D copy, kernel, and D2H copy of
 * different blocks can overlap.  All work is asynchronous; the caller must
 * synchronize the device before reading `crypt`.
 */
static void encrypt_decrypt_driver(signed char *plain, signed char *crypt,
                                   int *key,
                                   int plainLength, int nThreadsPerBlock,
                                   device_context *ctx)
{
    int b;
    // BUGFIX: query the properties of the device this context belongs to
    // (the caller selects it with cudaSetDevice), not always device 0.
    int dev;
    CHECK(cudaGetDevice(&dev));
    cudaDeviceProp info;
    CHECK(cudaGetDeviceProperties(&info, dev));
    int nChunks = plainLength / CHUNK_SIZE;
    // One thread per chunk, clamped to the device's maximum grid width;
    // the kernel's grid-stride loop covers any remainder.
    int nThreadBlocks = (nChunks + nThreadsPerBlock - 1) / nThreadsPerBlock;
    if (nThreadBlocks > info.maxGridSize[0])
    {
        nThreadBlocks = info.maxGridSize[0];
    }
    // Publish the subkeys to constant memory before any kernel launches.
    CHECK(cudaMemcpyToSymbolAsync(dkey, key, KEY_LENGTH * sizeof(int), 0,
                                  cudaMemcpyHostToDevice, (ctx->streams)[0]));
    CHECK(cudaStreamSynchronize((ctx->streams)[0]));
    for (b = 0; b < ctx->nBlocks; b++)
    {
        int blockOffset = b * BLOCK_SIZE_IN_CHUNKS * CHUNK_SIZE;
        int localChunks = BLOCK_SIZE_IN_CHUNKS;
        // The final block may be partial.
        if (b * BLOCK_SIZE_IN_CHUNKS + localChunks > nChunks)
        {
            localChunks = nChunks - b * BLOCK_SIZE_IN_CHUNKS;
        }
        CHECK(cudaMemcpyAsync(ctx->dPlain + blockOffset, plain + blockOffset,
                              localChunks * CHUNK_SIZE * sizeof(signed char),
                              cudaMemcpyHostToDevice, (ctx->streams)[b]));
        d_encrypt_decrypt<<<nThreadBlocks, nThreadsPerBlock, 0, (ctx->streams)[b] >>>(
            ctx->dPlain + blockOffset, ctx->dCrypt + blockOffset, localChunks);
        CHECK(cudaMemcpyAsync(crypt + blockOffset, ctx->dCrypt + blockOffset,
                              localChunks * CHUNK_SIZE * sizeof(signed char),
                              cudaMemcpyDeviceToHost, (ctx->streams)[b]));
    }
}
/*
 * Destroy every stream owned by the context, then release its device
 * buffers.
 */
static void cleanup_context(device_context *ctx)
{
    for (int i = 0; i < ctx->nBlocks; i++)
    {
        CHECK(cudaStreamDestroy(ctx->streams[i]));
    }
    free(ctx->streams);
    CHECK(cudaFree(ctx->dPlain));
    CHECK(cudaFree(ctx->dCrypt));
}
/*
* Get the length of a file on disk.
*/
/*
 * Return the size in bytes of the file behind fp, leaving the file
 * position reset to the beginning so the caller can read from the start.
 */
static size_t getFileLength(FILE *fp)
{
    fseek(fp, 0L, SEEK_END);
    const size_t len = (size_t)ftell(fp);
    fseek(fp, 0L, SEEK_SET);
    return (len);
}
/*
* inv is used to generate the key used for decryption from the secret key.
*/
/*
 * Multiplicative inverse of x modulo 0x10001 (2^16 + 1), computed with the
 * extended Euclidean algorithm.  Assumes a non-negative 16-bit x; 0 and 1
 * are their own inverses.  Used to build the IDEA decryption key schedule.
 */
static int inv(int x)
{
    if (x <= 1)
        return (x);              // 0 and 1 are self-inverse
    int v = 0x10001 / x;         // running Bezout coefficient; x >= 2 so it fits 16 bits
    int b = 0x10001 % x;         // current remainder
    if (b == 1)
        return ((1 - v) & 0xffff);
    int a = x;                   // previous remainder
    int u = 1;                   // Bezout coefficient paired with a
    for (;;)
    {
        int q = a / b;
        a %= b;
        u += q * v;
        if (a == 1)
            return (u);
        q = b / a;
        b %= a;
        v += q * u;
        if (b == 1)
            break;
    }
    return ((1 - v) & 0xffff);
}
/*
* Generate the key to be used for encryption, based on the user key read from
* disk.
*/
/*
 * Expand the 128-bit user key (8 int16 words) into the 52 16-bit IDEA
 * encryption subkeys.  The first 8 subkeys are the user key itself; every
 * later subkey is stitched together from shifted pieces of two earlier
 * subkeys (the >>9 / <<7 combination effectively implements IDEA's key
 * rotation).  The array is allocated as pinned host memory so it can be
 * copied to the device asynchronously; the caller frees it with
 * cudaFreeHost.
 */
static int *generateEncryptKey(int16_t *userkey)
{
    int i, j;
    int *key;
    CHECK(cudaMallocHost(&key, KEY_LENGTH * sizeof(int)));
    memset(key, 0x00, sizeof(int) * KEY_LENGTH);
    // Subkeys 0..7 are the raw user key words, masked to 16 bits.
    for (i = 0; i < CHUNK_SIZE; i++)
    {
        key[i] = (userkey[i] & 0xffff);
    }
    // Remaining subkeys: which pair of earlier subkeys feeds each new one
    // depends on the position within the current group of 8.
    for (i = CHUNK_SIZE; i < KEY_LENGTH; i++)
    {
        j = i % CHUNK_SIZE;
        if (j < 6)
        {
            key[i] = ((key[i - 7] >> 9) | (key[i - 6] << 7))
                     & 0xffff;
            continue;
        }
        if (j == 6)
        {
            key[i] = ((key[i - 7] >> 9) | (key[i - 14] << 7))
                     & 0xffff;
            continue;
        }
        key[i] = ((key[i - 15] >> 9) | (key[i - 14] << 7))
                 & 0xffff;
    }
    return (key);
}
/*
* Generate the key to be used for decryption, based on the user key read from
* disk.
*/
/*
 * Derive the 52 IDEA decryption subkeys from the user key: the encryption
 * schedule (Z) is consumed forward while the decryption array is filled
 * backward, with each multiplicative subkey replaced by its inverse mod
 * 0x10001 (inv()) and each additive subkey negated mod 0x10000.  Allocated
 * as pinned host memory; the caller frees it with cudaFreeHost.
 */
static int *generateDecryptKey(int16_t *userkey)
{
    int *key;
    int i, j, k;
    int t1, t2, t3;
    CHECK(cudaMallocHost(&key, KEY_LENGTH * sizeof(int)));
    int *Z = generateEncryptKey(userkey);
    // The last decryption group (output transform) comes from the first
    // four encryption subkeys.
    t1 = inv(Z[0]);
    t2 = - Z[1] & 0xffff;
    t3 = - Z[2] & 0xffff;
    key[51] = inv(Z[3]);
    key[50] = t3;
    key[49] = t2;
    key[48] = t1;
    j = 47;
    k = 4;
    // Middle rounds; note the two negated additive keys are stored in
    // t2-then-t3 order here, unlike the final group below.
    for (i = 0; i < 7; i++)
    {
        t1 = Z[k++];
        key[j--] = Z[k++];
        key[j--] = t1;
        t1 = inv(Z[k++]);
        t2 = -Z[k++] & 0xffff;
        t3 = -Z[k++] & 0xffff;
        key[j--] = inv(Z[k++]);
        key[j--] = t2;
        key[j--] = t3;
        key[j--] = t1;
    }
    // Final group (becomes the first decryption round): t3 before t2.
    t1 = Z[k++];
    key[j--] = Z[k++];
    key[j--] = t1;
    t1 = inv(Z[k++]);
    t2 = -Z[k++] & 0xffff;
    t3 = -Z[k++] & 0xffff;
    key[j--] = inv(Z[k++]);
    key[j--] = t3;
    key[j--] = t2;
    key[j--] = t1;
    CHECK(cudaFreeHost(Z));
    return (key);
}
/*
 * Allocate pinned host buffers for the plaintext and ciphertext and read
 * the entire input file into *text.  Pinned memory lets the later
 * asynchronous device copies run without staging.
 */
void readInputData(FILE *in, size_t textLen, signed char **text, signed char **crypt)
{
    CHECK(cudaMallocHost(text, textLen * sizeof(signed char)));
    CHECK(cudaMallocHost(crypt, textLen * sizeof(signed char)));
    const size_t nRead = fread(*text, sizeof(signed char), textLen, in);
    if (nRead != textLen)
    {
        fprintf(stderr, "Failed reading text from input file\n");
        exit(1);
    }
}
/*
 * Release all host-side allocations: the user key (plain malloc) and the
 * pinned text/crypt buffers and expanded key (allocated with
 * cudaMallocHost).
 */
void cleanup(signed char *text, signed char *crypt, int *key, int16_t *userkey)
{
    free(userkey);
    CHECK(cudaFreeHost(key));
    CHECK(cudaFreeHost(text));
    CHECK(cudaFreeHost(crypt));
}
/*
* Initialize application state by reading inputs from the disk and
* pre-allocating memory. Hand off to encrypt_decrypt to perform the actualy
* encryption or decryption. Then, write the encrypted/decrypted results to
* disk.
*/
/*
 * Entry point: parse arguments, build the IDEA key schedule, read the
 * input, split the work between the CPU (OpenMP) and all visible GPUs
 * according to <cpu-percent>, then write the result and clean up.
 */
int main(int argc, char **argv)
{
    FILE *in, *out, *keyfile;
    signed char *text, *crypt;
    size_t textLen, keyFileLength;
    int16_t *userkey;
    int *key;
    action a;
    cudaEvent_t startEvent, finishEvent;
    if (argc != 8)
    {
        printf("usage: %s <encrypt|decrypt> <file.in> <file.out> <key.file> "
               "<threads-per-block> <ncpus> <cpu-percent>\n", argv[0]);
        return (1);
    }
    // Are we encrypting or decrypting?
    if (strncmp(argv[1], "encrypt", 7) == 0)
    {
        a = ENCRYPT;
    }
    else if (strncmp(argv[1], "decrypt", 7) == 0)
    {
        a = DECRYPT;
    }
    else
    {
        fprintf(stderr, "The action specified ('%s') is not valid. Must be "
                "either 'encrypt' or 'decrypt'\n", argv[1]);
        return (1);
    }
    // Input file
    in = fopen(argv[2], "r");
    if (in == NULL)
    {
        fprintf(stderr, "Unable to open %s for reading\n", argv[2]);
        return (1);
    }
    // Output file
    out = fopen(argv[3], "w");
    if (out == NULL)
    {
        fprintf(stderr, "Unable to open %s for writing\n", argv[3]);
        return (1);
    }
    // Key file
    keyfile = fopen(argv[4], "r");
    if (keyfile == NULL)
    {
        fprintf(stderr, "Unable to open key file %s for reading\n", argv[4]);
        return (1);
    }
    int nThreadsPerBlock = atoi(argv[5]);
    int ncpus = atoi(argv[6]);
    // Fraction of the chunks to process on the CPU (0.0 .. 1.0).
    float cpu_percent = atof(argv[7]);
    omp_set_num_threads(ncpus);
    keyFileLength = getFileLength(keyfile);
    if (keyFileLength != sizeof(*userkey) * USERKEY_LENGTH)
    {
        fprintf(stderr, "Invalid user key file length %lu, must be %lu\n",
                keyFileLength, sizeof(*userkey) * USERKEY_LENGTH);
        return (1);
    }
    userkey = (int16_t *)malloc(sizeof(int16_t) * USERKEY_LENGTH);
    if (userkey == NULL)
    {
        fprintf(stderr, "Error allocating user key\n");
        return (1);
    }
    if (fread(userkey, sizeof(*userkey), USERKEY_LENGTH, keyfile) != USERKEY_LENGTH)
    {
        fprintf(stderr, "Error reading user key\n");
        return (1);
    }
    // Expand the user key into the 52 IDEA subkeys for the chosen direction.
    if (a == ENCRYPT)
    {
        key = generateEncryptKey(userkey);
    }
    else
    {
        key = generateDecryptKey(userkey);
    }
    textLen = getFileLength(in);
    if (textLen % CHUNK_SIZE != 0)
    {
        fprintf(stderr, "Invalid input file length %lu, must be evenly "
                "divisible by %d\n", textLen, CHUNK_SIZE);
        return (1);
    }
    readInputData(in, textLen, &text, &crypt);
    fclose(in);
    int nDevices;
    // NOTE(review): if cudaGetDeviceCount fails with an error other than
    // cudaErrorNoDevice, nDevices is left unset but the GPU path is still
    // taken -- confirm intended behavior.
    if (cpu_percent >= 1.0f || cudaGetDeviceCount(&nDevices) == cudaErrorNoDevice)
    {
        // If no devices are found, run all computation on the CPU using OpenMP.
        double overall_start = seconds();
        h_encrypt_decrypt(text, crypt, key, textLen);
        double overall_finish = seconds();
        double overall_ms = 1000.0 * (overall_finish - overall_start);
        // NOTE(review): the printed value is KB per millisecond although the
        // label says "KB/s".
        printf("Processed %d bytes in %.3f ms on CPU ( %.4f KB/s )\n",
               (int)textLen, overall_ms, ((float)textLen / overall_ms) / 1024.0f);
    }
    else
    {
        int d;
        // Split the chunks: the GPUs take the leading portion of the file,
        // the CPU the trailing cpu_percent share.
        int nTotalChunks = textLen / CHUNK_SIZE;
        int nCpuChunks = nTotalChunks * cpu_percent;
        if (nCpuChunks > nTotalChunks) nCpuChunks = nTotalChunks;
        int nGpuChunks = nTotalChunks - nCpuChunks;
        int gpuLen = nGpuChunks * CHUNK_SIZE;
        printf("Processing %d bytes on the GPUs, %d bytes on the CPU\n",
               gpuLen, (int)(textLen - gpuLen));
        int chunksPerGpu = (nGpuChunks + nDevices - 1) / nDevices;
        device_context *ctxs = (device_context *)malloc(nDevices * sizeof(device_context));
        // Allocate buffers and streams for each device's slice.
        for (d = 0; d < nDevices; d++)
        {
            CHECK(cudaSetDevice(d));
            int start = d * chunksPerGpu * CHUNK_SIZE;
            int len = chunksPerGpu * CHUNK_SIZE;
            if (start + len > gpuLen)
            {
                len = gpuLen - start;
            }
            init_context(ctxs + d, len);
        }
        CHECK(cudaEventCreate(&startEvent));
        CHECK(cudaEventCreate(&finishEvent));
        /*
         * Iterate over each device, launching a subset of the total chunks at
         * a time.
         */
        double overall_start = seconds();
        CHECK(cudaEventRecord(startEvent));
        for (d = 0; d < nDevices; d++)
        {
            CHECK(cudaSetDevice(d));
            int start = d * chunksPerGpu * CHUNK_SIZE;
            int len = chunksPerGpu * CHUNK_SIZE;
            if (start + len > gpuLen)
            {
                len = gpuLen - start;
            }
            encrypt_decrypt_driver(text + start, crypt + start, key, len,
                                   nThreadsPerBlock, ctxs + d);
        }
        CHECK(cudaEventRecord(finishEvent));
        // While the GPUs work asynchronously, process the CPU share here.
        double cpu_start = seconds();
        int cpuStart = gpuLen;
        h_encrypt_decrypt(text + cpuStart, crypt + cpuStart, key, textLen - cpuStart);
        double cpu_finish = seconds();
        double cpu_overall_ms = 1000.0 * (cpu_finish - cpu_start);
        // Wait for each device to finish its work.
        for (d = 0; d < nDevices; d++)
        {
            CHECK(cudaSetDevice(d));
            CHECK(cudaDeviceSynchronize());
        }
        double overall_finish = seconds();
        for (d = 0; d < nDevices; d++)
        {
            // Clean up any CUDA resource allocated for this device.
            CHECK(cudaSetDevice(d));
            cleanup_context(ctxs + d);
        }
        float gpuElapsed;
        CHECK(cudaEventElapsedTime(&gpuElapsed, startEvent, finishEvent));
        printf("Processed %d bytes in %.3f ms on GPUs ( %.4f KB/ms )\n",
               gpuLen, gpuElapsed, ((float)gpuLen / gpuElapsed) / 1024.0f);
        printf("Processed %d bytes in %.3f ms on CPU ( %.4f KB/ms )\n",
               (int)(textLen - cpuStart), cpu_overall_ms,
               ((float)(textLen - cpuStart) / cpu_overall_ms) / 1024.0f);
        // Display the aggregate performance of all devices.
        double overall_elapsed_ms = 1000.0 * (overall_finish - overall_start);
        printf("In total, processed %d bytes in %.3f ms on %d devices and the "
               "CPU\n", (int)textLen, overall_elapsed_ms, nDevices);
        printf("Aggregate bandwith = %f KB/ms\n",
               (float)(textLen / 1024) / overall_elapsed_ms);
        free(ctxs);
    }
    if (fwrite(crypt, sizeof(signed char), textLen, out) != textLen)
    {
        fprintf(stderr, "Failed writing crypt to %s\n", argv[3]);
        return (1);
    }
    fclose(out);
    cleanup(text, crypt, key, userkey);
    return (0);
}
|
60ea84881ed492e7156e1226c14b472ebf3711d2.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2014 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "average_subsampling_2d_layer_tester_cuda.h"
#include <hip/hip_runtime.h>
#include "util_cuda.h"
#include "neural_network_cuda_exception.h"
#include "../average_subsampling_layer.h"
#include "../nn_types.h"
texture<float, hipTextureType1D, hipReadModeElementType> input_tex_ref;
// Generic 2D average-subsampling kernel: one thread computes one output
// element by summing a subsampling_width x subsampling_height window of the
// input (read through the 1D texture input_tex_ref) and scaling by
// subsampling_weight.  elem_id_in_feature_map is decomposed into
// (output_x, output_y) using tt = ceil(log2(output_width)) bits, so the
// launch must cover the power-of-two-aligned feature-map size; threads
// landing outside the real feature map are filtered by the bounds check.
__global__ void average_subsampling_2d_tex_kernel(
    float * __restrict output,
    int subsampling_width,
    int subsampling_height,
    float subsampling_weight,
    int input_width,
    int input_height,
    int output_width,
    int output_height,
    int feature_map_count,
    int entry_count)
{
    int elem_id_in_feature_map = blockIdx.x * blockDim.x + threadIdx.x;
    int feature_map_id = blockIdx.y * blockDim.y + threadIdx.y;
    int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
    // Number of bits needed to address a row: ceil(log2(output_width)).
    int tt = 32 - __clz(output_width - 1);
    int output_y = elem_id_in_feature_map >> tt;
    int output_x = elem_id_in_feature_map & ((1 << tt) - 1);
    bool in_bounds = (output_x < output_width) && (output_y < output_height) && (feature_map_id < feature_map_count) && (entry_id < entry_count);
    if (in_bounds)
    {
        // Top-left corner of this output element's input window.
        int input_x = output_x * subsampling_width;
        int input_y = output_y * subsampling_height;
        int current_input_elem_id = ((entry_id * feature_map_count + feature_map_id) * input_height + input_y) * input_width + input_x;
        float sum = 0.0F;
        for(int j = 0; j < subsampling_height; ++j)
        {
            #pragma unroll 4
            for(int i = 0; i < subsampling_width; ++i)
            {
                sum += tex1Dfetch(input_tex_ref, current_input_elem_id);
                current_input_elem_id++;
            }
            // Advance to the start of the window on the next input row.
            current_input_elem_id += (input_width - subsampling_width);
        }
        output[((entry_id * feature_map_count + feature_map_id) * output_height + output_y) * output_width + output_x] = sum * subsampling_weight;
    }
}
// Fully unrolled variant of the average-subsampling kernel for small,
// compile-time-known windows: the loops over the SUBSAMPLING_WIDTH x
// SUBSAMPLING_HEIGHT window are unrolled and the averaging weight is the
// compile-time constant 1/(W*H).  Indexing scheme is identical to the
// generic kernel above (power-of-two-aligned feature-map addressing).
template<int SUBSAMPLING_WIDTH, int SUBSAMPLING_HEIGHT>
__global__ void average_subsampling_2d_tex_exact_kernel(
    float * __restrict output,
    int input_width,
    int input_height,
    int output_width,
    int output_height,
    int feature_map_count,
    int entry_count)
{
    int elem_id_in_feature_map = blockIdx.x * blockDim.x + threadIdx.x;
    int feature_map_id = blockIdx.y * blockDim.y + threadIdx.y;
    int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
    // Number of bits needed to address a row: ceil(log2(output_width)).
    int tt = 32 - __clz(output_width - 1);
    int output_y = elem_id_in_feature_map >> tt;
    int output_x = elem_id_in_feature_map & ((1 << tt) - 1);
    bool in_bounds = (output_x < output_width) && (output_y < output_height) && (feature_map_id < feature_map_count) && (entry_id < entry_count);
    if (in_bounds)
    {
        // Top-left corner of this output element's input window.
        int input_x = output_x * SUBSAMPLING_WIDTH;
        int input_y = output_y * SUBSAMPLING_HEIGHT;
        int current_input_elem_id = ((entry_id * feature_map_count + feature_map_id) * input_height + input_y) * input_width + input_x;
        float sum = 0.0F;
        #pragma unroll
        for(int j = 0; j < SUBSAMPLING_HEIGHT; ++j)
        {
            #pragma unroll
            for(int i = 0; i < SUBSAMPLING_WIDTH; ++i)
            {
                sum += tex1Dfetch(input_tex_ref, current_input_elem_id);
                current_input_elem_id++;
            }
            // Advance to the start of the window on the next input row.
            current_input_elem_id += (input_width - SUBSAMPLING_WIDTH);
        }
        output[((entry_id * feature_map_count + feature_map_id) * output_height + output_y) * output_width + output_x] = sum * (1.0F / (float)(SUBSAMPLING_WIDTH * SUBSAMPLING_HEIGHT));
    }
}
namespace nnforge
{
namespace cuda
{
average_subsampling_2d_layer_tester_cuda::average_subsampling_2d_layer_tester_cuda()
{
    // Configure the shared texture reference once: out-of-range fetches
    // return zero (border addressing) and the texture is indexed with
    // unnormalized integer coordinates.
    input_tex_ref.addressMode[0] = hipAddressModeBorder;
    input_tex_ref.normalized = false;
}
// No resources owned directly; buffers/streams are managed elsewhere.
average_subsampling_2d_layer_tester_cuda::~average_subsampling_2d_layer_tester_cuda()
{
}
#define MAX_WINDOW_WIDTH 4
#define MAX_WINDOW_HEIGHT 4
#define launch_exact_kernel_const_const(window_width_const, window_height_const) \
hipLaunchKernelGGL(( average_subsampling_2d_tex_exact_kernel<window_width_const,window_height_const>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, output,input_configuration_specific.dimension_sizes[0],input_configuration_specific.dimension_sizes[1],output_configuration_specific.dimension_sizes[0],output_configuration_specific.dimension_sizes[1],output_configuration_specific.feature_map_count,entry_count);
#define launch_exact_kernel_const(window_width, window_height_const) \
switch (window_width) \
{ \
case 1: \
launch_exact_kernel_const_const(1, window_height_const); \
break; \
case 2: \
launch_exact_kernel_const_const(2, window_height_const); \
break; \
case 3: \
launch_exact_kernel_const_const(3, window_height_const); \
break; \
case 4: \
launch_exact_kernel_const_const(4, window_height_const); \
break; \
};
#define launch_exact_kernel(window_width, window_height) \
switch (window_height) \
{ \
case 1: \
launch_exact_kernel_const(window_width, 1); \
break; \
case 2: \
launch_exact_kernel_const(window_width, 2); \
break; \
case 3: \
launch_exact_kernel_const(window_width, 3); \
break; \
case 4: \
launch_exact_kernel_const(window_width, 4); \
break; \
};
void average_subsampling_2d_layer_tester_cuda::enqueue_test(
hipStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data_custom,
cuda_linear_buffer_device_smart_ptr input_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
unsigned int entry_count)
{
hipChannelFormatDesc desc = hipCreateChannelDesc<float>();
cuda_safe_call(hipBindTexture(0, input_tex_ref, *input_buffer, desc, input_elem_count_per_entry * entry_count * sizeof(float)));
int output_elem_count_per_feature_map_aligned = cuda_util::get_power2_aligned_size(output_configuration_specific.dimension_sizes[0]) * output_configuration_specific.dimension_sizes[1];
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
output_elem_count_per_feature_map_aligned,
output_configuration_specific.feature_map_count,
entry_count);
float * output = *additional_buffers[0];
if ((subsampling_sizes[0] <= MAX_WINDOW_WIDTH) && (subsampling_sizes[1] <= MAX_WINDOW_HEIGHT))
{
launch_exact_kernel(subsampling_sizes[0], subsampling_sizes[1]);
}
else
{
hipLaunchKernelGGL(( average_subsampling_2d_tex_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
output,
subsampling_sizes[0],
subsampling_sizes[1],
subsampling_weight,
input_configuration_specific.dimension_sizes[0],
input_configuration_specific.dimension_sizes[1],
output_configuration_specific.dimension_sizes[0],
output_configuration_specific.dimension_sizes[1],
output_configuration_specific.feature_map_count,
entry_count);
}
}
std::vector<size_t> average_subsampling_2d_layer_tester_cuda::get_sizes_of_additional_buffers_per_entry() const
{
std::vector<size_t> res;
res.push_back(output_elem_count_per_entry * sizeof(float));
return res;
}
std::vector<unsigned int> average_subsampling_2d_layer_tester_cuda::get_linear_addressing_through_texture_per_entry() const
{
std::vector<unsigned int> res;
res.push_back(input_elem_count_per_entry);
return res;
}
cuda_linear_buffer_device_smart_ptr average_subsampling_2d_layer_tester_cuda::get_output_buffer(
cuda_linear_buffer_device_smart_ptr input_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers)
{
return additional_buffers[0];
}
void average_subsampling_2d_layer_tester_cuda::tester_configured()
{
nnforge_shared_ptr<const average_subsampling_layer> layer_derived = nnforge_dynamic_pointer_cast<const average_subsampling_layer>(layer_schema);
subsampling_sizes = layer_derived->subsampling_sizes;
subsampling_weight = 1.0F / static_cast<float>(subsampling_sizes[0] * subsampling_sizes[1]);
}
}
}
| 60ea84881ed492e7156e1226c14b472ebf3711d2.cu | /*
* Copyright 2011-2014 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "average_subsampling_2d_layer_tester_cuda.h"
#include <cuda_runtime.h>
#include "util_cuda.h"
#include "neural_network_cuda_exception.h"
#include "../average_subsampling_layer.h"
#include "../nn_types.h"
texture<float, cudaTextureType1D, cudaReadModeElementType> input_tex_ref;
__global__ void average_subsampling_2d_tex_kernel(
float * __restrict output,
int subsampling_width,
int subsampling_height,
float subsampling_weight,
int input_width,
int input_height,
int output_width,
int output_height,
int feature_map_count,
int entry_count)
{
int elem_id_in_feature_map = blockIdx.x * blockDim.x + threadIdx.x;
int feature_map_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
int tt = 32 - __clz(output_width - 1);
int output_y = elem_id_in_feature_map >> tt;
int output_x = elem_id_in_feature_map & ((1 << tt) - 1);
bool in_bounds = (output_x < output_width) && (output_y < output_height) && (feature_map_id < feature_map_count) && (entry_id < entry_count);
if (in_bounds)
{
int input_x = output_x * subsampling_width;
int input_y = output_y * subsampling_height;
int current_input_elem_id = ((entry_id * feature_map_count + feature_map_id) * input_height + input_y) * input_width + input_x;
float sum = 0.0F;
for(int j = 0; j < subsampling_height; ++j)
{
#pragma unroll 4
for(int i = 0; i < subsampling_width; ++i)
{
sum += tex1Dfetch(input_tex_ref, current_input_elem_id);
current_input_elem_id++;
}
current_input_elem_id += (input_width - subsampling_width);
}
output[((entry_id * feature_map_count + feature_map_id) * output_height + output_y) * output_width + output_x] = sum * subsampling_weight;
}
}
template<int SUBSAMPLING_WIDTH, int SUBSAMPLING_HEIGHT>
__global__ void average_subsampling_2d_tex_exact_kernel(
float * __restrict output,
int input_width,
int input_height,
int output_width,
int output_height,
int feature_map_count,
int entry_count)
{
int elem_id_in_feature_map = blockIdx.x * blockDim.x + threadIdx.x;
int feature_map_id = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
int tt = 32 - __clz(output_width - 1);
int output_y = elem_id_in_feature_map >> tt;
int output_x = elem_id_in_feature_map & ((1 << tt) - 1);
bool in_bounds = (output_x < output_width) && (output_y < output_height) && (feature_map_id < feature_map_count) && (entry_id < entry_count);
if (in_bounds)
{
int input_x = output_x * SUBSAMPLING_WIDTH;
int input_y = output_y * SUBSAMPLING_HEIGHT;
int current_input_elem_id = ((entry_id * feature_map_count + feature_map_id) * input_height + input_y) * input_width + input_x;
float sum = 0.0F;
#pragma unroll
for(int j = 0; j < SUBSAMPLING_HEIGHT; ++j)
{
#pragma unroll
for(int i = 0; i < SUBSAMPLING_WIDTH; ++i)
{
sum += tex1Dfetch(input_tex_ref, current_input_elem_id);
current_input_elem_id++;
}
current_input_elem_id += (input_width - SUBSAMPLING_WIDTH);
}
output[((entry_id * feature_map_count + feature_map_id) * output_height + output_y) * output_width + output_x] = sum * (1.0F / (float)(SUBSAMPLING_WIDTH * SUBSAMPLING_HEIGHT));
}
}
namespace nnforge
{
namespace cuda
{
average_subsampling_2d_layer_tester_cuda::average_subsampling_2d_layer_tester_cuda()
{
input_tex_ref.addressMode[0] = cudaAddressModeBorder;
input_tex_ref.normalized = false;
}
average_subsampling_2d_layer_tester_cuda::~average_subsampling_2d_layer_tester_cuda()
{
}
#define MAX_WINDOW_WIDTH 4
#define MAX_WINDOW_HEIGHT 4
#define launch_exact_kernel_const_const(window_width_const, window_height_const) \
average_subsampling_2d_tex_exact_kernel<window_width_const,window_height_const><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(output,input_configuration_specific.dimension_sizes[0],input_configuration_specific.dimension_sizes[1],output_configuration_specific.dimension_sizes[0],output_configuration_specific.dimension_sizes[1],output_configuration_specific.feature_map_count,entry_count);
#define launch_exact_kernel_const(window_width, window_height_const) \
switch (window_width) \
{ \
case 1: \
launch_exact_kernel_const_const(1, window_height_const); \
break; \
case 2: \
launch_exact_kernel_const_const(2, window_height_const); \
break; \
case 3: \
launch_exact_kernel_const_const(3, window_height_const); \
break; \
case 4: \
launch_exact_kernel_const_const(4, window_height_const); \
break; \
};
#define launch_exact_kernel(window_width, window_height) \
switch (window_height) \
{ \
case 1: \
launch_exact_kernel_const(window_width, 1); \
break; \
case 2: \
launch_exact_kernel_const(window_width, 2); \
break; \
case 3: \
launch_exact_kernel_const(window_width, 3); \
break; \
case 4: \
launch_exact_kernel_const(window_width, 4); \
break; \
};
void average_subsampling_2d_layer_tester_cuda::enqueue_test(
cudaStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& data_custom,
cuda_linear_buffer_device_smart_ptr input_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
unsigned int entry_count)
{
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
cuda_safe_call(cudaBindTexture(0, input_tex_ref, *input_buffer, desc, input_elem_count_per_entry * entry_count * sizeof(float)));
int output_elem_count_per_feature_map_aligned = cuda_util::get_power2_aligned_size(output_configuration_specific.dimension_sizes[0]) * output_configuration_specific.dimension_sizes[1];
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
output_elem_count_per_feature_map_aligned,
output_configuration_specific.feature_map_count,
entry_count);
float * output = *additional_buffers[0];
if ((subsampling_sizes[0] <= MAX_WINDOW_WIDTH) && (subsampling_sizes[1] <= MAX_WINDOW_HEIGHT))
{
launch_exact_kernel(subsampling_sizes[0], subsampling_sizes[1]);
}
else
{
average_subsampling_2d_tex_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
output,
subsampling_sizes[0],
subsampling_sizes[1],
subsampling_weight,
input_configuration_specific.dimension_sizes[0],
input_configuration_specific.dimension_sizes[1],
output_configuration_specific.dimension_sizes[0],
output_configuration_specific.dimension_sizes[1],
output_configuration_specific.feature_map_count,
entry_count);
}
}
std::vector<size_t> average_subsampling_2d_layer_tester_cuda::get_sizes_of_additional_buffers_per_entry() const
{
std::vector<size_t> res;
res.push_back(output_elem_count_per_entry * sizeof(float));
return res;
}
std::vector<unsigned int> average_subsampling_2d_layer_tester_cuda::get_linear_addressing_through_texture_per_entry() const
{
std::vector<unsigned int> res;
res.push_back(input_elem_count_per_entry);
return res;
}
cuda_linear_buffer_device_smart_ptr average_subsampling_2d_layer_tester_cuda::get_output_buffer(
cuda_linear_buffer_device_smart_ptr input_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers)
{
return additional_buffers[0];
}
void average_subsampling_2d_layer_tester_cuda::tester_configured()
{
nnforge_shared_ptr<const average_subsampling_layer> layer_derived = nnforge_dynamic_pointer_cast<const average_subsampling_layer>(layer_schema);
subsampling_sizes = layer_derived->subsampling_sizes;
subsampling_weight = 1.0F / static_cast<float>(subsampling_sizes[0] * subsampling_sizes[1]);
}
}
}
|
944c8aa9bab87b1fd3aae65ed14b75f5225f2b5a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from sparse/blas/zmergeidr.cu, normal z -> s, Sun Nov 20 20:20:40 2016
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_s
// These routines merge multiple kernels from sidr into one.
/* -------------------------------------------------------------------------- */
__global__ void
magma_sidr_smoothing_1_kernel(
int num_rows,
int num_cols,
float *drs,
float *dr,
float *dt )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
dt[ i+j*num_rows ] = drs[ i+j*num_rows ] - dr[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
Mergels multiple operations into one kernel:
dt = drs - dr
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
drs magmaFloat_ptr
vector
@param[in]
dr magmaFloat_ptr
vector
@param[in,out]
dt magmaFloat_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_sidr_smoothing_1(
magma_int_t num_rows,
magma_int_t num_cols,
magmaFloat_ptr drs,
magmaFloat_ptr dr,
magmaFloat_ptr dt,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_sidr_smoothing_1_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, drs, dr, dt );
return MAGMA_SUCCESS;
}
__global__ void
magma_sidr_smoothing_2_kernel(
int num_rows,
int num_cols,
float omega,
float *dx,
float *dxs )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
dxs[ i+j*num_rows ] = dxs[ i+j*num_rows ] + omega * dxs[ i+j*num_rows ]
- omega * dx[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
Mergels multiple operations into one kernel:
dxs = dxs - gamma*(dxs-dx)
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
omega float
scalar
@param[in]
dx magmaFloat_ptr
vector
@param[in,out]
dxs magmaFloat_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_sidr_smoothing_2(
magma_int_t num_rows,
magma_int_t num_cols,
float omega,
magmaFloat_ptr dx,
magmaFloat_ptr dxs,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_sidr_smoothing_2_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, omega, dx, dxs);
return MAGMA_SUCCESS;
}
| 944c8aa9bab87b1fd3aae65ed14b75f5225f2b5a.cu | /*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from sparse/blas/zmergeidr.cu, normal z -> s, Sun Nov 20 20:20:40 2016
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_s
// These routines merge multiple kernels from sidr into one.
/* -------------------------------------------------------------------------- */
__global__ void
magma_sidr_smoothing_1_kernel(
int num_rows,
int num_cols,
float *drs,
float *dr,
float *dt )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
dt[ i+j*num_rows ] = drs[ i+j*num_rows ] - dr[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
Mergels multiple operations into one kernel:
dt = drs - dr
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
drs magmaFloat_ptr
vector
@param[in]
dr magmaFloat_ptr
vector
@param[in,out]
dt magmaFloat_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_sidr_smoothing_1(
magma_int_t num_rows,
magma_int_t num_cols,
magmaFloat_ptr drs,
magmaFloat_ptr dr,
magmaFloat_ptr dt,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_sidr_smoothing_1_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, drs, dr, dt );
return MAGMA_SUCCESS;
}
__global__ void
magma_sidr_smoothing_2_kernel(
int num_rows,
int num_cols,
float omega,
float *dx,
float *dxs )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
dxs[ i+j*num_rows ] = dxs[ i+j*num_rows ] + omega * dxs[ i+j*num_rows ]
- omega * dx[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
Mergels multiple operations into one kernel:
dxs = dxs - gamma*(dxs-dx)
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
omega float
scalar
@param[in]
dx magmaFloat_ptr
vector
@param[in,out]
dxs magmaFloat_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_sidr_smoothing_2(
magma_int_t num_rows,
magma_int_t num_cols,
float omega,
magmaFloat_ptr dx,
magmaFloat_ptr dxs,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_sidr_smoothing_2_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, omega, dx, dxs);
return MAGMA_SUCCESS;
}
|
c3756a487122c4bdfd6e4bf2560a86be57f633b7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "matmul_gpu.h"
#include <cstdio>
constexpr unsigned int threadsPerBlock = 8;
__global__ void matmul_kernel_ver1(
const float * const mat1, const float * const mat2, float * const mat3,
const size_t pitch1, const size_t pitch2, const size_t pitch3,
const size_t size1, const size_t size2, const size_t size3) {
for (size_t i = 0; i < size1; ++i) {
for (size_t j = 0; j < size3; ++j) {
for (size_t k = 0; k < size2; ++k) {
mat3[i*pitch3 + j] += mat1[i*pitch1 + k] * mat2[k*pitch2 + j];
}
}
}
}
__global__ void matmul_kernel_ver2(
const float * const mat1, const float * const mat2, float * const mat3,
const size_t pitch1, const size_t pitch2, const size_t pitch3,
const size_t size1, const size_t size2, const size_t size3) {
const size_t i = blockIdx.y * blockDim.y + threadIdx.y;
const size_t j = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size1 && j < size3) {
for (size_t k = 0; k < size2; ++k) {
mat3[i*pitch3 + j] += mat1[i*pitch1 + k] * mat2[k*pitch2 + j];
}
}
}
__global__ void matmul_kernel_ver3(
const float * const mat1, const float * const mat2, float * const mat3,
const size_t pitch1, const size_t pitch2, const size_t pitch3,
const size_t size1, const size_t size2, const size_t size3) {
const size_t block_i = blockIdx.y * blockDim.y;
const size_t block_j = blockIdx.x * blockDim.x;
const size_t i = block_i + threadIdx.y;
const size_t j = block_j + threadIdx.x;
__shared__ float localA[threadsPerBlock][threadsPerBlock];
__shared__ float localB[threadsPerBlock][threadsPerBlock];
if (size1 - block_i >= threadsPerBlock && size3 - block_j >= threadsPerBlock) {
float tmp = 0.0;
for (size_t k = 0; k < size2; k += threadsPerBlock) {
if (size2 - k < threadsPerBlock) {
for (size_t k2 = k; k2 < size2; ++k2) {
tmp += mat1[i*pitch1 + k2] * mat2[k2*pitch2 + j];
}
} else {
__syncthreads();
localA[threadIdx.y][threadIdx.x] = mat1[i*pitch1 + (k + threadIdx.x)];
localB[threadIdx.y][threadIdx.x] = mat2[(k + threadIdx.y)*pitch2 + j];
__syncthreads();
for (size_t k2 = 0; k2 < threadsPerBlock; ++k2) {
tmp += localA[threadIdx.y][k2] * localB[k2][threadIdx.x];
}
}
}
mat3[i*pitch3 + j] = tmp;
} else if (i < size1 && j < size3) {
for (size_t k = 0; k < size2; ++k) {
mat3[i*pitch3 + j] += mat1[i*pitch1 + k] * mat2[k*pitch2 + j];
}
}
}
constexpr unsigned int blockSize = 64;
#define FMA_IMPL(index, k2) \
{ \
const float v = localA[threadIdx.y + threadsPerBlock * index][k2]; \
tmp##index##0 += v * v10; \
tmp##index##1 += v * v11; \
tmp##index##2 += v * v12; \
tmp##index##3 += v * v13; \
tmp##index##4 += v * v14; \
tmp##index##5 += v * v15; \
tmp##index##6 += v * v16; \
tmp##index##7 += v * v17; \
}
#define FMA_IMPL2(index, k2) \
{ \
const float v = localA[threadIdx.y + threadsPerBlock * index][k2]; \
if (j0 < size3) { tmp##index##0 += v * v10; } \
if (j1 < size3) { tmp##index##1 += v * v11; } \
if (j2 < size3) { tmp##index##2 += v * v12; } \
if (j3 < size3) { tmp##index##3 += v * v13; } \
if (j4 < size3) { tmp##index##4 += v * v14; } \
if (j5 < size3) { tmp##index##5 += v * v15; } \
if (j6 < size3) { tmp##index##6 += v * v16; } \
if (j7 < size3) { tmp##index##7 += v * v17; } \
}
#define FMA(k2) \
{ \
const float v10 = localB[k2][threadIdx.x]; \
const float v11 = localB[k2][threadIdx.x + threadsPerBlock]; \
const float v12 = localB[k2][threadIdx.x + threadsPerBlock*2]; \
const float v13 = localB[k2][threadIdx.x + threadsPerBlock*3]; \
const float v14 = localB[k2][threadIdx.x + threadsPerBlock*4]; \
const float v15 = localB[k2][threadIdx.x + threadsPerBlock*5]; \
const float v16 = localB[k2][threadIdx.x + threadsPerBlock*6]; \
const float v17 = localB[k2][threadIdx.x + threadsPerBlock*7]; \
FMA_IMPL(0, k2) \
FMA_IMPL(1, k2) \
FMA_IMPL(2, k2) \
FMA_IMPL(3, k2) \
FMA_IMPL(4, k2) \
FMA_IMPL(5, k2) \
FMA_IMPL(6, k2) \
FMA_IMPL(7, k2) \
}
#define APPLY_IMPL(indexi, indexj) \
if (j##indexj < size3) { \
mat3[i##indexi * pitch3 + j##indexj] = tmp##indexi##indexj; \
}
#define APPLY(index) \
if (i##index < size1) { \
APPLY_IMPL(index, 0) \
APPLY_IMPL(index, 1) \
APPLY_IMPL(index, 2) \
APPLY_IMPL(index, 3) \
APPLY_IMPL(index, 4) \
APPLY_IMPL(index, 5) \
APPLY_IMPL(index, 6) \
APPLY_IMPL(index, 7) \
}
__global__ void matmul_kernel_ver4(
const float * const mat1, const float * const mat2, float * const mat3,
const size_t pitch1, const size_t pitch2, const size_t pitch3,
const size_t size1, const size_t size2, const size_t size3) {
const int i0 = threadIdx.y + blockIdx.y * blockSize;
const int j0 = threadIdx.x + blockIdx.x * blockSize;
const int i1 = threadIdx.y + threadsPerBlock + blockIdx.y * blockSize;
const int j1 = threadIdx.x + threadsPerBlock + blockIdx.x * blockSize;
const int i2 = threadIdx.y + threadsPerBlock*2 + blockIdx.y * blockSize;
const int j2 = threadIdx.x + threadsPerBlock*2 + blockIdx.x * blockSize;
const int i3 = threadIdx.y + threadsPerBlock*3 + blockIdx.y * blockSize;
const int j3 = threadIdx.x + threadsPerBlock*3 + blockIdx.x * blockSize;
const int i4 = threadIdx.y + threadsPerBlock*4 + blockIdx.y * blockSize;
const int j4 = threadIdx.x + threadsPerBlock*4 + blockIdx.x * blockSize;
const int i5 = threadIdx.y + threadsPerBlock*5 + blockIdx.y * blockSize;
const int j5 = threadIdx.x + threadsPerBlock*5 + blockIdx.x * blockSize;
const int i6 = threadIdx.y + threadsPerBlock*6 + blockIdx.y * blockSize;
const int j6 = threadIdx.x + threadsPerBlock*6 + blockIdx.x * blockSize;
const int i7 = threadIdx.y + threadsPerBlock*7 + blockIdx.y * blockSize;
const int j7 = threadIdx.x + threadsPerBlock*7 + blockIdx.x * blockSize;
float tmp00 = 0.0, tmp01 = 0.0, tmp02 = 0.0, tmp03 = 0.0, tmp04 = 0.0, tmp05 = 0.0, tmp06 = 0.0, tmp07 = 0.0;
float tmp10 = 0.0, tmp11 = 0.0, tmp12 = 0.0, tmp13 = 0.0, tmp14 = 0.0, tmp15 = 0.0, tmp16 = 0.0, tmp17 = 0.0;
float tmp20 = 0.0, tmp21 = 0.0, tmp22 = 0.0, tmp23 = 0.0, tmp24 = 0.0, tmp25 = 0.0, tmp26 = 0.0, tmp27 = 0.0;
float tmp30 = 0.0, tmp31 = 0.0, tmp32 = 0.0, tmp33 = 0.0, tmp34 = 0.0, tmp35 = 0.0, tmp36 = 0.0, tmp37 = 0.0;
float tmp40 = 0.0, tmp41 = 0.0, tmp42 = 0.0, tmp43 = 0.0, tmp44 = 0.0, tmp45 = 0.0, tmp46 = 0.0, tmp47 = 0.0;
float tmp50 = 0.0, tmp51 = 0.0, tmp52 = 0.0, tmp53 = 0.0, tmp54 = 0.0, tmp55 = 0.0, tmp56 = 0.0, tmp57 = 0.0;
float tmp60 = 0.0, tmp61 = 0.0, tmp62 = 0.0, tmp63 = 0.0, tmp64 = 0.0, tmp65 = 0.0, tmp66 = 0.0, tmp67 = 0.0;
float tmp70 = 0.0, tmp71 = 0.0, tmp72 = 0.0, tmp73 = 0.0, tmp74 = 0.0, tmp75 = 0.0, tmp76 = 0.0, tmp77 = 0.0;
__shared__ float localA[blockSize][threadsPerBlock+1];
__shared__ float localB[threadsPerBlock][blockSize];
for (int k = 0; k < size2; k += threadsPerBlock) {
if (size1 - blockIdx.y * blockSize < blockSize ||
size2 - k < threadsPerBlock ||
size3 - blockIdx.x * blockSize < blockSize) {
const size_t ek = min((size_t)threadsPerBlock, size2 - k);
__syncthreads();
if (threadIdx.x < ek) {
if (i0 < size1) {
localA[threadIdx.y][threadIdx.x] = __ldg(mat1 + i0 * pitch1 + k + threadIdx.x);
}
if (i1 < size1) {
localA[threadIdx.y + threadsPerBlock][threadIdx.x] = __ldg(mat1 + i1 * pitch1 + k + threadIdx.x);
}
if (i2 < size1) {
localA[threadIdx.y + threadsPerBlock * 2][threadIdx.x] = __ldg(mat1 + i2 * pitch1 + k + threadIdx.x);
}
if (i3 < size1) {
localA[threadIdx.y + threadsPerBlock * 3][threadIdx.x] = __ldg(mat1 + i3 * pitch1 + k + threadIdx.x);
}
if (i4 < size1) {
localA[threadIdx.y + threadsPerBlock * 4][threadIdx.x] = __ldg(mat1 + i4 * pitch1 + k + threadIdx.x);
}
if (i5 < size1) {
localA[threadIdx.y + threadsPerBlock * 5][threadIdx.x] = __ldg(mat1 + i5 * pitch1 + k + threadIdx.x);
}
if (i6 < size1) {
localA[threadIdx.y + threadsPerBlock * 6][threadIdx.x] = __ldg(mat1 + i6 * pitch1 + k + threadIdx.x);
}
if (i7 < size1) {
localA[threadIdx.y + threadsPerBlock * 7][threadIdx.x] = __ldg(mat1 + i7 * pitch1 + k + threadIdx.x);
}
}
__syncthreads();
for (size_t k2 = 0; k2 < ek; ++k2) {
float v10 = 0.0, v11 = 0.0, v12 = 0.0, v13 = 0.0, v14 = 0.0, v15 = 0.0, v16 = 0.0, v17 = 0.0;
if (j0 < size3) {
v10 = __ldg(mat2 + (k+k2) * pitch2 + j0);
}
if (j1 < size3) {
v11 = __ldg(mat2 + (k+k2) * pitch2 + j1);
}
if (j2 < size3) {
v12 = __ldg(mat2 + (k+k2) * pitch2 + j2);
}
if (j3 < size3) {
v13 = __ldg(mat2 + (k+k2) * pitch2 + j3);
}
if (j4 < size3) {
v14 = __ldg(mat2 + (k+k2) * pitch2 + j4);
}
if (j5 < size3) {
v15 = __ldg(mat2 + (k+k2) * pitch2 + j5);
}
if (j6 < size3) {
v16 = __ldg(mat2 + (k+k2) * pitch2 + j6);
}
if (j7 < size3) {
v17 = __ldg(mat2 + (k+k2) * pitch2 + j7);
}
if (i0 < size1) FMA_IMPL2(0, k2)
if (i1 < size1) FMA_IMPL2(1, k2)
if (i2 < size1) FMA_IMPL2(2, k2)
if (i3 < size1) FMA_IMPL2(3, k2)
if (i4 < size1) FMA_IMPL2(4, k2)
if (i5 < size1) FMA_IMPL2(5, k2)
if (i6 < size1) FMA_IMPL2(6, k2)
if (i7 < size1) FMA_IMPL2(7, k2)
}
} else {
__syncthreads();
localB[threadIdx.y][threadIdx.x] = __ldg(mat2 + (k+threadIdx.y) * pitch2 + j0);
localB[threadIdx.y][threadIdx.x + threadsPerBlock] = __ldg(mat2 + (k+threadIdx.y) * pitch2 + j1);
localB[threadIdx.y][threadIdx.x + threadsPerBlock*2] = __ldg(mat2 + (k+threadIdx.y) * pitch2 + j2);
localB[threadIdx.y][threadIdx.x + threadsPerBlock*3] = __ldg(mat2 + (k+threadIdx.y) * pitch2 + j3);
localB[threadIdx.y][threadIdx.x + threadsPerBlock*4] = __ldg(mat2 + (k+threadIdx.y) * pitch2 + j4);
localB[threadIdx.y][threadIdx.x + threadsPerBlock*5] = __ldg(mat2 + (k+threadIdx.y) * pitch2 + j5);
localB[threadIdx.y][threadIdx.x + threadsPerBlock*6] = __ldg(mat2 + (k+threadIdx.y) * pitch2 + j6);
localB[threadIdx.y][threadIdx.x + threadsPerBlock*7] = __ldg(mat2 + (k+threadIdx.y) * pitch2 + j7);
localA[threadIdx.y][threadIdx.x] = __ldg(mat1 + i0 * pitch1 + k + threadIdx.x);
localA[threadIdx.y + threadsPerBlock][threadIdx.x] = __ldg(mat1 + i1 * pitch1 + k + threadIdx.x);
localA[threadIdx.y + threadsPerBlock * 2][threadIdx.x] = __ldg(mat1 + i2 * pitch1 + k + threadIdx.x);
localA[threadIdx.y + threadsPerBlock * 3][threadIdx.x] = __ldg(mat1 + i3 * pitch1 + k + threadIdx.x);
localA[threadIdx.y + threadsPerBlock * 4][threadIdx.x] = __ldg(mat1 + i4 * pitch1 + k + threadIdx.x);
localA[threadIdx.y + threadsPerBlock * 5][threadIdx.x] = __ldg(mat1 + i5 * pitch1 + k + threadIdx.x);
localA[threadIdx.y + threadsPerBlock * 6][threadIdx.x] = __ldg(mat1 + i6 * pitch1 + k + threadIdx.x);
localA[threadIdx.y + threadsPerBlock * 7][threadIdx.x] = __ldg(mat1 + i7 * pitch1 + k + threadIdx.x);
__syncthreads();
FMA(0)
FMA(1)
FMA(2)
FMA(3)
FMA(4)
FMA(5)
FMA(6)
FMA(7)
}
}
APPLY(0)
APPLY(1)
APPLY(2)
APPLY(3)
APPLY(4)
APPLY(5)
APPLY(6)
APPLY(7)
}
__host__ void handle_error(hipError_t err) {
if (err != hipSuccess) {
fprintf(stderr, "[Error] %s (error code: %d)\n", hipGetErrorString(err), err);
}
}
__host__ void matmul_gpu_ver1(const float* const mat1, const float * const mat2, float * const mat3,
const size_t size1, const size_t size2, const size_t size3,
const size_t pitch1, const size_t pitch2) {
hipEvent_t start, stop;
handle_error(hipEventCreate(&start));
handle_error(hipEventCreate(&stop));
float *mat1_d;
float *mat2_d;
float *mat3_d;
size_t pitch1_d, pitch2_d, pitch3_d;
handle_error(hipMallocPitch((void**)&mat1_d, &pitch1_d, size2 * sizeof(float), size1));
handle_error(hipMallocPitch((void**)&mat2_d, &pitch2_d, size3 * sizeof(float), size2));
handle_error(hipMallocPitch((void**)&mat3_d, &pitch3_d, size3 * sizeof(float), size1));
handle_error(hipMemcpy2D((void*)mat1_d, pitch1_d, (void*)mat1, pitch1 * sizeof(float), size2 * sizeof(float), size1, hipMemcpyHostToDevice));
handle_error(hipMemcpy2D((void*)mat2_d, pitch2_d, (void*)mat2, pitch2 * sizeof(float), size3 * sizeof(float), size2, hipMemcpyHostToDevice));
handle_error(hipMemset2D((void*)mat3_d, pitch3_d, 0, size3 * sizeof(float), size1));
handle_error(hipEventRecord(start, 0));
hipLaunchKernelGGL(( matmul_kernel_ver1), dim3(1), dim3(1), 0, 0,
mat1_d, mat2_d, mat3_d,
pitch1_d/sizeof(float), pitch2_d/sizeof(float), pitch3_d/sizeof(float),
size1, size2, size3);
handle_error(hipEventRecord(stop, 0));
handle_error(hipEventSynchronize(stop));
handle_error(hipMemcpy2D((void*)mat3, pitch2 * sizeof(float), (void*)mat3_d, pitch3_d, size3 * sizeof(float), size1, hipMemcpyDeviceToHost));
hipFree(mat1_d);
hipFree(mat2_d);
hipFree(mat3_d);
size_t ops = 2 * size1 * size2 * size3;
float elapsed;
handle_error(hipEventElapsedTime(&elapsed, start, stop));
fprintf(stderr, "Time: %fms, %f GFLOPS\n", elapsed, ops / elapsed / 1e6);
}
__host__ void matmul_gpu_ver2(const float* const mat1, const float * const mat2, float * const mat3,
const size_t size1, const size_t size2, const size_t size3,
const size_t pitch1, const size_t pitch2) {
hipEvent_t start, stop;
handle_error(hipEventCreate(&start));
handle_error(hipEventCreate(&stop));
float *mat1_d;
float *mat2_d;
float *mat3_d;
size_t pitch1_d, pitch2_d, pitch3_d;
handle_error(hipMallocPitch((void**)&mat1_d, &pitch1_d, size2 * sizeof(float), size1));
handle_error(hipMallocPitch((void**)&mat2_d, &pitch2_d, size3 * sizeof(float), size2));
handle_error(hipMallocPitch((void**)&mat3_d, &pitch3_d, size3 * sizeof(float), size1));
handle_error(hipMemcpy2D((void*)mat1_d, pitch1_d, (void*)mat1, pitch1 * sizeof(float), size2 * sizeof(float), size1, hipMemcpyHostToDevice));
handle_error(hipMemcpy2D((void*)mat2_d, pitch2_d, (void*)mat2, pitch2 * sizeof(float), size3 * sizeof(float), size2, hipMemcpyHostToDevice));
handle_error(hipMemset2D((void*)mat3_d, pitch3_d, 0, size3 * sizeof(float), size1));
size_t bv = (size1+threadsPerBlock-1) / threadsPerBlock;
size_t bh = (size3+threadsPerBlock-1) / threadsPerBlock;
dim3 block(threadsPerBlock, threadsPerBlock);
dim3 grid(bh, bv);
handle_error(hipEventRecord(start, 0));
hipLaunchKernelGGL(( matmul_kernel_ver2), dim3(grid), dim3(block), 0, 0,
mat1_d, mat2_d, mat3_d,
pitch1_d/sizeof(float), pitch2_d/sizeof(float), pitch3_d/sizeof(float),
size1, size2, size3);
handle_error(hipEventRecord(stop, 0));
handle_error(hipEventSynchronize(stop));
handle_error(hipMemcpy2D((void*)mat3, pitch2 * sizeof(float), (void*)mat3_d, pitch3_d, size3 * sizeof(float), size1, hipMemcpyDeviceToHost));
hipFree(mat1_d);
hipFree(mat2_d);
hipFree(mat3_d);
size_t ops = 2 * size1 * size2 * size3;
float elapsed;
handle_error(hipEventElapsedTime(&elapsed, start, stop));
fprintf(stderr, "Time: %fms, %f GFLOPS\n", elapsed, ops / elapsed / 1e6);
}
__host__ void matmul_gpu_ver3(const float* const mat1, const float * const mat2, float * const mat3,
const size_t size1, const size_t size2, const size_t size3,
const size_t pitch1, const size_t pitch2) {
hipEvent_t start, stop;
handle_error(hipEventCreate(&start));
handle_error(hipEventCreate(&stop));
float *mat1_d;
float *mat2_d;
float *mat3_d;
size_t pitch1_d, pitch2_d, pitch3_d;
handle_error(hipMallocPitch((void**)&mat1_d, &pitch1_d, size2 * sizeof(float), size1));
handle_error(hipMallocPitch((void**)&mat2_d, &pitch2_d, size3 * sizeof(float), size2));
handle_error(hipMallocPitch((void**)&mat3_d, &pitch3_d, size3 * sizeof(float), size1));
handle_error(hipMemcpy2D((void*)mat1_d, pitch1_d, (void*)mat1, pitch1 * sizeof(float), size2 * sizeof(float), size1, hipMemcpyHostToDevice));
handle_error(hipMemcpy2D((void*)mat2_d, pitch2_d, (void*)mat2, pitch2 * sizeof(float), size3 * sizeof(float), size2, hipMemcpyHostToDevice));
handle_error(hipMemset2D((void*)mat3_d, pitch3_d, 0, size3 * sizeof(float), size1));
size_t bv = (size1+threadsPerBlock-1) / threadsPerBlock;
size_t bh = (size3+threadsPerBlock-1) / threadsPerBlock;
dim3 block(threadsPerBlock, threadsPerBlock);
dim3 grid(bh, bv);
handle_error(hipEventRecord(start, 0));
hipLaunchKernelGGL(( matmul_kernel_ver3), dim3(grid), dim3(block), 0, 0,
mat1_d, mat2_d, mat3_d,
pitch1_d/sizeof(float), pitch2_d/sizeof(float), pitch3_d/sizeof(float),
size1, size2, size3);
handle_error(hipEventRecord(stop, 0));
handle_error(hipEventSynchronize(stop));
handle_error(hipMemcpy2D((void*)mat3, pitch2 * sizeof(float), (void*)mat3_d, pitch3_d, size3 * sizeof(float), size1, hipMemcpyDeviceToHost));
hipFree(mat1_d);
hipFree(mat2_d);
hipFree(mat3_d);
size_t ops = 2 * size1 * size2 * size3;
float elapsed;
handle_error(hipEventElapsedTime(&elapsed, start, stop));
fprintf(stderr, "Time: %fms, %f GFLOPS\n", elapsed, ops / elapsed / 1e6);
}
__host__ void matmul_gpu_ver4(const float* const mat1, const float * const mat2, float * const mat3,
const size_t size1, const size_t size2, const size_t size3,
const size_t pitch1, const size_t pitch2) {
hipEvent_t start, stop;
handle_error(hipEventCreate(&start));
handle_error(hipEventCreate(&stop));
float *mat1_d;
float *mat2_d;
float *mat3_d;
size_t pitch1_d, pitch2_d, pitch3_d;
handle_error(hipMallocPitch((void**)&mat1_d, &pitch1_d, size2 * sizeof(float), size1));
handle_error(hipMallocPitch((void**)&mat2_d, &pitch2_d, size3 * sizeof(float), size2));
handle_error(hipMallocPitch((void**)&mat3_d, &pitch3_d, size3 * sizeof(float), size1));
handle_error(hipMemcpy2D((void*)mat1_d, pitch1_d, (void*)mat1, pitch1 * sizeof(float), size2 * sizeof(float), size1, hipMemcpyHostToDevice));
handle_error(hipMemcpy2D((void*)mat2_d, pitch2_d, (void*)mat2, pitch2 * sizeof(float), size3 * sizeof(float), size2, hipMemcpyHostToDevice));
handle_error(hipMemset2D((void*)mat3_d, pitch3_d, 0, size3 * sizeof(float), size1));
size_t bv = (size1+blockSize-1) / blockSize;
size_t bh = (size3+blockSize-1) / blockSize;
dim3 block(threadsPerBlock, threadsPerBlock);
dim3 grid(bh, bv);
handle_error(hipEventRecord(start, 0));
hipLaunchKernelGGL(( matmul_kernel_ver4), dim3(grid), dim3(block), 0, 0,
mat1_d, mat2_d, mat3_d,
pitch1_d/sizeof(float), pitch2_d/sizeof(float), pitch3_d/sizeof(float),
size1, size2, size3);
handle_error(hipEventRecord(stop, 0));
handle_error(hipEventSynchronize(stop));
handle_error(hipMemcpy2D((void*)mat3, pitch2 * sizeof(float), (void*)mat3_d, pitch3_d, size3 * sizeof(float), size1, hipMemcpyDeviceToHost));
hipFree(mat1_d);
hipFree(mat2_d);
hipFree(mat3_d);
size_t ops = 2 * size1 * size2 * size3;
float elapsed;
handle_error(hipEventElapsedTime(&elapsed, start, stop));
fprintf(stderr, "Time: %fms, %f GFLOPS\n", elapsed, ops / elapsed / 1e6);
}
| c3756a487122c4bdfd6e4bf2560a86be57f633b7.cu | #include "matmul_gpu.h"
#include <cstdio>
constexpr unsigned int threadsPerBlock = 8;
__global__ void matmul_kernel_ver1(
const float * const mat1, const float * const mat2, float * const mat3,
const size_t pitch1, const size_t pitch2, const size_t pitch3,
const size_t size1, const size_t size2, const size_t size3) {
for (size_t i = 0; i < size1; ++i) {
for (size_t j = 0; j < size3; ++j) {
for (size_t k = 0; k < size2; ++k) {
mat3[i*pitch3 + j] += mat1[i*pitch1 + k] * mat2[k*pitch2 + j];
}
}
}
}
__global__ void matmul_kernel_ver2(
const float * const mat1, const float * const mat2, float * const mat3,
const size_t pitch1, const size_t pitch2, const size_t pitch3,
const size_t size1, const size_t size2, const size_t size3) {
const size_t i = blockIdx.y * blockDim.y + threadIdx.y;
const size_t j = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size1 && j < size3) {
for (size_t k = 0; k < size2; ++k) {
mat3[i*pitch3 + j] += mat1[i*pitch1 + k] * mat2[k*pitch2 + j];
}
}
}
__global__ void matmul_kernel_ver3(
const float * const mat1, const float * const mat2, float * const mat3,
const size_t pitch1, const size_t pitch2, const size_t pitch3,
const size_t size1, const size_t size2, const size_t size3) {
const size_t block_i = blockIdx.y * blockDim.y;
const size_t block_j = blockIdx.x * blockDim.x;
const size_t i = block_i + threadIdx.y;
const size_t j = block_j + threadIdx.x;
__shared__ float localA[threadsPerBlock][threadsPerBlock];
__shared__ float localB[threadsPerBlock][threadsPerBlock];
if (size1 - block_i >= threadsPerBlock && size3 - block_j >= threadsPerBlock) {
float tmp = 0.0;
for (size_t k = 0; k < size2; k += threadsPerBlock) {
if (size2 - k < threadsPerBlock) {
for (size_t k2 = k; k2 < size2; ++k2) {
tmp += mat1[i*pitch1 + k2] * mat2[k2*pitch2 + j];
}
} else {
__syncthreads();
localA[threadIdx.y][threadIdx.x] = mat1[i*pitch1 + (k + threadIdx.x)];
localB[threadIdx.y][threadIdx.x] = mat2[(k + threadIdx.y)*pitch2 + j];
__syncthreads();
for (size_t k2 = 0; k2 < threadsPerBlock; ++k2) {
tmp += localA[threadIdx.y][k2] * localB[k2][threadIdx.x];
}
}
}
mat3[i*pitch3 + j] = tmp;
} else if (i < size1 && j < size3) {
for (size_t k = 0; k < size2; ++k) {
mat3[i*pitch3 + j] += mat1[i*pitch1 + k] * mat2[k*pitch2 + j];
}
}
}
constexpr unsigned int blockSize = 64;
#define FMA_IMPL(index, k2) \
{ \
const float v = localA[threadIdx.y + threadsPerBlock * index][k2]; \
tmp##index##0 += v * v10; \
tmp##index##1 += v * v11; \
tmp##index##2 += v * v12; \
tmp##index##3 += v * v13; \
tmp##index##4 += v * v14; \
tmp##index##5 += v * v15; \
tmp##index##6 += v * v16; \
tmp##index##7 += v * v17; \
}
#define FMA_IMPL2(index, k2) \
{ \
const float v = localA[threadIdx.y + threadsPerBlock * index][k2]; \
if (j0 < size3) { tmp##index##0 += v * v10; } \
if (j1 < size3) { tmp##index##1 += v * v11; } \
if (j2 < size3) { tmp##index##2 += v * v12; } \
if (j3 < size3) { tmp##index##3 += v * v13; } \
if (j4 < size3) { tmp##index##4 += v * v14; } \
if (j5 < size3) { tmp##index##5 += v * v15; } \
if (j6 < size3) { tmp##index##6 += v * v16; } \
if (j7 < size3) { tmp##index##7 += v * v17; } \
}
#define FMA(k2) \
{ \
const float v10 = localB[k2][threadIdx.x]; \
const float v11 = localB[k2][threadIdx.x + threadsPerBlock]; \
const float v12 = localB[k2][threadIdx.x + threadsPerBlock*2]; \
const float v13 = localB[k2][threadIdx.x + threadsPerBlock*3]; \
const float v14 = localB[k2][threadIdx.x + threadsPerBlock*4]; \
const float v15 = localB[k2][threadIdx.x + threadsPerBlock*5]; \
const float v16 = localB[k2][threadIdx.x + threadsPerBlock*6]; \
const float v17 = localB[k2][threadIdx.x + threadsPerBlock*7]; \
FMA_IMPL(0, k2) \
FMA_IMPL(1, k2) \
FMA_IMPL(2, k2) \
FMA_IMPL(3, k2) \
FMA_IMPL(4, k2) \
FMA_IMPL(5, k2) \
FMA_IMPL(6, k2) \
FMA_IMPL(7, k2) \
}
#define APPLY_IMPL(indexi, indexj) \
if (j##indexj < size3) { \
mat3[i##indexi * pitch3 + j##indexj] = tmp##indexi##indexj; \
}
#define APPLY(index) \
if (i##index < size1) { \
APPLY_IMPL(index, 0) \
APPLY_IMPL(index, 1) \
APPLY_IMPL(index, 2) \
APPLY_IMPL(index, 3) \
APPLY_IMPL(index, 4) \
APPLY_IMPL(index, 5) \
APPLY_IMPL(index, 6) \
APPLY_IMPL(index, 7) \
}
__global__ void matmul_kernel_ver4(
const float * const mat1, const float * const mat2, float * const mat3,
const size_t pitch1, const size_t pitch2, const size_t pitch3,
const size_t size1, const size_t size2, const size_t size3) {
const int i0 = threadIdx.y + blockIdx.y * blockSize;
const int j0 = threadIdx.x + blockIdx.x * blockSize;
const int i1 = threadIdx.y + threadsPerBlock + blockIdx.y * blockSize;
const int j1 = threadIdx.x + threadsPerBlock + blockIdx.x * blockSize;
const int i2 = threadIdx.y + threadsPerBlock*2 + blockIdx.y * blockSize;
const int j2 = threadIdx.x + threadsPerBlock*2 + blockIdx.x * blockSize;
const int i3 = threadIdx.y + threadsPerBlock*3 + blockIdx.y * blockSize;
const int j3 = threadIdx.x + threadsPerBlock*3 + blockIdx.x * blockSize;
const int i4 = threadIdx.y + threadsPerBlock*4 + blockIdx.y * blockSize;
const int j4 = threadIdx.x + threadsPerBlock*4 + blockIdx.x * blockSize;
const int i5 = threadIdx.y + threadsPerBlock*5 + blockIdx.y * blockSize;
const int j5 = threadIdx.x + threadsPerBlock*5 + blockIdx.x * blockSize;
const int i6 = threadIdx.y + threadsPerBlock*6 + blockIdx.y * blockSize;
const int j6 = threadIdx.x + threadsPerBlock*6 + blockIdx.x * blockSize;
const int i7 = threadIdx.y + threadsPerBlock*7 + blockIdx.y * blockSize;
const int j7 = threadIdx.x + threadsPerBlock*7 + blockIdx.x * blockSize;
float tmp00 = 0.0, tmp01 = 0.0, tmp02 = 0.0, tmp03 = 0.0, tmp04 = 0.0, tmp05 = 0.0, tmp06 = 0.0, tmp07 = 0.0;
float tmp10 = 0.0, tmp11 = 0.0, tmp12 = 0.0, tmp13 = 0.0, tmp14 = 0.0, tmp15 = 0.0, tmp16 = 0.0, tmp17 = 0.0;
float tmp20 = 0.0, tmp21 = 0.0, tmp22 = 0.0, tmp23 = 0.0, tmp24 = 0.0, tmp25 = 0.0, tmp26 = 0.0, tmp27 = 0.0;
float tmp30 = 0.0, tmp31 = 0.0, tmp32 = 0.0, tmp33 = 0.0, tmp34 = 0.0, tmp35 = 0.0, tmp36 = 0.0, tmp37 = 0.0;
float tmp40 = 0.0, tmp41 = 0.0, tmp42 = 0.0, tmp43 = 0.0, tmp44 = 0.0, tmp45 = 0.0, tmp46 = 0.0, tmp47 = 0.0;
float tmp50 = 0.0, tmp51 = 0.0, tmp52 = 0.0, tmp53 = 0.0, tmp54 = 0.0, tmp55 = 0.0, tmp56 = 0.0, tmp57 = 0.0;
float tmp60 = 0.0, tmp61 = 0.0, tmp62 = 0.0, tmp63 = 0.0, tmp64 = 0.0, tmp65 = 0.0, tmp66 = 0.0, tmp67 = 0.0;
float tmp70 = 0.0, tmp71 = 0.0, tmp72 = 0.0, tmp73 = 0.0, tmp74 = 0.0, tmp75 = 0.0, tmp76 = 0.0, tmp77 = 0.0;
__shared__ float localA[blockSize][threadsPerBlock+1];
__shared__ float localB[threadsPerBlock][blockSize];
for (int k = 0; k < size2; k += threadsPerBlock) {
if (size1 - blockIdx.y * blockSize < blockSize ||
size2 - k < threadsPerBlock ||
size3 - blockIdx.x * blockSize < blockSize) {
const size_t ek = min((size_t)threadsPerBlock, size2 - k);
__syncthreads();
if (threadIdx.x < ek) {
if (i0 < size1) {
localA[threadIdx.y][threadIdx.x] = __ldg(mat1 + i0 * pitch1 + k + threadIdx.x);
}
if (i1 < size1) {
localA[threadIdx.y + threadsPerBlock][threadIdx.x] = __ldg(mat1 + i1 * pitch1 + k + threadIdx.x);
}
if (i2 < size1) {
localA[threadIdx.y + threadsPerBlock * 2][threadIdx.x] = __ldg(mat1 + i2 * pitch1 + k + threadIdx.x);
}
if (i3 < size1) {
localA[threadIdx.y + threadsPerBlock * 3][threadIdx.x] = __ldg(mat1 + i3 * pitch1 + k + threadIdx.x);
}
if (i4 < size1) {
localA[threadIdx.y + threadsPerBlock * 4][threadIdx.x] = __ldg(mat1 + i4 * pitch1 + k + threadIdx.x);
}
if (i5 < size1) {
localA[threadIdx.y + threadsPerBlock * 5][threadIdx.x] = __ldg(mat1 + i5 * pitch1 + k + threadIdx.x);
}
if (i6 < size1) {
localA[threadIdx.y + threadsPerBlock * 6][threadIdx.x] = __ldg(mat1 + i6 * pitch1 + k + threadIdx.x);
}
if (i7 < size1) {
localA[threadIdx.y + threadsPerBlock * 7][threadIdx.x] = __ldg(mat1 + i7 * pitch1 + k + threadIdx.x);
}
}
__syncthreads();
for (size_t k2 = 0; k2 < ek; ++k2) {
float v10 = 0.0, v11 = 0.0, v12 = 0.0, v13 = 0.0, v14 = 0.0, v15 = 0.0, v16 = 0.0, v17 = 0.0;
if (j0 < size3) {
v10 = __ldg(mat2 + (k+k2) * pitch2 + j0);
}
if (j1 < size3) {
v11 = __ldg(mat2 + (k+k2) * pitch2 + j1);
}
if (j2 < size3) {
v12 = __ldg(mat2 + (k+k2) * pitch2 + j2);
}
if (j3 < size3) {
v13 = __ldg(mat2 + (k+k2) * pitch2 + j3);
}
if (j4 < size3) {
v14 = __ldg(mat2 + (k+k2) * pitch2 + j4);
}
if (j5 < size3) {
v15 = __ldg(mat2 + (k+k2) * pitch2 + j5);
}
if (j6 < size3) {
v16 = __ldg(mat2 + (k+k2) * pitch2 + j6);
}
if (j7 < size3) {
v17 = __ldg(mat2 + (k+k2) * pitch2 + j7);
}
if (i0 < size1) FMA_IMPL2(0, k2)
if (i1 < size1) FMA_IMPL2(1, k2)
if (i2 < size1) FMA_IMPL2(2, k2)
if (i3 < size1) FMA_IMPL2(3, k2)
if (i4 < size1) FMA_IMPL2(4, k2)
if (i5 < size1) FMA_IMPL2(5, k2)
if (i6 < size1) FMA_IMPL2(6, k2)
if (i7 < size1) FMA_IMPL2(7, k2)
}
} else {
__syncthreads();
localB[threadIdx.y][threadIdx.x] = __ldg(mat2 + (k+threadIdx.y) * pitch2 + j0);
localB[threadIdx.y][threadIdx.x + threadsPerBlock] = __ldg(mat2 + (k+threadIdx.y) * pitch2 + j1);
localB[threadIdx.y][threadIdx.x + threadsPerBlock*2] = __ldg(mat2 + (k+threadIdx.y) * pitch2 + j2);
localB[threadIdx.y][threadIdx.x + threadsPerBlock*3] = __ldg(mat2 + (k+threadIdx.y) * pitch2 + j3);
localB[threadIdx.y][threadIdx.x + threadsPerBlock*4] = __ldg(mat2 + (k+threadIdx.y) * pitch2 + j4);
localB[threadIdx.y][threadIdx.x + threadsPerBlock*5] = __ldg(mat2 + (k+threadIdx.y) * pitch2 + j5);
localB[threadIdx.y][threadIdx.x + threadsPerBlock*6] = __ldg(mat2 + (k+threadIdx.y) * pitch2 + j6);
localB[threadIdx.y][threadIdx.x + threadsPerBlock*7] = __ldg(mat2 + (k+threadIdx.y) * pitch2 + j7);
localA[threadIdx.y][threadIdx.x] = __ldg(mat1 + i0 * pitch1 + k + threadIdx.x);
localA[threadIdx.y + threadsPerBlock][threadIdx.x] = __ldg(mat1 + i1 * pitch1 + k + threadIdx.x);
localA[threadIdx.y + threadsPerBlock * 2][threadIdx.x] = __ldg(mat1 + i2 * pitch1 + k + threadIdx.x);
localA[threadIdx.y + threadsPerBlock * 3][threadIdx.x] = __ldg(mat1 + i3 * pitch1 + k + threadIdx.x);
localA[threadIdx.y + threadsPerBlock * 4][threadIdx.x] = __ldg(mat1 + i4 * pitch1 + k + threadIdx.x);
localA[threadIdx.y + threadsPerBlock * 5][threadIdx.x] = __ldg(mat1 + i5 * pitch1 + k + threadIdx.x);
localA[threadIdx.y + threadsPerBlock * 6][threadIdx.x] = __ldg(mat1 + i6 * pitch1 + k + threadIdx.x);
localA[threadIdx.y + threadsPerBlock * 7][threadIdx.x] = __ldg(mat1 + i7 * pitch1 + k + threadIdx.x);
__syncthreads();
FMA(0)
FMA(1)
FMA(2)
FMA(3)
FMA(4)
FMA(5)
FMA(6)
FMA(7)
}
}
APPLY(0)
APPLY(1)
APPLY(2)
APPLY(3)
APPLY(4)
APPLY(5)
APPLY(6)
APPLY(7)
}
__host__ void handle_error(cudaError_t err) {
if (err != cudaSuccess) {
fprintf(stderr, "[Error] %s (error code: %d)\n", cudaGetErrorString(err), err);
}
}
__host__ void matmul_gpu_ver1(const float* const mat1, const float * const mat2, float * const mat3,
const size_t size1, const size_t size2, const size_t size3,
const size_t pitch1, const size_t pitch2) {
cudaEvent_t start, stop;
handle_error(cudaEventCreate(&start));
handle_error(cudaEventCreate(&stop));
float *mat1_d;
float *mat2_d;
float *mat3_d;
size_t pitch1_d, pitch2_d, pitch3_d;
handle_error(cudaMallocPitch((void**)&mat1_d, &pitch1_d, size2 * sizeof(float), size1));
handle_error(cudaMallocPitch((void**)&mat2_d, &pitch2_d, size3 * sizeof(float), size2));
handle_error(cudaMallocPitch((void**)&mat3_d, &pitch3_d, size3 * sizeof(float), size1));
handle_error(cudaMemcpy2D((void*)mat1_d, pitch1_d, (void*)mat1, pitch1 * sizeof(float), size2 * sizeof(float), size1, cudaMemcpyHostToDevice));
handle_error(cudaMemcpy2D((void*)mat2_d, pitch2_d, (void*)mat2, pitch2 * sizeof(float), size3 * sizeof(float), size2, cudaMemcpyHostToDevice));
handle_error(cudaMemset2D((void*)mat3_d, pitch3_d, 0, size3 * sizeof(float), size1));
handle_error(cudaEventRecord(start, 0));
matmul_kernel_ver1<<<1, 1>>>(
mat1_d, mat2_d, mat3_d,
pitch1_d/sizeof(float), pitch2_d/sizeof(float), pitch3_d/sizeof(float),
size1, size2, size3);
handle_error(cudaEventRecord(stop, 0));
handle_error(cudaEventSynchronize(stop));
handle_error(cudaMemcpy2D((void*)mat3, pitch2 * sizeof(float), (void*)mat3_d, pitch3_d, size3 * sizeof(float), size1, cudaMemcpyDeviceToHost));
cudaFree(mat1_d);
cudaFree(mat2_d);
cudaFree(mat3_d);
size_t ops = 2 * size1 * size2 * size3;
float elapsed;
handle_error(cudaEventElapsedTime(&elapsed, start, stop));
fprintf(stderr, "Time: %fms, %f GFLOPS\n", elapsed, ops / elapsed / 1e6);
}
__host__ void matmul_gpu_ver2(const float* const mat1, const float * const mat2, float * const mat3,
const size_t size1, const size_t size2, const size_t size3,
const size_t pitch1, const size_t pitch2) {
cudaEvent_t start, stop;
handle_error(cudaEventCreate(&start));
handle_error(cudaEventCreate(&stop));
float *mat1_d;
float *mat2_d;
float *mat3_d;
size_t pitch1_d, pitch2_d, pitch3_d;
handle_error(cudaMallocPitch((void**)&mat1_d, &pitch1_d, size2 * sizeof(float), size1));
handle_error(cudaMallocPitch((void**)&mat2_d, &pitch2_d, size3 * sizeof(float), size2));
handle_error(cudaMallocPitch((void**)&mat3_d, &pitch3_d, size3 * sizeof(float), size1));
handle_error(cudaMemcpy2D((void*)mat1_d, pitch1_d, (void*)mat1, pitch1 * sizeof(float), size2 * sizeof(float), size1, cudaMemcpyHostToDevice));
handle_error(cudaMemcpy2D((void*)mat2_d, pitch2_d, (void*)mat2, pitch2 * sizeof(float), size3 * sizeof(float), size2, cudaMemcpyHostToDevice));
handle_error(cudaMemset2D((void*)mat3_d, pitch3_d, 0, size3 * sizeof(float), size1));
size_t bv = (size1+threadsPerBlock-1) / threadsPerBlock;
size_t bh = (size3+threadsPerBlock-1) / threadsPerBlock;
dim3 block(threadsPerBlock, threadsPerBlock);
dim3 grid(bh, bv);
handle_error(cudaEventRecord(start, 0));
matmul_kernel_ver2<<<grid, block>>>(
mat1_d, mat2_d, mat3_d,
pitch1_d/sizeof(float), pitch2_d/sizeof(float), pitch3_d/sizeof(float),
size1, size2, size3);
handle_error(cudaEventRecord(stop, 0));
handle_error(cudaEventSynchronize(stop));
handle_error(cudaMemcpy2D((void*)mat3, pitch2 * sizeof(float), (void*)mat3_d, pitch3_d, size3 * sizeof(float), size1, cudaMemcpyDeviceToHost));
cudaFree(mat1_d);
cudaFree(mat2_d);
cudaFree(mat3_d);
size_t ops = 2 * size1 * size2 * size3;
float elapsed;
handle_error(cudaEventElapsedTime(&elapsed, start, stop));
fprintf(stderr, "Time: %fms, %f GFLOPS\n", elapsed, ops / elapsed / 1e6);
}
__host__ void matmul_gpu_ver3(const float* const mat1, const float * const mat2, float * const mat3,
const size_t size1, const size_t size2, const size_t size3,
const size_t pitch1, const size_t pitch2) {
cudaEvent_t start, stop;
handle_error(cudaEventCreate(&start));
handle_error(cudaEventCreate(&stop));
float *mat1_d;
float *mat2_d;
float *mat3_d;
size_t pitch1_d, pitch2_d, pitch3_d;
handle_error(cudaMallocPitch((void**)&mat1_d, &pitch1_d, size2 * sizeof(float), size1));
handle_error(cudaMallocPitch((void**)&mat2_d, &pitch2_d, size3 * sizeof(float), size2));
handle_error(cudaMallocPitch((void**)&mat3_d, &pitch3_d, size3 * sizeof(float), size1));
handle_error(cudaMemcpy2D((void*)mat1_d, pitch1_d, (void*)mat1, pitch1 * sizeof(float), size2 * sizeof(float), size1, cudaMemcpyHostToDevice));
handle_error(cudaMemcpy2D((void*)mat2_d, pitch2_d, (void*)mat2, pitch2 * sizeof(float), size3 * sizeof(float), size2, cudaMemcpyHostToDevice));
handle_error(cudaMemset2D((void*)mat3_d, pitch3_d, 0, size3 * sizeof(float), size1));
size_t bv = (size1+threadsPerBlock-1) / threadsPerBlock;
size_t bh = (size3+threadsPerBlock-1) / threadsPerBlock;
dim3 block(threadsPerBlock, threadsPerBlock);
dim3 grid(bh, bv);
handle_error(cudaEventRecord(start, 0));
matmul_kernel_ver3<<<grid, block>>>(
mat1_d, mat2_d, mat3_d,
pitch1_d/sizeof(float), pitch2_d/sizeof(float), pitch3_d/sizeof(float),
size1, size2, size3);
handle_error(cudaEventRecord(stop, 0));
handle_error(cudaEventSynchronize(stop));
handle_error(cudaMemcpy2D((void*)mat3, pitch2 * sizeof(float), (void*)mat3_d, pitch3_d, size3 * sizeof(float), size1, cudaMemcpyDeviceToHost));
cudaFree(mat1_d);
cudaFree(mat2_d);
cudaFree(mat3_d);
size_t ops = 2 * size1 * size2 * size3;
float elapsed;
handle_error(cudaEventElapsedTime(&elapsed, start, stop));
fprintf(stderr, "Time: %fms, %f GFLOPS\n", elapsed, ops / elapsed / 1e6);
}
__host__ void matmul_gpu_ver4(const float* const mat1, const float * const mat2, float * const mat3,
const size_t size1, const size_t size2, const size_t size3,
const size_t pitch1, const size_t pitch2) {
cudaEvent_t start, stop;
handle_error(cudaEventCreate(&start));
handle_error(cudaEventCreate(&stop));
float *mat1_d;
float *mat2_d;
float *mat3_d;
size_t pitch1_d, pitch2_d, pitch3_d;
handle_error(cudaMallocPitch((void**)&mat1_d, &pitch1_d, size2 * sizeof(float), size1));
handle_error(cudaMallocPitch((void**)&mat2_d, &pitch2_d, size3 * sizeof(float), size2));
handle_error(cudaMallocPitch((void**)&mat3_d, &pitch3_d, size3 * sizeof(float), size1));
handle_error(cudaMemcpy2D((void*)mat1_d, pitch1_d, (void*)mat1, pitch1 * sizeof(float), size2 * sizeof(float), size1, cudaMemcpyHostToDevice));
handle_error(cudaMemcpy2D((void*)mat2_d, pitch2_d, (void*)mat2, pitch2 * sizeof(float), size3 * sizeof(float), size2, cudaMemcpyHostToDevice));
handle_error(cudaMemset2D((void*)mat3_d, pitch3_d, 0, size3 * sizeof(float), size1));
size_t bv = (size1+blockSize-1) / blockSize;
size_t bh = (size3+blockSize-1) / blockSize;
dim3 block(threadsPerBlock, threadsPerBlock);
dim3 grid(bh, bv);
handle_error(cudaEventRecord(start, 0));
matmul_kernel_ver4<<<grid, block>>>(
mat1_d, mat2_d, mat3_d,
pitch1_d/sizeof(float), pitch2_d/sizeof(float), pitch3_d/sizeof(float),
size1, size2, size3);
handle_error(cudaEventRecord(stop, 0));
handle_error(cudaEventSynchronize(stop));
handle_error(cudaMemcpy2D((void*)mat3, pitch2 * sizeof(float), (void*)mat3_d, pitch3_d, size3 * sizeof(float), size1, cudaMemcpyDeviceToHost));
cudaFree(mat1_d);
cudaFree(mat2_d);
cudaFree(mat3_d);
size_t ops = 2 * size1 * size2 * size3;
float elapsed;
handle_error(cudaEventElapsedTime(&elapsed, start, stop));
fprintf(stderr, "Time: %fms, %f GFLOPS\n", elapsed, ops / elapsed / 1e6);
}
|
061a0d79a5ddb2dda76c5b007e8bb215c92344de.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from magmablas/zset_pointer.cu, normal z -> d, Sun Nov 20 20:20:31 2016
@author Azzam Haidar
@author Tingxing Dong
*/
#include "magma_internal.h"
/******************************************************************************/
__global__ void kernel_dset_pointer(
double **output_array,
double *input,
magma_int_t lda,
magma_int_t row, magma_int_t column,
magma_int_t batch_offset)
{
output_array[blockIdx.x] = input + blockIdx.x * batch_offset + row + column * lda;
//printf("==> kernel_set_pointer input_array %p output_array %p \n",input+ blockIdx.x * batch_offset,output_array[blockIdx.x]);
}
/******************************************************************************/
// set pointer with variable size matrices, batch_offset becomes an array with accumulated sum of sizes
// batch_offset[i] = sum( matrix_size[0], matrix_size[1], ..., matrix_size[i-1])
// batch_offset is usually the output of a prefix sum operation
__global__ void kernel_dset_pointer_var(
double **output_array,
double *input,
magma_int_t *lda,
magma_int_t row, magma_int_t column,
magma_int_t *batch_offset)
{
output_array[blockIdx.x] = input + batch_offset[blockIdx.x] + row + column * lda[blockIdx.x];
//printf("==> kernel_set_pointer input_array %p output_array %p \n",input+ blockIdx.x * batch_offset,output_array[blockIdx.x]);
}
/***************************************************************************//**
Purpose
-------
convert consecutive stored variable to array stored
for example the size of A is N*batchCount; N is the size of A(batch_offset)
change into dA_array[0] dA_array[1],... dA_array[batchCount-1], where the size of each dA_array[i] is N
Arguments
----------
@param[out]
output_array Array of pointers, dimension (batchCount).
Each is a DOUBLE PRECISION array A of DIMENSION ( lda, column ) on the GPU
@param[in]
input DOUBLE PRECISION array of dimension ( LDDA, N*batchCount ) on the GPU.
@param[in]
lda INTEGER
LDA specifies the leading dimension of A.
@param[in]
row INTEGER
On entry, row specifies the number of rows of the matrix A.
@param[in]
column INTEGER
On entry, column specifies the number of columns of the matrix A
@param[in]
batch_offset INTEGER
The starting pointer of each matrix A in input arrray
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
*******************************************************************************/
extern "C"
void magma_dset_pointer(
double **output_array,
double *input,
magma_int_t lda,
magma_int_t row, magma_int_t column,
magma_int_t batch_offset,
magma_int_t batchCount,
magma_queue_t queue)
{
hipLaunchKernelGGL(( kernel_dset_pointer)
, dim3(batchCount), dim3(1), 0, queue->cuda_stream() ,
output_array, input, lda, row, column, batch_offset);
}
/******************************************************************************/
extern "C"
void magma_dset_pointer_var_cc(
double **output_array,
double *input,
magma_int_t *lda,
magma_int_t row, magma_int_t column,
magma_int_t *batch_offset,
magma_int_t batchCount,
magma_queue_t queue)
{
hipLaunchKernelGGL(( kernel_dset_pointer_var)
, dim3(batchCount), dim3(1), 0, queue->cuda_stream() ,
output_array, input, lda, row, column, batch_offset);
}
/******************************************************************************/
__global__ void zdisplace_pointers_kernel(double **output_array,
double **input_array, magma_int_t lda,
magma_int_t row, magma_int_t column)
{
double *inpt = input_array[blockIdx.x];
output_array[blockIdx.x] = &inpt[row + column * lda];
}
/******************************************************************************/
/* Variable pointer displacement kernels */
/******************************************************************************/
// variable leading dimension, constant row and column offsets
__global__ void zdisplace_pointers_var_cc_kernel(double **output_array,
double **input_array, magma_int_t* lda,
magma_int_t row, magma_int_t column)
{
const int bid = blockIdx.x;
double *inpt = input_array[blockIdx.x];
if(inpt == NULL || row < 0 || column < 0)
output_array[bid] = NULL;
else
output_array[bid] = &inpt[row + column * lda[blockIdx.x] ];
}
/******************************************************************************/
// variable leading dimension, constant row offset and variable column offsets
__global__ void zdisplace_pointers_var_cv_kernel(double **output_array,
double **input_array, magma_int_t* lda,
magma_int_t row, magma_int_t *column)
{
const int bid = blockIdx.x;
double *inpt = input_array[blockIdx.x];
if(inpt == NULL || row < 0 || column[bid] < 0)
output_array[bid] = NULL;
else
output_array[bid] = &inpt[row + column[bid] * lda[blockIdx.x] ];
}
/******************************************************************************/
// variable leading dimension, variable row offset and constant column offsets
__global__ void zdisplace_pointers_var_vc_kernel(double **output_array,
double **input_array, magma_int_t* lda,
magma_int_t *row, magma_int_t column)
{
const int bid = blockIdx.x;
double *inpt = input_array[blockIdx.x];
if(inpt == NULL || row[bid] < 0 || column < 0)
output_array[bid] = NULL;
else
output_array[bid] = &inpt[row[bid] + column * lda[blockIdx.x] ];
}
/******************************************************************************/
// variable leading dimension, variable row and column offsets
__global__ void zdisplace_pointers_var_vv_kernel(double **output_array,
double **input_array, magma_int_t* lda,
magma_int_t* row, magma_int_t* column)
{
const int bid = blockIdx.x;
double *inpt = input_array[bid];
if(inpt == NULL || row[bid] < 0 || column[bid] < 0)
output_array[bid] = NULL;
else
output_array[bid] = &inpt[ row[bid] + column[bid] * lda[bid] ];
}
/***************************************************************************//**
Purpose
-------
compute the offset for all the matrices and save the displacment of the new pointer on output_array.
input_array contains the pointers to the initial position.
output_array[i] = input_array[i] + row + lda * column;
Arguments
----------
@param[out]
output_array Array of pointers, dimension (batchCount).
Each pointer points to the new displacement of array A in input_array on the GPU
@param[in]
input_array Array of pointers, dimension (batchCount).
Each is a DOUBLE PRECISION array A of DIMENSION ( lda, column ) on the GPU
@param[in]
lda INTEGER
LDA specifies the leading dimension of A.
@param[in]
row INTEGER
On entry, row specifies the number of rows of the matrix A.
@param[in]
column INTEGER
On entry, column specifies the number of columns of the matrix A
@param[in]
batch_offset INTEGER
The starting pointer of each matrix A in input arrray
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
*******************************************************************************/
extern "C"
void magma_ddisplace_pointers(double **output_array,
double **input_array, magma_int_t lda,
magma_int_t row, magma_int_t column,
magma_int_t batchCount, magma_queue_t queue)
{
hipLaunchKernelGGL(( zdisplace_pointers_kernel)
, dim3(batchCount), dim3(1), 0, queue->cuda_stream() ,
output_array, input_array, lda, row, column);
}
/******************************************************************************/
extern "C"
void magma_ddisplace_pointers_var_cc(double **output_array,
double **input_array, magma_int_t* lda,
magma_int_t row, magma_int_t column,
magma_int_t batchCount, magma_queue_t queue)
{
/*
compute the offset for all the matrices and save the displacment of the new pointer on output_array.
input_array contains the pointers to the initial position.
output_array[i] = input_array[i] + row + lda[i] * column;
*/
hipLaunchKernelGGL(( zdisplace_pointers_var_cc_kernel), dim3(batchCount), dim3(1), 0, queue->cuda_stream(), output_array, input_array, lda, row, column);
}
/******************************************************************************/
extern "C"
void magma_ddisplace_pointers_var_cv(double **output_array,
double **input_array, magma_int_t* lda,
magma_int_t row, magma_int_t* column,
magma_int_t batchCount, magma_queue_t queue)
{
/*
compute the offset for all the matrices and save the displacment of the new pointer on output_array.
input_array contains the pointers to the initial position.
output_array[i] = input_array[i] + row + lda[i] * column[i];
*/
hipLaunchKernelGGL(( zdisplace_pointers_var_cv_kernel), dim3(batchCount), dim3(1), 0, queue->cuda_stream(), output_array, input_array, lda, row, column);
}
/******************************************************************************/
extern "C"
void magma_ddisplace_pointers_var_vc(double **output_array,
double **input_array, magma_int_t* lda,
magma_int_t *row, magma_int_t column,
magma_int_t batchCount, magma_queue_t queue)
{
/*
compute the offset for all the matrices and save the displacment of the new pointer on output_array.
input_array contains the pointers to the initial position.
output_array[i] = input_array[i] + row[i] + lda[i] * column;
*/
hipLaunchKernelGGL(( zdisplace_pointers_var_vc_kernel), dim3(batchCount), dim3(1), 0, queue->cuda_stream(), output_array, input_array, lda, row, column);
}
/******************************************************************************/
extern "C"
void magma_ddisplace_pointers_var_vv(double **output_array,
double **input_array, magma_int_t* lda,
magma_int_t* row, magma_int_t* column,
magma_int_t batchCount, magma_queue_t queue)
{
/*
compute the offset for all the matrices and save the displacment of the new pointer on output_array.
input_array contains the pointers to the initial position.
output_array[i] = input_array[i] + row[i] + lda[i] * column[i];
*/
hipLaunchKernelGGL(( zdisplace_pointers_var_vv_kernel), dim3(batchCount), dim3(1), 0, queue->cuda_stream(), output_array, input_array, lda, row, column);
}
| 061a0d79a5ddb2dda76c5b007e8bb215c92344de.cu | /*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from magmablas/zset_pointer.cu, normal z -> d, Sun Nov 20 20:20:31 2016
@author Azzam Haidar
@author Tingxing Dong
*/
#include "magma_internal.h"
/******************************************************************************/
__global__ void kernel_dset_pointer(
double **output_array,
double *input,
magma_int_t lda,
magma_int_t row, magma_int_t column,
magma_int_t batch_offset)
{
output_array[blockIdx.x] = input + blockIdx.x * batch_offset + row + column * lda;
//printf("==> kernel_set_pointer input_array %p output_array %p \n",input+ blockIdx.x * batch_offset,output_array[blockIdx.x]);
}
/******************************************************************************/
// set pointer with variable size matrices, batch_offset becomes an array with accumulated sum of sizes
// batch_offset[i] = sum( matrix_size[0], matrix_size[1], ..., matrix_size[i-1])
// batch_offset is usually the output of a prefix sum operation
__global__ void kernel_dset_pointer_var(
double **output_array,
double *input,
magma_int_t *lda,
magma_int_t row, magma_int_t column,
magma_int_t *batch_offset)
{
output_array[blockIdx.x] = input + batch_offset[blockIdx.x] + row + column * lda[blockIdx.x];
//printf("==> kernel_set_pointer input_array %p output_array %p \n",input+ blockIdx.x * batch_offset,output_array[blockIdx.x]);
}
/***************************************************************************//**
Purpose
-------
convert consecutive stored variable to array stored
for example the size of A is N*batchCount; N is the size of A(batch_offset)
change into dA_array[0] dA_array[1],... dA_array[batchCount-1], where the size of each dA_array[i] is N
Arguments
----------
@param[out]
output_array Array of pointers, dimension (batchCount).
Each is a DOUBLE PRECISION array A of DIMENSION ( lda, column ) on the GPU
@param[in]
input DOUBLE PRECISION array of dimension ( LDDA, N*batchCount ) on the GPU.
@param[in]
lda INTEGER
LDA specifies the leading dimension of A.
@param[in]
row INTEGER
On entry, row specifies the number of rows of the matrix A.
@param[in]
column INTEGER
On entry, column specifies the number of columns of the matrix A
@param[in]
batch_offset INTEGER
The starting pointer of each matrix A in input arrray
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
*******************************************************************************/
extern "C"
void magma_dset_pointer(
    double **output_array,
    double *input,
    magma_int_t lda,
    magma_int_t row, magma_int_t column,
    magma_int_t batch_offset,
    magma_int_t batchCount,
    magma_queue_t queue)
{
    // One single-thread block per matrix is enough: each block writes a
    // single pointer into output_array on the queue's stream.
    dim3 grid( batchCount );
    kernel_dset_pointer
        <<< grid, 1, 0, queue->cuda_stream() >>>
        (output_array, input, lda, row, column, batch_offset);
}
/******************************************************************************/
extern "C"
void magma_dset_pointer_var_cc(
    double **output_array,
    double *input,
    magma_int_t *lda,
    magma_int_t row, magma_int_t column,
    magma_int_t *batch_offset,
    magma_int_t batchCount,
    magma_queue_t queue)
{
    // Variable-size batch: offsets and leading dimensions come from device
    // arrays; one single-thread block per matrix writes one pointer.
    dim3 grid( batchCount );
    kernel_dset_pointer_var
        <<< grid, 1, 0, queue->cuda_stream() >>>
        (output_array, input, lda, row, column, batch_offset);
}
/******************************************************************************/
// Fixed lda and fixed offsets: displace every batch pointer by (row, column).
__global__ void zdisplace_pointers_kernel(double **output_array,
               double **input_array, magma_int_t lda,
               magma_int_t row, magma_int_t column)
{
    const int bid = blockIdx.x;
    output_array[bid] = input_array[bid] + row + column * lda;
}
/******************************************************************************/
/* Variable pointer displacement kernels */
/******************************************************************************/
// variable leading dimension, constant row and column offsets
// Per-matrix lda; constant row/column offsets. A NULL input pointer or a
// negative offset propagates as NULL so consumers can skip the entry.
__global__ void zdisplace_pointers_var_cc_kernel(double **output_array,
               double **input_array, magma_int_t* lda,
               magma_int_t row, magma_int_t column)
{
    const int bid = blockIdx.x;
    double *src = input_array[bid];
    if (src == NULL || row < 0 || column < 0) {
        output_array[bid] = NULL;
    }
    else {
        output_array[bid] = src + row + column * lda[bid];
    }
}
/******************************************************************************/
// variable leading dimension, constant row offset and variable column offsets
// Per-matrix lda; constant row offset, per-matrix column offset. NULL input
// or negative offsets propagate as NULL.
__global__ void zdisplace_pointers_var_cv_kernel(double **output_array,
               double **input_array, magma_int_t* lda,
               magma_int_t row, magma_int_t *column)
{
    const int bid = blockIdx.x;
    double *src = input_array[bid];
    if (src == NULL || row < 0 || column[bid] < 0) {
        output_array[bid] = NULL;
    }
    else {
        output_array[bid] = src + row + column[bid] * lda[bid];
    }
}
/******************************************************************************/
// variable leading dimension, variable row offset and constant column offsets
// Per-matrix lda; per-matrix row offset, constant column offset. NULL input
// or negative offsets propagate as NULL.
__global__ void zdisplace_pointers_var_vc_kernel(double **output_array,
               double **input_array, magma_int_t* lda,
               magma_int_t *row, magma_int_t column)
{
    const int bid = blockIdx.x;
    double *src = input_array[bid];
    if (src == NULL || row[bid] < 0 || column < 0) {
        output_array[bid] = NULL;
    }
    else {
        output_array[bid] = src + row[bid] + column * lda[bid];
    }
}
/******************************************************************************/
// variable leading dimension, variable row and column offsets
// Per-matrix lda, row, and column offsets. NULL input or negative offsets
// propagate as NULL.
__global__ void zdisplace_pointers_var_vv_kernel(double **output_array,
               double **input_array, magma_int_t* lda,
               magma_int_t* row, magma_int_t* column)
{
    const int bid = blockIdx.x;
    double *src = input_array[bid];
    if (src == NULL || row[bid] < 0 || column[bid] < 0) {
        output_array[bid] = NULL;
    }
    else {
        output_array[bid] = src + row[bid] + column[bid] * lda[bid];
    }
}
/***************************************************************************//**
Purpose
-------
compute the offset for all the matrices and save the displacment of the new pointer on output_array.
input_array contains the pointers to the initial position.
output_array[i] = input_array[i] + row + lda * column;
Arguments
----------
@param[out]
output_array Array of pointers, dimension (batchCount).
Each pointer points to the new displacement of array A in input_array on the GPU
@param[in]
input_array Array of pointers, dimension (batchCount).
Each is a DOUBLE PRECISION array A of DIMENSION ( lda, column ) on the GPU
@param[in]
lda INTEGER
LDA specifies the leading dimension of A.
@param[in]
row INTEGER
On entry, row specifies the number of rows of the matrix A.
@param[in]
column INTEGER
On entry, column specifies the number of columns of the matrix A
@param[in]
batch_offset INTEGER
The starting pointer of each matrix A in input arrray
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
*******************************************************************************/
extern "C"
void magma_ddisplace_pointers(double **output_array,
               double **input_array, magma_int_t lda,
               magma_int_t row, magma_int_t column,
               magma_int_t batchCount, magma_queue_t queue)
{
    // output_array[i] = input_array[i] + row + lda * column
    // One single-thread block per matrix on the queue's stream.
    dim3 grid( batchCount );
    zdisplace_pointers_kernel
        <<< grid, 1, 0, queue->cuda_stream() >>>
        (output_array, input_array, lda, row, column);
}
/******************************************************************************/
extern "C"
void magma_ddisplace_pointers_var_cc(double **output_array,
               double **input_array, magma_int_t* lda,
               magma_int_t row, magma_int_t column,
               magma_int_t batchCount, magma_queue_t queue)
{
    // Constant (row, column) displacement with per-matrix leading dimension:
    //     output_array[i] = input_array[i] + row + lda[i] * column
    dim3 grid( batchCount );
    zdisplace_pointers_var_cc_kernel<<<grid, 1, 0, queue->cuda_stream()>>>(output_array, input_array, lda, row, column);
}
/******************************************************************************/
extern "C"
void magma_ddisplace_pointers_var_cv(double **output_array,
               double **input_array, magma_int_t* lda,
               magma_int_t row, magma_int_t* column,
               magma_int_t batchCount, magma_queue_t queue)
{
    // Constant row, per-matrix column displacement:
    //     output_array[i] = input_array[i] + row + lda[i] * column[i]
    dim3 grid( batchCount );
    zdisplace_pointers_var_cv_kernel<<<grid, 1, 0, queue->cuda_stream()>>>(output_array, input_array, lda, row, column);
}
/******************************************************************************/
extern "C"
void magma_ddisplace_pointers_var_vc(double **output_array,
               double **input_array, magma_int_t* lda,
               magma_int_t *row, magma_int_t column,
               magma_int_t batchCount, magma_queue_t queue)
{
    // Per-matrix row, constant column displacement:
    //     output_array[i] = input_array[i] + row[i] + lda[i] * column
    dim3 grid( batchCount );
    zdisplace_pointers_var_vc_kernel<<<grid, 1, 0, queue->cuda_stream()>>>(output_array, input_array, lda, row, column);
}
/******************************************************************************/
extern "C"
void magma_ddisplace_pointers_var_vv(double **output_array,
               double **input_array, magma_int_t* lda,
               magma_int_t* row, magma_int_t* column,
               magma_int_t batchCount, magma_queue_t queue)
{
    // Fully variable displacement:
    //     output_array[i] = input_array[i] + row[i] + lda[i] * column[i]
    dim3 grid( batchCount );
    zdisplace_pointers_var_vv_kernel<<<grid, 1, 0, queue->cuda_stream()>>>(output_array, input_array, lda, row, column);
}
|
5542e66b2622983ba7b4c3e47dfa60e268890b01.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
// Wall-clock time in seconds (microsecond resolution) via gettimeofday.
double cpuSecond() {
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec + 1.e-6 * (double)now.tv_usec;
}
// Ceiling division: smallest number of blocks of `thread_per_block` threads
// needed to cover `total_thread` threads.
int block_size(int thread_per_block, int total_thread){
    const int rounded_up = total_thread + thread_per_block - 1;
    return rounded_up / thread_per_block;
}
// In-place inverse-CDF transform: turn uniform samples u[i] in (0,1] into
// exponential samples with rate thetaSq[i] (u[i] <- -log(u[i]) / thetaSq[i]).
__global__ void trans_unif2exp(int n, float *u, float *thetaSq) {
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i >= n) return;   // tail guard: grid may overshoot n
    u[i] = (-1.0f/thetaSq[i]) * logf(u[i]);
}
// Read a matrix from a text file laid out row-by-row ("rows cols" header,
// then entries) and store it column-major. Caller frees the returned buffer.
float *read_mat_col_maj(const char *a, int *row, int *col){
    std::ifstream in(a);
    in >> *row >> *col;
    float *mdata = (float *)malloc(sizeof(float) * (*row) * (*col));
    // File order is row-major; destination index j*rows + i is column-major.
    for (int r = 0; r < *row; r++) {
        for (int c = 0; c < *col; c++) {
            in >> mdata[c * (*row) + r];
        }
    }
    in.close();
    return mdata;
}
// Print a column-major matrix row by row (tab-separated). Always returns 0.
int print_mat_col_maj(float *a, int row, int col){
    for (int r = 0; r < row; r++) {
        for (int c = 0; c < col; c++)
            std::cout << a[c * row + r] << "\t";
        std::cout << std::endl;
    }
    return 0;
}
// Read a vector from a text file: first the length, then the entries.
// Caller owns the returned malloc'd buffer.
float *read_vec(const char *a, int *n){
    std::ifstream in(a);
    in >> *n;
    float *data = (float *)malloc(sizeof(float) * (*n));
    for (int idx = 0; idx < *n; idx++)
        in >> data[idx];
    in.close();
    return data;
}
// Print each vector entry on its own line. Always returns 0.
int print_vec(float *a, int n){
    for (int idx = 0; idx < n; ++idx)
        std::cout << a[idx] << std::endl;
    return 0;
}
// Map a hipBLAS status code to a printable name for error reporting.
// Unrecognized codes map to "<unknown>".
const char* cublasGetErrorString(hipblasStatus_t status)
{
    switch(status)
    {
        case HIPBLAS_STATUS_SUCCESS:          return "HIPBLAS_STATUS_SUCCESS";
        case HIPBLAS_STATUS_NOT_INITIALIZED:  return "HIPBLAS_STATUS_NOT_INITIALIZED";
        case HIPBLAS_STATUS_ALLOC_FAILED:     return "HIPBLAS_STATUS_ALLOC_FAILED";
        case HIPBLAS_STATUS_INVALID_VALUE:    return "HIPBLAS_STATUS_INVALID_VALUE";
        case HIPBLAS_STATUS_ARCH_MISMATCH:    return "HIPBLAS_STATUS_ARCH_MISMATCH";
        case HIPBLAS_STATUS_MAPPING_ERROR:    return "HIPBLAS_STATUS_MAPPING_ERROR";
        case HIPBLAS_STATUS_EXECUTION_FAILED: return "HIPBLAS_STATUS_EXECUTION_FAILED";
        case HIPBLAS_STATUS_INTERNAL_ERROR:   return "HIPBLAS_STATUS_INTERNAL_ERROR";
        default:                              return "<unknown>";
    }
}
// Map a hipRAND status code to a printable name for error reporting.
// Unrecognized codes map to "<unknown>".
const char *curandGetErrorString(hiprandStatus_t error)
{
    switch (error)
    {
        case HIPRAND_STATUS_SUCCESS:                   return "HIPRAND_STATUS_SUCCESS";
        case HIPRAND_STATUS_VERSION_MISMATCH:          return "HIPRAND_STATUS_VERSION_MISMATCH";
        case HIPRAND_STATUS_NOT_INITIALIZED:           return "HIPRAND_STATUS_NOT_INITIALIZED";
        case HIPRAND_STATUS_ALLOCATION_FAILED:         return "HIPRAND_STATUS_ALLOCATION_FAILED";
        case HIPRAND_STATUS_TYPE_ERROR:                return "HIPRAND_STATUS_TYPE_ERROR";
        case HIPRAND_STATUS_OUT_OF_RANGE:              return "HIPRAND_STATUS_OUT_OF_RANGE";
        case HIPRAND_STATUS_LENGTH_NOT_MULTIPLE:       return "HIPRAND_STATUS_LENGTH_NOT_MULTIPLE";
        case HIPRAND_STATUS_DOUBLE_PRECISION_REQUIRED: return "HIPRAND_STATUS_DOUBLE_PRECISION_REQUIRED";
        case HIPRAND_STATUS_LAUNCH_FAILURE:            return "HIPRAND_STATUS_LAUNCH_FAILURE";
        case HIPRAND_STATUS_PREEXISTING_FAILURE:       return "HIPRAND_STATUS_PREEXISTING_FAILURE";
        case HIPRAND_STATUS_INITIALIZATION_FAILED:     return "HIPRAND_STATUS_INITIALIZATION_FAILED";
        case HIPRAND_STATUS_ARCH_MISMATCH:             return "HIPRAND_STATUS_ARCH_MISMATCH";
        case HIPRAND_STATUS_INTERNAL_ERROR:            return "HIPRAND_STATUS_INTERNAL_ERROR";
        default:                                       return "<unknown>";
    }
}
// Map a cuSOLVER status code to a printable name for error reporting.
// Unrecognized codes map to "<unknown>".
const char *cusolverGetErrorString(cusolverStatus_t error)
{
    switch (error)
    {
        case CUSOLVER_STATUS_SUCCESS:                   return "CUSOLVER_STATUS_SUCCESS";
        case CUSOLVER_STATUS_NOT_INITIALIZED:           return "CUSOLVER_STATUS_NOT_INITIALIZED";
        case CUSOLVER_STATUS_ALLOC_FAILED:              return "CUSOLVER_STATUS_ALLOC_FAILED";
        case CUSOLVER_STATUS_INVALID_VALUE:             return "CUSOLVER_STATUS_INVALID_VALUE";
        case CUSOLVER_STATUS_ARCH_MISMATCH:             return "CUSOLVER_STATUS_ARCH_MISMATCH";
        case CUSOLVER_STATUS_EXECUTION_FAILED:          return "CUSOLVER_STATUS_EXECUTION_FAILED";
        case CUSOLVER_STATUS_INTERNAL_ERROR:            return "CUSOLVER_STATUS_INTERNAL_ERROR";
        case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED: return "CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
        default:                                        return "<unknown>";
    }
}
// Debug helper: copy an n-by-p device matrix back to the host and print it
// (column-major). Always returns 0.
int d_print_mat(const float *a,int n,int p){
    float *host_copy = (float *)malloc(n*p*sizeof(float));
    CUDA_CALL(hipMemcpy(host_copy, a, n*p*sizeof(float), hipMemcpyDeviceToHost));
    print_mat_col_maj(host_copy, n, p);
    free(host_copy);
    return 0;
}
// Divide each of the n elements of d_vec by the scalar stored at *d_scale.
// NOTE: *d_scale is re-read from global memory by every thread; caching it
// (e.g. in shared memory) is a possible optimization.
__global__ void shrink_vector(float *d_vec, int n, float * d_scale){
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= n) return;   // tail guard: grid may be larger than n
    d_vec[tid] /= (*d_scale);
}
| 5542e66b2622983ba7b4c3e47dfa60e268890b01.cu | #include "common.h"
// Current wall-clock time in seconds with microsecond resolution.
double cpuSecond() {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    double seconds = (double)tv.tv_sec;
    return seconds + (double)tv.tv_usec * 1.e-6;
}
// Number of blocks needed to cover total_thread threads (ceiling division).
int block_size(int thread_per_block, int total_thread){
    const int padded = total_thread + thread_per_block - 1;
    return padded / thread_per_block;
}
// In-place inverse-CDF transform of uniform samples u[i] in (0,1] into
// exponential samples with rate thetaSq[i]: u[i] <- -log(u[i]) / thetaSq[i].
__global__ void trans_unif2exp(int n, float *u, float *thetaSq) {
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i >= n) return;   // ignore threads past the end of the array
    u[i] = (-1.0f/thetaSq[i]) * logf(u[i]);
}
// Load a matrix from a text file ("rows cols" header followed by row-major
// entries) into a freshly malloc'd column-major buffer owned by the caller.
float *read_mat_col_maj(const char *a, int *row, int *col){
    std::ifstream in(a);
    in >> *row >> *col;
    float *mdata = (float *)malloc(sizeof(float) * (*row) * (*col));
    // Transpose on the fly: file is row-major, buffer index is j*rows + i.
    for (int i = 0; i < *row; i++)
        for (int j = 0; j < *col; j++)
            in >> mdata[j * (*row) + i];
    in.close();
    return mdata;
}
// Dump a column-major matrix to stdout, tab-separated, one row per line.
// Always returns 0.
int print_mat_col_maj(float *a, int row, int col){
    for (int i = 0; i < row; i++) {
        for (int j = 0; j < col; j++)
            std::cout << a[j * row + i] << "\t";
        std::cout << std::endl;
    }
    return 0;
}
// Load a vector from a text file (length first, then entries) into a
// malloc'd buffer owned by the caller; the length is written to *n.
float *read_vec(const char *a, int *n){
    std::ifstream in(a);
    in >> *n;
    float *vec = (float *)malloc(sizeof(float) * (*n));
    for (int i = 0; i < *n; ++i)
        in >> vec[i];
    in.close();
    return vec;
}
// Write each element of the vector to stdout on its own line; returns 0.
int print_vec(float *a, int n){
    for (int i = 0; i < n; i++)
        std::cout << a[i] << std::endl;
    return 0;
}
// Translate a cuBLAS status code into its symbolic name for diagnostics;
// anything unrecognized becomes "<unknown>".
const char* cublasGetErrorString(cublasStatus_t status)
{
    switch(status)
    {
        case CUBLAS_STATUS_SUCCESS:          return "CUBLAS_STATUS_SUCCESS";
        case CUBLAS_STATUS_NOT_INITIALIZED:  return "CUBLAS_STATUS_NOT_INITIALIZED";
        case CUBLAS_STATUS_ALLOC_FAILED:     return "CUBLAS_STATUS_ALLOC_FAILED";
        case CUBLAS_STATUS_INVALID_VALUE:    return "CUBLAS_STATUS_INVALID_VALUE";
        case CUBLAS_STATUS_ARCH_MISMATCH:    return "CUBLAS_STATUS_ARCH_MISMATCH";
        case CUBLAS_STATUS_MAPPING_ERROR:    return "CUBLAS_STATUS_MAPPING_ERROR";
        case CUBLAS_STATUS_EXECUTION_FAILED: return "CUBLAS_STATUS_EXECUTION_FAILED";
        case CUBLAS_STATUS_INTERNAL_ERROR:   return "CUBLAS_STATUS_INTERNAL_ERROR";
        default:                             return "<unknown>";
    }
}
// Translate a cuRAND status code into its symbolic name for diagnostics;
// anything unrecognized becomes "<unknown>".
const char *curandGetErrorString(curandStatus_t error)
{
    switch (error)
    {
        case CURAND_STATUS_SUCCESS:                   return "CURAND_STATUS_SUCCESS";
        case CURAND_STATUS_VERSION_MISMATCH:          return "CURAND_STATUS_VERSION_MISMATCH";
        case CURAND_STATUS_NOT_INITIALIZED:           return "CURAND_STATUS_NOT_INITIALIZED";
        case CURAND_STATUS_ALLOCATION_FAILED:         return "CURAND_STATUS_ALLOCATION_FAILED";
        case CURAND_STATUS_TYPE_ERROR:                return "CURAND_STATUS_TYPE_ERROR";
        case CURAND_STATUS_OUT_OF_RANGE:              return "CURAND_STATUS_OUT_OF_RANGE";
        case CURAND_STATUS_LENGTH_NOT_MULTIPLE:       return "CURAND_STATUS_LENGTH_NOT_MULTIPLE";
        case CURAND_STATUS_DOUBLE_PRECISION_REQUIRED: return "CURAND_STATUS_DOUBLE_PRECISION_REQUIRED";
        case CURAND_STATUS_LAUNCH_FAILURE:            return "CURAND_STATUS_LAUNCH_FAILURE";
        case CURAND_STATUS_PREEXISTING_FAILURE:       return "CURAND_STATUS_PREEXISTING_FAILURE";
        case CURAND_STATUS_INITIALIZATION_FAILED:     return "CURAND_STATUS_INITIALIZATION_FAILED";
        case CURAND_STATUS_ARCH_MISMATCH:             return "CURAND_STATUS_ARCH_MISMATCH";
        case CURAND_STATUS_INTERNAL_ERROR:            return "CURAND_STATUS_INTERNAL_ERROR";
        default:                                      return "<unknown>";
    }
}
// Translate a cuSOLVER status code into its symbolic name for diagnostics;
// anything unrecognized becomes "<unknown>".
const char *cusolverGetErrorString(cusolverStatus_t error)
{
    switch (error)
    {
        case CUSOLVER_STATUS_SUCCESS:                   return "CUSOLVER_STATUS_SUCCESS";
        case CUSOLVER_STATUS_NOT_INITIALIZED:           return "CUSOLVER_STATUS_NOT_INITIALIZED";
        case CUSOLVER_STATUS_ALLOC_FAILED:              return "CUSOLVER_STATUS_ALLOC_FAILED";
        case CUSOLVER_STATUS_INVALID_VALUE:             return "CUSOLVER_STATUS_INVALID_VALUE";
        case CUSOLVER_STATUS_ARCH_MISMATCH:             return "CUSOLVER_STATUS_ARCH_MISMATCH";
        case CUSOLVER_STATUS_EXECUTION_FAILED:          return "CUSOLVER_STATUS_EXECUTION_FAILED";
        case CUSOLVER_STATUS_INTERNAL_ERROR:            return "CUSOLVER_STATUS_INTERNAL_ERROR";
        case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED: return "CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
        default:                                        return "<unknown>";
    }
}
// Debug helper: mirror an n-by-p device matrix on the host and print it
// (column-major). Always returns 0.
int d_print_mat(const float *a,int n,int p){
    float *mirror = (float *)malloc(n*p*sizeof(float));
    CUDA_CALL(cudaMemcpy(mirror, a, n*p*sizeof(float), cudaMemcpyDeviceToHost));
    print_mat_col_maj(mirror, n, p);
    free(mirror);
    return 0;
}
// Divide every element of the length-n vector d_vec by the scalar *d_scale.
// NOTE: each thread re-reads *d_scale from global memory; a shared-memory
// copy would be a possible optimization.
__global__ void shrink_vector(float *d_vec, int n, float * d_scale){
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= n) return;   // grid may overshoot n
    d_vec[tid] /= (*d_scale);
}
|
32f5fad3ae50d51f7e9f622874150c0356fe79dd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**********************************************************************************
* Numerical Solution for the Cubic-Quintic Nonlinear Schrodinger Equation *
* using second order split step Fourier method. *
* Coded by: Omar Ashour, Texas A&M University at Qatar, February 2015. *
* ********************************************************************************/
#include <stddef.h>
#include "../lib/cu_helpers.h"
#include <hipfft.h>
// Grid Parameters
#define XN xn // Number of x-spatial nodes
#define YN yn // Number of y-spatial nodes
#define ZN zn // Number of z-spatial nodes
#define TN 100 // Number of temporal nodes
#define LX 50.0f // x-spatial domain [-LX,LX)
#define LY 50.0f // y-spatial domain [-LY,LY)
#define LZ 50.0f // z-spatial domain [-LZ,LZ)
#define TT 10.0f // Max time
#define DX (2*LX / XN) // x-spatial step size
#define DY (2*LY / YN) // y-spatial step size
#define DZ (2*LZ / ZN) // z-spatial step size
#define DT (TT / TN) // temporal step size
// Gaussian Parameters
#define A_S (3.0f/sqrt(8.0f))
#define R_S (sqrt(32.0f/9.0f))
#define A 0.6f
#define R (1.0f/(A*sqrt(1.0f-A*A)))
// Index flattening macro
// Flat[x + WIDTH * (y + DEPTH * z)] = Original[x, y, z]
#define ind(i,j,k) ((((i * ZN) * YN) + (j * YN)) + k)
// ____WIDTH____
// |_|_|_|_|_|_|_|H
// |_|_|_|_|_|_|_|E
// Z|_|_|_|_|_|_|_|I
// N|_|_|_|_|_|_|_|G
// |_|_|_|_|_|_|_|H
// |_|_|_|_|_|_|_|T
// \_\_\_\_\_\_\_\D
// \_\_\_\_\_\_\_\E
// Y\_\_\_\_\_\_\_\P
// N\_\_\_\_\_\_\_\T
// \_\_\_\_\_\_\_\H
// XN
// Timing parameters
#define IRVL 10 // Timing interval. Take a reading every N iterations.
// Output files
#define VTK_0 "gpu_ffts_0.vtk"
#define VTK_1 "gpu_ffts_1.vtk"
#define TIME_F argv[2]
// Function prototypes
__global__ void nonlin(hipfftComplex *psi, float dt, int xn, int yn, int zn);
__global__ void lin(hipfftComplex *psi, float *k2, float dt, int xn, int yn, int zn);
__global__ void normalize(hipfftComplex *psi, int size, int xn, int yn, int zn);
// Entry point for the HIP split-step CQNLS solver.
// Usage: <prog> <nodes_per_dimension> <timing_file>
//   argv[1] : number of spatial nodes per dimension (XN = YN = ZN = zn)
//   argv[2] : file the elapsed GPU time in ms is appended to (TIME_F macro)
// BUG FIX: this was declared `int main(void)` while reading argv[1]/argv[2],
// which does not compile; argc/argv are now declared and validated.
int main(int argc, char **argv)
{
	// Validate the command line before argv is dereferenced anywhere
	// (TIME_F expands to argv[2]).
	if (argc < 3)
	{
		fprintf(stderr, "Usage: %s <nodes_per_dimension> <timing_file>\n", argv[0]);
		return 1;
	}

	// Timing events
	hipEvent_t begin_event, end_event;
	hipEventCreate(&begin_event);
	hipEventCreate(&end_event);

	// Grid size comes from the command line (cubic domain)
	const int xn = atoi(argv[1]);
	const int yn = atoi(argv[1]);
	const int zn = atoi(argv[1]);
	printf("XN: %d. DX: %f, DT: %f, dt/dx^2: %f\n", XN, DX, DT, DT/(DX*DX));

	// Allocate host arrays
	float *h_x   = (float*)malloc(sizeof(float) * XN);
	float *h_y   = (float*)malloc(sizeof(float) * YN);
	float *h_z   = (float*)malloc(sizeof(float) * ZN);
	float *h_k2  = (float*)malloc(sizeof(float) * XN * YN * ZN);
	float *h_kx  = (float*)malloc(XN * sizeof(float));
	float *h_ky  = (float*)malloc(YN * sizeof(float));
	float *h_kz  = (float*)malloc(ZN * sizeof(float));
	float *h_max = (float*)calloc(TN+1, sizeof(float));
	hipfftComplex *h_psi = (hipfftComplex*)malloc(
											sizeof(hipfftComplex) * XN * YN * ZN);
	hipfftComplex *h_psi_0 = (hipfftComplex*)malloc(
											sizeof(hipfftComplex) * XN * YN * ZN);

	// Create the 3D complex-to-complex FFT plan
	hipfftHandle plan;
	CUFFT_SAFE_CALL(hipfftPlan3d(&plan, XN, YN, ZN, HIPFFT_C2C));

	// Create wavenumbers in FFT ordering (non-negative, then negative freqs)
	float dkx = 2*M_PI/XN/DX;
	for(int i = XN/2; i >= 0; i--)
		h_kx[XN/2 - i]=(XN/2 - i) * dkx;
	for(int i = XN/2+1; i < XN; i++)
		h_kx[i]=(i - XN) * dkx;
	float dky = 2*M_PI/YN/DY;
	for(int i = YN/2; i >= 0; i--)
		h_ky[YN/2 - i]=(YN/2 - i) * dky;
	for(int i = YN/2+1; i < YN; i++)
		h_ky[i]=(i - YN) * dky;
	float dkz = 2*M_PI/ZN/DZ;
	for(int i = ZN/2; i >= 0; i--)
		h_kz[ZN/2 - i]=(ZN/2 - i) * dkz;
	for(int i = ZN/2+1; i < ZN; i++)
		h_kz[i]=(i - ZN) * dkz;

	// Initialize x, y and z coordinates, centered on the origin
	for(int i = 0; i < XN ; i++)
		h_x[i] = (i-XN/2)*DX;
	for(int i = 0; i < YN ; i++)
		h_y[i] = (i-YN/2)*DY;
	for(int i = 0; i < ZN ; i++)
		h_z[i] = (i-ZN/2)*DZ;

	// Initial condition (Gaussian pulse); precompute k^2 per node as well
	for(int i = 0; i < XN; i++)
		for(int j = 0; j < YN; j++)
			for(int k = 0; k < ZN; k++)
			{
				h_psi[ind(i,j,k)].x = A_S*A*
							exp(-(h_x[i]*h_x[i]+h_y[j]*h_y[j]+h_z[k]*h_z[k])
															/(2*R*R*R_S*R_S));
				h_psi[ind(i,j,k)].y = 0;
				h_psi_0[ind(i,j,k)].x = h_psi[ind(i,j,k)].x;
				h_psi_0[ind(i,j,k)].y = h_psi[ind(i,j,k)].y;
				h_k2[ind(i,j,k)] = h_kx[i]*h_kx[i] + h_ky[j]*h_ky[j] + h_kz[k]*h_kz[k];
			}

	// Allocate device memory and copy the initial state over
	hipfftComplex *d_psi; float *d_k2;
	CUDAR_SAFE_CALL(hipMalloc((void **)&d_psi, sizeof(hipfftComplex)*XN*YN*ZN));
	CUDAR_SAFE_CALL(hipMalloc((void **)&d_k2, sizeof(float)*XN*YN*ZN));
	CUDAR_SAFE_CALL(hipMemcpy(d_psi, h_psi, sizeof(hipfftComplex)*XN*YN*ZN,
							hipMemcpyHostToDevice));
	CUDAR_SAFE_CALL(hipMemcpy(d_k2, h_k2, sizeof(float)*XN*YN*ZN,
							hipMemcpyHostToDevice));

	// Launch configuration; ceiling division so the grid covers the cube
	dim3 threadsPerBlock(8,8,8);
	dim3 blocksPerGrid((XN + 7)/8,(YN+7)/8,(ZN+7)/8);

	// Forward transform
	CUFFT_SAFE_CALL(hipfftExecC2C(plan, d_psi, d_psi, HIPFFT_FORWARD));

	// Timing starts here
	hipEventRecord(begin_event, 0);

	// Time evolution: second-order split-step (half linear, full nonlinear,
	// half linear) per time step
	for (int i = 1; i <= TN; i++)
	{
		// Solve linear part (half step, in k-space)
		hipLaunchKernelGGL(( lin), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_psi, d_k2, DT/2, XN, YN, ZN);
		#if CUDAR_ERROR_CHECKING
		CUDAR_SAFE_CALL(hipPeekAtLastError());
		#endif // CUDAR_ERROR_CHECKING
		// Backward transform into real space
		CUFFT_SAFE_CALL(hipfftExecC2C(plan, d_psi, d_psi, HIPFFT_BACKWARD));
		// Normalize the unnormalized inverse transform
		hipLaunchKernelGGL(( normalize), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_psi, XN*YN*ZN, XN, YN, ZN);
		#if CUDAR_ERROR_CHECKING
		CUDAR_SAFE_CALL(hipPeekAtLastError());
		#endif // CUDAR_ERROR_CHECKING
		// Solve nonlinear part (full step, in real space)
		hipLaunchKernelGGL(( nonlin), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_psi, DT, XN, YN, ZN);
		#if CUDAR_ERROR_CHECKING
		CUDAR_SAFE_CALL(hipPeekAtLastError());
		#endif // CUDAR_ERROR_CHECKING
		// Forward transform back to k-space
		CUFFT_SAFE_CALL(hipfftExecC2C(plan, d_psi, d_psi, HIPFFT_FORWARD));
		// Second linear half step
		hipLaunchKernelGGL(( lin), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_psi, d_k2, DT/2, XN, YN, ZN);
		#if CUDAR_ERROR_CHECKING
		CUDAR_SAFE_CALL(hipPeekAtLastError());
		#endif // CUDAR_ERROR_CHECKING
	}
	float time_value;
	hipEventRecord(end_event, 0);
	hipEventSynchronize(end_event);
	hipEventElapsedTime(&time_value, begin_event, end_event);

	// Append elapsed time (ms) to the timing file
	FILE *fp = fopen(TIME_F, "a");
	fprintf(fp, "%f, ", time_value);
	fclose(fp);

	// Backward transform to retrieve the final state
	CUFFT_SAFE_CALL(hipfftExecC2C(plan, d_psi, d_psi, HIPFFT_BACKWARD));
	// Normalize the transform
	hipLaunchKernelGGL(( normalize), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_psi, XN*YN*ZN, XN, YN, ZN);
	CUDAR_SAFE_CALL(hipPeekAtLastError());

	// Copy results back to the host (comment fixed: previously said "device")
	CUDAR_SAFE_CALL(hipMemcpy(h_psi, d_psi, sizeof(hipfftComplex)*XN*YN*ZN,
							hipMemcpyDeviceToHost));
	// Write VTK output for the final and initial pulses
	vtk_3dcf(h_x, h_y, h_z, h_psi, XN, YN, ZN, VTK_1);
	vtk_3dcf(h_x, h_y, h_z, h_psi_0, XN, YN, ZN, VTK_0);

	// Clean up (BUG FIX: the timing events were previously leaked)
	CUFFT_SAFE_CALL(hipfftDestroy(plan));
	hipEventDestroy(begin_event);
	hipEventDestroy(end_event);
	free(h_x);
	free(h_y);
	free(h_z);
	free(h_k2);
	free(h_kx);
	free(h_ky);
	free(h_kz);
	free(h_psi);
	free(h_psi_0);
	free(h_max);
	CUDAR_SAFE_CALL(hipFree(d_psi));
	CUDAR_SAFE_CALL(hipFree(d_k2));
	return 0;
}
// Pointwise nonlinear phase rotation: psi *= exp(i*(|psi|^2 - |psi|^4)*dt).
__global__ void nonlin(hipfftComplex *psi, float dt, int xn, int yn, int zn)
{
	int i = threadIdx.x + blockIdx.x * blockDim.x;
	int j = threadIdx.y + blockIdx.y * blockDim.y;
	int k = threadIdx.z + blockIdx.z * blockDim.z;

	// Avoid first and last point (boundary conditions) (needs fixing)
	// if (i >= xn - 1 || j >= yn-1 || || k >= zn-1 || i == 0 || j == 0 || k == 0) return;
	if (i >= xn || j >= yn || k >= zn) return;

	const int idx = ind(i,j,k);
	float psi2 = cuCabsf(psi[idx])*cuCabsf(psi[idx]);
	float non = psi2 - psi2*psi2;
	psi[idx] = cuCmulf(psi[idx], make_cuComplex(cos(non*dt), sin(non*dt)));
}
// Linear (dispersive) step in k-space: psi *= exp(-i * k^2 * dt).
__global__ void lin(hipfftComplex *psi, float *k2, float dt, int xn, int yn, int zn)
{
	int i = threadIdx.x + blockIdx.x * blockDim.x;
	int j = threadIdx.y + blockIdx.y * blockDim.y;
	int k = threadIdx.z + blockIdx.z * blockDim.z;

	// Avoid first and last point (boundary conditions) (needs fixing)
	// if (i >= xn - 1 || j >= yn-1 || || k >= zn-1 || i == 0 || j == 0 || k == 0) return;
	if (i >= xn || j >= yn || k >= zn) return;

	const int idx = ind(i,j,k);
	psi[idx] = cuCmulf(psi[idx],
				make_cuComplex(cos(k2[idx]*dt), -sin(k2[idx]*dt)));
}
// Scale psi by 1/size after an unnormalized inverse FFT.
__global__ void normalize(hipfftComplex *psi, int size, int xn, int yn, int zn)
{
	int i = threadIdx.x + blockIdx.x * blockDim.x;
	int j = threadIdx.y + blockIdx.y * blockDim.y;
	int k = threadIdx.z + blockIdx.z * blockDim.z;

	// Stay within range since the grid might be larger
	if (i >= xn || j >= yn || k >= zn) return;

	const int idx = ind(i,j,k);
	psi[idx].x = psi[idx].x/size;
	psi[idx].y = psi[idx].y/size;
}
| 32f5fad3ae50d51f7e9f622874150c0356fe79dd.cu | /**********************************************************************************
* Numerical Solution for the Cubic-Quintic Nonlinear Schrodinger Equation *
* using second order split step Fourier method. *
* Coded by: Omar Ashour, Texas A&M University at Qatar, February 2015. *
* ********************************************************************************/
#include <stddef.h>
#include "../lib/cu_helpers.h"
#include <cufft.h>
// Grid Parameters
#define XN xn // Number of x-spatial nodes
#define YN yn // Number of y-spatial nodes
#define ZN zn // Number of z-spatial nodes
#define TN 100 // Number of temporal nodes
#define LX 50.0f // x-spatial domain [-LX,LX)
#define LY 50.0f // y-spatial domain [-LY,LY)
#define LZ 50.0f // z-spatial domain [-LZ,LZ)
#define TT 10.0f // Max time
#define DX (2*LX / XN) // x-spatial step size
#define DY (2*LY / YN) // y-spatial step size
#define DZ (2*LZ / ZN) // z-spatial step size
#define DT (TT / TN) // temporal step size
// Gaussian Parameters
#define A_S (3.0f/sqrt(8.0f))
#define R_S (sqrt(32.0f/9.0f))
#define A 0.6f
#define R (1.0f/(A*sqrt(1.0f-A*A)))
// Index flattening macro
// Flat[x + WIDTH * (y + DEPTH * z)] = Original[x, y, z]
#define ind(i,j,k) ((((i * ZN) * YN) + (j * YN)) + k)
// ____WIDTH____
// |_|_|_|_|_|_|_|H
// |_|_|_|_|_|_|_|E
// Z|_|_|_|_|_|_|_|I
// N|_|_|_|_|_|_|_|G
// |_|_|_|_|_|_|_|H
// |_|_|_|_|_|_|_|T
// \_\_\_\_\_\_\_\D
// \_\_\_\_\_\_\_\E
// Y\_\_\_\_\_\_\_\P
// N\_\_\_\_\_\_\_\T
// \_\_\_\_\_\_\_\H
// XN
// Timing parameters
#define IRVL 10 // Timing interval. Take a reading every N iterations.
// Output files
#define VTK_0 "gpu_ffts_0.vtk"
#define VTK_1 "gpu_ffts_1.vtk"
#define TIME_F argv[2]
// Function prototypes
__global__ void nonlin(cufftComplex *psi, float dt, int xn, int yn, int zn);
__global__ void lin(cufftComplex *psi, float *k2, float dt, int xn, int yn, int zn);
__global__ void normalize(cufftComplex *psi, int size, int xn, int yn, int zn);
int main(void)
{
// Timing info
cudaEvent_t begin_event, end_event;
cudaEventCreate(&begin_event);
cudaEventCreate(&end_event);
// Print basic info about simulation
const int xn = atoi(argv[1]);
const int yn = atoi(argv[1]);
const int zn = atoi(argv[1]);
printf("XN: %d. DX: %f, DT: %f, dt/dx^2: %f\n", XN, DX, DT, DT/(DX*DX));
// Allocate host arrays
float *h_x = (float*)malloc(sizeof(float) * XN);
float *h_y = (float*)malloc(sizeof(float) * YN);
float *h_z = (float*)malloc(sizeof(float) * ZN);
float *h_k2 = (float*)malloc(sizeof(float) * XN * YN * ZN);
float *h_kx = (float*)malloc(XN * sizeof(float));
float *h_ky = (float*)malloc(YN * sizeof(float));
float *h_kz = (float*)malloc(ZN * sizeof(float));
float *h_max = (float*)calloc(TN+1, sizeof(float));
cufftComplex *h_psi = (cufftComplex*)malloc(
sizeof(cufftComplex) * XN * YN * ZN);
cufftComplex *h_psi_0 = (cufftComplex*)malloc(
sizeof(cufftComplex) * XN * YN * ZN);
// Create transform plans
cufftHandle plan;
CUFFT_SAFE_CALL(cufftPlan3d(&plan, XN, YN, ZN, CUFFT_C2C));
// Create wavenumbers
float dkx = 2*M_PI/XN/DX;
for(int i = XN/2; i >= 0; i--)
h_kx[XN/2 - i]=(XN/2 - i) * dkx;
for(int i = XN/2+1; i < XN; i++)
h_kx[i]=(i - XN) * dkx;
float dky = 2*M_PI/YN/DY;
for(int i = YN/2; i >= 0; i--)
h_ky[YN/2 - i]=(YN/2 - i) * dky;
for(int i = YN/2+1; i < YN; i++)
h_ky[i]=(i - YN) * dky;
float dkz = 2*M_PI/ZN/DZ;
for(int i = ZN/2; i >= 0; i--)
h_kz[ZN/2 - i]=(ZN/2 - i) * dkz;
for(int i = ZN/2+1; i < ZN; i++)
h_kz[i]=(i - ZN) * dkz;
// Initialize x, y and z
for(int i = 0; i < XN ; i++)
h_x[i] = (i-XN/2)*DX;
for(int i = 0; i < YN ; i++)
h_y[i] = (i-YN/2)*DY;
for(int i = 0; i < ZN ; i++)
h_z[i] = (i-ZN/2)*DZ;
// Initial conditions on host
for(int i = 0; i < XN; i++)
for(int j = 0; j < YN; j++)
for(int k = 0; k < ZN; k++)
{
h_psi[ind(i,j,k)].x = A_S*A*
exp(-(h_x[i]*h_x[i]+h_y[j]*h_y[j]+h_z[k]*h_z[k])
/(2*R*R*R_S*R_S));
h_psi[ind(i,j,k)].y = 0;
h_psi_0[ind(i,j,k)].x = h_psi[ind(i,j,k)].x;
h_psi_0[ind(i,j,k)].y = h_psi[ind(i,j,k)].y;
h_k2[ind(i,j,k)] = h_kx[i]*h_kx[i] + h_ky[j]*h_ky[j] + h_kz[k]*h_kz[k];
}
// Allocate and copy device memory
cufftComplex *d_psi; float *d_k2;
CUDAR_SAFE_CALL(cudaMalloc((void **)&d_psi, sizeof(cufftComplex)*XN*YN*ZN));
CUDAR_SAFE_CALL(cudaMalloc((void **)&d_k2, sizeof(float)*XN*YN*ZN));
CUDAR_SAFE_CALL(cudaMemcpy(d_psi, h_psi, sizeof(cufftComplex)*XN*YN*ZN,
cudaMemcpyHostToDevice));
CUDAR_SAFE_CALL(cudaMemcpy(d_k2, h_k2, sizeof(float)*XN*YN*ZN,
cudaMemcpyHostToDevice));
// Initialize the grid
dim3 threadsPerBlock(8,8,8);
dim3 blocksPerGrid((XN + 7)/8,(YN+7)/8,(ZN+7)/8);
// Find max(|psi|) for initial pulse.
//cmax_psi(psi, h_max, 0, XN*YN*ZN);
// Forward transform
CUFFT_SAFE_CALL(cufftExecC2C(plan, d_psi, d_psi, CUFFT_FORWARD));
// Timing starts here
cudaEventRecord(begin_event, 0);
// Start time evolution
for (int i = 1; i <= TN; i++)
{
// Solve linear part
lin<<<blocksPerGrid, threadsPerBlock>>>(d_psi, d_k2, DT/2, XN, YN, ZN);
#if CUDAR_ERROR_CHECKING
CUDAR_SAFE_CALL(cudaPeekAtLastError());
#endif // CUDAR_ERROR_CHECKING
// Backward transform
CUFFT_SAFE_CALL(cufftExecC2C(plan, d_psi, d_psi, CUFFT_INVERSE));
// Normalize the transform
normalize<<<blocksPerGrid, threadsPerBlock>>>(d_psi, XN*YN*ZN, XN, YN, ZN);
#if CUDAR_ERROR_CHECKING
CUDAR_SAFE_CALL(cudaPeekAtLastError());
#endif // CUDAR_ERROR_CHECKING
// Solve nonlinear part
nonlin<<<blocksPerGrid, threadsPerBlock>>>(d_psi, DT, XN, YN, ZN);
#if CUDAR_ERROR_CHECKING
CUDAR_SAFE_CALL(cudaPeekAtLastError());
#endif // CUDAR_ERROR_CHECKING
// Forward transform
CUFFT_SAFE_CALL(cufftExecC2C(plan, d_psi, d_psi, CUFFT_FORWARD));
// Linear calculation
lin<<<blocksPerGrid, threadsPerBlock>>>(d_psi, d_k2, DT/2, XN, YN, ZN);
#if CUDAR_ERROR_CHECKING
CUDAR_SAFE_CALL(cudaPeekAtLastError());
#endif // CUDAR_ERROR_CHECKING
// Save max |psi| for printing
//cmax_psi(psi, h_max, i, XN*YN*ZN);
}
float time_value;
cudaEventRecord(end_event, 0);
cudaEventSynchronize(end_event);
cudaEventElapsedTime(&time_value, begin_event, end_event);
// Print time to file
FILE *fp = fopen(TIME_F, "a");
fprintf(fp, "%f, ", time_value);
fclose(fp);
// Backward tranform to retreive data
CUFFT_SAFE_CALL(cufftExecC2C(plan, d_psi, d_psi, CUFFT_INVERSE));
// Normalize the transform
normalize<<<blocksPerGrid, threadsPerBlock>>>(d_psi, XN*YN*ZN, XN, YN, ZN);
CUDAR_SAFE_CALL(cudaPeekAtLastError());
// Copy results to device
CUDAR_SAFE_CALL(cudaMemcpy(h_psi, d_psi, sizeof(cufftComplex)*XN*YN*ZN,
cudaMemcpyDeviceToHost));
// Plot results
vtk_3dcf(h_x, h_y, h_z, h_psi, XN, YN, ZN, VTK_1);
vtk_3dcf(h_x, h_y, h_z, h_psi_0, XN, YN, ZN, VTK_0);
// Clean up
CUFFT_SAFE_CALL(cufftDestroy(plan));
free(h_x);
free(h_y);
free(h_z);
free(h_k2);
free(h_kx);
free(h_ky);
free(h_kz);
free(h_psi);
free(h_psi_0);
free(h_max);
CUDAR_SAFE_CALL(cudaFree(d_psi));
CUDAR_SAFE_CALL(cudaFree(d_k2));
return 0;
}
// Nonlinear half-step of the split-step scheme: multiply each grid value of
// psi by exp(i * (|psi|^2 - |psi|^4) * dt).
// Expected launch: 3-D grid/block covering at least xn x yn x zn threads.
__global__ void nonlin(cufftComplex *psi, float dt, int xn, int yn, int zn)
{
	int i = threadIdx.x + blockIdx.x * blockDim.x;
	int j = threadIdx.y + blockIdx.y * blockDim.y;
	int k = threadIdx.z + blockIdx.z * blockDim.z;
	// Avoid first and last point (boundary conditions) (needs fixing)
	// if (i >= xn - 1 || j >= yn-1 || || k >= zn-1 || i == 0 || j == 0 || k == 0) return;
	// Guard against the padded grid tail.
	if (i >= xn || j >= yn || k >= zn) return;
	float psi2 = cuCabsf(psi[ind(i,j,k)])*cuCabsf(psi[ind(i,j,k)]);
	float non = psi2 - psi2*psi2;
	// Fix: use single-precision cosf/sinf; the double-precision cos/sin of
	// the original silently promoted the float argument to double on every
	// thread, costing throughput for no accuracy the rest of the kernel keeps.
	psi[ind(i,j,k)] = cuCmulf(psi[ind(i,j,k)],
						 make_cuComplex(cosf(non*dt), sinf(non*dt)));
}
// Linear half-step in Fourier space: multiply each mode of psi by
// exp(-i * k2 * dt), where k2 holds the squared wavenumber per grid point.
// Expected launch: 3-D grid/block covering at least xn x yn x zn threads.
__global__ void lin(cufftComplex *psi, float *k2, float dt, int xn, int yn, int zn)
{
	int i = threadIdx.x + blockIdx.x * blockDim.x;
	int j = threadIdx.y + blockIdx.y * blockDim.y;
	int k = threadIdx.z + blockIdx.z * blockDim.z;
	// Avoid first and last point (boundary conditions) (needs fixing)
	// if (i >= xn - 1 || j >= yn-1 || || k >= zn-1 || i == 0 || j == 0 || k == 0) return;
	// Guard against the padded grid tail.
	if (i >= xn || j >= yn || k >= zn) return;
	// Fix: single-precision cosf/sinf instead of the double-precision
	// cos/sin, which promoted the float argument to double on every thread.
	psi[ind(i,j,k)] = cuCmulf(psi[ind(i,j,k)],
		make_cuComplex(cosf(k2[ind(i,j,k)]*dt), -sinf(k2[ind(i,j,k)]*dt)));
}
// Divide every element of psi by `size`, undoing the scaling introduced by
// the unnormalized inverse FFT. One thread per grid point.
__global__ void normalize(cufftComplex *psi, int size, int xn, int yn, int zn)
{
	const int i = threadIdx.x + blockIdx.x * blockDim.x;
	const int j = threadIdx.y + blockIdx.y * blockDim.y;
	const int k = threadIdx.z + blockIdx.z * blockDim.z;
	// The launch grid may be padded past the array bounds; bail out early.
	if (i >= xn || j >= yn || k >= zn)
		return;
	// Compute the flat index once and scale both components in place.
	const int idx = ind(i,j,k);
	psi[idx].x /= size;
	psi[idx].y /= size;
}
|
a6bed4f82503fe8841d09afc3f6a6ba3cc0ef9dc.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2017 XGBoost contributors
*/
#include <thrust/device_vector.h>
#include <xgboost/base.h>
#include "../../../src/common/device_helpers.cuh"
#include "../../../src/common/timer.h"
#include "gtest/gtest.h"
// Build a random CSR-style layout for testing: row_ptr gets num_rows + 1
// offsets, and `rows` is the expanded per-element row-index list, i.e. every
// entry in [row_ptr[i], row_ptr[i+1]) stores the value i. Row lengths are
// drawn as rand() % max_row_size, so callers seed rand() for reproducibility.
void CreateTestData(xgboost::bst_uint num_rows, int max_row_size,
                    thrust::host_vector<int> *row_ptr,
                    thrust::host_vector<xgboost::bst_uint> *rows) {
  row_ptr->resize(num_rows + 1);
  int sum = 0;
  for (xgboost::bst_uint i = 0; i <= num_rows; i++) {
    (*row_ptr)[i] = sum;
    // NOTE(review): on the final iteration (i == num_rows) this still draws
    // one extra rand() after the last offset is written; the draw is unused.
    sum += rand() % max_row_size;  // NOLINT
    if (i < num_rows) {
      // Append `i` once per element owned by row i.
      for (int j = (*row_ptr)[i]; j < sum; j++) {
        (*rows).push_back(i);
      }
    }
  }
}
// Exercise dh::TransformLbs (load-balancing search) over several row-count /
// max-row-size combinations: for a CSR layout from CreateTestData, every flat
// element index idx must be mapped back to its owning row ridx, so writing
// ridx into output_row[idx] must reproduce h_rows exactly.
void TestLbs() {
  // Fixed seed keeps CreateTestData deterministic across runs.
  srand(17);
  dh::CubMemory temp_memory;
  std::vector<int> test_rows = {4, 100, 1000};
  std::vector<int> test_max_row_sizes = {4, 100, 1300};
  for (auto num_rows : test_rows) {
    for (auto max_row_size : test_max_row_sizes) {
      thrust::host_vector<int> h_row_ptr;
      thrust::host_vector<xgboost::bst_uint> h_rows;
      CreateTestData(num_rows, max_row_size, &h_row_ptr, &h_rows);
      thrust::device_vector<size_t> row_ptr = h_row_ptr;
      thrust::device_vector<int> output_row(h_rows.size());
      auto d_output_row = output_row.data();
      // Device lambda records, for each element, which segment it belongs to.
      dh::TransformLbs(0, &temp_memory, h_rows.size(), dh::Raw(row_ptr),
                       row_ptr.size() - 1, false,
                       [=] __device__(size_t idx, size_t ridx) {
                         d_output_row[idx] = ridx;
                       });
      // The lambda runs asynchronously; sync before comparing on the host.
      dh::safe_cuda(hipDeviceSynchronize());
      ASSERT_TRUE(h_rows == output_row);
    }
  }
}
// gtest entry point for the load-balancing-search exercise above.
TEST(cub_lbs, Test) { TestLbs(); }
// A reduction over 100 ones must yield 100 (within float tolerance).
TEST(sumReduce, Test) {
  thrust::device_vector<float> data(100, 1.0f);
  dh::CubMemory temp;
  auto sum = dh::SumReduction(temp, dh::Raw(data), data.size());
  ASSERT_NEAR(sum, 100.0f, 1e-5);
}
| a6bed4f82503fe8841d09afc3f6a6ba3cc0ef9dc.cu |
/*!
* Copyright 2017 XGBoost contributors
*/
#include <thrust/device_vector.h>
#include <xgboost/base.h>
#include "../../../src/common/device_helpers.cuh"
#include "../../../src/common/timer.h"
#include "gtest/gtest.h"
// Build a random CSR-style layout for testing: row_ptr gets num_rows + 1
// offsets, and `rows` is the expanded per-element row-index list, i.e. every
// entry in [row_ptr[i], row_ptr[i+1]) stores the value i. Row lengths are
// drawn as rand() % max_row_size, so callers seed rand() for reproducibility.
void CreateTestData(xgboost::bst_uint num_rows, int max_row_size,
                    thrust::host_vector<int> *row_ptr,
                    thrust::host_vector<xgboost::bst_uint> *rows) {
  row_ptr->resize(num_rows + 1);
  int sum = 0;
  for (xgboost::bst_uint i = 0; i <= num_rows; i++) {
    (*row_ptr)[i] = sum;
    // NOTE(review): on the final iteration (i == num_rows) this still draws
    // one extra rand() after the last offset is written; the draw is unused.
    sum += rand() % max_row_size;  // NOLINT
    if (i < num_rows) {
      // Append `i` once per element owned by row i.
      for (int j = (*row_ptr)[i]; j < sum; j++) {
        (*rows).push_back(i);
      }
    }
  }
}
// Exercise dh::TransformLbs (load-balancing search) over several row-count /
// max-row-size combinations: for a CSR layout from CreateTestData, every flat
// element index idx must be mapped back to its owning row ridx, so writing
// ridx into output_row[idx] must reproduce h_rows exactly.
void TestLbs() {
  // Fixed seed keeps CreateTestData deterministic across runs.
  srand(17);
  dh::CubMemory temp_memory;
  std::vector<int> test_rows = {4, 100, 1000};
  std::vector<int> test_max_row_sizes = {4, 100, 1300};
  for (auto num_rows : test_rows) {
    for (auto max_row_size : test_max_row_sizes) {
      thrust::host_vector<int> h_row_ptr;
      thrust::host_vector<xgboost::bst_uint> h_rows;
      CreateTestData(num_rows, max_row_size, &h_row_ptr, &h_rows);
      thrust::device_vector<size_t> row_ptr = h_row_ptr;
      thrust::device_vector<int> output_row(h_rows.size());
      auto d_output_row = output_row.data();
      // Device lambda records, for each element, which segment it belongs to.
      dh::TransformLbs(0, &temp_memory, h_rows.size(), dh::Raw(row_ptr),
                       row_ptr.size() - 1, false,
                       [=] __device__(size_t idx, size_t ridx) {
                         d_output_row[idx] = ridx;
                       });
      // The lambda runs asynchronously; sync before comparing on the host.
      dh::safe_cuda(cudaDeviceSynchronize());
      ASSERT_TRUE(h_rows == output_row);
    }
  }
}
// gtest entry point for the load-balancing-search exercise above.
TEST(cub_lbs, Test) { TestLbs(); }
// A reduction over 100 ones must yield 100 (within float tolerance).
TEST(sumReduce, Test) {
  thrust::device_vector<float> data(100, 1.0f);
  dh::CubMemory temp;
  auto sum = dh::SumReduction(temp, dh::Raw(data), data.size());
  ASSERT_NEAR(sum, 100.0f, 1e-5);
}
|
b7429890e8a11955a05273ad1747b7c713ae7b08.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
//#include <cutil_inline.h>
#include <assert.h>
#include <stdio.h>
#include "mergeSort_common.h"
// Conditionally swap a key/value pair in place so the pair ends up ordered
// according to arrowDir: arrowDir == 1 leaves keyA <= keyB (ascending),
// arrowDir == 0 leaves keyA >= keyB (descending). Values travel with keys.
inline __device__ void Comparator(
    uint& keyA,
    uint& valA,
    uint& keyB,
    uint& valB,
    uint arrowDir
){
    uint t;
    // Swap exactly when the current order disagrees with the requested one.
    if( (keyA > keyB) == arrowDir ){
        t = keyA; keyA = keyB; keyB = t;
        t = valA; valA = valB; valB = t;
    }
}
// Bitonic sort of one SHARED_SIZE_LIMIT-element key/value tile per block,
// performed entirely in shared memory. Each thread owns TWO elements
// (offsets 0 and SHARED_SIZE_LIMIT/2), so the kernel must be launched with
// SHARED_SIZE_LIMIT/2 threads per block; arrayLength must be a power of two
// no larger than SHARED_SIZE_LIMIT (the host wrapper asserts this).
__global__ void bitonicSortSharedKernel(
    uint *d_DstKey,
    uint *d_DstVal,
    uint *d_SrcKey,
    uint *d_SrcVal,
    uint arrayLength,
    uint sortDir
){
    //Shared memory storage for one or more short vectors
    __shared__ uint s_key[SHARED_SIZE_LIMIT];
    __shared__ uint s_val[SHARED_SIZE_LIMIT];
    //Offset to the beginning of subbatch and load data
    d_SrcKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
    d_SrcVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
    d_DstKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
    d_DstVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
    s_key[threadIdx.x + 0] = d_SrcKey[ 0];
    s_val[threadIdx.x + 0] = d_SrcVal[ 0];
    s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcKey[(SHARED_SIZE_LIMIT / 2)];
    s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcVal[(SHARED_SIZE_LIMIT / 2)];
    // Build progressively longer bitonic sequences; `dir` alternates across
    // halves so adjacent sub-sequences end up sorted in opposite directions.
    for(uint size = 2; size < arrayLength; size <<= 1){
        //Bitonic merge
        uint dir = (threadIdx.x & (size / 2)) != 0;
        for(uint stride = size / 2; stride > 0; stride >>= 1){
            // Barrier is outside any divergent branch: all threads reach it.
            __syncthreads();
            uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
            Comparator(
                s_key[pos + 0], s_val[pos + 0],
                s_key[pos + stride], s_val[pos + stride],
                dir
            );
        }
    }
    //ddd == sortDir for the last bitonic merge step
    {
        for(uint stride = arrayLength / 2; stride > 0; stride >>= 1){
            __syncthreads();
            uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
            Comparator(
                s_key[pos + 0], s_val[pos + 0],
                s_key[pos + stride], s_val[pos + stride],
                sortDir
            );
        }
    }
    // Wait for the final merge pass before publishing the sorted tile.
    __syncthreads();
    d_DstKey[ 0] = s_key[threadIdx.x + 0];
    d_DstVal[ 0] = s_val[threadIdx.x + 0];
    d_DstKey[(SHARED_SIZE_LIMIT / 2)] = s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
    d_DstVal[(SHARED_SIZE_LIMIT / 2)] = s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
}
//Helper function (also used by odd-even merge sort)
// Factor L as (2^(*log2L)) * remainder with the remainder odd, returning the
// remainder (1 iff L is a power of two). L == 0 yields remainder 0 and
// *log2L == 0.
extern "C" uint factorRadix2(uint *log2L, uint L){
    if(!L){
        *log2L = 0;
        return 0;
    }else{
        // Bug fix: the original wrote `*log2L++`, which by C precedence
        // post-increments the POINTER (then dereferences the old address),
        // so the exponent was never counted and *log2L stayed 0.
        for(*log2L = 0; (L & 1) == 0; L >>= 1, ++(*log2L));
        return L;
    }
}
// Host wrapper for bitonicSortSharedKernel: validates that arrayLength is a
// power of two, fits in one shared-memory tile, and that the whole batch is
// a multiple of SHARED_SIZE_LIMIT, then launches one block per tile with
// SHARED_SIZE_LIMIT/2 threads (each thread handles two elements).
extern "C" void bitonicSortShared(
    uint *d_DstKey,
    uint *d_DstVal,
    uint *d_SrcKey,
    uint *d_SrcVal,
    uint batchSize,
    uint arrayLength,
    uint sortDir
){
    //Nothing to sort
    if(arrayLength < 2)
        return;
    //Only power-of-two array lengths are supported by this implementation
    uint log2L;
    uint factorizationRemainder = factorRadix2(&log2L, arrayLength);
    assert( factorizationRemainder == 1 );
    // NOTE(review): debugging printf left in the hot path -- consider removing.
    printf("batchSize: %d, arryLength: %d, SHARED_SIZE_LIMIT: %d \n", batchSize, arrayLength, SHARED_SIZE_LIMIT);
    uint blockCount = batchSize * arrayLength / SHARED_SIZE_LIMIT;
    uint threadCount = SHARED_SIZE_LIMIT / 2;
    assert(arrayLength <= SHARED_SIZE_LIMIT);
    assert( (batchSize * arrayLength) % SHARED_SIZE_LIMIT == 0 );
    hipLaunchKernelGGL(( bitonicSortSharedKernel), dim3(blockCount), dim3(threadCount), 0, 0, d_DstKey, d_DstVal, d_SrcKey, d_SrcVal, arrayLength, sortDir);
    //cutilCheckMsg("bitonicSortSharedKernel<<<>>> failed!\n");
}
// KLEE driver: sort one symbolic tile of SHARED_SIZE_LIMIT key/value pairs.
int main() {
    uint h_SrcKey[SHARED_SIZE_LIMIT];
    uint h_SrcVal[SHARED_SIZE_LIMIT];
    // Keys are made symbolic for the analysis engine.
    // NOTE(review): h_SrcVal is never initialized before being copied to the
    // device -- confirm whether it should be symbolic as well.
    klee_make_symbolic(h_SrcKey, sizeof(uint) * SHARED_SIZE_LIMIT, "srckey_input");
    uint *d_DstKey, *d_DstVal, *d_SrcKey, *d_SrcVal;
    hipMalloc((void**)&d_DstKey, sizeof(uint) * SHARED_SIZE_LIMIT);
    hipMalloc((void**)&d_DstVal, sizeof(uint) * SHARED_SIZE_LIMIT);
    hipMalloc((void**)&d_SrcKey, sizeof(uint) * SHARED_SIZE_LIMIT);
    hipMalloc((void**)&d_SrcVal, sizeof(uint) * SHARED_SIZE_LIMIT);
    hipMemcpy(d_SrcKey, h_SrcKey, sizeof(uint) * SHARED_SIZE_LIMIT, hipMemcpyHostToDevice);
    hipMemcpy(d_SrcVal, h_SrcVal, sizeof(uint) * SHARED_SIZE_LIMIT, hipMemcpyHostToDevice);
    bitonicSortShared(d_DstKey, d_DstVal, d_SrcKey, d_SrcVal, 1, SHARED_SIZE_LIMIT, 1);
    // Fix: the original leaked all four device allocations.
    hipFree(d_DstKey);
    hipFree(d_DstVal);
    hipFree(d_SrcKey);
    hipFree(d_SrcVal);
    return 0;
}
| b7429890e8a11955a05273ad1747b7c713ae7b08.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
//#include <cutil_inline.h>
#include <assert.h>
#include <stdio.h>
#include "mergeSort_common.h"
inline __device__ void Comparator(
uint& keyA,
uint& valA,
uint& keyB,
uint& valB,
uint arrowDir
){
uint t;
if( (keyA > keyB) == arrowDir ){
t = keyA; keyA = keyB; keyB = t;
t = valA; valA = valB; valB = t;
}
}
__global__ void bitonicSortSharedKernel(
uint *d_DstKey,
uint *d_DstVal,
uint *d_SrcKey,
uint *d_SrcVal,
uint arrayLength,
uint sortDir
){
//Shared memory storage for one or more short vectors
__shared__ uint s_key[SHARED_SIZE_LIMIT];
__shared__ uint s_val[SHARED_SIZE_LIMIT];
//Offset to the beginning of subbatch and load data
d_SrcKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_SrcVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_DstKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_DstVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
s_key[threadIdx.x + 0] = d_SrcKey[ 0];
s_val[threadIdx.x + 0] = d_SrcVal[ 0];
s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcKey[(SHARED_SIZE_LIMIT / 2)];
s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcVal[(SHARED_SIZE_LIMIT / 2)];
for(uint size = 2; size < arrayLength; size <<= 1){
//Bitonic merge
uint dir = (threadIdx.x & (size / 2)) != 0;
for(uint stride = size / 2; stride > 0; stride >>= 1){
__syncthreads();
uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
Comparator(
s_key[pos + 0], s_val[pos + 0],
s_key[pos + stride], s_val[pos + stride],
dir
);
}
}
//ddd == sortDir for the last bitonic merge step
{
for(uint stride = arrayLength / 2; stride > 0; stride >>= 1){
__syncthreads();
uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
Comparator(
s_key[pos + 0], s_val[pos + 0],
s_key[pos + stride], s_val[pos + stride],
sortDir
);
}
}
__syncthreads();
d_DstKey[ 0] = s_key[threadIdx.x + 0];
d_DstVal[ 0] = s_val[threadIdx.x + 0];
d_DstKey[(SHARED_SIZE_LIMIT / 2)] = s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
d_DstVal[(SHARED_SIZE_LIMIT / 2)] = s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
}
//Helper function (also used by odd-even merge sort)
// Factor L as (2^(*log2L)) * remainder with the remainder odd, returning the
// remainder (1 iff L is a power of two). L == 0 yields remainder 0 and
// *log2L == 0.
extern "C" uint factorRadix2(uint *log2L, uint L){
    if(!L){
        *log2L = 0;
        return 0;
    }else{
        // Bug fix: the original wrote `*log2L++`, which by C precedence
        // post-increments the POINTER (then dereferences the old address),
        // so the exponent was never counted and *log2L stayed 0.
        for(*log2L = 0; (L & 1) == 0; L >>= 1, ++(*log2L));
        return L;
    }
}
extern "C" void bitonicSortShared(
uint *d_DstKey,
uint *d_DstVal,
uint *d_SrcKey,
uint *d_SrcVal,
uint batchSize,
uint arrayLength,
uint sortDir
){
//Nothing to sort
if(arrayLength < 2)
return;
//Only power-of-two array lengths are supported by this implementation
uint log2L;
uint factorizationRemainder = factorRadix2(&log2L, arrayLength);
assert( factorizationRemainder == 1 );
printf("batchSize: %d, arryLength: %d, SHARED_SIZE_LIMIT: %d \n", batchSize, arrayLength, SHARED_SIZE_LIMIT);
uint blockCount = batchSize * arrayLength / SHARED_SIZE_LIMIT;
uint threadCount = SHARED_SIZE_LIMIT / 2;
assert(arrayLength <= SHARED_SIZE_LIMIT);
assert( (batchSize * arrayLength) % SHARED_SIZE_LIMIT == 0 );
bitonicSortSharedKernel<<<blockCount, threadCount>>>(d_DstKey, d_DstVal, d_SrcKey, d_SrcVal, arrayLength, sortDir);
//cutilCheckMsg("bitonicSortSharedKernel<<<>>> failed!\n");
}
// KLEE driver: sort one symbolic tile of SHARED_SIZE_LIMIT key/value pairs.
int main() {
    uint h_SrcKey[SHARED_SIZE_LIMIT];
    uint h_SrcVal[SHARED_SIZE_LIMIT];
    // Keys are made symbolic for the analysis engine.
    // NOTE(review): h_SrcVal is never initialized before being copied to the
    // device -- confirm whether it should be symbolic as well.
    klee_make_symbolic(h_SrcKey, sizeof(uint) * SHARED_SIZE_LIMIT, "srckey_input");
    uint *d_DstKey, *d_DstVal, *d_SrcKey, *d_SrcVal;
    cudaMalloc((void**)&d_DstKey, sizeof(uint) * SHARED_SIZE_LIMIT);
    cudaMalloc((void**)&d_DstVal, sizeof(uint) * SHARED_SIZE_LIMIT);
    cudaMalloc((void**)&d_SrcKey, sizeof(uint) * SHARED_SIZE_LIMIT);
    cudaMalloc((void**)&d_SrcVal, sizeof(uint) * SHARED_SIZE_LIMIT);
    cudaMemcpy(d_SrcKey, h_SrcKey, sizeof(uint) * SHARED_SIZE_LIMIT, cudaMemcpyHostToDevice);
    cudaMemcpy(d_SrcVal, h_SrcVal, sizeof(uint) * SHARED_SIZE_LIMIT, cudaMemcpyHostToDevice);
    bitonicSortShared(d_DstKey, d_DstVal, d_SrcKey, d_SrcVal, 1, SHARED_SIZE_LIMIT, 1);
    // Fix: the original leaked all four device allocations.
    cudaFree(d_DstKey);
    cudaFree(d_DstVal);
    cudaFree(d_SrcKey);
    cudaFree(d_SrcVal);
    return 0;
}
|
3ac6fcda3d557a2e7a2bf71b7eb61ac0a16c7dfa.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
// Change the code here:
// This should be changed to GPU kernel definition
// CPU reference implementation of an element-wise 3-D dot product:
// c[i] = a[i].x*b[i].x + a[i].y*b[i].y + a[i].z*b[i].z for every i.
// The surrounding exercise asks the student to port this to a __global__
// kernel; the signature should be kept when doing so.
void dot(int numElements, const float3* a, const float3* b, float* c)
{
	for (int i = 0; i < numElements; i++)
	{
		c[i] = a[i].x*b[i].x + a[i].y*b[i].y + a[i].z*b[i].z;
	}
}
// Exercise driver: fill two float3 arrays with reproducible random data,
// run the (currently CPU) dot product, and print the first few results.
int main()
{
	int numElements = 10000;
	float3* a = (float3*)calloc(numElements, sizeof(float3));
	float3* b = (float3*)calloc(numElements, sizeof(float3));
	float* c = (float*)calloc(numElements, sizeof(float));
	// Fixed seed so runs are reproducible.
	srand(1214134);
	for (int i = 0; i < numElements; i++)
	{
		a[i].x = float(rand())/float(RAND_MAX + 1.0);
		a[i].y = float(rand())/float(RAND_MAX + 1.0);
		a[i].z = float(rand())/float(RAND_MAX + 1.0);
		b[i].x = float(rand())/float(RAND_MAX + 1.0);
		b[i].y = float(rand())/float(RAND_MAX + 1.0);
		b[i].z = float(rand())/float(RAND_MAX + 1.0);
	}
	// Insert your code here:
	// 1. Create GPU device buffers
	// 2. Copy input data from host to device (vectors a and b)
	// 3. Change the CPU function call to the GPU kernel call
	dot(numElements, a, b, c);
	// 4. Copy the result back (vector c)
	// Fix: qualify as std::min -- the translated bare `::min` only resolves
	// through a toolkit-provided global overload and is not portable C++
	// (<algorithm> is already included above).
	for (int i = 0; i < std::min(10, numElements); i++)
	{
		printf("%f*%f + %f*%f + %f*%f = %f\n", a[i].x, b[i].x, a[i].y, b[i].y, a[i].z, b[i].z, c[i]);
	}
	printf("...\n");
	free(a);
	free(b);
	free(c);
	// Free GPU memory here
	return 0;
}
| 3ac6fcda3d557a2e7a2bf71b7eb61ac0a16c7dfa.cu | #include <stdio.h>
#include <stdlib.h>
#include <algorithm>
// Change the code here:
// This should be changed to GPU kernel definition
void dot(int numElements, const float3* a, const float3* b, float* c)
{
for (int i = 0; i < numElements; i++)
{
c[i] = a[i].x*b[i].x + a[i].y*b[i].y + a[i].z*b[i].z;
}
}
int main()
{
int numElements = 10000;
float3* a = (float3*)calloc(numElements, sizeof(float3));
float3* b = (float3*)calloc(numElements, sizeof(float3));
float* c = (float*)calloc(numElements, sizeof(float));
srand(1214134);
for (int i = 0; i < numElements; i++)
{
a[i].x = float(rand())/float(RAND_MAX + 1.0);
a[i].y = float(rand())/float(RAND_MAX + 1.0);
a[i].z = float(rand())/float(RAND_MAX + 1.0);
b[i].x = float(rand())/float(RAND_MAX + 1.0);
b[i].y = float(rand())/float(RAND_MAX + 1.0);
b[i].z = float(rand())/float(RAND_MAX + 1.0);
}
// Insert your code here:
// 1. Create GPU device buffers
// 2. Copy input data from host to device (vectors a and b)
// 3. Change the CPU function call to the GPU kernel call
dot(numElements, a, b, c);
// 4. Copy the result back (vector c)
for (int i = 0; i < std::min(10, numElements); i++)
{
printf("%f*%f + %f*%f + %f*%f = %f\n", a[i].x, b[i].x, a[i].y, b[i].y, a[i].z, b[i].z, c[i]);
}
printf("...\n");
free(a);
free(b);
free(c);
// Free GPU memory here
return 0;
}
|
d4c4f4681acc86277594f18edd4f9c79d5928106.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/kthvalue_kernel.h"
#include "paddle/fluid/operators/top_k_function_cuda.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/eigen/common.h"
#include "paddle/phi/kernels/funcs/eigen/eigen_function.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace phi {
// Map a column count to a power-of-two CUDA block size in [64, 1024].
// Each guard only fires after the larger thresholds have been ruled out,
// so the explicit upper-bound checks of the original are unnecessary.
inline int getBlockSize(int col) {
  if (col > 512) return 1024;
  if (col > 256) return 512;
  if (col > 128) return 256;
  if (col > 64) return 128;
  return 64;
}
template <typename T>
bool SortKthvalue(const phi::GPUContext& dev_ctx,
const DenseTensor* input_tensor,
const int64_t num_cols,
const int64_t num_rows,
const int k,
DenseTensor* out_tensor,
DenseTensor* indices_tensor) {
auto cu_stream = dev_ctx.stream();
DenseTensor input_indices;
const std::vector<int64_t> dims = {num_rows, num_cols};
auto dim = phi::make_ddim(dims);
input_indices.Resize(dim);
dev_ctx.template Alloc<int64_t>(&input_indices);
size_t temp_storage_bytes = -1;
int block_size = getBlockSize(num_cols);
unsigned int maxGridDimX = dev_ctx.GetCUDAMaxGridDimSize()[0];
unsigned int grid_size = num_rows < maxGridDimX
? static_cast<unsigned int>(num_rows)
: maxGridDimX;
hipLaunchKernelGGL(( paddle::operators::InitIndex<int64_t>)
, dim3(grid_size), dim3(block_size), 0, cu_stream,
input_indices.data<int64_t>(), num_rows, num_cols);
hipcub::CountingInputIterator<int64_t> counting_iter(0);
hipcub::TransformInputIterator<int64_t,
paddle::operators::SegmentOffsetIter,
hipcub::CountingInputIterator<int64_t>>
segment_offsets_t(counting_iter,
paddle::operators::SegmentOffsetIter(num_cols));
T* sorted_values_ptr;
int64_t* sorted_indices_ptr;
DenseTensor temp_values, temp_indices;
const T* input = input_tensor->data<T>();
T* values = out_tensor->data<T>();
int64_t* indices = indices_tensor->mutable_data<int64_t>(dev_ctx.GetPlace());
temp_values.Resize(dim);
temp_indices.Resize(dim);
sorted_values_ptr = dev_ctx.template Alloc<T>(&temp_values);
sorted_indices_ptr = dev_ctx.template Alloc<int64_t>(&temp_indices);
auto err =
hipcub::DeviceSegmentedRadixSort::SortPairs(nullptr,
temp_storage_bytes,
input,
sorted_values_ptr,
input_indices.data<int64_t>(),
sorted_indices_ptr,
num_cols * num_rows,
num_rows,
segment_offsets_t,
segment_offsets_t + 1,
0,
sizeof(T) * 8,
cu_stream);
#ifdef __HIPCC__
if (err != hipSuccess) {
LOG(ERROR) << "KthvalueOP failed as could not launch "
"hipcub::DeviceSegmentedRadixSort::SortPairs, status: "
<< hipGetErrorString(err);
return false;
}
#else
if (err != hipSuccess) {
LOG(ERROR) << "KthvalueOP failed as could not launch "
"hipcub::DeviceSegmentedRadixSort::SortPairs, status: "
<< hipGetErrorString(err);
return false;
}
#endif
DenseTensor temp_storage;
temp_storage.Resize({static_cast<int>(temp_storage_bytes / sizeof(uint8_t))});
uint8_t* temp_storage_data = dev_ctx.template Alloc<uint8_t>(&temp_storage);
err = hipcub::DeviceSegmentedRadixSort::SortPairs(temp_storage_data,
temp_storage_bytes,
input,
sorted_values_ptr,
input_indices.data<int64_t>(),
sorted_indices_ptr,
num_cols * num_rows,
num_rows,
segment_offsets_t,
segment_offsets_t + 1,
0,
sizeof(T) * 8,
cu_stream);
#ifdef __HIPCC__
if (err != hipSuccess) {
LOG(ERROR) << "KthvalueOP failed as could not launch "
"hipcub::DeviceSegmentedRadixSort::SortPairs, "
<< temp_storage_bytes << ", status: " << hipGetErrorString(err);
return false;
}
#else
if (err != hipSuccess) {
LOG(ERROR) << "KthvalueOP failed as could not launch "
"hipcub::DeviceSegmentedRadixSort::SortPairs, "
<< temp_storage_bytes << ", status: " << hipGetErrorString(err);
return false;
}
#endif
auto& dev = *dev_ctx.eigen_device();
const Eigen::DSizes<Eigen::DenseIndex, 2> slice_indices{0, k - 1};
const Eigen::DSizes<Eigen::DenseIndex, 2> slice_sizes{num_rows, 1};
auto e_indices = EigenMatrix<int64_t>::From(*indices_tensor, dim);
auto e_tmp_indices =
EigenMatrix<int64_t>::From(static_cast<const DenseTensor>(temp_indices));
std::vector<int> odims = {static_cast<int>(num_rows), static_cast<int>(1)};
dim = phi::make_ddim(odims);
auto e_values = EigenMatrix<T>::From(*out_tensor, dim);
auto e_tmp_values =
EigenMatrix<T>::From(static_cast<const DenseTensor>(temp_values));
funcs::EigenSlice<std::decay_t<decltype(dev)>, int64_t, 2>::Eval(
dev, e_indices, e_tmp_indices, slice_indices, slice_sizes);
funcs::EigenSlice<std::decay_t<decltype(dev)>, T, 2>::Eval(
dev, e_values, e_tmp_values, slice_indices, slice_sizes);
return true;
}
template <typename T, typename Context>
void KthvalueKernel(const Context& dev_ctx,
const DenseTensor& x,
int k,
int axis,
bool keepdim,
DenseTensor* output,
DenseTensor* indices) {
const auto& in_dims = x.dims();
if (axis < 0) axis += in_dims.size();
auto out_dims = output->dims();
const T* input_data = x.data<T>();
T* output_data = dev_ctx.template Alloc<T>(output);
int64_t* indices_data = dev_ctx.template Alloc<int64_t>(indices);
if (axis == in_dims.size() - 1) {
const int64_t& input_height =
phi::product(phi::slice_ddim(in_dims, 0, in_dims.size() - 1));
const int64_t& input_width = in_dims[in_dims.size() - 1];
PADDLE_ENFORCE_EQ(
SortKthvalue<T>(
dev_ctx, &x, input_width, input_height, k, output, indices),
true,
phi::errors::External("KthvalueOP: Error when use cub sorting"));
return;
} else {
std::vector<int> trans;
for (int i = 0; i < axis; i++) {
trans.emplace_back(i);
}
trans.emplace_back(in_dims.size() - 1);
for (int i = axis + 1; i < in_dims.size() - 1; i++) {
trans.emplace_back(i);
}
trans.emplace_back(axis);
if (!keepdim) {
std::vector<int> tmp_out_shape;
for (int i = 0; i < axis; i++) {
tmp_out_shape.emplace_back(in_dims[i]);
}
tmp_out_shape.emplace_back(1);
for (int i = axis + 1; i < in_dims.size(); i++) {
tmp_out_shape.emplace_back(in_dims[i]);
}
DDim tmp_out_dims = phi::make_ddim(tmp_out_shape);
output->Resize(tmp_out_dims);
indices->Resize(tmp_out_dims);
}
DDim trans_dims(in_dims);
DDim trans_out_dims(in_dims);
for (int i = 0; i < trans.size(); i++) {
trans_dims[i] = in_dims[trans[i]];
trans_out_dims[i] = in_dims[trans[i]];
}
trans_out_dims[in_dims.size() - 1] = 1;
DenseTensor trans_input;
trans_input.mutable_data<T>(trans_dims, dev_ctx.GetPlace());
int ndims = trans.size();
funcs::TransCompute<phi::GPUContext, T>(
ndims, dev_ctx, x, &trans_input, trans);
DenseTensor trans_ind, trans_out;
trans_ind.mutable_data<int64_t>(trans_out_dims, dev_ctx.GetPlace());
trans_out.mutable_data<T>(trans_out_dims, dev_ctx.GetPlace());
const int64_t input_height =
phi::product(phi::slice_ddim(trans_dims, 0, trans_dims.size() - 1));
const int64_t input_width = trans_dims[trans_dims.size() - 1];
PADDLE_ENFORCE_EQ(
SortKthvalue<T>(dev_ctx,
&trans_input,
input_width,
input_height,
k,
&trans_out,
&trans_ind),
true,
phi::errors::External("KthvalueOP: Error when use cub sorting"));
funcs::TransCompute<phi::GPUContext, int64_t>(
ndims, dev_ctx, trans_ind, indices, trans);
funcs::TransCompute<phi::GPUContext, T>(
ndims, dev_ctx, trans_out, output, trans);
if (!keepdim) {
output->Resize(out_dims);
indices->Resize(out_dims);
}
}
}
} // namespace phi
PD_REGISTER_KERNEL(kthvalue,
GPU,
ALL_LAYOUT,
phi::KthvalueKernel,
float,
double,
int,
int64_t) {}
| d4c4f4681acc86277594f18edd4f9c79d5928106.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/kthvalue_kernel.h"
#include "paddle/fluid/operators/top_k_function_cuda.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/eigen/common.h"
#include "paddle/phi/kernels/funcs/eigen/eigen_function.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace phi {
// Map a column count to a power-of-two CUDA block size in [64, 1024].
// Each guard only fires after the larger thresholds have been ruled out,
// so the explicit upper-bound checks of the original are unnecessary.
inline int getBlockSize(int col) {
  if (col > 512) return 1024;
  if (col > 256) return 512;
  if (col > 128) return 256;
  if (col > 64) return 128;
  return 64;
}
template <typename T>
bool SortKthvalue(const phi::GPUContext& dev_ctx,
const DenseTensor* input_tensor,
const int64_t num_cols,
const int64_t num_rows,
const int k,
DenseTensor* out_tensor,
DenseTensor* indices_tensor) {
auto cu_stream = dev_ctx.stream();
DenseTensor input_indices;
const std::vector<int64_t> dims = {num_rows, num_cols};
auto dim = phi::make_ddim(dims);
input_indices.Resize(dim);
dev_ctx.template Alloc<int64_t>(&input_indices);
size_t temp_storage_bytes = -1;
int block_size = getBlockSize(num_cols);
unsigned int maxGridDimX = dev_ctx.GetCUDAMaxGridDimSize()[0];
unsigned int grid_size = num_rows < maxGridDimX
? static_cast<unsigned int>(num_rows)
: maxGridDimX;
paddle::operators::InitIndex<int64_t>
<<<grid_size, block_size, 0, cu_stream>>>(
input_indices.data<int64_t>(), num_rows, num_cols);
cub::CountingInputIterator<int64_t> counting_iter(0);
cub::TransformInputIterator<int64_t,
paddle::operators::SegmentOffsetIter,
cub::CountingInputIterator<int64_t>>
segment_offsets_t(counting_iter,
paddle::operators::SegmentOffsetIter(num_cols));
T* sorted_values_ptr;
int64_t* sorted_indices_ptr;
DenseTensor temp_values, temp_indices;
const T* input = input_tensor->data<T>();
T* values = out_tensor->data<T>();
int64_t* indices = indices_tensor->mutable_data<int64_t>(dev_ctx.GetPlace());
temp_values.Resize(dim);
temp_indices.Resize(dim);
sorted_values_ptr = dev_ctx.template Alloc<T>(&temp_values);
sorted_indices_ptr = dev_ctx.template Alloc<int64_t>(&temp_indices);
auto err =
cub::DeviceSegmentedRadixSort::SortPairs(nullptr,
temp_storage_bytes,
input,
sorted_values_ptr,
input_indices.data<int64_t>(),
sorted_indices_ptr,
num_cols * num_rows,
num_rows,
segment_offsets_t,
segment_offsets_t + 1,
0,
sizeof(T) * 8,
cu_stream);
#ifdef __HIPCC__
if (err != hipSuccess) {
LOG(ERROR) << "KthvalueOP failed as could not launch "
"hipcub::DeviceSegmentedRadixSort::SortPairs, status: "
<< hipGetErrorString(err);
return false;
}
#else
if (err != cudaSuccess) {
LOG(ERROR) << "KthvalueOP failed as could not launch "
"cub::DeviceSegmentedRadixSort::SortPairs, status: "
<< cudaGetErrorString(err);
return false;
}
#endif
DenseTensor temp_storage;
temp_storage.Resize({static_cast<int>(temp_storage_bytes / sizeof(uint8_t))});
uint8_t* temp_storage_data = dev_ctx.template Alloc<uint8_t>(&temp_storage);
err = cub::DeviceSegmentedRadixSort::SortPairs(temp_storage_data,
temp_storage_bytes,
input,
sorted_values_ptr,
input_indices.data<int64_t>(),
sorted_indices_ptr,
num_cols * num_rows,
num_rows,
segment_offsets_t,
segment_offsets_t + 1,
0,
sizeof(T) * 8,
cu_stream);
#ifdef __HIPCC__
if (err != hipSuccess) {
LOG(ERROR) << "KthvalueOP failed as could not launch "
"hipcub::DeviceSegmentedRadixSort::SortPairs, "
<< temp_storage_bytes << ", status: " << hipGetErrorString(err);
return false;
}
#else
if (err != cudaSuccess) {
LOG(ERROR) << "KthvalueOP failed as could not launch "
"cub::DeviceSegmentedRadixSort::SortPairs, "
<< temp_storage_bytes << ", status: " << cudaGetErrorString(err);
return false;
}
#endif
auto& dev = *dev_ctx.eigen_device();
const Eigen::DSizes<Eigen::DenseIndex, 2> slice_indices{0, k - 1};
const Eigen::DSizes<Eigen::DenseIndex, 2> slice_sizes{num_rows, 1};
auto e_indices = EigenMatrix<int64_t>::From(*indices_tensor, dim);
auto e_tmp_indices =
EigenMatrix<int64_t>::From(static_cast<const DenseTensor>(temp_indices));
std::vector<int> odims = {static_cast<int>(num_rows), static_cast<int>(1)};
dim = phi::make_ddim(odims);
auto e_values = EigenMatrix<T>::From(*out_tensor, dim);
auto e_tmp_values =
EigenMatrix<T>::From(static_cast<const DenseTensor>(temp_values));
funcs::EigenSlice<std::decay_t<decltype(dev)>, int64_t, 2>::Eval(
dev, e_indices, e_tmp_indices, slice_indices, slice_sizes);
funcs::EigenSlice<std::decay_t<decltype(dev)>, T, 2>::Eval(
dev, e_values, e_tmp_values, slice_indices, slice_sizes);
return true;
}
// Computes the k-th smallest value of `x` along `axis`, plus its index,
// writing them into `output` / `indices`.
//
// Fast path: when `axis` is the innermost dimension, the tensor is viewed
// as (input_height x input_width) rows and handed straight to the
// cub-based SortKthvalue. Otherwise the target axis is transposed to the
// last position, sorted there, and the results are transposed back.
template <typename T, typename Context>
void KthvalueKernel(const Context& dev_ctx,
                    const DenseTensor& x,
                    int k,
                    int axis,
                    bool keepdim,
                    DenseTensor* output,
                    DenseTensor* indices) {
  const auto& in_dims = x.dims();
  // Negative axes count from the end (Python-style indexing).
  if (axis < 0) axis += in_dims.size();
  auto out_dims = output->dims();
  const T* input_data = x.data<T>();
  T* output_data = dev_ctx.template Alloc<T>(output);
  int64_t* indices_data = dev_ctx.template Alloc<int64_t>(indices);
  if (axis == in_dims.size() - 1) {
    // Collapse all leading dims into rows; each row is handled independently.
    const int64_t& input_height =
        phi::product(phi::slice_ddim(in_dims, 0, in_dims.size() - 1));
    const int64_t& input_width = in_dims[in_dims.size() - 1];
    PADDLE_ENFORCE_EQ(
        SortKthvalue<T>(
            dev_ctx, &x, input_width, input_height, k, output, indices),
        true,
        phi::errors::External("KthvalueOP: Error when use cub sorting"));
    return;
  } else {
    // Build a permutation that swaps `axis` with the last dimension
    // (a pure swap, hence its own inverse and reusable for the way back).
    std::vector<int> trans;
    for (int i = 0; i < axis; i++) {
      trans.emplace_back(i);
    }
    trans.emplace_back(in_dims.size() - 1);
    for (int i = axis + 1; i < in_dims.size() - 1; i++) {
      trans.emplace_back(i);
    }
    trans.emplace_back(axis);
    if (!keepdim) {
      // Temporarily give output/indices a keepdim-style shape (reduced axis
      // kept with size 1) so the transposed writes below line up; the final
      // Resize at the bottom restores the squeezed shape.
      std::vector<int> tmp_out_shape;
      for (int i = 0; i < axis; i++) {
        tmp_out_shape.emplace_back(in_dims[i]);
      }
      tmp_out_shape.emplace_back(1);
      for (int i = axis + 1; i < in_dims.size(); i++) {
        tmp_out_shape.emplace_back(in_dims[i]);
      }
      DDim tmp_out_dims = phi::make_ddim(tmp_out_shape);
      output->Resize(tmp_out_dims);
      indices->Resize(tmp_out_dims);
    }
    DDim trans_dims(in_dims);
    DDim trans_out_dims(in_dims);
    for (int i = 0; i < trans.size(); i++) {
      trans_dims[i] = in_dims[trans[i]];
      trans_out_dims[i] = in_dims[trans[i]];
    }
    // After the transpose the reduced axis is last; the output keeps size 1
    // there.
    trans_out_dims[in_dims.size() - 1] = 1;
    DenseTensor trans_input;
    trans_input.mutable_data<T>(trans_dims, dev_ctx.GetPlace());
    int ndims = trans.size();
    // Move the target axis to the innermost position.
    funcs::TransCompute<phi::GPUContext, T>(
        ndims, dev_ctx, x, &trans_input, trans);
    DenseTensor trans_ind, trans_out;
    trans_ind.mutable_data<int64_t>(trans_out_dims, dev_ctx.GetPlace());
    trans_out.mutable_data<T>(trans_out_dims, dev_ctx.GetPlace());
    const int64_t input_height =
        phi::product(phi::slice_ddim(trans_dims, 0, trans_dims.size() - 1));
    const int64_t input_width = trans_dims[trans_dims.size() - 1];
    PADDLE_ENFORCE_EQ(
        SortKthvalue<T>(dev_ctx,
                        &trans_input,
                        input_width,
                        input_height,
                        k,
                        &trans_out,
                        &trans_ind),
        true,
        phi::errors::External("KthvalueOP: Error when use cub sorting"));
    // Transpose results back to the original layout.
    funcs::TransCompute<phi::GPUContext, int64_t>(
        ndims, dev_ctx, trans_ind, indices, trans);
    funcs::TransCompute<phi::GPUContext, T>(
        ndims, dev_ctx, trans_out, output, trans);
    if (!keepdim) {
      output->Resize(out_dims);
      indices->Resize(out_dims);
    }
  }
}
} // namespace phi
PD_REGISTER_KERNEL(kthvalue,
GPU,
ALL_LAYOUT,
phi::KthvalueKernel,
float,
double,
int,
int64_t) {}
|
ae21692fddb0e17279b738b6da5a97d4e4b83444.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
// Rank sort: thread `id` counts how many of the first gridDim.x elements
// of A sort strictly before A[id] (ties broken by lower index, giving each
// equal value a unique slot) and scatters A[id] to that position in B.
// NOTE(review): this is correct only when launched with gridDim.x equal to
// the number of elements to sort and exactly one thread per element
// (e.g. <<<m,1>>> over a row of length m). With a larger launch, extra
// threads rank against the wrong range and race on writes to B — verify
// the launch configuration in the host code.
__global__ void matrixSort(int *A, int *B){
    int id = blockIdx.x*blockDim.x + threadIdx.x,m = gridDim.x,pos=0;
    // pos = rank of A[id] among the m elements being sorted.
    for(int i=0;i<m;i++)
        if(A[id]>A[i] || (A[i]==A[id] && i<id))
            pos++;
    B[pos] = A[id];
}
/*
 * Reads an n x m matrix and prints each row in ascending order, sorting
 * each row on the GPU with the rank-sort kernel above.
 *
 * Fixes over the previous version:
 *  - rows of A[100][100] are 100 ints apart in memory, so rows are now
 *    copied one at a time instead of one interleaving n*m-byte copy
 *  - the kernel now runs on row i, not always on the same data
 *  - launch config <<<m,1>>> makes gridDim.x == m, the element count the
 *    kernel actually uses, with exactly one thread per element
 *  - only one row's worth of bytes is copied back, matching db's size
 *    (previously n*m bytes were copied out of a (n*m)/n allocation)
 *  - the result loop prints B[j] instead of repeating B[i]
 */
int main(){
    int A[100][100],B[100],m,n,i,j,rowsize;
    printf("Enter value for n,m: ");
    scanf("%d %d",&n,&m);
    printf("Enter the values for matrix :\n");
    for(i=0;i<n;i++)
        for(j=0;j<m;j++)
            scanf("%d",&A[i][j]);
    int *da,*db;
    rowsize = sizeof(int)*m;          /* bytes in one row */
    hipMalloc((void **)&da,rowsize);  /* unsorted row (input) */
    hipMalloc((void**)&db,rowsize);   /* sorted row (output) */
    printf("Result:\n");
    for(i=0;i<n;i++){
        /* copy row i on its own so the device sees m contiguous ints */
        hipMemcpy(da,A[i],rowsize,hipMemcpyHostToDevice);
        /* m blocks of 1 thread: gridDim.x == m as the kernel expects */
        hipLaunchKernelGGL((matrixSort), dim3(m), dim3(1), 0, 0, da, db);
        hipMemcpy(B,db,rowsize,hipMemcpyDeviceToHost);
        for(j=0;j<m;j++)
            printf("%d ",B[j]);
        printf("\n");
    }
    hipFree(da);
    hipFree(db);
    return 0;
}
| ae21692fddb0e17279b738b6da5a97d4e4b83444.cu | #include <stdio.h>
// Rank sort: thread `id` counts how many of the first gridDim.x elements
// of A sort strictly before A[id] (ties broken by lower index, giving each
// equal value a unique slot) and scatters A[id] to that position in B.
// NOTE(review): this is correct only when launched with gridDim.x equal to
// the number of elements to sort and exactly one thread per element
// (e.g. <<<m,1>>> over a row of length m). With a larger launch, extra
// threads rank against the wrong range and race on writes to B — verify
// the launch configuration in the host code.
__global__ void matrixSort(int *A, int *B){
    int id = blockIdx.x*blockDim.x + threadIdx.x,m = gridDim.x,pos=0;
    // pos = rank of A[id] among the m elements being sorted.
    for(int i=0;i<m;i++)
        if(A[id]>A[i] || (A[i]==A[id] && i<id))
            pos++;
    B[pos] = A[id];
}
/*
 * Reads an n x m matrix and prints each row in ascending order, sorting
 * each row on the GPU with the rank-sort kernel above.
 *
 * Fixes over the previous version:
 *  - rows of A[100][100] are 100 ints apart in memory, so rows are now
 *    copied one at a time instead of one interleaving n*m-byte copy
 *  - the kernel now runs on row i, not always on the same data
 *  - launch config <<<m,1>>> makes gridDim.x == m, the element count the
 *    kernel actually uses, with exactly one thread per element
 *  - only one row's worth of bytes is copied back, matching db's size
 *    (previously n*m bytes were copied out of a (n*m)/n allocation)
 *  - the result loop prints B[j] instead of repeating B[i]
 */
int main(){
    int A[100][100],B[100],m,n,i,j,rowsize;
    printf("Enter value for n,m: ");
    scanf("%d %d",&n,&m);
    printf("Enter the values for matrix :\n");
    for(i=0;i<n;i++)
        for(j=0;j<m;j++)
            scanf("%d",&A[i][j]);
    int *da,*db;
    rowsize = sizeof(int)*m;           /* bytes in one row */
    cudaMalloc((void **)&da,rowsize);  /* unsorted row (input) */
    cudaMalloc((void**)&db,rowsize);   /* sorted row (output) */
    printf("Result:\n");
    for(i=0;i<n;i++){
        /* copy row i on its own so the device sees m contiguous ints */
        cudaMemcpy(da,A[i],rowsize,cudaMemcpyHostToDevice);
        /* m blocks of 1 thread: gridDim.x == m as the kernel expects */
        matrixSort<<<m,1>>>(da,db);
        cudaMemcpy(B,db,rowsize,cudaMemcpyDeviceToHost);
        for(j=0;j<m;j++)
            printf("%d ",B[j]);
        printf("\n");
    }
    cudaFree(da);
    cudaFree(db);
    return 0;
}
|
f9d10553995b2edc9cef8b2e1dd298376a3c6ad0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <math.h>
#include <layers/fully_connected_layer.hpp>
#include <linalg/matrix_vector_op.cuh>
#include <linalg/reduce.cuh>
#include <utils.cuh>
#include <utils.hpp>
#include <vector>
namespace HugeCTR {
// Constructs a single-precision fully-connected layer.
//
// in_tensor  : (m x k) input activations
// out_tensor : (m x n) output activations
// Validates that both tensors are 2-D with matching batch dimension m,
// then reserves weights_[0] = (k x n) weight, weights_[1] = (1 x n) bias,
// and matching gradient tensors in wgrad_.
FullyConnectedLayer::FullyConnectedLayer(const std::shared_ptr<BufferBlock2<float>>& weight_buff,
                                         const std::shared_ptr<BufferBlock2<float>>& wgrad_buff,
                                         const Tensor2<float>& in_tensor,
                                         const Tensor2<float>& out_tensor,
                                         const std::shared_ptr<GPUResource>& gpu_resource,
                                         bool use_mixed_precision, bool enable_tf32_compute,
                                         std::vector<Initializer_t> initializer_types)
    : Layer(gpu_resource, initializer_types),
      use_mixed_precision_(use_mixed_precision),
      enable_tf32_compute_(enable_tf32_compute) {
  try {
    // check the in_tensor and out_tensor
    const auto& in_tensor_dim = in_tensor.get_dimensions();
    const auto& out_tensor_dim = out_tensor.get_dimensions();
    // 1. both tensors must have exactly two dimensions
    if (in_tensor_dim.size() != 2 || out_tensor_dim.size() != 2) {
      CK_THROW_(Error_t::WrongInput, "input or output tensor doesn't has two dimensions");
    }
    // 2. the batch dimension m must match between input and output
    size_t m = in_tensor_dim[0];
    size_t n = out_tensor_dim[1];
    size_t k = in_tensor_dim[1];
    size_t m_ck = out_tensor_dim[0];
    if (m != m_ck) {
      CK_THROW_(Error_t::WrongInput, "size of input / output tensor doesn't match");
    }
    std::vector<size_t> weight_dim = {k, n};
    std::vector<size_t> bias_dim = {1, n};
    {
      // weights_[0]: weight matrix
      Tensor2<float> tensor;
      weight_buff->reserve(weight_dim, &tensor);
      weights_.push_back(tensor);
    }
    {
      // weights_[1]: bias vector
      Tensor2<float> tensor;
      weight_buff->reserve(bias_dim, &tensor);
      weights_.push_back(tensor);
    }
    {
      // wgrad_[0]: weight gradient
      Tensor2<float> tensor;
      wgrad_buff->reserve(weight_dim, &tensor);
      wgrad_.push_back(tensor);
    }
    {
      // wgrad_[1]: bias gradient
      Tensor2<float> tensor;
      wgrad_buff->reserve(bias_dim, &tensor);
      wgrad_.push_back(tensor);
    }
    in_tensors_.push_back(in_tensor);
    out_tensors_.push_back(out_tensor);
    // Where should we create this cuBLAS handle?
  } catch (const std::runtime_error& rt_err) {
    std::cerr << rt_err.what() << std::endl;
    throw;
  }
}
// Adds bias[col] to every element of row blockIdx.x of a row-major (m x n)
// matrix: one block per row, threads striding across the n columns.
void __global__ add_bias_kernel_row(float* data, const float* bias, const int m, const int n) {
  float* row = data + blockIdx.x * n;
  for (int col = threadIdx.x; col < n; col += blockDim.x) {
    row[col] += bias[col];
  }
}
// Adds bias[blockIdx.x] to every element of column blockIdx.x of a
// column-major (m x n) matrix: one block per column, threads striding
// down the m rows. The column's bias value is loaded once per thread.
void __global__ add_bias_kernel_col(float* data, const float* bias, const int m, const int n) {
  float* col = data + blockIdx.x * m;
  const float b = bias[blockIdx.x];
  for (int row = threadIdx.x; row < m; row += blockDim.x) {
    col[row] += b;
  }
}
// Launches the bias-add kernel matching the matrix layout of `data`.
// row_major: m blocks (one per row), threads stride over the n columns.
// otherwise: n blocks (one per column), threads stride over the m rows.
void add_bias(float* data, const float* bias, const int m, const int n, bool row_major,
              hipStream_t stream) {
  if (row_major) {
    dim3 grid(m);
    dim3 block(min(n, 1024));  // capped at the 1024-threads-per-block limit
    hipLaunchKernelGGL(( add_bias_kernel_row), dim3(grid), dim3(block), 0, stream, data, bias, m, n);
  } else {
    dim3 grid(n);
    dim3 block(min(m, 1024));
    hipLaunchKernelGGL(( add_bias_kernel_col), dim3(grid), dim3(block), 0, stream, data, bias, m, n);
  }
#ifndef NDEBUG
  // Debug builds synchronize so any kernel fault surfaces here.
  hipDeviceSynchronize();
  CK_CUDA_THROW_(hipGetLastError());
#endif
}
// Forward pass: out = in * W + bias, with in (m x k), W (k x n) and
// out (m x n), all row-major.
void FullyConnectedLayer::fprop(bool is_train) {
  CudaDeviceContext context(get_device_id());
  Tensor2<float>& in_tensor = get_in_tensors(is_train)[0];
  Tensor2<float>& out_tensor = out_tensors_[0];
  float* weight = weights_[0].get_ptr();
  float* bias = weights_[1].get_ptr();
  float* in = in_tensor.get_ptr();
  float* out = out_tensor.get_ptr();
  const auto& in_tensor_dim = in_tensor.get_dimensions();
  const auto& out_tensor_dim = out_tensor.get_dimensions();
  int m, n, k;
  m = in_tensor_dim[0];
  n = out_tensor_dim[1];
  k = in_tensor_dim[1];
  float alpha = 1.0f, beta = 0.0f;
  // Optionally allow TF32 tensor-core math for the FP32 GEMM.
  hipblasComputeType_t compute_type =
      enable_tf32_compute_ ? CUBLAS_COMPUTE_32F_FAST_TF32 : CUBLAS_COMPUTE_32F;
  // cuBLAS is column-major: each row-major buffer is its own column-major
  // transpose, so gemm(N, N, n, m, k) produces out = in * W without any
  // explicit transposition. falgo_ is the algorithm cached by
  // search_algorithm().
  CK_CUBLAS_THROW_(hipblasGemmEx(get_gpu().get_cublas_handle(), HIPBLAS_OP_N, HIPBLAS_OP_N, n, m, k,
                                 &alpha, weight, HIP_R_32F, n, in, HIP_R_32F, k, &beta, out,
                                 HIP_R_32F, n, compute_type, falgo_));
  add_bias(out, bias, m, n, true, get_gpu().get_stream());
  //PROFILE_RECORD("TopMLP.fprop.stop", get_gpu().get_stream());
}
// Backward pass. On entry `out` holds dL/dout. Computes:
//   wgrad    += in^T * dout   (beta_w = 1 accumulates across calls)
//   dL/din    = dout * W^T    (beta_x = 0; written in place into the
//                              input tensor, clobbering the activations)
//   bias_grad = column-wise sum of dout over the batch dimension
void FullyConnectedLayer::bprop() {
  CudaDeviceContext context(get_device_id());
  Tensor2<float>& in_tensor = get_in_tensors(true)[0];
  Tensor2<float>& out_tensor = out_tensors_[0];
  float* wgrad = wgrad_[0].get_ptr();
  float* bias_grad = wgrad_[1].get_ptr();
  float* weight = weights_[0].get_ptr();
  float* in = in_tensor.get_ptr();
  float* out = out_tensor.get_ptr();
  const auto& in_tensor_dim = in_tensor.get_dimensions();
  const auto& out_tensor_dim = out_tensor.get_dimensions();
  int m, n, k;
  m = in_tensor_dim[0];
  n = out_tensor_dim[1];
  k = in_tensor_dim[1];
  float alpha = 1.0f, beta_w = 1.0f, beta_x = 0.0f;
  // PROFILE_RECORD("TopMLP.bprop.start", get_gpu().get_stream());
  hipblasComputeType_t compute_type =
      enable_tf32_compute_ ? CUBLAS_COMPUTE_32F_FAST_TF32 : CUBLAS_COMPUTE_32F;
  // gradient with respect to W (accumulated into wgrad)
  CK_CUBLAS_THROW_(hipblasGemmEx(get_gpu().get_cublas_handle(), HIPBLAS_OP_N, HIPBLAS_OP_T, n, k, m,
                                 &alpha, out, HIP_R_32F, n, in, HIP_R_32F, k, &beta_w, wgrad,
                                 HIP_R_32F, n, compute_type, balgo_W_));
  // gradient with respect to the input Xn (overwrites `in`)
  CK_CUBLAS_THROW_(hipblasGemmEx(get_gpu().get_cublas_handle(), HIPBLAS_OP_T, HIPBLAS_OP_N, k, m, n,
                                 &alpha, weight, HIP_R_32F, n, out, HIP_R_32F, n, &beta_x, in,
                                 HIP_R_32F, k, compute_type, balgo_Xn_));
  // bias gradient: reduce dout over rows (batch dimension)
  MLCommon::LinAlg::reduce(bias_grad, out, m, n, float(0), false, true, get_gpu().get_stream(),
                           true);
}
// Benchmarks the available cuBLAS GEMM algorithms for the three GEMMs this
// layer issues (fprop, bprop w.r.t. W, bprop w.r.t. the input) and caches
// the fastest of each in falgo_, balgo_W_ and balgo_Xn_.
// Each candidate runs repeat_num times and is timed with CUDA events;
// `status` only reflects the final repetition, which is sufficient because
// every repetition uses the same algorithm.
void FullyConnectedLayer::search_algorithm() {
  // Set to the CUDA device where this layer assigned to
  CudaDeviceContext context(get_device_id());
  const int repeat_num = 5;
  // Device Tensors to be used
  Tensor2<float>& in_tensor = get_in_tensors(true)[0];
  Tensor2<float>& out_tensor = out_tensors_[0];
  float* weight = weights_[0].get_ptr();
  float* in = in_tensor.get_ptr();
  float* out = out_tensor.get_ptr();
  float* wgrad = wgrad_[0].get_ptr();
  // Tensor dim
  const auto& in_tensor_dim = in_tensor.get_dimensions();
  const auto& out_tensor_dim = out_tensor.get_dimensions();
  int m, n, k;
  m = in_tensor_dim[0];
  n = out_tensor_dim[1];
  k = in_tensor_dim[1];
  // Shortest observed time so far (sentinel: effectively +infinity)
  float shortestTime = 100000000.0;
  float time;
  hipEvent_t start, stop;
  CK_CUDA_THROW_(hipEventCreate(&start));
  CK_CUDA_THROW_(hipEventCreate(&stop));
  // cublas ret status
  hipblasStatus_t status;
  // Range of algorithm ids to probe (tensor-op variants for mixed precision)
  int startAlgo, endAlgo;
  if (use_mixed_precision_) {
    startAlgo = (int)CUBLAS_GEMM_DEFAULT_TENSOR_OP;
    endAlgo = (int)CUBLAS_GEMM_ALGO15_TENSOR_OP;
  } else {
    startAlgo = (int)HIPBLAS_GEMM_DEFAULT;
    endAlgo = (int)CUBLAS_GEMM_ALGO23;
  }
  hipblasComputeType_t compute_type =
      enable_tf32_compute_ ? CUBLAS_COMPUTE_32F_FAST_TF32 : CUBLAS_COMPUTE_32F;
  // Search all the algorithms for the fprop GEMM
  for (int testAlgo = startAlgo; testAlgo <= endAlgo; testAlgo++) {
    float alpha = 1.0f, beta = 0.0f;
    // Record start event
    CK_CUDA_THROW_(hipEventRecord(start, get_gpu().get_stream()));
    for (int i = 0; i < repeat_num; ++i) {
      status = hipblasGemmEx(get_gpu().get_cublas_handle(), HIPBLAS_OP_N, HIPBLAS_OP_N, n, m, k,
                             &alpha, weight, HIP_R_32F, n, in, HIP_R_32F, k, &beta, out,
                             HIP_R_32F, n, compute_type, static_cast<hipblasGemmAlgo_t>(testAlgo));
    }
    CK_CUDA_THROW_(hipEventRecord(stop, get_gpu().get_stream()));
    CK_CUDA_THROW_(hipEventSynchronize(stop));
    CK_CUDA_THROW_(hipEventElapsedTime(&time, start, stop));
    // Avg time (ms) of this algorithm for the fprop GEMM
    time = time / repeat_num;
    // Skip if the algorithm is NOT supported for the fprop configuration
    if (status != HIPBLAS_STATUS_SUCCESS) {
      // printf("The algorithms %d is not supported for fprop, skipped.\n", testAlgo);
      continue;
    }
    // Record the optimal time and algorithm
    if (time < shortestTime) {
      shortestTime = time;
      falgo_ = static_cast<hipblasGemmAlgo_t>(testAlgo);
    }
  }
  // Reset shortestTime
  shortestTime = 100000000.0;
  // Search all the algorithms for the bprop_W GEMM
  for (int testAlgo = startAlgo; testAlgo <= endAlgo; testAlgo++) {
    float alpha = 1.0f, beta_w = 1.0f;
    // Record start event
    CK_CUDA_THROW_(hipEventRecord(start, get_gpu().get_stream()));
    for (int i = 0; i < repeat_num; ++i) {
      status = hipblasGemmEx(get_gpu().get_cublas_handle(), HIPBLAS_OP_N, HIPBLAS_OP_T, n, k, m,
                             &alpha, out, HIP_R_32F, n, in, HIP_R_32F, k, &beta_w, wgrad,
                             HIP_R_32F, n, compute_type, static_cast<hipblasGemmAlgo_t>(testAlgo));
    }
    CK_CUDA_THROW_(hipEventRecord(stop, get_gpu().get_stream()));
    CK_CUDA_THROW_(hipEventSynchronize(stop));
    CK_CUDA_THROW_(hipEventElapsedTime(&time, start, stop));
    // Avg time (ms) of this algorithm for the bprop_W GEMM
    time = time / repeat_num;
    // Skip if the algorithm is NOT supported for the bprop_W configuration
    if (status != HIPBLAS_STATUS_SUCCESS) {
      // printf("The algorithms %d is not supported for bprop_W, skipped.\n", testAlgo);
      continue;
    }
    // Record the optimal time and algorithm
    if (time < shortestTime) {
      shortestTime = time;
      balgo_W_ = static_cast<hipblasGemmAlgo_t>(testAlgo);
    }
  }
  // Reset shortestTime
  shortestTime = 100000000.0;
  // Search all the algorithms for the bprop_Xn GEMM
  for (int testAlgo = startAlgo; testAlgo <= endAlgo; testAlgo++) {
    float alpha = 1.0f, beta_x = 0.0f;
    // Record start event
    CK_CUDA_THROW_(hipEventRecord(start, get_gpu().get_stream()));
    for (int i = 0; i < repeat_num; ++i) {
      status = hipblasGemmEx(get_gpu().get_cublas_handle(), HIPBLAS_OP_T, HIPBLAS_OP_N, k, m, n,
                             &alpha, weight, HIP_R_32F, n, out, HIP_R_32F, n, &beta_x, in,
                             HIP_R_32F, k, compute_type, static_cast<hipblasGemmAlgo_t>(testAlgo));
    }
    CK_CUDA_THROW_(hipEventRecord(stop, get_gpu().get_stream()));
    CK_CUDA_THROW_(hipEventSynchronize(stop));
    CK_CUDA_THROW_(hipEventElapsedTime(&time, start, stop));
    // Avg time (ms) of this algorithm for the bprop_Xn GEMM
    time = time / repeat_num;
    // Skip if the algorithm is NOT supported for the bprop_Xn configuration
    if (status != HIPBLAS_STATUS_SUCCESS) {
      // printf("The algorithms %d is not supported for bprop_Xn, skipped.\n", testAlgo);
      continue;
    }
    // Record the optimal time and algorithm
    if (time < shortestTime) {
      shortestTime = time;
      balgo_Xn_ = static_cast<hipblasGemmAlgo_t>(testAlgo);
    }
  }
  // Print selection information
  // printf("The algorithm selection for fprop, bprop_W and bprop_Xn are: %d, %d and %d.\n",
  // (int)falgo_, (int)balgo_W_, (int)balgo_Xn_);
  // Output msg
  // MESSAGE_("The fully-connected layer has finished choosing the algorithm for cublas Gemm.");
  // Clean-up
  CK_CUDA_THROW_(hipEventDestroy(start));
  CK_CUDA_THROW_(hipEventDestroy(stop));
}
// Uniform initializer over [-limit, limit] with
// limit = 1 / (fan_in + fan_out) for the weight (index 0) and
// limit = 1 / fan_out for the bias (index != 0).
std::unique_ptr<DataSimulator> FullyConnectedLayer::get_uniform_initializer(const int index) {
  const Tensor2<float>& in_tensor = get_in_tensors(true)[0];
  const Tensor2<float>& out_tensor = out_tensors_[0];
  float bottom_dim = in_tensor.get_dimensions()[1];
  float top_dim = out_tensor.get_dimensions()[1];
  float limit = 1.0f / ((0 == index ? bottom_dim : 0) + top_dim);
  return std::make_unique<UniformDataSimulator>(-1 * limit, limit);
}
// Xavier/Glorot uniform initializer (fan_avg mode); for the bias
// (index != 0) fan_in is passed as 0 so only fan_out contributes.
std::unique_ptr<DataSimulator> FullyConnectedLayer::get_xavier_uniform_initializer(
    const int index) {
  const Tensor2<float>& in_tensor = get_in_tensors(true)[0];
  const Tensor2<float>& out_tensor = out_tensors_[0];
  float bottom_dim = in_tensor.get_dimensions()[1];
  float top_dim = out_tensor.get_dimensions()[1];
  return std::make_unique<VarianceScalingSimulator>(1.f, data_simu::Mode_t::Fan_avg,
                                                    data_simu::Distribution_t::Uniform,
                                                    0 == index ? bottom_dim : 0, top_dim);
}
// Xavier/Glorot normal initializer (fan_avg mode); for the bias
// (index != 0) fan_in is passed as 0 so only fan_out contributes.
std::unique_ptr<DataSimulator> FullyConnectedLayer::get_xavier_norm_initializer(const int index) {
  const Tensor2<float>& in_tensor = get_in_tensors(true)[0];
  const Tensor2<float>& out_tensor = out_tensors_[0];
  float bottom_dim = in_tensor.get_dimensions()[1];
  float top_dim = out_tensor.get_dimensions()[1];
  return std::make_unique<VarianceScalingSimulator>(1.f, data_simu::Mode_t::Fan_avg,
                                                    data_simu::Distribution_t::Norm,
                                                    0 == index ? bottom_dim : 0, top_dim);
}
// Default initializer: Xavier-normal for the weight (index 0); Gaussian
// with stddev sqrt(1/top_dim) clipped to +/-2*stddev for the bias
// (index 1). Any other index is rejected.
std::unique_ptr<DataSimulator> FullyConnectedLayer::get_default_initializer(const int index) {
  const Tensor2<float>& in_tensor = get_in_tensors(true)[0];
  const Tensor2<float>& out_tensor = out_tensors_[0];
  float bottom_dim = in_tensor.get_dimensions()[1];
  float top_dim = out_tensor.get_dimensions()[1];
  std::unique_ptr<DataSimulator> simu(nullptr);
  if (0 == index) {
    simu.reset(new VarianceScalingSimulator(1.f, data_simu::Mode_t::Fan_avg,
                                            data_simu::Distribution_t::Norm, bottom_dim, top_dim));
  } else if (1 == index) {
    float stddev = sqrt(1.f / top_dim);
    simu.reset(new GaussianDataSimulator(0, stddev, -2 * stddev, 2 * stddev));
  } else {
    CK_THROW_(Error_t::OutOfBound, "index != {0, 1}.");
  }
  return simu;
}
} // namespace HugeCTR
| f9d10553995b2edc9cef8b2e1dd298376a3c6ad0.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <math.h>
#include <layers/fully_connected_layer.hpp>
#include <linalg/matrix_vector_op.cuh>
#include <linalg/reduce.cuh>
#include <utils.cuh>
#include <utils.hpp>
#include <vector>
namespace HugeCTR {
// Constructs a single-precision fully-connected layer.
//
// in_tensor  : (m x k) input activations
// out_tensor : (m x n) output activations
// Validates that both tensors are 2-D with matching batch dimension m,
// then reserves weights_[0] = (k x n) weight, weights_[1] = (1 x n) bias,
// and matching gradient tensors in wgrad_.
FullyConnectedLayer::FullyConnectedLayer(const std::shared_ptr<BufferBlock2<float>>& weight_buff,
                                         const std::shared_ptr<BufferBlock2<float>>& wgrad_buff,
                                         const Tensor2<float>& in_tensor,
                                         const Tensor2<float>& out_tensor,
                                         const std::shared_ptr<GPUResource>& gpu_resource,
                                         bool use_mixed_precision, bool enable_tf32_compute,
                                         std::vector<Initializer_t> initializer_types)
    : Layer(gpu_resource, initializer_types),
      use_mixed_precision_(use_mixed_precision),
      enable_tf32_compute_(enable_tf32_compute) {
  try {
    // check the in_tensor and out_tensor
    const auto& in_tensor_dim = in_tensor.get_dimensions();
    const auto& out_tensor_dim = out_tensor.get_dimensions();
    // 1. both tensors must have exactly two dimensions
    if (in_tensor_dim.size() != 2 || out_tensor_dim.size() != 2) {
      CK_THROW_(Error_t::WrongInput, "input or output tensor doesn't has two dimensions");
    }
    // 2. the batch dimension m must match between input and output
    size_t m = in_tensor_dim[0];
    size_t n = out_tensor_dim[1];
    size_t k = in_tensor_dim[1];
    size_t m_ck = out_tensor_dim[0];
    if (m != m_ck) {
      CK_THROW_(Error_t::WrongInput, "size of input / output tensor doesn't match");
    }
    std::vector<size_t> weight_dim = {k, n};
    std::vector<size_t> bias_dim = {1, n};
    {
      // weights_[0]: weight matrix
      Tensor2<float> tensor;
      weight_buff->reserve(weight_dim, &tensor);
      weights_.push_back(tensor);
    }
    {
      // weights_[1]: bias vector
      Tensor2<float> tensor;
      weight_buff->reserve(bias_dim, &tensor);
      weights_.push_back(tensor);
    }
    {
      // wgrad_[0]: weight gradient
      Tensor2<float> tensor;
      wgrad_buff->reserve(weight_dim, &tensor);
      wgrad_.push_back(tensor);
    }
    {
      // wgrad_[1]: bias gradient
      Tensor2<float> tensor;
      wgrad_buff->reserve(bias_dim, &tensor);
      wgrad_.push_back(tensor);
    }
    in_tensors_.push_back(in_tensor);
    out_tensors_.push_back(out_tensor);
    // Where should we create this cuBLAS handle?
  } catch (const std::runtime_error& rt_err) {
    std::cerr << rt_err.what() << std::endl;
    throw;
  }
}
// Adds bias[col] to every element of row blockIdx.x of a row-major (m x n)
// matrix: one block per row, threads striding across the n columns.
void __global__ add_bias_kernel_row(float* data, const float* bias, const int m, const int n) {
  int offset = blockIdx.x * n;
  for (int tid = threadIdx.x; tid < n; tid += blockDim.x) {
    data[offset + tid] += bias[tid];
  }
}
// Adds bias[blockIdx.x] to every element of column blockIdx.x of a
// column-major (m x n) matrix: one block per column, threads striding
// down the m rows.
void __global__ add_bias_kernel_col(float* data, const float* bias, const int m, const int n) {
  int offset = blockIdx.x * m;
  float b = bias[blockIdx.x];
  for (int tid = threadIdx.x; tid < m; tid += blockDim.x) {
    data[offset + tid] += b;
  }
}
// Launches the bias-add kernel matching the matrix layout of `data`.
// row_major: m blocks (one per row); otherwise: n blocks (one per column).
void add_bias(float* data, const float* bias, const int m, const int n, bool row_major,
              cudaStream_t stream) {
  if (row_major) {
    dim3 grid(m);
    dim3 block(min(n, 1024));  // capped at the 1024-threads-per-block limit
    add_bias_kernel_row<<<grid, block, 0, stream>>>(data, bias, m, n);
  } else {
    dim3 grid(n);
    dim3 block(min(m, 1024));
    add_bias_kernel_col<<<grid, block, 0, stream>>>(data, bias, m, n);
  }
#ifndef NDEBUG
  // Debug builds synchronize so any kernel fault surfaces here.
  cudaDeviceSynchronize();
  CK_CUDA_THROW_(cudaGetLastError());
#endif
}
// Forward pass: out = in * W + bias, with in (m x k), W (k x n) and
// out (m x n), all row-major.
void FullyConnectedLayer::fprop(bool is_train) {
  CudaDeviceContext context(get_device_id());
  Tensor2<float>& in_tensor = get_in_tensors(is_train)[0];
  Tensor2<float>& out_tensor = out_tensors_[0];
  float* weight = weights_[0].get_ptr();
  float* bias = weights_[1].get_ptr();
  float* in = in_tensor.get_ptr();
  float* out = out_tensor.get_ptr();
  const auto& in_tensor_dim = in_tensor.get_dimensions();
  const auto& out_tensor_dim = out_tensor.get_dimensions();
  int m, n, k;
  m = in_tensor_dim[0];
  n = out_tensor_dim[1];
  k = in_tensor_dim[1];
  float alpha = 1.0f, beta = 0.0f;
  // Optionally allow TF32 tensor-core math for the FP32 GEMM.
  cublasComputeType_t compute_type =
      enable_tf32_compute_ ? CUBLAS_COMPUTE_32F_FAST_TF32 : CUBLAS_COMPUTE_32F;
  // cuBLAS is column-major: each row-major buffer is its own column-major
  // transpose, so gemm(N, N, n, m, k) produces out = in * W without any
  // explicit transposition. falgo_ is the algorithm cached by
  // search_algorithm().
  CK_CUBLAS_THROW_(cublasGemmEx(get_gpu().get_cublas_handle(), CUBLAS_OP_N, CUBLAS_OP_N, n, m, k,
                                &alpha, weight, CUDA_R_32F, n, in, CUDA_R_32F, k, &beta, out,
                                CUDA_R_32F, n, compute_type, falgo_));
  add_bias(out, bias, m, n, true, get_gpu().get_stream());
  //PROFILE_RECORD("TopMLP.fprop.stop", get_gpu().get_stream());
}
// Backward pass. On entry `out` holds dL/dout. Computes:
//   wgrad    += in^T * dout   (beta_w = 1 accumulates across calls)
//   dL/din    = dout * W^T    (beta_x = 0; written in place into the
//                              input tensor, clobbering the activations)
//   bias_grad = column-wise sum of dout over the batch dimension
void FullyConnectedLayer::bprop() {
  CudaDeviceContext context(get_device_id());
  Tensor2<float>& in_tensor = get_in_tensors(true)[0];
  Tensor2<float>& out_tensor = out_tensors_[0];
  float* wgrad = wgrad_[0].get_ptr();
  float* bias_grad = wgrad_[1].get_ptr();
  float* weight = weights_[0].get_ptr();
  float* in = in_tensor.get_ptr();
  float* out = out_tensor.get_ptr();
  const auto& in_tensor_dim = in_tensor.get_dimensions();
  const auto& out_tensor_dim = out_tensor.get_dimensions();
  int m, n, k;
  m = in_tensor_dim[0];
  n = out_tensor_dim[1];
  k = in_tensor_dim[1];
  float alpha = 1.0f, beta_w = 1.0f, beta_x = 0.0f;
  // PROFILE_RECORD("TopMLP.bprop.start", get_gpu().get_stream());
  cublasComputeType_t compute_type =
      enable_tf32_compute_ ? CUBLAS_COMPUTE_32F_FAST_TF32 : CUBLAS_COMPUTE_32F;
  // gradient with respect to W (accumulated into wgrad)
  CK_CUBLAS_THROW_(cublasGemmEx(get_gpu().get_cublas_handle(), CUBLAS_OP_N, CUBLAS_OP_T, n, k, m,
                                &alpha, out, CUDA_R_32F, n, in, CUDA_R_32F, k, &beta_w, wgrad,
                                CUDA_R_32F, n, compute_type, balgo_W_));
  // gradient with respect to the input Xn (overwrites `in`)
  CK_CUBLAS_THROW_(cublasGemmEx(get_gpu().get_cublas_handle(), CUBLAS_OP_T, CUBLAS_OP_N, k, m, n,
                                &alpha, weight, CUDA_R_32F, n, out, CUDA_R_32F, n, &beta_x, in,
                                CUDA_R_32F, k, compute_type, balgo_Xn_));
  // bias gradient: reduce dout over rows (batch dimension)
  MLCommon::LinAlg::reduce(bias_grad, out, m, n, float(0), false, true, get_gpu().get_stream(),
                           true);
}
// Benchmarks the available cuBLAS GEMM algorithms for the three GEMMs this
// layer issues (fprop, bprop w.r.t. W, bprop w.r.t. the input) and caches
// the fastest of each in falgo_, balgo_W_ and balgo_Xn_.
// Each candidate runs repeat_num times and is timed with CUDA events;
// `status` only reflects the final repetition, which is sufficient because
// every repetition uses the same algorithm.
void FullyConnectedLayer::search_algorithm() {
  // Set to the CUDA device where this layer assigned to
  CudaDeviceContext context(get_device_id());
  const int repeat_num = 5;
  // Device Tensors to be used
  Tensor2<float>& in_tensor = get_in_tensors(true)[0];
  Tensor2<float>& out_tensor = out_tensors_[0];
  float* weight = weights_[0].get_ptr();
  float* in = in_tensor.get_ptr();
  float* out = out_tensor.get_ptr();
  float* wgrad = wgrad_[0].get_ptr();
  // Tensor dim
  const auto& in_tensor_dim = in_tensor.get_dimensions();
  const auto& out_tensor_dim = out_tensor.get_dimensions();
  int m, n, k;
  m = in_tensor_dim[0];
  n = out_tensor_dim[1];
  k = in_tensor_dim[1];
  // Shortest observed time so far (sentinel: effectively +infinity)
  float shortestTime = 100000000.0;
  float time;
  cudaEvent_t start, stop;
  CK_CUDA_THROW_(cudaEventCreate(&start));
  CK_CUDA_THROW_(cudaEventCreate(&stop));
  // cublas ret status
  cublasStatus_t status;
  // Range of algorithm ids to probe (tensor-op variants for mixed precision)
  int startAlgo, endAlgo;
  if (use_mixed_precision_) {
    startAlgo = (int)CUBLAS_GEMM_DEFAULT_TENSOR_OP;
    endAlgo = (int)CUBLAS_GEMM_ALGO15_TENSOR_OP;
  } else {
    startAlgo = (int)CUBLAS_GEMM_DEFAULT;
    endAlgo = (int)CUBLAS_GEMM_ALGO23;
  }
  cublasComputeType_t compute_type =
      enable_tf32_compute_ ? CUBLAS_COMPUTE_32F_FAST_TF32 : CUBLAS_COMPUTE_32F;
  // Search all the algorithms for the fprop GEMM
  for (int testAlgo = startAlgo; testAlgo <= endAlgo; testAlgo++) {
    float alpha = 1.0f, beta = 0.0f;
    // Record start event
    CK_CUDA_THROW_(cudaEventRecord(start, get_gpu().get_stream()));
    for (int i = 0; i < repeat_num; ++i) {
      status = cublasGemmEx(get_gpu().get_cublas_handle(), CUBLAS_OP_N, CUBLAS_OP_N, n, m, k,
                            &alpha, weight, CUDA_R_32F, n, in, CUDA_R_32F, k, &beta, out,
                            CUDA_R_32F, n, compute_type, static_cast<cublasGemmAlgo_t>(testAlgo));
    }
    CK_CUDA_THROW_(cudaEventRecord(stop, get_gpu().get_stream()));
    CK_CUDA_THROW_(cudaEventSynchronize(stop));
    CK_CUDA_THROW_(cudaEventElapsedTime(&time, start, stop));
    // Avg time (ms) of this algorithm for the fprop GEMM
    time = time / repeat_num;
    // Skip if the algorithm is NOT supported for the fprop configuration
    if (status != CUBLAS_STATUS_SUCCESS) {
      // printf("The algorithms %d is not supported for fprop, skipped.\n", testAlgo);
      continue;
    }
    // Record the optimal time and algorithm
    if (time < shortestTime) {
      shortestTime = time;
      falgo_ = static_cast<cublasGemmAlgo_t>(testAlgo);
    }
  }
  // Reset shortestTime
  shortestTime = 100000000.0;
  // Search all the algorithms for the bprop_W GEMM
  for (int testAlgo = startAlgo; testAlgo <= endAlgo; testAlgo++) {
    float alpha = 1.0f, beta_w = 1.0f;
    // Record start event
    CK_CUDA_THROW_(cudaEventRecord(start, get_gpu().get_stream()));
    for (int i = 0; i < repeat_num; ++i) {
      status = cublasGemmEx(get_gpu().get_cublas_handle(), CUBLAS_OP_N, CUBLAS_OP_T, n, k, m,
                            &alpha, out, CUDA_R_32F, n, in, CUDA_R_32F, k, &beta_w, wgrad,
                            CUDA_R_32F, n, compute_type, static_cast<cublasGemmAlgo_t>(testAlgo));
    }
    CK_CUDA_THROW_(cudaEventRecord(stop, get_gpu().get_stream()));
    CK_CUDA_THROW_(cudaEventSynchronize(stop));
    CK_CUDA_THROW_(cudaEventElapsedTime(&time, start, stop));
    // Avg time (ms) of this algorithm for the bprop_W GEMM
    time = time / repeat_num;
    // Skip if the algorithm is NOT supported for the bprop_W configuration
    if (status != CUBLAS_STATUS_SUCCESS) {
      // printf("The algorithms %d is not supported for bprop_W, skipped.\n", testAlgo);
      continue;
    }
    // Record the optimal time and algorithm
    if (time < shortestTime) {
      shortestTime = time;
      balgo_W_ = static_cast<cublasGemmAlgo_t>(testAlgo);
    }
  }
  // Reset shortestTime
  shortestTime = 100000000.0;
  // Search all the algorithms for the bprop_Xn GEMM
  for (int testAlgo = startAlgo; testAlgo <= endAlgo; testAlgo++) {
    float alpha = 1.0f, beta_x = 0.0f;
    // Record start event
    CK_CUDA_THROW_(cudaEventRecord(start, get_gpu().get_stream()));
    for (int i = 0; i < repeat_num; ++i) {
      status = cublasGemmEx(get_gpu().get_cublas_handle(), CUBLAS_OP_T, CUBLAS_OP_N, k, m, n,
                            &alpha, weight, CUDA_R_32F, n, out, CUDA_R_32F, n, &beta_x, in,
                            CUDA_R_32F, k, compute_type, static_cast<cublasGemmAlgo_t>(testAlgo));
    }
    CK_CUDA_THROW_(cudaEventRecord(stop, get_gpu().get_stream()));
    CK_CUDA_THROW_(cudaEventSynchronize(stop));
    CK_CUDA_THROW_(cudaEventElapsedTime(&time, start, stop));
    // Avg time (ms) of this algorithm for the bprop_Xn GEMM
    time = time / repeat_num;
    // Skip if the algorithm is NOT supported for the bprop_Xn configuration
    if (status != CUBLAS_STATUS_SUCCESS) {
      // printf("The algorithms %d is not supported for bprop_Xn, skipped.\n", testAlgo);
      continue;
    }
    // Record the optimal time and algorithm
    if (time < shortestTime) {
      shortestTime = time;
      balgo_Xn_ = static_cast<cublasGemmAlgo_t>(testAlgo);
    }
  }
  // Print selection information
  // printf("The algorithm selection for fprop, bprop_W and bprop_Xn are: %d, %d and %d.\n",
  // (int)falgo_, (int)balgo_W_, (int)balgo_Xn_);
  // Output msg
  // MESSAGE_("The fully-connected layer has finished choosing the algorithm for cublas Gemm.");
  // Clean-up
  CK_CUDA_THROW_(cudaEventDestroy(start));
  CK_CUDA_THROW_(cudaEventDestroy(stop));
}
// Uniform initializer over [-limit, limit] with
// limit = 1 / (fan_in + fan_out) for the weight (index 0) and
// limit = 1 / fan_out for the bias (index != 0).
std::unique_ptr<DataSimulator> FullyConnectedLayer::get_uniform_initializer(const int index) {
  const Tensor2<float>& in_tensor = get_in_tensors(true)[0];
  const Tensor2<float>& out_tensor = out_tensors_[0];
  float bottom_dim = in_tensor.get_dimensions()[1];
  float top_dim = out_tensor.get_dimensions()[1];
  float limit = 1.0f / ((0 == index ? bottom_dim : 0) + top_dim);
  return std::make_unique<UniformDataSimulator>(-1 * limit, limit);
}
// Xavier/Glorot uniform initializer (fan_avg mode); for the bias
// (index != 0) fan_in is passed as 0 so only fan_out contributes.
std::unique_ptr<DataSimulator> FullyConnectedLayer::get_xavier_uniform_initializer(
    const int index) {
  const Tensor2<float>& in_tensor = get_in_tensors(true)[0];
  const Tensor2<float>& out_tensor = out_tensors_[0];
  float bottom_dim = in_tensor.get_dimensions()[1];
  float top_dim = out_tensor.get_dimensions()[1];
  return std::make_unique<VarianceScalingSimulator>(1.f, data_simu::Mode_t::Fan_avg,
                                                    data_simu::Distribution_t::Uniform,
                                                    0 == index ? bottom_dim : 0, top_dim);
}
// Xavier/Glorot normal initializer (fan_avg mode); for the bias
// (index != 0) fan_in is passed as 0 so only fan_out contributes.
std::unique_ptr<DataSimulator> FullyConnectedLayer::get_xavier_norm_initializer(const int index) {
  const Tensor2<float>& in_tensor = get_in_tensors(true)[0];
  const Tensor2<float>& out_tensor = out_tensors_[0];
  float bottom_dim = in_tensor.get_dimensions()[1];
  float top_dim = out_tensor.get_dimensions()[1];
  return std::make_unique<VarianceScalingSimulator>(1.f, data_simu::Mode_t::Fan_avg,
                                                    data_simu::Distribution_t::Norm,
                                                    0 == index ? bottom_dim : 0, top_dim);
}
// Default initializer: Xavier-normal for the weight (index 0); Gaussian
// with stddev sqrt(1/top_dim) clipped to +/-2*stddev for the bias
// (index 1). Any other index is rejected.
std::unique_ptr<DataSimulator> FullyConnectedLayer::get_default_initializer(const int index) {
  const Tensor2<float>& in_tensor = get_in_tensors(true)[0];
  const Tensor2<float>& out_tensor = out_tensors_[0];
  float bottom_dim = in_tensor.get_dimensions()[1];
  float top_dim = out_tensor.get_dimensions()[1];
  std::unique_ptr<DataSimulator> simu(nullptr);
  if (0 == index) {
    simu.reset(new VarianceScalingSimulator(1.f, data_simu::Mode_t::Fan_avg,
                                            data_simu::Distribution_t::Norm, bottom_dim, top_dim));
  } else if (1 == index) {
    float stddev = sqrt(1.f / top_dim);
    simu.reset(new GaussianDataSimulator(0, stddev, -2 * stddev, 2 * stddev));
  } else {
    CK_THROW_(Error_t::OutOfBound, "index != {0, 1}.");
  }
  return simu;
}
} // namespace HugeCTR
|
2ab2e48b6cef8d94ca54fb08090af2555f3714da.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <fstream>
#include <strstream>
#include <iostream>
using namespace std;
#define gap_penalty 3 //penalty to substitute with a gap //insertion or deletion
#define sub_penalty 5 //penalty to substitute with a different character
#define NO_BLOCKS 1024
#define NO_THREADS_PER_BLOCK 512 //kept as a multiple of 32 so as to make sure we make use of optimum number of warps
// Backtracks through the filled edit-distance matrix `outarr`
// ((l1+1) x (l2+1), row-major) to reconstruct an optimal alignment of
// str1 (length l1) and str2 (length l2). The aligned strings are written
// right-to-left into outstr1/outstr2, which must each hold at least
// l1+l2+1 characters; '_' marks a gap. Returns the index within the
// output buffers at which the alignment begins.
int findAlignment(int *outarr , char *outstr1 , char *outstr2, char *str1 , char *str2 , int l1 , int l2){
    int y = l1; //tracks the current index for string 1 i.e at any time l1 - y characters of str1 have been matched
    int x = l2; //tracks the current index for string 2 i.e at any time l1 - x characters of str2 have been matched
    //outarr is the matrix with alignment values inserted
    //outstr1 and outstr2 are the final matched strings
    outstr1[l1+l2] = '\0';
    outstr2[l1+l2] = '\0';
    int val1,val2,val3;
    int t = l1+l2-1;  // write cursor, moving right-to-left
    while(y>0 && x>0){
        // Recompute the three candidate costs of cell (y, x):
        //   val1 = came from the left     (gap inserted into str1)
        //   val2 = came from above        (gap inserted into str2)
        //   val3 = came from the diagonal (match, or substitution if the
        //          characters differ)
        val1 = outarr[y*(l2+1) + x-1] + gap_penalty;
        val2 = outarr[(y-1)*(l2+1) + x] + gap_penalty;
        val3 = outarr[(y-1)*(l2+1) + x-1] + ((str1[y-1] != str2[x-1]) * sub_penalty);
        // Follow whichever move produced the stored cell value.
        if(outarr[y*(l2+1) + x] == val1){
            outstr2[t] = str2[x-1];
            outstr1[t--] = '_'; //blank
            x--;
            continue;
        }
        if(outarr[y*(l2+1) + x] == val2){
            outstr1[t] = str1[y-1];
            outstr2[t--] = '_'; //blank
            y--;
            continue;
        }
        if(outarr[y*(l2+1) + x] == val3){
            outstr2[t] = str2[x-1];
            outstr1[t--] = str1[y-1]; //match or substitute
            x--; y--;
            continue;
        }
    }
    // One string is exhausted: pad the remainder of the other with gaps.
    for(int i=x;i>0 ; i--,t--){
        outstr2[t] = str2[i-1];
        outstr1[t] = '_';
    }
    for(int j=y;j>0 ; j--,t--){
        outstr1[t] = str1[j-1];
        outstr2[t] = '_';
    }
    //outstr1 = outstr1 + t;
    //outstr2 = outstr2 + t;
    return t+1;
}
//device function for max of 3 numbers written avoiding many conditional statements.
// Device helper: maximum of three ints, written with plain conditionals
// instead of arithmetic selects (identical results for all inputs).
__device__ int mymax(int a ,int b,int c ){
    int best = (a > b) ? a : b;
    return (best > c) ? best : c;
}
//device function for min of 3 numbers written avoiding many conditional statements.
// Device helper: minimum of three ints via pairwise comparisons
// (identical results to the original arithmetic-select form).
__device__ int mymin3(int a ,int b,int c ){
    int best = (a < b) ? a : b;
    return (best < c) ? best : c;
}
//min of 2 nunmbers, host function
// Host-side minimum of two ints; the original's branchless arithmetic is
// replaced by a plain conditional with identical results (ties included).
int mymin(int a ,int b){
    return (a < b) ? a : b;
}
//prints the input vector
// Prints the first `len` entries of `arr`, each followed by " , ",
// then a trailing newline.
void print_vector(int *arr , int len){
    int idx = 0;
    while (idx < len) {
        printf("%d , ", arr[idx]);
        ++idx;
    }
    printf("\n");
}
// Computes one anti-diagonal of the (p+1) x (q+1) edit-distance DP table
// `out_arr` for str1 (length p) and str2 (length q).
// Thread `id` handles cell (curr_y - id, curr_x + id); the host sweeps
// (curr_x, curr_y) across all diagonal start points so that each cell's
// left, upper and diagonal neighbours were filled by an earlier launch.
// Border cells (row 0 / column 0) receive pure gap costs; interior cells
// take the minimum of gap-left, gap-up and match/substitute. All cases are
// selected branchlessly (boolean-product chain) to limit warp divergence.
__global__ void dpf(char *str1 , char *str2 , int *out_arr, int p, int q,int curr_x,int curr_y)
{
    int id = blockDim.x* blockIdx.x + threadIdx.x;
    /*
    |
    | diagonal_x \
    | diagonal_y --------- |
    | /
    \_/
    */
    int diagonal_x ,diagonal_y;
    // while((curr_y != p) || (curr_x != q+1)){
    diagonal_x = curr_x+ id;
    diagonal_y = curr_y-id;
    // Guard: only cells inside the table are written.
    if(diagonal_x <= q && diagonal_y>=0){
        out_arr[diagonal_y*(q+1) + diagonal_x] = (diagonal_x==0 && diagonal_y ==0)*0
        +(diagonal_x==0 && diagonal_y !=0)*(diagonal_y * gap_penalty)
        +(diagonal_x !=0 && diagonal_y == 0)*(diagonal_x * gap_penalty)
        +(diagonal_x !=0 && diagonal_y !=0)*mymin3(out_arr[diagonal_y*(q + 1) + diagonal_x -1] + gap_penalty,
        out_arr[(diagonal_y-1)*(q + 1)+ diagonal_x] + gap_penalty,
        out_arr[(diagonal_y-1)*(q + 1) + diagonal_x - 1] +
        (str1[diagonal_y-1] != str2[diagonal_x-1]) * sub_penalty);
    }
    //curr_x = curr_x + ((curr_y/p) * 1);
    //curr_y = mymin((curr_y +1),p);
    //__syncthreads();
    //}
}
//Host driver: computes the (p+1)x(q+1) alignment-cost table for str1 (len p)
//and str2 (len q) on the GPU, one anti-diagonal per kernel launch, then
//copies the table into the host buffer outarr ((p+1)*(q+1) ints).
//Returns hipSuccess on success, otherwise the first failing HIP status.
hipError_t launchProg(char *str1 ,char *str2, int* outarr , int p, int q){
// Steps in cuda program:
// allocate variables space on the cudamemory
// copy the data
// call the kernel function
char *str1_k = NULL; //device copy of str1
char *str2_k = NULL; //device copy of str2
int *out_k = NULL; //output 2d array; NULL-initialized so hipFree in the cleanup path is always safe
int curr_x = 0, curr_y = 0; //declared before the first goto so no initialization is jumped over
printf("1\n");
hipError_t cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error1;
}
// Allocate GPU buffers for three vectors (two input, one output) .
printf("2\n");
cudaStatus = hipMalloc((void**)&out_k, (p+1)*(q+1) * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stdout, "hipMalloc failed! , could not allot space to output array");
//fprintf(stdout, "%s" , hipGetErrorString(cudaStatus));
goto Error1;
}
cudaStatus = hipMalloc((void**)&str1_k, p * sizeof(char));
if (cudaStatus != hipSuccess) {
fprintf(stdout, "hipMalloc failed!");
goto Error1;
}
cudaStatus = hipMalloc((void**)&str2_k, q * sizeof(char));
if (cudaStatus != hipSuccess) {
fprintf(stdout, "hipMalloc failed!");
goto Error1;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(str1_k,str1, p * sizeof(char), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stdout, "hipMemcpy failed!");
goto Error1;
}
cudaStatus = hipMemcpy(str2_k,str2, q * sizeof(char), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stdout, "hipMemcpy failed!");
goto Error1;
}
// Launch a kernel on the GPU with one thread for each element.
/*
|
| q \
| p --------- |
| /
\_/
*/
//Sweep the starting cell of every anti-diagonal: down the first column
//(curr_y 0..p, curr_x stays 0) and then along the last row (curr_x 1..q).
//All launches go to the default stream, so each diagonal completes before
//the next one reads its results.
while((curr_y != p) || (curr_x != q+1)){
hipLaunchKernelGGL(( dpf), dim3(NO_BLOCKS), dim3(NO_THREADS_PER_BLOCK), 0, 0, str1_k , str2_k ,out_k,p,q,curr_x,curr_y);
curr_x = curr_x + ((curr_y/p) * 1);
curr_y = mymin((curr_y +1),p);
}
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error1;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error1;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(outarr, out_k, (p + 1)*(q + 1)* sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy of output array failed!");
goto Error1;
}
Error1:
//release every device allocation (str1_k/str2_k were previously leaked here;
//hipFree(NULL) is a harmless no-op, so this is safe on every exit path)
hipFree(out_k);
hipFree(str1_k);
hipFree(str2_k);
return cudaStatus;
}
//Entry point: aligns the two hard-coded sequences on the GPU, backtracks the
//alignment on the host, prints the aligned strings, and releases all buffers.
int main() {
// int n = 25;
// char seq1[30000],seq2[30000];
char *seq1 = "aishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljfaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljfaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljfaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljfaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljfaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljfaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljfaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljfaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljfaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljfaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljfaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljfaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljfaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljfaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljfaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljfaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljfaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljfaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljfsjflkjdfdsjkdfjjlsajfkvipualharssljfasjflkjdfdsjkdfjjlsajfkvipualharssljfasjflkjdfdsjkdfjjlsajfkvipualharssljfasjflkjdfdsjkdfjjlsajfkvipualharssljfasjflkjdfdsjkdfjjlsajfkvipualharssljfasjflkjdfdsjkdfjjlsajfkvipualharssljfasjflkjdfdsjkdfjjlsajfkvipualharssljfasjflkjdfdsjkdfjjlsajfkvipualharssljfasjflkjdfdsjkdfjjlsajfkvipualharssljfa";
char *seq2= "SailAwayFromTheShoresSailAwayFromTheShoresSailAwayFromTheShoressljflkajdlkjalkdsjflkjdfdsjkdfaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljflaishwariyaVipulajsljflkaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljflaishwariyaVipulajsljflkaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljflaishwariyaVipulajsljflkaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljflaishwariyaVipulajsljflkaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljflaishwariyaVipulajsljflkaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljflaishwariyaVipulajsljflkaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljflaishwariyaVipulajsljflkaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljflaishwariyaVipulajsljflkaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljflaishwariyaVipulajsljflkaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljflaishwariyaVipulajsljflkhwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljflaishwariyaVipulajsljflkhwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljflaishwariyaVipulajsljflkhwariyaVipulajsljflkajdlkjalkdsjflkjdfdaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljfjflkjdfdsjkdfjjlsajfkvipualharssljfasjflkjdfdsjkdfjjlsajfkvipualharssljfasjflkjdfdsjkdfjjlsajfkvipualharssljfasjflkjdfdsjkdfjjlsajfkvipualharssljfasjflkjdfdsjkdfjjlsajfkvipualharssljfasjflkjdfdsjkdfjjlsajfkvipualharssljfasjflkjdfdsjkdfjjlsajfkvipualharssljfasjflkjdfdsjkdfjjlsajfkvipualharssljfasjflkjdfdsjkdfjjlsajfkvipualharssljfa";
ifstream file; //kept for the (commented-out) file-based input path below
/*
file.open("data.txt",ios::in);
if(file.is_open()){
file.getline(seq1, 30000);
file.getline(seq2,30000);
// printf("Input string 1 is %s\n %s ", seq1 , seq2);
file.close();
}
*/
// else printf("file could not be opened");
char *str1 = seq1;
char *str2 = seq2;
//char *str1 = "aishwariyaAbhiVipul";
//char *str2 = "aishwariyaVipul";
int l1 = strlen(str1);
int l2 = strlen(str2);
//an alignment can span at most l1+l2 characters plus the terminating NUL
char *outstr1 = new char[l1+l2+1];
char *outstr2 = new char[l1+l2+1]; //fix: was l2+l2+1, which overflows when l1 > l2 (findAlignment writes index l1+l2)
int *outarr = new int[(l1+1)*(l2+1)];
// Add vectors in parallel.
hipError_t cudaStatus = launchProg(str1 , str2 , outarr , l1 , l2);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "launchProg failed!");
delete[] outstr1;
delete[] outstr2;
delete[] outarr;
return 1;
}
int offset = findAlignment(outarr , outstr1 , outstr2 , str1 , str2 ,l1,l2);
printf("Aligned Strings are : \n %s \n %s \n" , outstr1 + offset,
outstr2+offset);
//output the table
/*
for(int i=0;i<=l1;i++){
for(int j=0;j<=l2;j++){
printf("%d " , outarr[i*(l2+1) + j]);
}
printf("\n");
}
*/
delete[] outstr1;
delete[] outstr2;
delete[] outarr;
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
| 2ab2e48b6cef8d94ca54fb08090af2555f3714da.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <fstream>
#include <strstream>
#include <iostream>
using namespace std;
#define gap_penalty 3 //penalty to substitute with a gap //insertion or deletion
#define sub_penalty 5 //penalty to substitute with a different character
#define NO_BLOCKS 1024
#define NO_THREADS_PER_BLOCK 512 //kept as a multiple of 32 so as to make sure we make use of optimum number of warps
//Backtracks through the filled DP table outarr ((l1+1)x(l2+1), row-major with
//row stride l2+1) to recover one optimal alignment of str1 (length l1) and
//str2 (length l2).  The aligned strings are written right-to-left into
//outstr1/outstr2 (each must hold at least l1+l2+1 chars); '_' marks a gap.
//Returns the offset of the first aligned character inside the output buffers,
//so the caller should print outstr1+offset / outstr2+offset.
int findAlignment(int *outarr , char *outstr1 , char *outstr2, char *str1 , char *str2 , int l1 , int l2){
int y = l1; //tracks the current index for string 1 i.e at any time l1 - y characters of str1 have been matched
int x = l2; //tracks the current index for string 2 i.e at any time l1 - x characters of str2 have been matched
//outarr is the matrix with alignment values inserted
//outstr1 and outsrt2 are the final matched strings
outstr1[l1+l2] = '\0';
outstr2[l1+l2] = '\0';
int val1,val2,val3;
int t = l1+l2-1; //write cursor; moves right-to-left as characters are emitted
while(y>0 && x>0){
//cost of reaching the current cell from each of its three predecessors;
//the first match below wins, so ties break in the order: gap in str1,
//gap in str2, then match/substitute.
val1 = outarr[y*(l2+1) + x-1] + gap_penalty;
val2 = outarr[(y-1)*(l2+1) + x] + gap_penalty;
val3 = outarr[(y-1)*(l2+1) + x-1] + ((str1[y-1] != str2[x-1]) * sub_penalty);
//
if(outarr[y*(l2+1) + x] == val1){
outstr2[t] = str2[x-1];
outstr1[t--] = '_'; //blank
x--;
continue;
}
if(outarr[y*(l2+1) + x] == val2){
outstr1[t] = str1[y-1];
outstr2[t--] = '_'; //blank
y--;
continue;
}
if(outarr[y*(l2+1) + x] == val3){
outstr2[t] = str2[x-1];
outstr1[t--] = str1[y-1]; //substitute
x--; y--;
continue;
}
}
//substitute the remaining elements with _ and other with elements as that of the input string
for(int i=x;i>0 ; i--,t--){
outstr2[t] = str2[i-1];
outstr1[t] = '_';
}
for(int j=y;j>0 ; j--,t--){
outstr1[t] = str1[j-1];
outstr2[t] = '_';
}
//outstr1 = outstr1 + t;
//outstr2 = outstr2 + t;
return t+1;
}
//device function for max of 3 numbers written avoiding many conditional statements.
//device function for max of 3 numbers written avoiding many conditional statements.
//Each step keeps the larger operand via 0/1 comparison masks (branch-free).
//Not referenced by the kernels in this file.
__device__ int mymax(int a ,int b,int c ){
int max =a;
max = (max<b)*b + (max>=b)*max;
max = (max<c)*c + (max>=c)*max;
return max;
}
//device function for min of 3 numbers written avoiding many conditional statements.
//device function for min of 3 numbers written avoiding many conditional statements.
//Each step keeps the smaller operand via 0/1 comparison masks; used by dpf
//for the DP recurrence.
__device__ int mymin3(int a ,int b,int c ){
int min =a;
min = (min>b)*b + (min<=b)*min;
min = (min>c)*c + (min<=c)*min;
return min;
}
//min of 2 nunmbers, host function
//min of 2 nunmbers, host function (used to clamp the diagonal sweep in launchProg)
int mymin(int a ,int b){
return (a>=b)*b + (a<b)*a;
}
//prints the input vector
//prints the input vector of 'len' ints to stdout, comma-separated, then a newline
void print_vector(int *arr , int len){
for(int i=0; i<len;i++){
printf("%d , ",arr[i]);
}
printf("\n");
}
//Kernel: fills one anti-diagonal of the (p+1)x(q+1) DP cost table out_arr
//(row-major, row stride q+1).  (curr_x, curr_y) is the starting cell of the
//diagonal; thread id handles cell (curr_x+id, curr_y-id).  str1 has length p
//(rows), str2 length q (columns).  Cells on earlier diagonals must already be
//computed -- the host launches one kernel per diagonal starting cell on the
//default stream, which serializes the launches.
__global__ void dpf(char *str1 , char *str2 , int *out_arr, int p, int q,int curr_x,int curr_y)
{
int id = blockDim.x* blockIdx.x + threadIdx.x;
/*
|
| diagonal_x \
| diagonal_y --------- |
| /
\_/
*/
int diagonal_x ,diagonal_y;
// while((curr_y != p) || (curr_x != q+1)){
//cell handled by this thread: step up-right along the anti-diagonal
diagonal_x = curr_x+ id;
diagonal_y = curr_y-id;
//bounds guard: threads past the end of the diagonal do nothing
if(diagonal_x <= q && diagonal_y>=0){
//branchless form of the DP recurrence: exactly one of the four 0/1
//predicates is true, selecting either the base cases (first row/column
//filled with accumulated gap costs) or the interior minimum over
//insert / delete (gap_penalty) and match/substitute (sub_penalty).
out_arr[diagonal_y*(q+1) + diagonal_x] = (diagonal_x==0 && diagonal_y ==0)*0
+(diagonal_x==0 && diagonal_y !=0)*(diagonal_y * gap_penalty)
+(diagonal_x !=0 && diagonal_y == 0)*(diagonal_x * gap_penalty)
+(diagonal_x !=0 && diagonal_y !=0)*mymin3(out_arr[diagonal_y*(q + 1) + diagonal_x -1] + gap_penalty,
out_arr[(diagonal_y-1)*(q + 1)+ diagonal_x] + gap_penalty,
out_arr[(diagonal_y-1)*(q + 1) + diagonal_x - 1] +
(str1[diagonal_y-1] != str2[diagonal_x-1]) * sub_penalty);
}
//curr_x = curr_x + ((curr_y/p) * 1);
//curr_y = mymin((curr_y +1),p);
//__syncthreads();
//}
}
//Host driver: computes the (p+1)x(q+1) alignment-cost table for str1 (len p)
//and str2 (len q) on the GPU, one anti-diagonal per kernel launch, then
//copies the table into the host buffer outarr ((p+1)*(q+1) ints).
//Returns cudaSuccess on success, otherwise the first failing CUDA status.
cudaError_t launchProg(char *str1 ,char *str2, int* outarr , int p, int q){
// Steps in cuda program:
// allocate variables space on the cudamemory
// copy the data
// call the kernel function
char *str1_k = NULL; //device copy of str1
char *str2_k = NULL; //device copy of str2
int *out_k = NULL; //output 2d array; NULL-initialized so cudaFree in the cleanup path is always safe
int curr_x = 0, curr_y = 0; //declared before the first goto so no initialization is jumped over
printf("1\n");
cudaError_t cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error1;
}
// Allocate GPU buffers for three vectors (two input, one output) .
printf("2\n");
cudaStatus = cudaMalloc((void**)&out_k, (p+1)*(q+1) * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stdout, "cudaMalloc failed! , could not allot space to output array");
//fprintf(stdout, "%s" , cudaGetErrorString(cudaStatus));
goto Error1;
}
cudaStatus = cudaMalloc((void**)&str1_k, p * sizeof(char));
if (cudaStatus != cudaSuccess) {
fprintf(stdout, "cudaMalloc failed!");
goto Error1;
}
cudaStatus = cudaMalloc((void**)&str2_k, q * sizeof(char));
if (cudaStatus != cudaSuccess) {
fprintf(stdout, "cudaMalloc failed!");
goto Error1;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(str1_k,str1, p * sizeof(char), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stdout, "cudaMemcpy failed!");
goto Error1;
}
cudaStatus = cudaMemcpy(str2_k,str2, q * sizeof(char), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stdout, "cudaMemcpy failed!");
goto Error1;
}
// Launch a kernel on the GPU with one thread for each element.
/*
|
| q \
| p --------- |
| /
\_/
*/
//Sweep the starting cell of every anti-diagonal: down the first column
//(curr_y 0..p, curr_x stays 0) and then along the last row (curr_x 1..q).
//All launches go to the default stream, so each diagonal completes before
//the next one reads its results.
while((curr_y != p) || (curr_x != q+1)){
dpf<<<NO_BLOCKS, NO_THREADS_PER_BLOCK>>>(str1_k , str2_k ,out_k,p,q,curr_x,curr_y);
curr_x = curr_x + ((curr_y/p) * 1);
curr_y = mymin((curr_y +1),p);
}
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error1;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error1;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(outarr, out_k, (p + 1)*(q + 1)* sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy of output array failed!");
goto Error1;
}
Error1:
//release every device allocation (str1_k/str2_k were previously leaked here;
//cudaFree(NULL) is a harmless no-op, so this is safe on every exit path)
cudaFree(out_k);
cudaFree(str1_k);
cudaFree(str2_k);
return cudaStatus;
}
//Entry point: aligns the two hard-coded sequences on the GPU, backtracks the
//alignment on the host, prints the aligned strings, and releases all buffers.
int main() {
// int n = 25;
// char seq1[30000],seq2[30000];
char *seq1 = "aishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljfaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljfaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljfaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljfaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljfaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljfaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljfaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljfaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljfaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljfaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljfaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljfaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljfaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljfaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljfaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljfaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljfaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljfaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljfsjflkjdfdsjkdfjjlsajfkvipualharssljfasjflkjdfdsjkdfjjlsajfkvipualharssljfasjflkjdfdsjkdfjjlsajfkvipualharssljfasjflkjdfdsjkdfjjlsajfkvipualharssljfasjflkjdfdsjkdfjjlsajfkvipualharssljfasjflkjdfdsjkdfjjlsajfkvipualharssljfasjflkjdfdsjkdfjjlsajfkvipualharssljfasjflkjdfdsjkdfjjlsajfkvipualharssljfasjflkjdfdsjkdfjjlsajfkvipualharssljfa";
char *seq2= "SailAwayFromTheShoresSailAwayFromTheShoresSailAwayFromTheShoressljflkajdlkjalkdsjflkjdfdsjkdfaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljflaishwariyaVipulajsljflkaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljflaishwariyaVipulajsljflkaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljflaishwariyaVipulajsljflkaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljflaishwariyaVipulajsljflkaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljflaishwariyaVipulajsljflkaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljflaishwariyaVipulajsljflkaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljflaishwariyaVipulajsljflkaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljflaishwariyaVipulajsljflkaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljflaishwariyaVipulajsljflkaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljflaishwariyaVipulajsljflkhwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljflaishwariyaVipulajsljflkhwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljflaishwariyaVipulajsljflkhwariyaVipulajsljflkajdlkjalkdsjflkjdfdaishwariyaVipulajsljflkajdlkjalkdsjflkjdfdsjkdfjjlsajfkvipualharssljfjflkjdfdsjkdfjjlsajfkvipualharssljfasjflkjdfdsjkdfjjlsajfkvipualharssljfasjflkjdfdsjkdfjjlsajfkvipualharssljfasjflkjdfdsjkdfjjlsajfkvipualharssljfasjflkjdfdsjkdfjjlsajfkvipualharssljfasjflkjdfdsjkdfjjlsajfkvipualharssljfasjflkjdfdsjkdfjjlsajfkvipualharssljfasjflkjdfdsjkdfjjlsajfkvipualharssljfasjflkjdfdsjkdfjjlsajfkvipualharssljfa";
ifstream file; //kept for the (commented-out) file-based input path below
/*
file.open("data.txt",ios::in);
if(file.is_open()){
file.getline(seq1, 30000);
file.getline(seq2,30000);
// printf("Input string 1 is %s\n %s ", seq1 , seq2);
file.close();
}
*/
// else printf("file could not be opened");
char *str1 = seq1;
char *str2 = seq2;
//char *str1 = "aishwariyaAbhiVipul";
//char *str2 = "aishwariyaVipul";
int l1 = strlen(str1);
int l2 = strlen(str2);
//an alignment can span at most l1+l2 characters plus the terminating NUL
char *outstr1 = new char[l1+l2+1];
char *outstr2 = new char[l1+l2+1]; //fix: was l2+l2+1, which overflows when l1 > l2 (findAlignment writes index l1+l2)
int *outarr = new int[(l1+1)*(l2+1)];
// Add vectors in parallel.
cudaError_t cudaStatus = launchProg(str1 , str2 , outarr , l1 , l2);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "launchProg failed!");
delete[] outstr1;
delete[] outstr2;
delete[] outarr;
return 1;
}
int offset = findAlignment(outarr , outstr1 , outstr2 , str1 , str2 ,l1,l2);
printf("Aligned Strings are : \n %s \n %s \n" , outstr1 + offset,
outstr2+offset);
//output the table
/*
for(int i=0;i<=l1;i++){
for(int j=0;j<=l2;j++){
printf("%d " , outarr[i*(l2+1) + j]);
}
printf("\n");
}
*/
delete[] outstr1;
delete[] outstr2;
delete[] outarr;
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
|
8c3966adf02afdb3caf9901263726bce19254091.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <gtest/gtest.h>
#include "config.h"
#include "model.h"
#include "global_array_queue.cuh"
#include "queue.h"
#include "sort_hip.cuh"
#include "util.cuh"
extern __device__ event fel[NUM_LPS * FEL_SIZE];
extern __device__ int fel_size;
event h_fel[NUM_LPS * FEL_SIZE];
//No-op kernel; launched once from main() before the tests run (presumably to
//absorb one-time device/runtime initialization cost -- confirm intent).
static __global__ void empty_kernel()
{
}
//Sanity-checks the build configuration that every other test in this file
//depends on: the global-array queue must be selected, NUM_LPS/FEL_SIZE/
//ENQUEUE_MAX must match the hard-coded expectations below, and the queues
//must store phold events.
TEST (pre, assumptions)
{
bool gaq_set = false;
#ifdef _GLOBAL_ARRAY_QUEUE
gaq_set = true;
#endif
int num_lps = NUM_LPS;
int fel_size = FEL_SIZE;
int enqueue_max = ENQUEUE_MAX;
ASSERT_EQ(true, gaq_set);
ASSERT_EQ(2, num_lps);
ASSERT_EQ(5, fel_size);
ASSERT_EQ(4, enqueue_max);
#ifndef _PHOLD
ASSERT_TRUE(false) << "the tests assume phold events stored in the queues.";
#endif
}
//Kernel: two threads concurrently insert two events each (ts 1..4,
//alternating nodes 0/1) to exercise parallel queue_insert.
__global__ void test_insert_init()
{
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx == 0) {
queue_insert({.node=0, .ts=1});
queue_insert({.node=1, .ts=2});
}
if (idx == 1) {
queue_insert({.node=0, .ts=3});
queue_insert({.node=1, .ts=4});
}
}
//After concurrent inserts and queue_post(), the FEL must hold the four
//events in ascending timestamp order.
TEST (insert, init)
{
queue_init();
hipLaunchKernelGGL(( test_insert_init), dim3(1), dim3(2), 0, 0, );
CudaCheckError();
queue_post();
CudaSafeCall( hipMemcpyFromSymbol(h_fel, fel, sizeof(h_fel)) );
EXPECT_EQ(1, h_fel[0].ts);
EXPECT_EQ(2, h_fel[1].ts);
EXPECT_EQ(3, h_fel[2].ts);
EXPECT_EQ(4, h_fel[3].ts);
}
//Kernel: second insert batch (ts 3 and 5) used to verify merging into an
//already-populated queue, including a duplicate timestamp.
__global__ void test_insert_cont()
{
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx == 0) {
queue_insert({.node=0, .ts=3});
}
if (idx == 1) {
queue_insert({.node=1, .ts=5});
}
}
//Two insert rounds separated by queue_post(); afterwards the FEL must be
//fully sorted, with the duplicate ts=3 kept.
TEST (insert, cont)
{
queue_init();
hipLaunchKernelGGL(( test_insert_init), dim3(1), dim3(2), 0, 0, );
CudaCheckError();
queue_post();
hipLaunchKernelGGL(( test_insert_cont), dim3(1), dim3(2), 0, 0, );
CudaCheckError();
queue_post();
CudaSafeCall( hipMemcpyFromSymbol(h_fel, fel, sizeof(h_fel)) );
EXPECT_EQ(1, h_fel[0].ts);
EXPECT_EQ(2, h_fel[1].ts);
EXPECT_EQ(3, h_fel[2].ts);
EXPECT_EQ(3, h_fel[3].ts);
EXPECT_EQ(4, h_fel[4].ts);
EXPECT_EQ(5, h_fel[5].ts);
}
//Kernel: bypasses queue_insert and seeds the FEL directly with four sorted
//events (two per node) so the dequeue path can be tested in isolation.
__global__ void test_dequeue_dequeue_pre()
{
fel[0] = {.ts=1, .node=0};
fel[1] = {.ts=2, .node=0};
fel[2] = {.ts=3, .node=1};
fel[3] = {.ts=4, .node=1};
fel_size = 4;
}
//Device-side capture buffer: one peeked event per thread.
__device__ event test_dequeue_dequeue_arr[2];
//Kernel: each of two threads peeks an event (queue_peek with -1; presumably
//"head event for the calling LP" -- confirm against the queue implementation)
//and records it for host-side inspection.
__global__ void test_dequeue_dequeue()
{
int idx = threadIdx.x + blockDim.x * blockIdx.x;
event *ev;
queue_peek(&ev, -1);
test_dequeue_dequeue_arr[idx] = *ev;
}
//Seeds the FEL, then has two threads peek their event and verifies thread 0
//sees ts=1 and thread 1 sees ts=3.
TEST (dequeue, dequeue)
{
queue_init();
hipLaunchKernelGGL(( test_dequeue_dequeue_pre), dim3(1), dim3(1), 0, 0, );
CudaCheckError();
queue_post();
queue_pre();
hipLaunchKernelGGL(( test_dequeue_dequeue), dim3(1), dim3(2), 0, 0, );
CudaCheckError(); //check the launch, consistent with the other tests (was missing)
event h_test_dequeue_dequeue_arr[2];
CudaSafeCall( hipMemcpyFromSymbol(h_test_dequeue_dequeue_arr,
test_dequeue_dequeue_arr, sizeof(h_test_dequeue_dequeue_arr)) );
EXPECT_EQ(1, h_test_dequeue_dequeue_arr[0].ts);
EXPECT_EQ(3, h_test_dequeue_dequeue_arr[1].ts);
}
//Warms up the device with a no-op kernel launch, then runs all registered
//gtest cases.
int main(int argc, char** argv)
{
hipLaunchKernelGGL(( empty_kernel), dim3(1), dim3(1), 0, 0, );
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
| 8c3966adf02afdb3caf9901263726bce19254091.cu | #include <gtest/gtest.h>
#include "config.h"
#include "model.h"
#include "global_array_queue.cuh"
#include "queue.h"
#include "sort.cuh"
#include "util.cuh"
extern __device__ event fel[NUM_LPS * FEL_SIZE];
extern __device__ int fel_size;
event h_fel[NUM_LPS * FEL_SIZE];
//No-op kernel; launched once from main() before the tests run (presumably to
//absorb one-time device/runtime initialization cost -- confirm intent).
static __global__ void empty_kernel()
{
}
//Sanity-checks the build configuration that every other test in this file
//depends on: the global-array queue must be selected, NUM_LPS/FEL_SIZE/
//ENQUEUE_MAX must match the hard-coded expectations below, and the queues
//must store phold events.
TEST (pre, assumptions)
{
bool gaq_set = false;
#ifdef _GLOBAL_ARRAY_QUEUE
gaq_set = true;
#endif
int num_lps = NUM_LPS;
int fel_size = FEL_SIZE;
int enqueue_max = ENQUEUE_MAX;
ASSERT_EQ(true, gaq_set);
ASSERT_EQ(2, num_lps);
ASSERT_EQ(5, fel_size);
ASSERT_EQ(4, enqueue_max);
#ifndef _PHOLD
ASSERT_TRUE(false) << "the tests assume phold events stored in the queues.";
#endif
}
//Kernel: two threads concurrently insert two events each (ts 1..4,
//alternating nodes 0/1) to exercise parallel queue_insert.
__global__ void test_insert_init()
{
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx == 0) {
queue_insert({.node=0, .ts=1});
queue_insert({.node=1, .ts=2});
}
if (idx == 1) {
queue_insert({.node=0, .ts=3});
queue_insert({.node=1, .ts=4});
}
}
//After concurrent inserts and queue_post(), the FEL must hold the four
//events in ascending timestamp order.
TEST (insert, init)
{
queue_init();
test_insert_init<<<1, 2>>>();
CudaCheckError();
queue_post();
CudaSafeCall( cudaMemcpyFromSymbol(h_fel, fel, sizeof(h_fel)) );
EXPECT_EQ(1, h_fel[0].ts);
EXPECT_EQ(2, h_fel[1].ts);
EXPECT_EQ(3, h_fel[2].ts);
EXPECT_EQ(4, h_fel[3].ts);
}
//Kernel: second insert batch (ts 3 and 5) used to verify merging into an
//already-populated queue, including a duplicate timestamp.
__global__ void test_insert_cont()
{
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx == 0) {
queue_insert({.node=0, .ts=3});
}
if (idx == 1) {
queue_insert({.node=1, .ts=5});
}
}
//Two insert rounds separated by queue_post(); afterwards the FEL must be
//fully sorted, with the duplicate ts=3 kept.
TEST (insert, cont)
{
queue_init();
test_insert_init<<<1, 2>>>();
CudaCheckError();
queue_post();
test_insert_cont<<<1, 2>>>();
CudaCheckError();
queue_post();
CudaSafeCall( cudaMemcpyFromSymbol(h_fel, fel, sizeof(h_fel)) );
EXPECT_EQ(1, h_fel[0].ts);
EXPECT_EQ(2, h_fel[1].ts);
EXPECT_EQ(3, h_fel[2].ts);
EXPECT_EQ(3, h_fel[3].ts);
EXPECT_EQ(4, h_fel[4].ts);
EXPECT_EQ(5, h_fel[5].ts);
}
//Kernel: bypasses queue_insert and seeds the FEL directly with four sorted
//events (two per node) so the dequeue path can be tested in isolation.
__global__ void test_dequeue_dequeue_pre()
{
fel[0] = {.ts=1, .node=0};
fel[1] = {.ts=2, .node=0};
fel[2] = {.ts=3, .node=1};
fel[3] = {.ts=4, .node=1};
fel_size = 4;
}
//Device-side capture buffer: one peeked event per thread.
__device__ event test_dequeue_dequeue_arr[2];
//Kernel: each of two threads peeks an event (queue_peek with -1; presumably
//"head event for the calling LP" -- confirm against the queue implementation)
//and records it for host-side inspection.
__global__ void test_dequeue_dequeue()
{
int idx = threadIdx.x + blockDim.x * blockIdx.x;
event *ev;
queue_peek(&ev, -1);
test_dequeue_dequeue_arr[idx] = *ev;
}
//Seeds the FEL, then has two threads peek their event and verifies thread 0
//sees ts=1 and thread 1 sees ts=3.
TEST (dequeue, dequeue)
{
queue_init();
test_dequeue_dequeue_pre<<<1, 1>>>();
CudaCheckError();
queue_post();
queue_pre();
test_dequeue_dequeue<<<1, 2>>>();
CudaCheckError(); //check the launch, consistent with the other tests (was missing)
event h_test_dequeue_dequeue_arr[2];
CudaSafeCall( cudaMemcpyFromSymbol(h_test_dequeue_dequeue_arr,
test_dequeue_dequeue_arr, sizeof(h_test_dequeue_dequeue_arr)) );
EXPECT_EQ(1, h_test_dequeue_dequeue_arr[0].ts);
EXPECT_EQ(3, h_test_dequeue_dequeue_arr[1].ts);
}
//Warms up the device with a no-op kernel launch, then runs all registered
//gtest cases.
int main(int argc, char** argv)
{
empty_kernel<<<1, 1>>>();
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
|
1c31e7dafa1e18187ffcb8e67c5663cc07e30e17.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
//Configuration for this auto-generated instantiation: int8 data in
//NCxHWx<32> layout, filters in CxRSKx<32>, Sm75 tensor-core mma
//(OpClassTensorOp, 8x8x16 instruction shape), 32x64x64 threadblock tile,
//32x16x64 warp tile, bias-add + linear-combination + clamp epilogue.
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
int8_t, 8, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutSrc, int32_t, LayoutSrc, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 16, 16, false>;
//Explicit instantiation: compiles the templated wrapper for exactly this
//Convolution configuration into this translation unit.
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const int8_t* d_src,
const int8_t* d_filter,
const int32_t* d_bias,
const int8_t* d_z,
int8_t* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| 1c31e7dafa1e18187ffcb8e67c5663cc07e30e17.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
//Configuration for this auto-generated instantiation: int8 data in
//NCxHWx<32> layout, filters in CxRSKx<32>, Sm75 tensor-core mma
//(OpClassTensorOp, 8x8x16 instruction shape), 32x64x64 threadblock tile,
//32x16x64 warp tile, bias-add + linear-combination + clamp epilogue.
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
int8_t, 8, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutSrc, int32_t, LayoutSrc, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 16, 16, false>;
//Explicit instantiation: compiles the templated wrapper for exactly this
//Convolution configuration into this translation unit.
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const int8_t* d_src,
const int8_t* d_filter,
const int32_t* d_bias,
const int8_t* d_z,
int8_t* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
af3f2df8d9070d532f6dcbcd6e20b01580ec3dc4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include "compare.h"
#include "gputimer.h"
// Subpart A:
// Write step 1 as a kernel that operates on threads 0--31.
// Assume that the input flags are 0 for false and 1 for true and are stored
// in a local per-thread register called p (for predicate).
//
// You have access to 31 words of shared memory s[0:31], with s[0]
// corresponding to thread 0 and s[31] corresponding to thread 31.
// You may change the values of s[0:31]. Put the return sum in s[0].
// Your code should execute no more than 5 warp-wide addition operations.
//Warp-wide sum of the per-thread 0/1 predicate p via a 5-step shared-memory
//tree reduction; the total ends up in s[0] and is returned.
//NOTE(review): there are no barriers between the stages -- this is the old
//warp-synchronous idiom ('s' is volatile to force re-reads) and assumes all
//32 participating threads run in lockstep.  On NVIDIA Volta+ (independent
//thread scheduling) and on AMD hardware via HIP (64-wide wavefronts) this
//assumption should be re-verified; consider explicit warp-level barriers.
__device__ unsigned int shared_reduce(unsigned int p, volatile unsigned int * s) {
// Assumes values in 'p' are either 1 or 0
// Assumes s[0:31] are allocated
// Sums p across warp, returning the result. Suggest you put
// result in s[0] and return it
// You may change any value in s
// You should execute no more than 5 + operations (if you're doing
// 31, you're doing it wrong)
//
// TODO: Fill in the rest of this function
int t = threadIdx.x;
s[t] = p;
if (t < 16)
s[t] = s[t] + s[t + 16];
if (t < 8)
s[t] = s[t] + s[t + 8];
if (t < 4)
s[t] = s[t] + s[t + 4];
if (t < 2)
s[t] = s[t] + s[t + 2];
if (t < 1)
s[t] = s[t] + s[t + 1];
return s[0];
}
//Kernel: each thread loads one flag from d_in, the threads sum them via
//shared_reduce, and thread 0 stores the total to *d_out_shared.
//Launched from main with a single 32-thread block and 32*sizeof(unsigned int)
//bytes of dynamic shared memory.
__global__ void reduce(unsigned int * d_out_shared,
const unsigned int * d_in)
{
extern __shared__ unsigned int s[];
int t = threadIdx.x;
int p = d_in[t];
unsigned int sr = shared_reduce(p, s);
if (t == 0)
{
*d_out_shared = sr;
}
}
//Host driver: builds a random array of 32 0/1 flags, reduces it on the GPU
//with a single 32-thread block plus dynamic shared memory, and compares the
//device result against the host-computed sum via compare().
//NOTE(review): the hip* calls here are unchecked -- consider an
//error-checking macro around them.
int main(int argc, char **argv)
{
const int ARRAY_SIZE = 32;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(unsigned int);
// generate the input array on the host
unsigned int h_in[ARRAY_SIZE];
unsigned int sum = 0;
for(int i = 0; i < ARRAY_SIZE; i++) {
// generate random float in [0, 1]
h_in[i] = (float)random()/(float)RAND_MAX > 0.5f ? 1 : 0;
sum += h_in[i];
}
// declare GPU memory pointers
unsigned int * d_in, * d_out_shared;
// allocate GPU memory
hipMalloc((void **) &d_in, ARRAY_BYTES);
hipMalloc((void **) &d_out_shared, sizeof(unsigned int));
// transfer the input array to the GPU
hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
GpuTimer timer;
timer.Start();
// launch the kernel
hipLaunchKernelGGL(( reduce), dim3(1), dim3(ARRAY_SIZE), ARRAY_SIZE * sizeof(unsigned int), 0,
d_out_shared, d_in);
timer.Stop();
printf("Your code executed in %g ms\n", timer.Elapsed());
unsigned int h_out_shared;
// copy back the sum from GPU
hipMemcpy(&h_out_shared, d_out_shared, sizeof(unsigned int),
hipMemcpyDeviceToHost);
compare(h_out_shared, sum);
// free GPU memory allocation
hipFree(d_in);
hipFree(d_out_shared);
}
| af3f2df8d9070d532f6dcbcd6e20b01580ec3dc4.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include "compare.h"
#include "gputimer.h"
// Subpart A:
// Write step 1 as a kernel that operates on threads 0--31.
// Assume that the input flags are 0 for false and 1 for true and are stored
// in a local per-thread register called p (for predicate).
//
// You have access to 31 words of shared memory s[0:31], with s[0]
// corresponding to thread 0 and s[31] corresponding to thread 31.
// You may change the values of s[0:31]. Put the return sum in s[0].
// Your code should execute no more than 5 warp-wide addition operations.
__device__ unsigned int shared_reduce(unsigned int p, volatile unsigned int * s) {
    // Assumes values in 'p' are either 1 or 0
    // Assumes s[0:31] are allocated and that exactly 32 threads (one warp)
    // execute this function (the launch in main uses a single 32-thread block)
    // Sums p across the warp with a 5-step shared-memory tree reduction,
    // leaves the total in s[0] and returns it.
    //
    // __syncwarp() barriers added between stages: since Volta (independent
    // thread scheduling) the threads of a warp are no longer guaranteed to
    // execute in lockstep, so the old warp-synchronous idiom (volatile alone)
    // can read a neighbour's slot before it has been written.
    int t = threadIdx.x;
    s[t] = p;
    __syncwarp();
    if (t < 16)
        s[t] = s[t] + s[t + 16];
    __syncwarp();
    if (t < 8)
        s[t] = s[t] + s[t + 8];
    __syncwarp();
    if (t < 4)
        s[t] = s[t] + s[t + 4];
    __syncwarp();
    if (t < 2)
        s[t] = s[t] + s[t + 2];
    __syncwarp();
    if (t < 1)
        s[t] = s[t] + s[t + 1];
    __syncwarp();
    return s[0];
}
//Kernel: each thread loads one flag from d_in, the threads sum them via
//shared_reduce, and thread 0 stores the total to *d_out_shared.
//Launched from main with a single 32-thread block and 32*sizeof(unsigned int)
//bytes of dynamic shared memory.
__global__ void reduce(unsigned int * d_out_shared,
const unsigned int * d_in)
{
extern __shared__ unsigned int s[];
int t = threadIdx.x;
int p = d_in[t];
unsigned int sr = shared_reduce(p, s);
if (t == 0)
{
*d_out_shared = sr;
}
}
//Host driver: builds a random array of 32 0/1 flags, reduces it on the GPU
//with a single 32-thread block plus dynamic shared memory, and compares the
//device result against the host-computed sum via compare().
//NOTE(review): the cuda* calls here are unchecked -- consider an
//error-checking macro around them.
int main(int argc, char **argv)
{
const int ARRAY_SIZE = 32;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(unsigned int);
// generate the input array on the host
unsigned int h_in[ARRAY_SIZE];
unsigned int sum = 0;
for(int i = 0; i < ARRAY_SIZE; i++) {
// generate random float in [0, 1]
h_in[i] = (float)random()/(float)RAND_MAX > 0.5f ? 1 : 0;
sum += h_in[i];
}
// declare GPU memory pointers
unsigned int * d_in, * d_out_shared;
// allocate GPU memory
cudaMalloc((void **) &d_in, ARRAY_BYTES);
cudaMalloc((void **) &d_out_shared, sizeof(unsigned int));
// transfer the input array to the GPU
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
GpuTimer timer;
timer.Start();
// launch the kernel
reduce<<<1, ARRAY_SIZE, ARRAY_SIZE * sizeof(unsigned int)>>>
(d_out_shared, d_in);
timer.Stop();
printf("Your code executed in %g ms\n", timer.Elapsed());
unsigned int h_out_shared;
// copy back the sum from GPU
cudaMemcpy(&h_out_shared, d_out_shared, sizeof(unsigned int),
cudaMemcpyDeviceToHost);
compare(h_out_shared, sum);
// free GPU memory allocation
cudaFree(d_in);
cudaFree(d_out_shared);
}
|
a672c1e9d115c9a456d2a8f36366b717e5bb11f8.hip | // !!! This is a file automatically generated by hipify!!!
#include "gpu_func.h"
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <iostream>
#include "rocblas.h"
#define BLOCK_SIZE 16
// Trivial kernel: writes t + 1 into *d_result. Exists only as a warm-up.
__global__
void device_add_one(int* d_result, int t) {
    *d_result = t + 1;
}
/*
 * Warm-up helper: launches the trivial kernel above, times it, and copies
 * the result back. Calling this once pays the one-time GPU context
 * initialization cost before timing real work.
 */
int useless_gpu_add_one(int t) {
    int result;
    int* d_result;
    // One device word to receive the kernel's output.
    checkCudaErrors(hipMalloc((void**)&d_result, 1 * sizeof(int)));
    event_pair timer;
    start_timer(&timer);
    hipLaunchKernelGGL(( device_add_one), dim3(1),dim3(1), 0, 0, d_result, t);
    check_launch("device_add_one");
    double time = stop_timer(&timer);
    std::cout << "device_add_one took: " << time << " seconds" << std::endl;
    checkCudaErrors(hipMemcpy(&result, d_result, 1 * sizeof(int),
                    hipMemcpyDeviceToHost));
    checkCudaErrors(hipFree(d_result));
    return result;
}
// __global__
// void gemm_gpu_fast(double* A, double* B, double* C, double* D, int hA, int wA,
// int hB, int wB, double alpha, double beta){
// int bx = blockIdx.x;
// int by = blockIdx.y;
//
// int tx = threadIdx.x;
// int ty = threadIdx.y;
//
// int a_start = BLOCK_SIZE*by;
// int a_end = a_start+(wA-1)*hA;
// int a_step = BLOCK_SIZE*hA;
//
// int b_start = bx*BLOCK_SIZE*hB;
// int b_step = BLOCK_SIZE;
//
// float Dsub = 0;
// //printf("%u %u %u %u\n", bx,by,tx,ty);
// for (int a = a_start, b=b_start; a <= a_end; a+=a_step, b+=b_step){
//
// __shared__ float Asub[BLOCK_SIZE][BLOCK_SIZE];
// __shared__ float Bsub[BLOCK_SIZE][BLOCK_SIZE];
//
// if (a+tx*hA >= wA*hA || by*BLOCK_SIZE+ty >= hA){
// Asub[ty][tx] = 0;
// //printf("af bx-%u by-%u tx-%u ty-%u a-%u b-%u aid-%u b-id%u\n", bx, by, tx, ty, a, b, a + ty + hA*tx, b + ty + hB*tx);
// }
// else{
// printf("ag bx-%u by-%u tx-%u ty-%u a-%u b-%u aid-%u b-id%u\n", bx, by, tx, ty, a, b, a + ty + hA*tx, b + ty + hB*tx);
// Asub[ty][tx] = A[a + ty + hA*tx];
// }
//
// if ((b%hB)+ty >= hB || bx*BLOCK_SIZE+tx >= wB){
// //printf("bf bx-%u by-%u tx-%u ty-%u a-%u b-%u aid-%u b-id%u\n", bx, by, tx, ty, a, b, a + ty + hA*tx, b + ty + hB*tx);
// Bsub[ty][tx] = 0;
// }else {
// printf("bg bx-%u by-%u tx-%u ty-%u a-%u b-%u aid-%u b-id%u\n", bx, by, tx, ty, a, b, a + ty + hA*tx, b + ty + hB*tx);
// Bsub[ty][tx] = B[b + ty + hB*tx];
// }
//
// __syncthreads();
//
// for (int i = 0; i < BLOCK_SIZE; i++){
// Dsub += Asub[ty][i]*Bsub[i][tx];
// }
// __syncthreads();
// }
//
// if (by*BLOCK_SIZE+ty < hA && bx*BLOCK_SIZE+tx < wB){
// printf("%u %u %u %u %u\n", bx, by, tx, ty, Dsub);
// int c = bx*BLOCK_SIZE*hA+by*BLOCK_SIZE;
// D[c + ty + tx*hA] = alpha*Dsub + beta*C[c + ty + tx*hA];
// }
//
// }
// Tiled GEMM kernel: C := alpha*A*B + beta*C, column-major storage.
// A is hA x wA, B is hB x wB (the loop requires wA == hB for a valid
// product). Each block computes one BLOCK_SIZE x BLOCK_SIZE tile of C,
// marching tiles of A (rightward) and B (downward) through shared memory.
// Out-of-range tile elements are zero-filled so partial edge tiles work.
__global__
void gemm_gpu_fast(double* A, double* B, double* C, int hA, int wA,
                   int hB, int wB, double alpha, double beta){
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    // A tiles: start at row-block 'by', step one tile-width of columns.
    int a_start = BLOCK_SIZE*by;
    int a_end = a_start+(wA-1)*hA;
    int a_step = BLOCK_SIZE*hA;
    // B tiles: start at column-block 'bx', step one tile-height of rows.
    int b_start = bx*BLOCK_SIZE*hB;
    int b_step = BLOCK_SIZE;
    double Dsub = 0;   // this thread's accumulated dot product
    for (int a = a_start, b=b_start; a <= a_end; a+=a_step, b+=b_step){
        __shared__ double Asub[BLOCK_SIZE][BLOCK_SIZE];
        __shared__ double Bsub[BLOCK_SIZE][BLOCK_SIZE];
        // Stage the A tile; zero-pad outside the matrix.
        if (by*BLOCK_SIZE+ty < hA && a+ty+tx*hA < wA*hA){
            Asub[ty][tx] = A[a+ty+tx*hA];
        }else{
            Asub[ty][tx] = 0;
        }
        // Stage the B tile; zero-pad outside the matrix.
        if (b%hB+ty < hB && bx*BLOCK_SIZE+tx < wB){
            Bsub[ty][tx] = B[b+ty+tx*hB];
        }else{
            Bsub[ty][tx] = 0;
        }
        __syncthreads();   // tiles fully staged before use
        for (int i = 0; i < BLOCK_SIZE; i++){
            Dsub += Asub[ty][i]*Bsub[i][tx];
        }
        __syncthreads();   // all reads done before tiles are overwritten
    }
    // Write back with the alpha/beta scaling; guard partial edge tiles.
    if (by*BLOCK_SIZE+ty < hA && bx*BLOCK_SIZE+tx < wB){
        int c_id = bx*BLOCK_SIZE*hA+by*BLOCK_SIZE+ty + tx*hA;
        C[c_id] = alpha*Dsub + beta*C[c_id];
    }
}
// Naive GEMM kernel (reference implementation): one thread per element of
// C, each reading a full row of A and column of B from global memory.
// Same contract as gemm_gpu_fast; kept for comparison/debugging.
__global__
void gemm_gpu(double* A, double* B, double* C, int hA, int wA,
              int hB, int wB, double alpha, double beta){
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int row = by*BLOCK_SIZE+ty;
    int col = bx*BLOCK_SIZE+tx;
    int a_start = row;        // walk row 'row' of A with stride hA
    int a_step = hA;
    int b_start = col*hB;     // column 'col' of B is contiguous
    double entry = 0;
    if (row < hA && col < wB){
        for (int i = 0; i < hB; i++){
            entry += A[a_start+i*a_step]*B[b_start+i];
        }
        C[row+col*hA] = alpha*entry + beta*C[row+col*hA];
    }
}
/*
Routine to perform an in-place GEMM operation, i.e., C := alpha*A*B + beta*C
*/
/*
 * In-place GEMM on the GPU: C := alpha*A*B + beta*C, with A (M x K),
 * B (K x N), C (M x N) stored column-major on the device.
 * alpha and beta are host pointers to the scalar coefficients.
 * Always returns 0.
 */
int myGEMM(double* __restrict__ A, double* __restrict__ B,
           double* __restrict__ C, double* alpha, double* beta,
           int M, int N, int K) {
    const dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
    // One extra block per axis covers matrices not divisible by the tile.
    const dim3 grid(N / threads.x + 1, M / threads.y + 1);
    hipLaunchKernelGGL(( gemm_gpu_fast), dim3(grid), dim3(threads), 0, 0, A, B, C, M, K, K, N,
                       *alpha, *beta);
    return 0;
}
// Element-wise, numerically stable sigmoid: S = 1 / (1 + exp(-X)).
// X and S are M x N column-major device matrices; one thread per element.
// Branching on the sign of the INPUT selects the formulation that never
// exponentiates a large positive value, avoiding overflow.
__global__
void sigmoid_gpu(double* __restrict__ X, double* __restrict__ S, int M, int N){
    int row = blockIdx.y*BLOCK_SIZE + threadIdx.y;
    int col = blockIdx.x*BLOCK_SIZE + threadIdx.x;
    int id = col*M + row;
    if (row < M && col < N){
        // BUG FIX: the original tested S[id] (the uninitialized output)
        // instead of X[id], so the stable branch was chosen arbitrarily.
        if (X[id] >= 0){
            S[id] = 1.0/(exp(-X[id])+1);
        }else{
            S[id] = exp(X[id])/(exp(X[id])+1);
        }
    }
}
// Launches sigmoid_gpu over an M x N matrix in BLOCK_SIZE-square tiles.
// Always returns 0.
int mySigmoid(double* __restrict__ X, double* __restrict__ S, int M, int N) {
    const dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
    const dim3 grid(N / threads.x + 1, M / threads.y + 1);
    hipLaunchKernelGGL(( sigmoid_gpu), dim3(grid), dim3(threads), 0, 0, X, S, M, N);
    return 0;
}
// Element-wise (Hadamard) product H = X .* Y for M x N column-major
// matrices; one thread per element, guarded against grid overhang.
__global__
void hadamard_gpu(double* __restrict__ X, double* __restrict__ Y, double* __restrict__ H, int M, int N){
    const int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
    const int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    if (row < M && col < N){
        const int idx = col * M + row;
        H[idx] = X[idx] * Y[idx];
    }
}
// Host wrapper: launches hadamard_gpu over the full matrix. Returns 0.
int myHadamard(double* __restrict__ X, double* __restrict__ Y, double* __restrict__ H, int M, int N) {
    const dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
    const dim3 grid(N / threads.x + 1, M / threads.y + 1);
    hipLaunchKernelGGL(( hadamard_gpu), dim3(grid), dim3(threads), 0, 0, X, Y, H, M, N);
    return 0;
}
// Out-of-place transpose: Xt = X^T, with X an M x N column-major matrix
// and Xt its N x M column-major transpose.
__global__
void transpose_gpu(double* __restrict__ X, double* __restrict__ Xt, int M, int N){
    const int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
    const int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    if (row < M && col < N){
        // Element (row, col) of X becomes element (col, row) of Xt.
        Xt[row*N + col] = X[col*M + row];
    }
}
// Host wrapper: launches transpose_gpu over the full matrix. Returns 0.
int myTranspose(double* __restrict__ X, double* __restrict__ Xt, int M, int N) {
    const dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
    const dim3 grid(N / threads.x + 1, M / threads.y + 1);
    hipLaunchKernelGGL(( transpose_gpu), dim3(grid), dim3(threads), 0, 0, X, Xt, M, N);
    return 0;
}
// Scaled element-wise sum: Z = X + alpha*Y for M x N column-major
// matrices; one thread per element.
__global__
void matrix_add_gpu(double* __restrict__ X, double* __restrict__ Y, double* __restrict__ Z,
                    int M, int N, double alpha){
    const int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
    const int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    if (row < M && col < N){
        const int idx = col * M + row;
        Z[idx] = X[idx] + alpha * Y[idx];
    }
}
// Host wrapper: launches matrix_add_gpu over the full matrix. Returns 0.
int myMatAdd(double* __restrict__ X, double* __restrict__ Y, double* __restrict__ Z, int M, int N, double alpha) {
    const dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
    const dim3 grid(N / threads.x + 1, M / threads.y + 1);
    hipLaunchKernelGGL(( matrix_add_gpu), dim3(grid), dim3(threads), 0, 0, X, Y, Z, M, N, alpha);
    return 0;
}
// Row sums scaled by alpha. For every (row, col) thread the full sum of
// row 'row' of the M x N column-major matrix X is accumulated into S at
// the thread's own index — so each row sum is recomputed N times and
// written into every column of S.
// NOTE(review): if the caller allocates S as an M x 1 vector, the writes
// for col > 0 are out of bounds — confirm S is allocated M x N, or launch
// with N = 1.
__global__
void row_sum_gpu(double* __restrict__ X, double* __restrict__ S,
                 int M, int N, double alpha){
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int row = by*BLOCK_SIZE+ty;
    int col = bx*BLOCK_SIZE+tx;
    int id = col*M+row;
    if (row < M && col < N){
        S[id] = 0;
        // Stride M walks along row 'row' across all N columns of X.
        for(int i = row; i < M*N; i+=M){
            S[id] += X[i];
        }
        S[id] *= alpha;
    }
}
// Host wrapper: launches row_sum_gpu over an M x N grid. Returns 0.
int myRowSum(double* __restrict__ X, double* __restrict__ S, int M, int N, double alpha) {
    dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE);
    dim3 dimGrid(N/dimBlock.x+1, M/dimBlock.y+1);
    hipLaunchKernelGGL(( row_sum_gpu), dim3(dimGrid), dim3(dimBlock), 0, 0, X, S, M, N, alpha);
    return 0;
}
// Column-wise softmax: S[:,col] = exp(X[:,col]) / sum(exp(X[:,col])).
// X and S are M x N column-major; one thread per element, each thread
// redundantly recomputing its column's statistics (O(M) work per element).
// The column maximum is subtracted before exponentiating — mathematically
// equivalent, but prevents exp() overflow for large inputs.
__global__
void softmax_gpu(double* __restrict__ X, double* __restrict__ S, int M, int N){
    int row = blockIdx.y*BLOCK_SIZE + threadIdx.y;
    int col = blockIdx.x*BLOCK_SIZE + threadIdx.x;
    int id = col*M + row;
    if (row < M && col < N){
        // Column maximum, for numerical stability.
        double mx = X[col*M];
        for (int i = col*M+1; i < (col+1)*M; i++){
            if (X[i] > mx) mx = X[i];
        }
        double sum = 0;
        for (int i = col*M; i < (col+1)*M; i++){
            sum += exp(X[i] - mx);
        }
        S[id] = exp(X[id] - mx)/sum;
    }
}
// Launches softmax_gpu; each thread normalizes one entry of its column.
// Always returns 0.
int mySoftmax(double* __restrict__ X, double* __restrict__ S, int M, int N) {
    const dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
    const dim3 grid(N / threads.x + 1, M / threads.y + 1);
    hipLaunchKernelGGL(( softmax_gpu), dim3(grid), dim3(threads), 0, 0, X, S, M, N);
    return 0;
}
// Debug helper: prints the top-left m x n corner of an M x N column-major
// matrix, one printf per element (output ordering is nondeterministic).
__global__
void print_gpu(double* __restrict__ X, int M, int N, int m, int n){
    int row = blockIdx.y*BLOCK_SIZE + threadIdx.y;
    int col = blockIdx.x*BLOCK_SIZE + threadIdx.x;
    int id = col*M + row;
    if (row < m && col < n){
        // BUG FIX: row/col/id are signed ints; the original used %u.
        printf("%d,%d,%d: %f \n", row, col, id, X[id]);
    }
}
// Host wrapper: grid covers the full M x N matrix; the kernel restricts
// printing to the m x n corner. Returns 0.
int myPrintMat(double* __restrict__ X, int M, int N, int m, int n) {
    dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE);
    dim3 dimGrid(N/dimBlock.x+1, M/dimBlock.y+1);
    hipLaunchKernelGGL(( print_gpu), dim3(dimGrid), dim3(dimBlock), 0, 0, X, M, N, m, n);
    return 0;
}
| a672c1e9d115c9a456d2a8f36366b717e5bb11f8.cu | #include "gpu_func.h"
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <iostream>
#include "cublas_v2.h"
#define BLOCK_SIZE 16
// Trivial kernel: writes t + 1 into *d_result. Exists only as a warm-up.
__global__
void device_add_one(int* d_result, int t) {
    *d_result = t + 1;
}
/*
 * Warm-up helper: launches the trivial kernel above, times it, and copies
 * the result back. Calling this once pays the one-time CUDA context
 * initialization cost before timing real work.
 */
int useless_gpu_add_one(int t) {
    int result;
    int* d_result;
    // One device word to receive the kernel's output.
    checkCudaErrors(cudaMalloc((void**)&d_result, 1 * sizeof(int)));
    event_pair timer;
    start_timer(&timer);
    device_add_one<<<1,1>>>(d_result, t);
    check_launch("device_add_one");
    double time = stop_timer(&timer);
    std::cout << "device_add_one took: " << time << " seconds" << std::endl;
    checkCudaErrors(cudaMemcpy(&result, d_result, 1 * sizeof(int),
                    cudaMemcpyDeviceToHost));
    checkCudaErrors(cudaFree(d_result));
    return result;
}
// __global__
// void gemm_gpu_fast(double* A, double* B, double* C, double* D, int hA, int wA,
// int hB, int wB, double alpha, double beta){
// int bx = blockIdx.x;
// int by = blockIdx.y;
//
// int tx = threadIdx.x;
// int ty = threadIdx.y;
//
// int a_start = BLOCK_SIZE*by;
// int a_end = a_start+(wA-1)*hA;
// int a_step = BLOCK_SIZE*hA;
//
// int b_start = bx*BLOCK_SIZE*hB;
// int b_step = BLOCK_SIZE;
//
// float Dsub = 0;
// //printf("%u %u %u %u\n", bx,by,tx,ty);
// for (int a = a_start, b=b_start; a <= a_end; a+=a_step, b+=b_step){
//
// __shared__ float Asub[BLOCK_SIZE][BLOCK_SIZE];
// __shared__ float Bsub[BLOCK_SIZE][BLOCK_SIZE];
//
// if (a+tx*hA >= wA*hA || by*BLOCK_SIZE+ty >= hA){
// Asub[ty][tx] = 0;
// //printf("af bx-%u by-%u tx-%u ty-%u a-%u b-%u aid-%u b-id%u\n", bx, by, tx, ty, a, b, a + ty + hA*tx, b + ty + hB*tx);
// }
// else{
// printf("ag bx-%u by-%u tx-%u ty-%u a-%u b-%u aid-%u b-id%u\n", bx, by, tx, ty, a, b, a + ty + hA*tx, b + ty + hB*tx);
// Asub[ty][tx] = A[a + ty + hA*tx];
// }
//
// if ((b%hB)+ty >= hB || bx*BLOCK_SIZE+tx >= wB){
// //printf("bf bx-%u by-%u tx-%u ty-%u a-%u b-%u aid-%u b-id%u\n", bx, by, tx, ty, a, b, a + ty + hA*tx, b + ty + hB*tx);
// Bsub[ty][tx] = 0;
// }else {
// printf("bg bx-%u by-%u tx-%u ty-%u a-%u b-%u aid-%u b-id%u\n", bx, by, tx, ty, a, b, a + ty + hA*tx, b + ty + hB*tx);
// Bsub[ty][tx] = B[b + ty + hB*tx];
// }
//
// __syncthreads();
//
// for (int i = 0; i < BLOCK_SIZE; i++){
// Dsub += Asub[ty][i]*Bsub[i][tx];
// }
// __syncthreads();
// }
//
// if (by*BLOCK_SIZE+ty < hA && bx*BLOCK_SIZE+tx < wB){
// printf("%u %u %u %u %u\n", bx, by, tx, ty, Dsub);
// int c = bx*BLOCK_SIZE*hA+by*BLOCK_SIZE;
// D[c + ty + tx*hA] = alpha*Dsub + beta*C[c + ty + tx*hA];
// }
//
// }
// Tiled GEMM kernel: C := alpha*A*B + beta*C, column-major storage.
// A is hA x wA, B is hB x wB (the loop requires wA == hB for a valid
// product). Each block computes one BLOCK_SIZE x BLOCK_SIZE tile of C,
// marching tiles of A (rightward) and B (downward) through shared memory.
// Out-of-range tile elements are zero-filled so partial edge tiles work.
__global__
void gemm_gpu_fast(double* A, double* B, double* C, int hA, int wA,
                   int hB, int wB, double alpha, double beta){
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    // A tiles: start at row-block 'by', step one tile-width of columns.
    int a_start = BLOCK_SIZE*by;
    int a_end = a_start+(wA-1)*hA;
    int a_step = BLOCK_SIZE*hA;
    // B tiles: start at column-block 'bx', step one tile-height of rows.
    int b_start = bx*BLOCK_SIZE*hB;
    int b_step = BLOCK_SIZE;
    double Dsub = 0;   // this thread's accumulated dot product
    for (int a = a_start, b=b_start; a <= a_end; a+=a_step, b+=b_step){
        __shared__ double Asub[BLOCK_SIZE][BLOCK_SIZE];
        __shared__ double Bsub[BLOCK_SIZE][BLOCK_SIZE];
        // Stage the A tile; zero-pad outside the matrix.
        if (by*BLOCK_SIZE+ty < hA && a+ty+tx*hA < wA*hA){
            Asub[ty][tx] = A[a+ty+tx*hA];
        }else{
            Asub[ty][tx] = 0;
        }
        // Stage the B tile; zero-pad outside the matrix.
        if (b%hB+ty < hB && bx*BLOCK_SIZE+tx < wB){
            Bsub[ty][tx] = B[b+ty+tx*hB];
        }else{
            Bsub[ty][tx] = 0;
        }
        __syncthreads();   // tiles fully staged before use
        for (int i = 0; i < BLOCK_SIZE; i++){
            Dsub += Asub[ty][i]*Bsub[i][tx];
        }
        __syncthreads();   // all reads done before tiles are overwritten
    }
    // Write back with the alpha/beta scaling; guard partial edge tiles.
    if (by*BLOCK_SIZE+ty < hA && bx*BLOCK_SIZE+tx < wB){
        int c_id = bx*BLOCK_SIZE*hA+by*BLOCK_SIZE+ty + tx*hA;
        C[c_id] = alpha*Dsub + beta*C[c_id];
    }
}
// Naive GEMM kernel (reference implementation): one thread per element of
// C, each reading a full row of A and column of B from global memory.
// Same contract as gemm_gpu_fast; kept for comparison/debugging.
__global__
void gemm_gpu(double* A, double* B, double* C, int hA, int wA,
              int hB, int wB, double alpha, double beta){
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int row = by*BLOCK_SIZE+ty;
    int col = bx*BLOCK_SIZE+tx;
    int a_start = row;        // walk row 'row' of A with stride hA
    int a_step = hA;
    int b_start = col*hB;     // column 'col' of B is contiguous
    double entry = 0;
    if (row < hA && col < wB){
        for (int i = 0; i < hB; i++){
            entry += A[a_start+i*a_step]*B[b_start+i];
        }
        C[row+col*hA] = alpha*entry + beta*C[row+col*hA];
    }
}
/*
Routine to perform an in-place GEMM operation, i.e., C := alpha*A*B + beta*C
*/
// In-place GEMM on the GPU: C := alpha*A*B + beta*C, with A (M x K),
// B (K x N), C (M x N) stored column-major on the device. alpha and beta
// are host pointers to the scalar coefficients. Always returns 0.
int myGEMM(double* __restrict__ A, double* __restrict__ B,
           double* __restrict__ C, double* alpha, double* beta,
           int M, int N, int K) {
    dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE);
    // One extra block per axis covers sizes not divisible by the tile.
    dim3 dimGrid(N/dimBlock.x+1, M/dimBlock.y+1);
    gemm_gpu_fast<<<dimGrid, dimBlock>>>(A, B, C, M, K, K, N,
                                         *alpha, *beta);
    return 0;
}
// Element-wise, numerically stable sigmoid: S = 1 / (1 + exp(-X)).
// X and S are M x N column-major device matrices; one thread per element.
// Branching on the sign of the INPUT selects the formulation that never
// exponentiates a large positive value, avoiding overflow.
__global__
void sigmoid_gpu(double* __restrict__ X, double* __restrict__ S, int M, int N){
    int row = blockIdx.y*BLOCK_SIZE + threadIdx.y;
    int col = blockIdx.x*BLOCK_SIZE + threadIdx.x;
    int id = col*M + row;
    if (row < M && col < N){
        // BUG FIX: the original tested S[id] (the uninitialized output)
        // instead of X[id], so the stable branch was chosen arbitrarily.
        if (X[id] >= 0){
            S[id] = 1.0/(exp(-X[id])+1);
        }else{
            S[id] = exp(X[id])/(exp(X[id])+1);
        }
    }
}
// Host wrapper: launches sigmoid_gpu over an M x N matrix. Returns 0.
int mySigmoid(double* __restrict__ X, double* __restrict__ S, int M, int N) {
    dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE);
    dim3 dimGrid(N/dimBlock.x+1, M/dimBlock.y+1);
    sigmoid_gpu<<<dimGrid, dimBlock>>>(X, S, M,N);
    return 0;
}
// Element-wise (Hadamard) product: H = X .* Y for M x N column-major
// matrices; one thread per element, guarded against grid overhang.
__global__
void hadamard_gpu(double* __restrict__ X, double* __restrict__ Y, double* __restrict__ H, int M, int N){
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int row = by*BLOCK_SIZE+ty;
    int col = bx*BLOCK_SIZE+tx;
    int id = col*M+row;
    if (row < M && col < N){
        H[id] = X[id]*Y[id];
    }
}
// Host wrapper: launches hadamard_gpu over the full matrix. Returns 0.
int myHadamard(double* __restrict__ X, double* __restrict__ Y, double* __restrict__ H, int M, int N) {
    dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE);
    dim3 dimGrid(N/dimBlock.x+1, M/dimBlock.y+1);
    hadamard_gpu<<<dimGrid, dimBlock>>>(X, Y, H, M, N);
    return 0;
}
// Out-of-place transpose: Xt = X^T, with X an M x N column-major matrix
// and Xt its N x M column-major transpose.
__global__
void transpose_gpu(double* __restrict__ X, double* __restrict__ Xt, int M, int N){
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int row = by*BLOCK_SIZE+ty;
    int col = bx*BLOCK_SIZE+tx;
    int id1 = col*M+row;   // (row, col) in X
    int id2 = row*N+col;   // (col, row) in Xt
    if (row < M && col < N){
        Xt[id2] = X[id1];
    }
}
// Host wrapper: launches transpose_gpu over the full matrix. Returns 0.
int myTranspose(double* __restrict__ X, double* __restrict__ Xt, int M, int N) {
    dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE);
    dim3 dimGrid(N/dimBlock.x+1, M/dimBlock.y+1);
    transpose_gpu<<<dimGrid, dimBlock>>>(X, Xt, M, N);
    return 0;
}
// Scaled element-wise sum: Z = X + alpha*Y for M x N column-major
// matrices; one thread per element.
__global__
void matrix_add_gpu(double* __restrict__ X, double* __restrict__ Y, double* __restrict__ Z,
                    int M, int N, double alpha){
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int row = by*BLOCK_SIZE+ty;
    int col = bx*BLOCK_SIZE+tx;
    int id = col*M+row;
    if (row < M && col < N){
        Z[id] = X[id]+alpha*Y[id];
    }
}
// Host wrapper: launches matrix_add_gpu over the full matrix. Returns 0.
int myMatAdd(double* __restrict__ X, double* __restrict__ Y, double* __restrict__ Z, int M, int N, double alpha) {
    dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE);
    dim3 dimGrid(N/dimBlock.x+1, M/dimBlock.y+1);
    matrix_add_gpu<<<dimGrid, dimBlock>>>(X, Y, Z, M, N, alpha);
    return 0;
}
// Row sums scaled by alpha. For every (row, col) thread the full sum of
// row 'row' of the M x N column-major matrix X is accumulated into S at
// the thread's own index — so each row sum is recomputed N times and
// written into every column of S.
// NOTE(review): if the caller allocates S as an M x 1 vector, the writes
// for col > 0 are out of bounds — confirm S is allocated M x N, or launch
// with N = 1.
__global__
void row_sum_gpu(double* __restrict__ X, double* __restrict__ S,
                 int M, int N, double alpha){
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int row = by*BLOCK_SIZE+ty;
    int col = bx*BLOCK_SIZE+tx;
    int id = col*M+row;
    if (row < M && col < N){
        S[id] = 0;
        // Stride M walks along row 'row' across all N columns of X.
        for(int i = row; i < M*N; i+=M){
            S[id] += X[i];
        }
        S[id] *= alpha;
    }
}
// Host wrapper: launches row_sum_gpu over an M x N grid. Returns 0.
int myRowSum(double* __restrict__ X, double* __restrict__ S, int M, int N, double alpha) {
    dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE);
    dim3 dimGrid(N/dimBlock.x+1, M/dimBlock.y+1);
    row_sum_gpu<<<dimGrid, dimBlock>>>(X, S, M, N, alpha);
    return 0;
}
// Column-wise softmax: S[:,col] = exp(X[:,col]) / sum(exp(X[:,col])).
// X and S are M x N column-major; one thread per element, each thread
// redundantly recomputing its column's statistics (O(M) work per element).
// The column maximum is subtracted before exponentiating — mathematically
// equivalent, but prevents exp() overflow for large inputs.
__global__
void softmax_gpu(double* __restrict__ X, double* __restrict__ S, int M, int N){
    int row = blockIdx.y*BLOCK_SIZE + threadIdx.y;
    int col = blockIdx.x*BLOCK_SIZE + threadIdx.x;
    int id = col*M + row;
    if (row < M && col < N){
        // Column maximum, for numerical stability.
        double mx = X[col*M];
        for (int i = col*M+1; i < (col+1)*M; i++){
            if (X[i] > mx) mx = X[i];
        }
        double sum = 0;
        for (int i = col*M; i < (col+1)*M; i++){
            sum += exp(X[i] - mx);
        }
        S[id] = exp(X[id] - mx)/sum;
    }
}
// Host wrapper: launches softmax_gpu over an M x N matrix. Returns 0.
int mySoftmax(double* __restrict__ X, double* __restrict__ S, int M, int N) {
    dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE);
    dim3 dimGrid(N/dimBlock.x+1, M/dimBlock.y+1);
    softmax_gpu<<<dimGrid, dimBlock>>>(X, S, M, N);
    return 0;
}
// Debug helper: prints the top-left m x n corner of an M x N column-major
// matrix, one printf per element (output ordering is nondeterministic).
__global__
void print_gpu(double* __restrict__ X, int M, int N, int m, int n){
    int row = blockIdx.y*BLOCK_SIZE + threadIdx.y;
    int col = blockIdx.x*BLOCK_SIZE + threadIdx.x;
    int id = col*M + row;
    if (row < m && col < n){
        // BUG FIX: row/col/id are signed ints; the original used %u.
        printf("%d,%d,%d: %f \n", row, col, id, X[id]);
    }
}
// Host wrapper: grid covers the full M x N matrix; the kernel restricts
// printing to the m x n corner. Returns 0.
int myPrintMat(double* __restrict__ X, int M, int N, int m, int n) {
    dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE);
    dim3 dimGrid(N/dimBlock.x+1, M/dimBlock.y+1);
    print_gpu<<<dimGrid, dimBlock>>>(X, M, N, m, n);
    return 0;
}
|
917fcc252d5fce418584131775f7ad949099159e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2009 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation and
* any modifications thereto. Any use, reproduction, disclosure, or distribution
* of this software and related documentation without an express license
* agreement from NVIDIA Corporation is strictly prohibited.
*
*/
#include <assert.h>
#include <cutil_inline.h>
#include <math_functions.h>
// #include <vector>
////////////////////////////////////////////////////////////////////////////////
// Convolution kernel storage
////////////////////////////////////////////////////////////////////////////////
// Separable filter taps in constant memory, one array per pass
// (horizontal/X, vertical/Y, depth/Z). Capacity is 100 floats each.
__constant__ float c_Kernel_h[100];
__constant__ float c_Kernel_v[100];
__constant__ float c_Kernel_d[100];
// Uploads the horizontal-pass taps. kernel_length must be <= 100
// (NOTE(review): not checked here — caller's responsibility).
extern "C" void setConvolutionKernel_horizontal(float *h_Kernel, int kernel_length){
    hipMemcpyToSymbol(c_Kernel_h, h_Kernel, kernel_length * sizeof(float));
}
// Uploads the vertical-pass taps (same <= 100 contract).
extern "C" void setConvolutionKernel_vertical(float *h_Kernel, int kernel_length){
    hipMemcpyToSymbol(c_Kernel_v, h_Kernel, kernel_length * sizeof(float));
}
// Uploads the depth-pass taps (same <= 100 contract).
extern "C" void setConvolutionKernel_depth(float *h_Kernel, int kernel_length){
    hipMemcpyToSymbol(c_Kernel_d, h_Kernel, kernel_length * sizeof(float));
}
////////////////////////////////////////////////////////////////////////////////
// Constants
////////////////////////////////////////////////////////////////////////////////
#define ROWS_BLOCKDIM_X 16
#define ROWS_BLOCKDIM_Y 16
#define ROWS_RESULT_STEPS 8
#define ROWS_HALO_STEPS 3
#define COLUMNS_BLOCKDIM_X 16
#define COLUMNS_BLOCKDIM_Y 16
#define COLUMNS_RESULT_STEPS 8
#define COLUMNS_HALO_STEPS 3
#define DEPTH_BLOCKDIM_Y 16
#define DEPTH_BLOCKDIM_Z 16
#define DEPTH_RESULT_STEPS 4
#define DEPTH_HALO_STEPS 3
////////////////////////////////////////////////////////////////////////////////
// Row convolution filter
////////////////////////////////////////////////////////////////////////////////
// Row (X-direction) pass of a separable 3D convolution, adapted from the
// NVIDIA SDK 2D sample: blockIdx.x enumerates (z-slice, x-tile) pairs and
// each thread produces ROWS_RESULT_STEPS outputs, staging the row segment
// plus left/right halos in shared memory. Taps come from c_Kernel_h.
// Preconditions (asserted by the host wrapper): imageW divisible by
// ROWS_RESULT_STEPS*ROWS_BLOCKDIM_X, imageH by ROWS_BLOCKDIM_Y, and
// kernel_radius <= ROWS_HALO_STEPS*ROWS_BLOCKDIM_X.
__global__ void convolutionRowsKernel(
    float *d_Dst,
    float *d_Src,
    int imageW,
    int imageH,
    int imageD,
    int kernel_radius
){
    __shared__ float s_Data[ROWS_BLOCKDIM_Y]
                           [(ROWS_RESULT_STEPS + 2 * ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X];
    // Recover the z-slice and the x-tile index folded into blockIdx.x.
    int n_blocks_per_row = imageW/(ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X);
    int basez = floor(float(blockIdx.x)/n_blocks_per_row);
    int blockx = blockIdx.x - basez*n_blocks_per_row;
    // Offset to the left halo edge of this tile's row segment.
    const int baseX = (blockx * ROWS_RESULT_STEPS - ROWS_HALO_STEPS) *
        ROWS_BLOCKDIM_X + threadIdx.x;
    const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y;
    d_Src += basez*imageW*imageH + baseY * imageW + baseX;
    d_Dst += basez*imageW*imageH + baseY * imageW + baseX;
    // Main data: the ROWS_RESULT_STEPS segments this block will output.
    #pragma unroll
    for(int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++)
        s_Data[threadIdx.y]
              [threadIdx.x + i * ROWS_BLOCKDIM_X]
            = d_Src[i * ROWS_BLOCKDIM_X];
    // Left halo: zero-fill reads that would fall before column 0.
    for(int i = 0; i < ROWS_HALO_STEPS; i++){
        s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] =
            (baseX >= -i * ROWS_BLOCKDIM_X ) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
    }
    // Right halo: zero-fill reads past the last column.
    for(int i = ROWS_HALO_STEPS + ROWS_RESULT_STEPS;
        i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS + ROWS_HALO_STEPS; i++){
        s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] =
            (imageW - baseX > i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
    }
    // All staging done before any thread convolves.
    __syncthreads();
    for(int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++){
        float sum = 0;
        #pragma unroll
        for(int j = -kernel_radius; j <= kernel_radius; j++)
            sum += c_Kernel_h[kernel_radius - j] *
                s_Data [threadIdx.y]
                       [threadIdx.x + i * ROWS_BLOCKDIM_X + j];
        d_Dst[i * ROWS_BLOCKDIM_X] = sum;
    }
}
// Host wrapper for the row pass: validates the tiling preconditions and
// launches convolutionRowsKernel over all imageD slices (the z index is
// folded into the grid's x dimension).
extern "C" void convolutionRowsGPU(
    float *d_Dst,
    float *d_Src,
    int imageW,
    int imageH,
    int imageD,
    int kernel_radius
){
    // The halo apron staged in shared memory must cover the kernel radius.
    assert( ROWS_BLOCKDIM_X * ROWS_HALO_STEPS >= kernel_radius );
    // There must be a rational division of the image into blocks.
    assert( imageW % (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X) == 0 );
    assert( imageH % ROWS_BLOCKDIM_Y == 0 );
    dim3 blocks(imageD*(imageW / (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X)),
                imageH / ROWS_BLOCKDIM_Y);
    dim3 threads(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y);
    hipLaunchKernelGGL(( convolutionRowsKernel), dim3(blocks), dim3(threads), 0, 0,
        d_Dst,
        d_Src,
        imageW,
        imageH,
        imageD,
        kernel_radius
    );
    cutilCheckMsg("convolutionRowsKernel() execution failed\n");
}
////////////////////////////////////////////////////////////////////////////////
// Column convolution filter
////////////////////////////////////////////////////////////////////////////////
// Column (Y-direction) pass of the separable 3D convolution. blockIdx.y
// enumerates (z-slice, y-tile) pairs; each thread produces
// COLUMNS_RESULT_STEPS outputs with upper/lower halos staged in shared
// memory (inner dimension padded by +1 to avoid bank conflicts). Taps
// come from c_Kernel_v. 'pitch' is the row stride in elements (the host
// wrapper passes imageW).
// NOTE(review): the base offset uses baseY * imageH where the per-step
// addressing uses 'pitch'; the NVIDIA sample this derives from uses
// baseY * pitch. These only agree when imageH == imageW — confirm for
// non-square slices.
__global__ void convolutionColumnsKernel(
    float *d_Dst,
    float *d_Src,
    int imageW,
    int imageH,
    int pitch,
    int kernel_radius
){
    __shared__ float s_Data[COLUMNS_BLOCKDIM_X][(COLUMNS_RESULT_STEPS + 2 * COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + 1];
    // Recover the z-slice and the y-tile index folded into blockIdx.y.
    int n_blocks_per_column = imageH/(COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y);
    int basez = floor(float(blockIdx.y)/n_blocks_per_column);
    int blocky = blockIdx.y - basez*n_blocks_per_column;
    // Offset to the upper halo edge.
    const int baseX = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x;
    const int baseY = (blocky * COLUMNS_RESULT_STEPS - COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y;
    d_Src += basez*imageH*imageW + baseY * imageH + baseX;
    d_Dst += basez*imageH*imageW + baseY * imageH + baseX;
    // Main data: the COLUMNS_RESULT_STEPS segments this block outputs.
    #pragma unroll
    for(int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++)
        s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = d_Src[i * COLUMNS_BLOCKDIM_Y * pitch];
    // Upper halo: zero-fill reads before row 0.
    for(int i = 0; i < COLUMNS_HALO_STEPS; i++)
        s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] =
            (baseY >= -i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0;
    // Lower halo: zero-fill reads past the last row.
    for(int i = COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS + COLUMNS_HALO_STEPS; i++)
        s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] =
            (imageH - baseY > i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0;
    // All staging done before any thread convolves.
    __syncthreads();
    for(int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++){
        float sum = 0;
        #pragma unroll
        for(int j = -kernel_radius; j <= kernel_radius; j++)
            sum += c_Kernel_v[kernel_radius - j] * s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y + j];
        d_Dst[i * COLUMNS_BLOCKDIM_Y * pitch] = sum;
    }
}
// Host wrapper for the column pass: validates tiling preconditions and
// launches convolutionColumnsKernel over all imageD slices (z folded
// into the grid's y dimension); pitch is passed as imageW.
extern "C" void convolutionColumnsGPU(
    float *d_Dst,
    float *d_Src,
    int imageW,
    int imageH,
    int imageD,
    int kernel_radius
){
    // The halo apron must cover the kernel radius.
    assert( COLUMNS_BLOCKDIM_Y * COLUMNS_HALO_STEPS >= kernel_radius );
    assert( imageW % COLUMNS_BLOCKDIM_X == 0 );
    assert( imageH % (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y) == 0 );
    dim3 blocks(imageW / COLUMNS_BLOCKDIM_X, imageD * imageH / (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y));
    dim3 threads(COLUMNS_BLOCKDIM_X, COLUMNS_BLOCKDIM_Y);
    hipLaunchKernelGGL(( convolutionColumnsKernel), dim3(blocks), dim3(threads), 0, 0,
        d_Dst,
        d_Src,
        imageW,
        imageH,
        imageW,
        kernel_radius
    );
    cutilCheckMsg("convolutionColumnsKernel() execution failed\n");
}
////////////////////////////////////////////////////////////////////////////////
// Depth convolution filter - Really naive implementation
////////////////////////////////////////////////////////////////////////////////
// Depth (Z-direction) pass of the separable 3D convolution. blockIdx.x
// enumerates (x-column, z-tile) pairs; each thread produces
// DEPTH_RESULT_STEPS outputs along Z, staging the column of voxels plus
// front/back halos in shared memory. Taps come from c_Kernel_d.
// Consecutive Z samples are imageW*imageH apart in memory, so global
// accesses here are strided (the header calls this a naive version).
__global__ void convolutionDepthKernel(
    float *d_Dst,
    float *d_Src,
    int imageW,
    int imageH,
    int imageD,
    int kernel_radius
){
    __shared__ float s_Data[DEPTH_BLOCKDIM_Y]
                           [(DEPTH_RESULT_STEPS + 2 * DEPTH_HALO_STEPS) * DEPTH_BLOCKDIM_Z];
    // Recover the x column and the z-tile index folded into blockIdx.x.
    int n_blocks_per_depth = imageD / (DEPTH_RESULT_STEPS * DEPTH_BLOCKDIM_Z);
    int basex = floor(float(blockIdx.x)/n_blocks_per_depth);
    int blockz = blockIdx.x - basex*n_blocks_per_depth;
    // Offset to the front halo edge along Z.
    const int baseZ = (blockz * DEPTH_RESULT_STEPS - DEPTH_HALO_STEPS)*DEPTH_BLOCKDIM_Z +
        threadIdx.x;
    const int baseY = blockIdx.y * DEPTH_BLOCKDIM_Y + threadIdx.y;
    // Put the pointers to the beginning of this thread's data.
    d_Src += baseZ * imageW * imageH + baseY * imageW + basex;
    d_Dst += baseZ * imageW * imageH + baseY * imageW + basex;
    // Main data: the DEPTH_RESULT_STEPS segments this block outputs.
    #pragma unroll
    for(int i = DEPTH_HALO_STEPS; i < DEPTH_HALO_STEPS + DEPTH_RESULT_STEPS; i++)
        s_Data[threadIdx.y]
              [threadIdx.x + i * DEPTH_BLOCKDIM_Z]
            = d_Src[i * DEPTH_BLOCKDIM_Z * imageH * imageW];
    // Front halo: zero-fill reads before slice 0.
    for(int i = 0; i < DEPTH_HALO_STEPS; i++){
        s_Data[threadIdx.y][threadIdx.x + i * DEPTH_BLOCKDIM_Z] =
            (baseZ >= -i * DEPTH_BLOCKDIM_Z ) ?
            d_Src[i * DEPTH_BLOCKDIM_Z * imageH * imageW] : 0;
    }
    // Back halo: zero-fill reads past the last slice.
    for(int i = DEPTH_HALO_STEPS + DEPTH_RESULT_STEPS;
        i < DEPTH_HALO_STEPS + DEPTH_RESULT_STEPS + DEPTH_HALO_STEPS; i++){
        s_Data[threadIdx.y][threadIdx.x + i * DEPTH_BLOCKDIM_Z] =
            (imageD - baseZ > i * DEPTH_BLOCKDIM_Z ) ?
            d_Src[i * DEPTH_BLOCKDIM_Z * imageH * imageW] : 0;
    }
    // All staging done before any thread convolves.
    __syncthreads();
    #pragma unroll
    for(int i = DEPTH_HALO_STEPS; i < DEPTH_HALO_STEPS + DEPTH_RESULT_STEPS; i++){
        float sum = 0;
        #pragma unroll
        for(int j = -kernel_radius; j <= kernel_radius; j++)
            sum += c_Kernel_d[kernel_radius - j] *
                s_Data [threadIdx.y]
                       [threadIdx.x + i * DEPTH_BLOCKDIM_Z + j];
        d_Dst[i * DEPTH_BLOCKDIM_Z * imageH * imageW] = sum;
    }
}
// Host wrapper for the depth pass: validates the tiling preconditions and
// launches convolutionDepthKernel over the volume (the x index is folded
// into the grid's x dimension alongside the z tiles).
extern "C" void convolutionDepthGPU(
    float *d_Dst,
    float *d_Src,
    int imageW,
    int imageH,
    int imageD,
    int kernel_radius
){
    // The halo apron staged in shared memory must cover the kernel radius.
    assert( DEPTH_BLOCKDIM_Z * DEPTH_HALO_STEPS >= kernel_radius );
    // There must be a rational division of the volume into blocks.
    // BUG FIX: the original asserts were copy-pasted from the ROWS pass;
    // this kernel tiles along Z (DEPTH_RESULT_STEPS * DEPTH_BLOCKDIM_Z)
    // and Y (DEPTH_BLOCKDIM_Y), so those are the required divisibilities.
    assert( imageD % (DEPTH_RESULT_STEPS * DEPTH_BLOCKDIM_Z) == 0 );
    assert( imageH % DEPTH_BLOCKDIM_Y == 0 );
    dim3 blocks(imageW*imageD / (DEPTH_RESULT_STEPS * DEPTH_BLOCKDIM_Z),
                imageH / DEPTH_BLOCKDIM_Y);
    dim3 threads(DEPTH_BLOCKDIM_Z, DEPTH_BLOCKDIM_Y);
    hipLaunchKernelGGL(( convolutionDepthKernel), dim3(blocks), dim3(threads), 0, 0,
        d_Dst,
        d_Src,
        imageW,
        imageH,
        imageD,
        kernel_radius
    );
    // BUG FIX: the failure message previously named convolutionRowsKernel.
    cutilCheckMsg("convolutionDepthKernel() execution failed\n");
}
////////////////////////////////////////////////////////////////////////////////
// Computes the higher eigenvalue of the hessian
////////////////////////////////////////////////////////////////////////////////
// Determinant of a 3x3 matrix with entries e{row}{col}, expanded along
// the first column. Callers pass symmetric matrices (see hessianKernel),
// but the formula is valid for any 3x3 matrix.
__device__ float computeDeterminant
(float e00, float e01, float e02,
 float e10, float e11, float e12,
 float e20, float e21, float e22)
{
    return e00*e11*e22-e00*e12*e21+e10*e21*e02-e10*e01*e22+e20*e01*e12-e20*e11*e02;
}
// Per-voxel largest eigenvalue of the negated Hessian
// [gxx gxy gxz; gxy gyy gyz; gxz gyz gzz], divided by sigma, using the
// closed form for symmetric 3x3 matrices (O.K. Smith, Commun. ACM 1961).
// Grid layout: blockIdx.x enumerates (z-slice, x-tile) pairs; blockIdx.y
// the y tiles. One thread per voxel.
__global__ void hessianKernel
(
 float *d_output,
 float *d_gxx,
 float *d_gxy,
 float *d_gxz,
 float *d_gyy,
 float *d_gyz,
 float *d_gzz,
 float sigma,
 int imageW,
 int imageH,
 int imageD
 )
{
    // Decode voxel coordinates from the folded grid.
    int n_blocks_per_width = imageW/blockDim.x;
    int z = (int)ceilf(blockIdx.x/n_blocks_per_width);
    int y = blockIdx.y*blockDim.y + threadIdx.y;
    int x = (blockIdx.x - z*n_blocks_per_width)*blockDim.x + threadIdx.x;
    int i = z*imageW*imageH + y*imageW + x;
    // Negated Hessian entries of the symmetric matrix.
    float a0, b0, c0, e0, f0, k0;
    a0 = -d_gxx[i]; b0 = -d_gxy[i]; c0 = -d_gxz[i];
    e0 = -d_gyy[i]; f0 = -d_gyz[i]; k0 = -d_gzz[i];
    // Smith's method: m = trace/3, q = det(A - m*I)/2,
    // p = sum of squares of (A - m*I) entries / 6.
    float m = (a0+e0+k0)/3;
    float q = computeDeterminant
        (a0-m, b0, c0, b0, e0-m, f0, c0, f0, k0-m)/2;
    float p = (a0-m)*(a0-m) + b0*b0 + c0*c0 + b0*b0 + (e0-m)*(e0-m) +
        f0*f0 + c0*c0 + f0*f0 + (k0-m)*(k0-m);
    p = p / 6;
    // BUG FIX: the original wrote 1/3*atan(...); 1/3 is INTEGER division
    // and evaluates to 0, so phi was always 0 and the eigenvalues wrong.
    float phi = atan(sqrt(p*p*p-q*q)/q) / 3.0f;
    if(phi<0)
        phi=phi+3.14159/3;
    float eig1 = m + 2*sqrt(p)*cos(phi);
    float eig2 = m - sqrt(p)*(cos(phi) + sqrt(3.0)*sin(phi));
    float eig3 = m - sqrt(p)*(cos(phi) - sqrt(3.0)*sin(phi));
    // Keep the largest eigenvalue, normalized by sigma.
    // NOTE(review): if two eigenvalues tie exactly, no branch fires and
    // d_output[i] is left unchanged — confirm this is acceptable.
    if( (eig1 > eig2) & (eig1 > eig3))
        d_output[i] = eig1/sigma;
    if( (eig2 > eig1) & (eig2 > eig3))
        d_output[i] = eig2/sigma;
    if( (eig3 > eig2) & (eig3 > eig1))
        d_output[i] = eig3/sigma;
}
/*
 * Host wrapper for hessianKernel: one thread per voxel, tiled
 * ROWS_BLOCKDIM_X x ROWS_BLOCKDIM_Y, with the z slices folded into the
 * x dimension of the grid.
 */
extern "C" void hessianGPU
(
 float *d_output,
 float *d_gxx,
 float *d_gxy,
 float *d_gxz,
 float *d_gyy,
 float *d_gyz,
 float *d_gzz,
 float sigma,
 int imageW,
 int imageH,
 int imageD
)
{
    const dim3 block(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y);
    const dim3 grid(imageD * imageW / ROWS_BLOCKDIM_X,
                    imageH / ROWS_BLOCKDIM_Y);
    hipLaunchKernelGGL(( hessianKernel), dim3(grid), dim3(block), 0, 0, d_output, d_gxx, d_gxy, d_gxz,
                        d_gyy, d_gyz, d_gzz, sigma, imageW, imageH, imageD );
    cutilCheckMsg("hessianKernel() execution failed\n");
}
/*********************************************************************************
** hessian con orientacion
********************************************************************************/
// Like hessianKernel, but also emits the orientation (theta, phi in
// spherical coordinates) of the eigenvector belonging to the selected
// eigenvalue. Output eigenvalue is scaled by sigma^2 here (unlike
// hessianKernel's 1/sigma — NOTE(review): confirm which scaling callers
// expect). One thread per voxel; same folded grid layout.
__global__ void hessianKernelO
(
 float *d_output,
 float *d_output_theta,
 float *d_output_phi,
 float *d_gxx,
 float *d_gxy,
 float *d_gxz,
 float *d_gyy,
 float *d_gyz,
 float *d_gzz,
 float sigma,
 int imageW,
 int imageH,
 int imageD
 )
{
    // Decode voxel coordinates from the folded grid.
    int n_blocks_per_width = imageW/blockDim.x;
    int z = (int)ceilf(blockIdx.x/n_blocks_per_width);
    int y = blockIdx.y*blockDim.y + threadIdx.y;
    int x = (blockIdx.x - z*n_blocks_per_width)*blockDim.x + threadIdx.x;
    int i = z*imageW*imageH + y*imageW + x;
    // Negated Hessian entries; Smith's closed form for symmetric 3x3
    // eigenvalues (Commun. ACM 4(4):168, 1961).
    float a0, b0, c0, d0, e0, f0;
    a0 = -d_gxx[i]; b0 = -d_gxy[i]; c0 = -d_gxz[i];
    d0 = -d_gyy[i]; e0 = -d_gyz[i]; f0 = -d_gzz[i];
    float m = (a0+d0+f0)/3;
    float q = computeDeterminant
        (a0-m, b0, c0, b0, d0-m, e0, c0, e0, f0-m)/2;
    float p = (a0-m)*(a0-m) + b0*b0 + c0*c0 + b0*b0 + (d0-m)*(d0-m) +
        e0*e0 + c0*c0 + e0*e0 + (f0-m)*(f0-m);
    p = p / 6;
    // BUG FIX: the original wrote 1/3*atan(...); 1/3 is INTEGER division
    // and evaluates to 0, so phi was always 0 and the eigenvalues wrong.
    float phi = atan(sqrt(p*p*p-q*q)/q) / 3.0f;
    if(phi<0)
        phi=phi+3.14159/3;
    float eig1 = m + 2*sqrt(p)*cos(phi);
    float eig2 = m - sqrt(p)*(cos(phi) + sqrt(3.0)*sin(phi));
    float eig3 = m - sqrt(p)*(cos(phi) - sqrt(3.0)*sin(phi));
    // Keep the largest eigenvalue, scaled by sigma^2.
    if( (eig1 > eig2) & (eig1 > eig3))
        d_output[i] = eig1*sigma*sigma;
    if( (eig2 > eig1) & (eig2 > eig3))
        d_output[i] = eig2*sigma*sigma;
    if( (eig3 > eig2) & (eig3 > eig1))
        d_output[i] = eig3*sigma*sigma;
    // Eigenvector of (A - l*I) for the selected eigenvalue l, via cross
    // products of the singular matrix's rows.
    float l = d_output[i]/(sigma*sigma);
    a0 = a0 - l;
    d0 = d0 - l;
    f0 = f0 - l;
    float xv = b0*e0 - c0*d0;
    float yv = e0*a0 - c0*b0;
    float zv = d0*a0 - b0*b0;
    // Convert to spherical angles; degenerate (near-zero) vectors get phi=0.
    float radius = sqrt(xv*xv+yv*yv+zv*zv);
    float thetav = atan2(yv, xv);
    float phiv = 0;
    if(radius > 1e-6)
        phiv = acos( zv/radius);
    d_output_theta[i] = thetav;
    d_output_phi[i] = phiv;
}
// Host wrapper: launches hessianKernelO (eigenvalue + orientation) with one
// thread per voxel. Grid x covers imageD * (imageW / ROWS_BLOCKDIM_X) tiles,
// grid y covers imageH / ROWS_BLOCKDIM_Y rows.
extern "C" void hessianGPU_orientation
(
    float *d_Output,
    float *d_Output_theta,
    float *d_Output_phi,
    float *d_gxx,
    float *d_gxy,
    float *d_gxz,
    float *d_gyy,
    float *d_gyz,
    float *d_gzz,
    float sigma,
    int imageW,
    int imageH,
    int imageD
)
{
    dim3 grid(imageD * imageW / ROWS_BLOCKDIM_X, imageH / ROWS_BLOCKDIM_Y);
    dim3 block(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y);
    hipLaunchKernelGGL((hessianKernelO), dim3(grid), dim3(block), 0, 0,
                       d_Output, d_Output_theta, d_Output_phi,
                       d_gxx, d_gxy, d_gxz,
                       d_gyy, d_gyz, d_gzz,
                       sigma, imageW, imageH, imageD);
    // BUGFIX: the status check was duplicated and its message named the wrong
    // kernel; check once, naming the kernel actually launched.
    cutilCheckMsg("hessianKernelO() execution failed\n");
}
//////////////////////////////////////////////////////////////////////
// Maxes and things alike
//////////////////////////////////////////////////////////////////////
// Fills every voxel of a W x H x D volume with a constant value.
// Launch layout: gridDim.x spans imageD * (imageW / blockDim.x) tiles,
// gridDim.y spans imageH / blockDim.y rows.
__global__ void putKernel(
    float *d_output,
    float value,
    int imageW,
    int imageH,
    int imageD
){
    // blockIdx.x encodes (z-slice, tile-within-row); both are recovered with
    // integer division (the original ceilf() on an integer quotient is a no-op).
    const int blocksPerWidth = imageW / blockDim.x;
    const int z = blockIdx.x / blocksPerWidth;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    const int x = (blockIdx.x - z * blocksPerWidth) * blockDim.x + threadIdx.x;
    d_output[z * imageW * imageH + y * imageW + x] = value;
}
// Host wrapper: fills d_Output (imageW x imageH x imageD floats) with `value`.
extern "C" void putGPU
(
    float *d_Output,
    float value,
    int imageW,
    int imageH,
    int imageD
)
{
    dim3 grid(imageD * imageW / ROWS_BLOCKDIM_X, imageH / ROWS_BLOCKDIM_Y);
    dim3 threads(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y);
    hipLaunchKernelGGL((putKernel), dim3(grid), dim3(threads), 0, 0,
                       d_Output, value, imageW, imageH, imageD);
    cutilCheckMsg("putKernel() execution failed\n");
}
// Element-wise write of d_value into d_output, one thread per voxel.
// NOTE(review): despite the name, the comparison below is commented out, so
// this is currently an unconditional copy, not a running max — confirm whether
// that is intended before relying on max semantics.
__global__ void maxKernel(
float *d_output,
float* d_value,
int imageW,
int imageH,
int imageD
){
// Map the 2D launch grid onto the 3D volume (x, y, z) and flat index i.
int n_blocks_per_width = imageW/blockDim.x;
int z = (int)ceilf(blockIdx.x/n_blocks_per_width);
int y = blockIdx.y*blockDim.y + threadIdx.y;
int x = (blockIdx.x - z*n_blocks_per_width)*blockDim.x + threadIdx.x;
int i = z*imageW*imageH + y*imageW + x;
// if(d_value[i] > d_output[i])
d_output[i] = d_value[i];
}
// Host wrapper: launches maxKernel over the whole volume (currently an
// element-wise copy of d_isMaximumThanOutput into d_Output — see maxKernel).
extern "C" void maxGPU
(
    float *d_Output,
    float *d_isMaximumThanOutput,
    int imageW,
    int imageH,
    int imageD
)
{
    dim3 grid(imageD * imageW / ROWS_BLOCKDIM_X, imageH / ROWS_BLOCKDIM_Y);
    dim3 block(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y);
    hipLaunchKernelGGL((maxKernel), dim3(grid), dim3(block), 0, 0,
                       d_Output, d_isMaximumThanOutput,
                       imageW, imageH, imageD);
    // BUGFIX: the error message previously named putKernel().
    cutilCheckMsg("maxKernel() execution failed\n");
}
| 917fcc252d5fce418584131775f7ad949099159e.cu | /*
* Copyright 1993-2009 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation and
* any modifications thereto. Any use, reproduction, disclosure, or distribution
* of this software and related documentation without an express license
* agreement from NVIDIA Corporation is strictly prohibited.
*
*/
#include <assert.h>
#include <cutil_inline.h>
#include <math_functions.h>
// #include <vector>
////////////////////////////////////////////////////////////////////////////////
// Convolution kernel storage
////////////////////////////////////////////////////////////////////////////////
__constant__ float c_Kernel_h[100];
__constant__ float c_Kernel_v[100];
__constant__ float c_Kernel_d[100];
// Uploads the horizontal (row-pass) filter taps into constant memory.
extern "C" void setConvolutionKernel_horizontal(float *h_Kernel, int kernel_length){
    // Guard against overflowing the 100-element c_Kernel_h constant buffer.
    assert(kernel_length <= 100);
    cudaMemcpyToSymbol(c_Kernel_h, h_Kernel, kernel_length * sizeof(float));
}
// Uploads the vertical (column-pass) filter taps into constant memory.
extern "C" void setConvolutionKernel_vertical(float *h_Kernel, int kernel_length){
    // Guard against overflowing the 100-element c_Kernel_v constant buffer.
    assert(kernel_length <= 100);
    cudaMemcpyToSymbol(c_Kernel_v, h_Kernel, kernel_length * sizeof(float));
}
// Uploads the depth-pass filter taps into constant memory.
extern "C" void setConvolutionKernel_depth(float *h_Kernel, int kernel_length){
    // Guard against overflowing the 100-element c_Kernel_d constant buffer.
    assert(kernel_length <= 100);
    cudaMemcpyToSymbol(c_Kernel_d, h_Kernel, kernel_length * sizeof(float));
}
////////////////////////////////////////////////////////////////////////////////
// Constants
////////////////////////////////////////////////////////////////////////////////
#define ROWS_BLOCKDIM_X 16
#define ROWS_BLOCKDIM_Y 16
#define ROWS_RESULT_STEPS 8
#define ROWS_HALO_STEPS 3
#define COLUMNS_BLOCKDIM_X 16
#define COLUMNS_BLOCKDIM_Y 16
#define COLUMNS_RESULT_STEPS 8
#define COLUMNS_HALO_STEPS 3
#define DEPTH_BLOCKDIM_Y 16
#define DEPTH_BLOCKDIM_Z 16
#define DEPTH_RESULT_STEPS 4
#define DEPTH_HALO_STEPS 3
////////////////////////////////////////////////////////////////////////////////
// Row convolution filter
////////////////////////////////////////////////////////////////////////////////
// Separable convolution along rows (x axis) of a W x H x D volume.
// Each thread produces ROWS_RESULT_STEPS outputs; the shared-memory tile
// carries ROWS_HALO_STEPS aprons on each side, zero-padded outside the image.
// Filter taps come from constant memory (c_Kernel_h). kernel_radius must not
// exceed ROWS_BLOCKDIM_X * ROWS_HALO_STEPS (asserted by convolutionRowsGPU).
__global__ void convolutionRowsKernel(
float *d_Dst,
float *d_Src,
int imageW,
int imageH,
int imageD,
int kernel_radius
){
__shared__ float s_Data[ROWS_BLOCKDIM_Y]
[(ROWS_RESULT_STEPS + 2 * ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X];
//Offset to the left halo edge
// gridDim.x enumerates (z-slice, tile-within-row) pairs; recover both indices.
int n_blocks_per_row = imageW/(ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X);
int basez = floor(float(blockIdx.x)/n_blocks_per_row);
int blockx = blockIdx.x - basez*n_blocks_per_row;
// const int baseX = (blockIdx.x * ROWS_RESULT_STEPS - ROWS_HALO_STEPS) *
// ROWS_BLOCKDIM_X + threadIdx.x;
const int baseX = (blockx * ROWS_RESULT_STEPS - ROWS_HALO_STEPS) *
ROWS_BLOCKDIM_X + threadIdx.x;
const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y;
d_Src += basez*imageW*imageH + baseY * imageW + baseX;
d_Dst += basez*imageW*imageH + baseY * imageW + baseX;
//Main data
#pragma unroll
for(int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++)
s_Data[threadIdx.y]
[threadIdx.x + i * ROWS_BLOCKDIM_X]
= d_Src[i * ROWS_BLOCKDIM_X];
//Left halo
// Apron positions left of the image are zero-padded.
for(int i = 0; i < ROWS_HALO_STEPS; i++){
s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] =
(baseX >= -i * ROWS_BLOCKDIM_X ) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
}
//Right halo
// Apron positions right of the image are zero-padded.
for(int i = ROWS_HALO_STEPS + ROWS_RESULT_STEPS;
i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS + ROWS_HALO_STEPS; i++){
s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] =
(imageW - baseX > i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
}
//Compute and store results
// Barrier: the whole tile (including halos) must be resident before reading it.
__syncthreads();
// #pragma unroll
for(int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++){
float sum = 0;
#pragma unroll
for(int j = -kernel_radius; j <= kernel_radius; j++)
sum += c_Kernel_h[kernel_radius - j] *
s_Data [threadIdx.y]
[threadIdx.x + i * ROWS_BLOCKDIM_X + j];
d_Dst[i * ROWS_BLOCKDIM_X] = sum;
}
}
// Host wrapper for the row-pass separable convolution over a W x H x D volume.
// Preconditions (asserted): the tile apron covers the filter radius and the
// image dimensions tile exactly.
extern "C" void convolutionRowsGPU(
    float *d_Dst,
    float *d_Src,
    int imageW,
    int imageH,
    int imageD,
    int kernel_radius
){
    // Apron handled by the kernel must cover the filter radius.
    assert( ROWS_BLOCKDIM_X * ROWS_HALO_STEPS >= kernel_radius );
    // Rational division of the image into blocks.
    assert( imageW % (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X) == 0 );
    assert( imageH % ROWS_BLOCKDIM_Y == 0 );

    dim3 grid(imageD * (imageW / (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X)),
              imageH / ROWS_BLOCKDIM_Y);
    dim3 block(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y);
    convolutionRowsKernel<<<grid, block>>>(d_Dst, d_Src,
                                           imageW, imageH, imageD,
                                           kernel_radius);
    cutilCheckMsg("convolutionRowsKernel() execution failed\n");
}
////////////////////////////////////////////////////////////////////////////////
// Column convolution filter
////////////////////////////////////////////////////////////////////////////////
__global__ void convolutionColumnsKernel(
float *d_Dst,
float *d_Src,
int imageW,
int imageH,
int pitch,
int kernel_radius
){
__shared__ float s_Data[COLUMNS_BLOCKDIM_X][(COLUMNS_RESULT_STEPS + 2 * COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + 1];
int n_blocks_per_column = imageH/(COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y);
int basez = floor(float(blockIdx.y)/n_blocks_per_column);
int blocky = blockIdx.y - basez*n_blocks_per_column;
//Offset to the upper halo edge
const int baseX = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x;
const int baseY = (blocky * COLUMNS_RESULT_STEPS - COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y;
d_Src += basez*imageH*imageW + baseY * imageH + baseX;
d_Dst += basez*imageH*imageW + baseY * imageH + baseX;
//Main data
#pragma unroll
for(int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++)
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = d_Src[i * COLUMNS_BLOCKDIM_Y * pitch];
//Upper halo
for(int i = 0; i < COLUMNS_HALO_STEPS; i++)
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] =
(baseY >= -i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0;
//Lower halo
for(int i = COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS + COLUMNS_HALO_STEPS; i++)
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] =
(imageH - baseY > i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0;
//Compute and store results
__syncthreads();
// #pragma unroll
for(int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++){
float sum = 0;
#pragma unroll
for(int j = -kernel_radius; j <= kernel_radius; j++)
sum += c_Kernel_v[kernel_radius - j] * s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y + j];
d_Dst[i * COLUMNS_BLOCKDIM_Y * pitch] = sum;
}
}
// Host wrapper for the column-pass separable convolution.
// Preconditions (asserted): apron covers the filter radius and dimensions
// tile exactly. The row pitch passed to the kernel equals imageW
// (tightly packed rows).
extern "C" void convolutionColumnsGPU(
    float *d_Dst,
    float *d_Src,
    int imageW,
    int imageH,
    int imageD,
    int kernel_radius
){
    assert( COLUMNS_BLOCKDIM_Y * COLUMNS_HALO_STEPS >= kernel_radius );
    assert( imageW % COLUMNS_BLOCKDIM_X == 0 );
    assert( imageH % (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y) == 0 );

    dim3 grid(imageW / COLUMNS_BLOCKDIM_X,
              imageD * imageH / (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y));
    dim3 block(COLUMNS_BLOCKDIM_X, COLUMNS_BLOCKDIM_Y);
    convolutionColumnsKernel<<<grid, block>>>(d_Dst, d_Src,
                                              imageW, imageH, imageW,
                                              kernel_radius);
    cutilCheckMsg("convolutionColumnsKernel() execution failed\n");
}
////////////////////////////////////////////////////////////////////////////////
// Depth convolution filter - Really naive implementation
////////////////////////////////////////////////////////////////////////////////
// Separable convolution along the depth (z) axis. threadIdx.x is the depth
// lane; each thread produces DEPTH_RESULT_STEPS outputs with
// DEPTH_HALO_STEPS zero-padded aprons on each side. Taps come from c_Kernel_d.
__global__ void convolutionDepthKernel(
float *d_Dst,
float *d_Src,
int imageW,
int imageH,
int imageD,
int kernel_radius
){
__shared__ float s_Data[DEPTH_BLOCKDIM_Y]
[(DEPTH_RESULT_STEPS + 2 * DEPTH_HALO_STEPS) * DEPTH_BLOCKDIM_Z];
//Offset to the left halo edge
// gridDim.x enumerates (x-column, tile-within-depth) pairs; recover both.
int n_blocks_per_depth = imageD / (DEPTH_RESULT_STEPS * DEPTH_BLOCKDIM_Z);
int basex = floor(float(blockIdx.x)/n_blocks_per_depth);
int blockz = blockIdx.x - basex*n_blocks_per_depth;
const int baseZ = (blockz * DEPTH_RESULT_STEPS - DEPTH_HALO_STEPS)*DEPTH_BLOCKDIM_Z +
threadIdx.x;
const int baseY = blockIdx.y * DEPTH_BLOCKDIM_Y + threadIdx.y;
//Put the pointers to the beginning of the data
d_Src += baseZ * imageW * imageH + baseY * imageW + basex;
d_Dst += baseZ * imageW * imageH + baseY * imageW + basex;
// //Main data
#pragma unroll
for(int i = DEPTH_HALO_STEPS; i < DEPTH_HALO_STEPS + DEPTH_RESULT_STEPS; i++)
s_Data[threadIdx.y]
[threadIdx.x + i * DEPTH_BLOCKDIM_Z]
= d_Src[i * DEPTH_BLOCKDIM_Z * imageH * imageW];
//Left halo
// Apron positions in front of the volume are zero-padded.
for(int i = 0; i < DEPTH_HALO_STEPS; i++){
s_Data[threadIdx.y][threadIdx.x + i * DEPTH_BLOCKDIM_Z] =
(baseZ >= -i * DEPTH_BLOCKDIM_Z ) ?
d_Src[i * DEPTH_BLOCKDIM_Z * imageH * imageW] : 0;
}
// Right halo
// Apron positions behind the volume are zero-padded.
for(int i = DEPTH_HALO_STEPS + DEPTH_RESULT_STEPS;
i < DEPTH_HALO_STEPS + DEPTH_RESULT_STEPS + DEPTH_HALO_STEPS; i++){
s_Data[threadIdx.y][threadIdx.x + i * DEPTH_BLOCKDIM_Z] =
(imageD - baseZ > i * DEPTH_BLOCKDIM_Z ) ?
d_Src[i * DEPTH_BLOCKDIM_Z * imageH * imageW] : 0;
}
// //Compute and store results
// Barrier: the whole tile (including halos) must be resident before reading it.
__syncthreads();
#pragma unroll
for(int i = DEPTH_HALO_STEPS; i < DEPTH_HALO_STEPS + DEPTH_RESULT_STEPS; i++){
float sum = 0;
#pragma unroll
for(int j = -kernel_radius; j <= kernel_radius; j++)
sum += c_Kernel_d[kernel_radius - j] *
s_Data [threadIdx.y]
[threadIdx.x + i * DEPTH_BLOCKDIM_Z + j];
d_Dst[i * DEPTH_BLOCKDIM_Z * imageH * imageW] = sum;
}
}
// Host wrapper for the depth-pass separable convolution over a W x H x D volume.
extern "C" void convolutionDepthGPU(
    float *d_Dst,
    float *d_Src,
    int imageW,
    int imageH,
    int imageD,
    int kernel_radius
){
    // The apron handled by the kernel must cover the filter radius.
    assert( DEPTH_BLOCKDIM_Z * DEPTH_HALO_STEPS >= kernel_radius );
    // BUGFIX: these guards previously checked the ROWS_* tiling (copy-paste
    // from the row pass); this pass tiles the depth axis, so validate the
    // DEPTH_* configuration the kernel and grid actually use.
    assert( imageD % (DEPTH_RESULT_STEPS * DEPTH_BLOCKDIM_Z) == 0 );
    assert( imageH % DEPTH_BLOCKDIM_Y == 0 );
    dim3 blocks(imageW * imageD / (DEPTH_RESULT_STEPS * DEPTH_BLOCKDIM_Z),
                imageH / DEPTH_BLOCKDIM_Y);
    dim3 threads(DEPTH_BLOCKDIM_Z, DEPTH_BLOCKDIM_Y);
    convolutionDepthKernel<<<blocks, threads>>>(d_Dst, d_Src,
                                                imageW, imageH, imageD,
                                                kernel_radius);
    // BUGFIX: the error message previously named convolutionRowsKernel().
    cutilCheckMsg("convolutionDepthKernel() execution failed\n");
}
////////////////////////////////////////////////////////////////////////////////
// Computes the higher eigenvalue of the hessian
////////////////////////////////////////////////////////////////////////////////
// Determinant of a 3x3 matrix, entries given row-major as e[row][col],
// expanded along the first column (e00, e10, e20 cofactors).
__device__ float computeDeterminant
(float e00, float e01, float e02,
float e10, float e11, float e12,
float e20, float e21, float e22)
{
return e00*e11*e22-e00*e12*e21+e10*e21*e02-e10*e01*e22+e20*e01*e12-e20*e11*e02;
}
// Per voxel: largest eigenvalue of the negated Hessian, normalized by sigma.
// Launch layout: gridDim.x spans imageD * (imageW / blockDim.x) tiles,
// gridDim.y spans imageH / blockDim.y rows.
__global__ void hessianKernel
(
    float *d_output,
    float *d_gxx,
    float *d_gxy,
    float *d_gxz,
    float *d_gyy,
    float *d_gyz,
    float *d_gzz,
    float sigma,
    int imageW,
    int imageH,
    int imageD
)
{
    // Recover (x, y, z) and the flat voxel index from the 2D launch grid.
    int n_blocks_per_width = imageW / blockDim.x;
    int z = blockIdx.x / n_blocks_per_width;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int x = (blockIdx.x - z * n_blocks_per_width) * blockDim.x + threadIdx.x;
    int i = z * imageW * imageH + y * imageW + x;
    // Closed-form eigenvalues of the negated symmetric 3 x 3 Hessian.
    // http://en.wikipedia.org/wiki/Eigenvalue_algorithm
    // Oliver K. Smith: Eigenvalues of a symmetric 3 x 3 matrix. Commun. ACM 4(4): 168 (1961)
    float a0, b0, c0, e0, f0, k0;
    a0 = -d_gxx[i]; b0 = -d_gxy[i]; c0 = -d_gxz[i];
    e0 = -d_gyy[i]; f0 = -d_gyz[i]; k0 = -d_gzz[i];
    float m = (a0 + e0 + k0) / 3.0f;
    float q = computeDeterminant
        (a0 - m, b0, c0, b0, e0 - m, f0, c0, f0, k0 - m) / 2.0f;
    float p = (a0-m)*(a0-m) + b0*b0 + c0*c0 + b0*b0 + (e0-m)*(e0-m) +
        f0*f0 + c0*c0 + f0*f0 + (k0-m)*(k0-m);
    p = p / 6.0f;
    // BUGFIX: "1/3" was integer division and evaluated to 0, so phi was always
    // 0 and the atan term was discarded. Also use single-precision math
    // intrinsics (atanf/sqrtf/cosf/sinf) to avoid double promotion.
    float phi = (1.0f / 3.0f) * atanf(sqrtf(p*p*p - q*q) / q);
    if (phi < 0)
        phi = phi + 3.14159f / 3.0f;
    float eig1 = m + 2.0f * sqrtf(p) * cosf(phi);
    float eig2 = m - sqrtf(p) * (cosf(phi) + sqrtf(3.0f) * sinf(phi));
    float eig3 = m - sqrtf(p) * (cosf(phi) - sqrtf(3.0f) * sinf(phi));
    // Write the largest eigenvalue, normalized by sigma. Ties leave d_output untouched.
    if ((eig1 > eig2) & (eig1 > eig3))
        d_output[i] = eig1 / sigma;
    if ((eig2 > eig1) & (eig2 > eig3))
        d_output[i] = eig2 / sigma;
    if ((eig3 > eig2) & (eig3 > eig1))
        d_output[i] = eig3 / sigma;
}
// Host wrapper: launches hessianKernel with one thread per voxel of the
// W x H x D volume. Grid x covers imageD * (imageW / ROWS_BLOCKDIM_X) tiles,
// grid y covers imageH / ROWS_BLOCKDIM_Y rows.
extern "C" void hessianGPU
(
    float *d_output,
    float *d_gxx,
    float *d_gxy,
    float *d_gxz,
    float *d_gyy,
    float *d_gyz,
    float *d_gzz,
    float sigma,
    int imageW,
    int imageH,
    int imageD
)
{
    dim3 grid(imageD * imageW / ROWS_BLOCKDIM_X, imageH / ROWS_BLOCKDIM_Y);
    dim3 threads(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y);
    hessianKernel<<<grid, threads>>>(d_output, d_gxx, d_gxy, d_gxz,
                                     d_gyy, d_gyz, d_gzz,
                                     sigma, imageW, imageH, imageD);
    cutilCheckMsg("hessianKernel() execution failed\n");
}
/*********************************************************************************
** hessian con orientacion
********************************************************************************/
// Per voxel: largest eigenvalue of the negated Hessian (scaled by sigma^2)
// plus the spherical orientation (theta, phi) of an eigenvector estimate.
// Launch layout: gridDim.x spans imageD * (imageW / blockDim.x) tiles,
// gridDim.y spans imageH / blockDim.y rows.
__global__ void hessianKernelO
(
    float *d_output,
    float *d_output_theta,
    float *d_output_phi,
    float *d_gxx,
    float *d_gxy,
    float *d_gxz,
    float *d_gyy,
    float *d_gyz,
    float *d_gzz,
    float sigma,
    int imageW,
    int imageH,
    int imageD
)
{
    // Recover (x, y, z) and the flat voxel index from the 2D launch grid.
    int n_blocks_per_width = imageW / blockDim.x;
    int z = blockIdx.x / n_blocks_per_width;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int x = (blockIdx.x - z * n_blocks_per_width) * blockDim.x + threadIdx.x;
    int i = z * imageW * imageH + y * imageW + x;
    // Closed-form eigenvalues of the negated symmetric 3 x 3 Hessian.
    // http://en.wikipedia.org/wiki/Eigenvalue_algorithm
    // Oliver K. Smith: Eigenvalues of a symmetric 3 x 3 matrix. Commun. ACM 4(4): 168 (1961)
    float a0, b0, c0, d0, e0, f0;
    a0 = -d_gxx[i]; b0 = -d_gxy[i]; c0 = -d_gxz[i];
    d0 = -d_gyy[i]; e0 = -d_gyz[i]; f0 = -d_gzz[i];
    float m = (a0 + d0 + f0) / 3.0f;
    float q = computeDeterminant
        (a0 - m, b0, c0, b0, d0 - m, e0, c0, e0, f0 - m) / 2.0f;
    float p = (a0-m)*(a0-m) + b0*b0 + c0*c0 + b0*b0 + (d0-m)*(d0-m) +
        e0*e0 + c0*c0 + e0*e0 + (f0-m)*(f0-m);
    p = p / 6.0f;
    // BUGFIX: "1/3" was integer division and evaluated to 0, so phi was always
    // 0 and the atan term was discarded. Also use single-precision math
    // intrinsics (atanf/sqrtf/cosf/sinf) to avoid double promotion.
    float phi = (1.0f / 3.0f) * atanf(sqrtf(p*p*p - q*q) / q);
    if (phi < 0)
        phi = phi + 3.14159f / 3.0f;
    float eig1 = m + 2.0f * sqrtf(p) * cosf(phi);
    float eig2 = m - sqrtf(p) * (cosf(phi) + sqrtf(3.0f) * sinf(phi));
    float eig3 = m - sqrtf(p) * (cosf(phi) - sqrtf(3.0f) * sinf(phi));
    // Keep the largest eigenvalue, scaled by sigma^2. Ties leave d_output untouched.
    if ((eig1 > eig2) & (eig1 > eig3))
        d_output[i] = eig1 * sigma * sigma;
    if ((eig2 > eig1) & (eig2 > eig3))
        d_output[i] = eig2 * sigma * sigma;
    if ((eig3 > eig2) & (eig3 > eig1))
        d_output[i] = eig3 * sigma * sigma;
    // Eigenvector estimate: cross product of two rows of (A - l*I).
    float l = d_output[i] / (sigma * sigma);
    a0 = a0 - l;
    d0 = d0 - l;
    f0 = f0 - l;
    float xv = b0 * e0 - c0 * d0;
    float yv = e0 * a0 - c0 * b0;
    float zv = d0 * a0 - b0 * b0;
    float radius = sqrtf(xv*xv + yv*yv + zv*zv);
    float thetav = atan2f(yv, xv);
    float phiv = 0.0f;
    if (radius > 1e-6f)
        phiv = acosf(zv / radius);
    d_output_theta[i] = thetav;
    d_output_phi[i] = phiv;
}
// Host wrapper: launches hessianKernelO (eigenvalue + orientation) with one
// thread per voxel. Grid x covers imageD * (imageW / ROWS_BLOCKDIM_X) tiles,
// grid y covers imageH / ROWS_BLOCKDIM_Y rows.
extern "C" void hessianGPU_orientation
(
    float *d_Output,
    float *d_Output_theta,
    float *d_Output_phi,
    float *d_gxx,
    float *d_gxy,
    float *d_gxz,
    float *d_gyy,
    float *d_gyz,
    float *d_gzz,
    float sigma,
    int imageW,
    int imageH,
    int imageD
)
{
    dim3 grid(imageD * imageW / ROWS_BLOCKDIM_X, imageH / ROWS_BLOCKDIM_Y);
    dim3 block(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y);
    hessianKernelO<<<grid, block>>>(d_Output, d_Output_theta, d_Output_phi,
                                    d_gxx, d_gxy, d_gxz,
                                    d_gyy, d_gyz, d_gzz,
                                    sigma, imageW, imageH, imageD);
    // BUGFIX: the status check was duplicated and its message named the wrong
    // kernel; check once, naming the kernel actually launched.
    cutilCheckMsg("hessianKernelO() execution failed\n");
}
//////////////////////////////////////////////////////////////////////
// Maxes and things alike
//////////////////////////////////////////////////////////////////////
// Fills every voxel of a W x H x D volume with a constant value.
// Launch layout: gridDim.x spans imageD * (imageW / blockDim.x) tiles,
// gridDim.y spans imageH / blockDim.y rows.
__global__ void putKernel(
    float *d_output,
    float value,
    int imageW,
    int imageH,
    int imageD
){
    // blockIdx.x encodes (z-slice, tile-within-row); both are recovered with
    // integer division (the original ceilf() on an integer quotient is a no-op).
    const int blocksPerWidth = imageW / blockDim.x;
    const int z = blockIdx.x / blocksPerWidth;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    const int x = (blockIdx.x - z * blocksPerWidth) * blockDim.x + threadIdx.x;
    d_output[z * imageW * imageH + y * imageW + x] = value;
}
// Host wrapper: fills d_Output (imageW x imageH x imageD floats) with `value`.
extern "C" void putGPU
(
    float *d_Output,
    float value,
    int imageW,
    int imageH,
    int imageD
)
{
    dim3 grid(imageD * imageW / ROWS_BLOCKDIM_X, imageH / ROWS_BLOCKDIM_Y);
    dim3 threads(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y);
    putKernel<<<grid, threads>>>(d_Output, value, imageW, imageH, imageD);
    cutilCheckMsg("putKernel() execution failed\n");
}
// Element-wise write of d_value into d_output, one thread per voxel.
// NOTE(review): despite the name, the comparison below is commented out, so
// this is currently an unconditional copy, not a running max — confirm whether
// that is intended before relying on max semantics.
__global__ void maxKernel(
float *d_output,
float* d_value,
int imageW,
int imageH,
int imageD
){
// Map the 2D launch grid onto the 3D volume (x, y, z) and flat index i.
int n_blocks_per_width = imageW/blockDim.x;
int z = (int)ceilf(blockIdx.x/n_blocks_per_width);
int y = blockIdx.y*blockDim.y + threadIdx.y;
int x = (blockIdx.x - z*n_blocks_per_width)*blockDim.x + threadIdx.x;
int i = z*imageW*imageH + y*imageW + x;
// if(d_value[i] > d_output[i])
d_output[i] = d_value[i];
}
// Host wrapper: launches maxKernel over the whole volume (currently an
// element-wise copy of d_isMaximumThanOutput into d_Output — see maxKernel).
extern "C" void maxGPU
(
    float *d_Output,
    float *d_isMaximumThanOutput,
    int imageW,
    int imageH,
    int imageD
)
{
    dim3 grid(imageD * imageW / ROWS_BLOCKDIM_X, imageH / ROWS_BLOCKDIM_Y);
    dim3 block(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y);
    maxKernel<<<grid, block>>>(d_Output, d_isMaximumThanOutput,
                               imageW, imageH, imageD);
    // BUGFIX: the error message previously named putKernel().
    cutilCheckMsg("maxKernel() execution failed\n");
}
|
a4d913dc7c995426ca8b8a817931283d1ece37e4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/common/data_type.h"
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/kernel/util/cuda_half_util.h"
namespace oneflow {
namespace {
// Elementwise tril over a batch of row-major num_rows x num_cols matrices:
// entries above the shifted diagonal (col > row + diagonal) become `fill`,
// the rest are copied from x. Grid-stride loop over all elem_cnt entries.
template<typename T>
__global__ void TrilGpu(const int64_t elem_cnt, const int64_t num_rows, const int64_t num_cols,
                        const int64_t diagonal, const T* x, const T fill, T* y) {
  const int64_t matrix_size = num_rows * num_cols;
  CUDA_1D_KERNEL_LOOP_T(int64_t, k, elem_cnt) {
    const int64_t pos = k % matrix_size;      // offset inside the current matrix
    const int64_t row = pos / num_cols;
    const int64_t col = pos - row * num_cols;
    y[k] = (col > row + diagonal) ? fill : x[k];
  }
}
// Warp-per-row variant of tril: each warp owns row (i % num_rows) of one
// batched matrix and its lanes stride across the columns.
template<typename T>
__global__ void TrilWarpProcessRowGpu(const int64_t total_rows, const int64_t num_rows,
const int64_t num_cols, const int64_t diagonal, const T* x,
const T fill, T* y) {
// Global warp index, lane index within the warp, and total warp count.
const int64_t warp_id = (blockIdx.x * blockDim.x + threadIdx.x) / kCudaWarpSize;
const int64_t lan_id = threadIdx.x % kCudaWarpSize;
const int64_t num_warp = blockDim.x * gridDim.x / kCudaWarpSize;
for (int64_t i = warp_id; i < total_rows; i += num_warp) {
const int64_t row = i % num_rows;
for (int64_t col = lan_id; col < num_cols; col += kCudaWarpSize) {
const int64_t idx = i * num_cols + col;
// Entries above the shifted diagonal are replaced by fill.
y[idx] = col > row + diagonal ? fill : x[idx];
}
}
}
// half specialization: processes column pairs as half2 for wider loads.
// The host wrapper only selects this path when num_cols % (2 * warpSize) == 0,
// so num_cols is even and the half2 reinterpretation is safe.
template<>
__global__ void TrilWarpProcessRowGpu<half>(const int64_t total_rows, const int64_t num_rows,
const int64_t num_cols, const int64_t diagonal,
const half* x, const half fill, half* y) {
const int64_t h2_num_cols = num_cols / 2;
const auto* x_h2 = reinterpret_cast<const half2*>(x);
auto* y_h2 = reinterpret_cast<half2*>(y);
// Global warp index, lane index within the warp, and total warp count.
const int64_t warp_id = (blockIdx.x * blockDim.x + threadIdx.x) / kCudaWarpSize;
const int64_t lan_id = threadIdx.x % kCudaWarpSize;
const int64_t num_warp = blockDim.x * gridDim.x / kCudaWarpSize;
for (int64_t i = warp_id; i < total_rows; i += num_warp) {
const int64_t row = i % num_rows;
for (int64_t col = lan_id; col < h2_num_cols; col += kCudaWarpSize) {
const int64_t idx = i * h2_num_cols + col;
const half2 x_val = x_h2[idx];
half2 y_val;
// The two packed elements sit at real columns 2*col and 2*col + 1.
y_val.x = (2 * col) > row + diagonal ? fill : x_val.x;
y_val.y = (2 * col + 1) > row + diagonal ? fill : x_val.y;
y_h2[idx] = y_val;
}
}
}
// Fused scale + tril over a batch of row-major num_rows x num_cols matrices:
// lower-triangle entries (col <= row + diagonal) are scaled by `scale`,
// the rest become `fill`. Grid-stride loop over all elem_cnt entries.
template<typename T>
__global__ void FusedScaleTrilGpu(const int64_t elem_cnt, const int64_t num_rows,
                                  const int64_t num_cols, const int64_t diagonal, const T scale,
                                  const T* x, const T fill, T* y) {
  const int64_t matrix_size = num_rows * num_cols;
  CUDA_1D_KERNEL_LOOP_T(int64_t, k, elem_cnt) {
    const int64_t pos = k % matrix_size;      // offset inside the current matrix
    const int64_t row = pos / num_cols;
    const int64_t col = pos - row * num_cols;
    y[k] = (col > row + diagonal) ? fill : (scale * x[k]);
  }
}
// Warp-per-row variant of fused scale + tril: each warp owns row
// (i % num_rows) of one batched matrix; lanes stride across the columns.
template<typename T>
__global__ void FusedScaleTrilWarpProcessRowGpu(const int64_t total_rows, const int64_t num_rows,
const int64_t num_cols, const int64_t diagonal,
const T scale, const T* x, const T fill, T* y) {
// Global warp index, lane index within the warp, and total warp count.
const int64_t warp_id = (blockIdx.x * blockDim.x + threadIdx.x) / kCudaWarpSize;
const int64_t lan_id = threadIdx.x % kCudaWarpSize;
const int64_t num_warp = blockDim.x * gridDim.x / kCudaWarpSize;
for (int64_t i = warp_id; i < total_rows; i += num_warp) {
const int64_t row = i % num_rows;
for (int64_t col = lan_id; col < num_cols; col += kCudaWarpSize) {
const int64_t idx = i * num_cols + col;
// Lower triangle is scaled; entries above the shifted diagonal are filled.
y[idx] = col > row + diagonal ? fill : (scale * x[idx]);
}
}
}
// half specialization: scales column pairs as half2 via __hmul2.
// The host wrapper only selects this path when num_cols % (2 * warpSize) == 0,
// so num_cols is even and the half2 reinterpretation is safe.
template<>
__global__ void FusedScaleTrilWarpProcessRowGpu<half>(const int64_t total_rows,
const int64_t num_rows,
const int64_t num_cols,
const int64_t diagonal, const half scale,
const half* x, const half fill, half* y) {
const int64_t h2_num_cols = num_cols / 2;
const auto* x_h2 = reinterpret_cast<const half2*>(x);
auto* y_h2 = reinterpret_cast<half2*>(y);
// Broadcast the scalar scale into both halves of a half2.
const half2 h2_scale = __half2half2(scale);
// Global warp index, lane index within the warp, and total warp count.
const int64_t warp_id = (blockIdx.x * blockDim.x + threadIdx.x) / kCudaWarpSize;
const int64_t lan_id = threadIdx.x % kCudaWarpSize;
const int64_t num_warp = blockDim.x * gridDim.x / kCudaWarpSize;
for (int64_t i = warp_id; i < total_rows; i += num_warp) {
const int64_t row = i % num_rows;
for (int64_t col = lan_id; col < h2_num_cols; col += kCudaWarpSize) {
const int64_t idx = i * h2_num_cols + col;
const half2 scaled_x = __hmul2(h2_scale, x_h2[idx]);
half2 y_val;
// The two packed elements sit at real columns 2*col and 2*col + 1.
y_val.x = (2 * col) > row + diagonal ? fill : scaled_x.x;
y_val.y = (2 * col + 1) > row + diagonal ? fill : scaled_x.y;
y_h2[idx] = y_val;
}
}
}
// Picks the attribute that actually carries the value (floating vs integer)
// and converts it to the kernel's element type T.
template<typename T>
T GetAttrVal(bool is_floating_val, double floating_value, int64_t integer_value) {
  if (is_floating_val) { return static_cast<T>(floating_value); }
  return static_cast<T>(integer_value);
}
// half specialization: static_cast to half is unavailable here, so the
// selected value is routed through __float2half.
template<>
half GetAttrVal<half>(bool is_floating_val, double floating_value, int64_t integer_value) {
  if (is_floating_val) { return __float2half(floating_value); }
  return __float2half(integer_value);
}
} // namespace
// OneFlow user-op kernel computing tril(in, diagonal) on GPU, writing the
// attribute-supplied fill value into the discarded upper-triangular entries.
template<typename T>
class GpuTrilKernel final : public user_op::OpKernel {
public:
GpuTrilKernel() = default;
~GpuTrilKernel() override = default;
private:
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* x = ctx->Tensor4ArgNameAndIndex("in", 0);
const auto shape = x->shape();
const auto diagonal = ctx->Attr<int64_t>("diagonal");
// The two innermost axes form the matrices; any leading axes are batch dims.
const int64_t num_rows = shape.At(shape.NumAxes() - 2);
const int64_t num_cols = shape.At(shape.NumAxes() - 1);
user_op::Tensor* y = ctx->Tensor4ArgNameAndIndex("out", 0);
// NOTE(review): elem_cnt is truncated to int32_t here; tensors with more
// than 2^31 - 1 elements would overflow — confirm upstream shape limits.
const int32_t elem_cnt = shape.elem_cnt();
// The fill value arrives either as a floating or an integer attribute.
const T fill = GetAttrVal<T>(ctx->Attr<bool>("is_floating_fill_value"),
ctx->Attr<double>("floating_fill_value"),
ctx->Attr<int64_t>("integer_fill_value"));
// Fast path: warp-per-row kernel when each row spans whole warp-pairs;
// otherwise fall back to the elementwise grid-stride kernel.
if (num_cols % (kCudaWarpSize * 2) == 0) {
const int64_t total_rows = elem_cnt / num_cols;
hipLaunchKernelGGL(( TrilWarpProcessRowGpu), dim3(BlocksNum4ThreadsNum(total_rows * kCudaWarpSize)),
dim3(kCudaThreadsNumPerBlock), 0, ctx->device_ctx()->cuda_stream(),
total_rows, num_rows, num_cols, diagonal, x->dptr<T>(), fill, y->mut_dptr<T>());
} else {
hipLaunchKernelGGL(( TrilGpu), dim3(BlocksNum4ThreadsNum(elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0,
ctx->device_ctx()->cuda_stream(), elem_cnt, num_rows, num_cols, diagonal,
x->dptr<T>(), fill, y->mut_dptr<T>());
}
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
// Registers the "tril" user-op kernel for a dtype on GPU and declares that the
// op may run in place (out may alias in).
#define REGISTER_GPU_TRIL_KERNEL(dtype) \
REGISTER_USER_KERNEL("tril") \
.SetCreateFn<GpuTrilKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("out", 0) == GetDataType<dtype>::value)) \
.SetInplaceProposalFn([](const user_op::InferContext&, \
user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> { \
OF_RETURN_IF_ERROR(AddInplaceArgPairFn("out", 0, "in", 0, true)); \
return Maybe<void>::Ok(); \
});
// Instantiations for every supported element type.
REGISTER_GPU_TRIL_KERNEL(float)
REGISTER_GPU_TRIL_KERNEL(double)
REGISTER_GPU_TRIL_KERNEL(int8_t)
REGISTER_GPU_TRIL_KERNEL(int32_t)
REGISTER_GPU_TRIL_KERNEL(int64_t)
REGISTER_GPU_TRIL_KERNEL(half)
// OneFlow user-op kernel computing tril(scale * in, diagonal) on GPU in a
// single fused pass; discarded upper-triangular entries receive the
// attribute-supplied fill value.
template<typename T>
class GpuFusedScaleTrilKernel final : public user_op::OpKernel {
public:
GpuFusedScaleTrilKernel() = default;
~GpuFusedScaleTrilKernel() override = default;
private:
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* x = ctx->Tensor4ArgNameAndIndex("in", 0);
const auto shape = x->shape();
const auto diagonal = ctx->Attr<int64_t>("diagonal");
// The two innermost axes form the matrices; any leading axes are batch dims.
const int32_t num_rows = shape.At(shape.NumAxes() - 2);
const int32_t num_cols = shape.At(shape.NumAxes() - 1);
user_op::Tensor* y = ctx->Tensor4ArgNameAndIndex("out", 0);
// NOTE(review): elem_cnt is truncated to int32_t here; tensors with more
// than 2^31 - 1 elements would overflow — confirm upstream shape limits.
const int32_t elem_cnt = shape.elem_cnt();
// Fill and scale each arrive either as a floating or an integer attribute.
const T fill = GetAttrVal<T>(ctx->Attr<bool>("is_floating_fill_value"),
ctx->Attr<double>("floating_fill_value"),
ctx->Attr<int64_t>("integer_fill_value"));
const T scale = GetAttrVal<T>(ctx->Attr<bool>("is_floating_scale_value"),
ctx->Attr<double>("floating_scale_value"),
ctx->Attr<int64_t>("integer_scale_value"));
// Fast path: warp-per-row kernel when each row spans whole warp-pairs;
// otherwise fall back to the elementwise grid-stride kernel.
if (num_cols % (kCudaWarpSize * 2) == 0) {
const int64_t total_rows = elem_cnt / num_cols;
hipLaunchKernelGGL(( FusedScaleTrilWarpProcessRowGpu), dim3(BlocksNum4ThreadsNum(total_rows * kCudaWarpSize)),
dim3(kCudaThreadsNumPerBlock), 0,
ctx->device_ctx()->cuda_stream(),
total_rows, num_rows, num_cols, diagonal, scale, x->dptr<T>(), fill, y->mut_dptr<T>());
} else {
hipLaunchKernelGGL(( FusedScaleTrilGpu), dim3(BlocksNum4ThreadsNum(elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0,
ctx->device_ctx()->cuda_stream(),
elem_cnt, num_rows, num_cols, diagonal, scale, x->dptr<T>(), fill, y->mut_dptr<T>());
}
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
// Registers the "fused_scale_tril" user-op kernel for a dtype on GPU and
// declares that the op may run in place (out may alias in).
#define REGISTER_GPU_FUSED_SCALE_TRIL_KERNEL(dtype) \
REGISTER_USER_KERNEL("fused_scale_tril") \
.SetCreateFn<GpuFusedScaleTrilKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("out", 0) == GetDataType<dtype>::value)) \
.SetInplaceProposalFn([](const user_op::InferContext&, \
user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> { \
OF_RETURN_IF_ERROR(AddInplaceArgPairFn("out", 0, "in", 0, true)); \
return Maybe<void>::Ok(); \
});
// Instantiations for every supported element type.
REGISTER_GPU_FUSED_SCALE_TRIL_KERNEL(float)
REGISTER_GPU_FUSED_SCALE_TRIL_KERNEL(double)
REGISTER_GPU_FUSED_SCALE_TRIL_KERNEL(int8_t)
REGISTER_GPU_FUSED_SCALE_TRIL_KERNEL(int32_t)
REGISTER_GPU_FUSED_SCALE_TRIL_KERNEL(int64_t)
REGISTER_GPU_FUSED_SCALE_TRIL_KERNEL(half)
} // namespace oneflow
| a4d913dc7c995426ca8b8a817931283d1ece37e4.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/common/data_type.h"
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/kernel/util/cuda_half_util.h"
namespace oneflow {
namespace {
// Elementwise tril over a batch of row-major num_rows x num_cols matrices:
// entries above the shifted diagonal (j > i + diagonal) become `fill`,
// the rest are copied from x. Grid-stride loop over all elem_cnt entries.
template<typename T>
__global__ void TrilGpu(const int64_t elem_cnt, const int64_t num_rows, const int64_t num_cols,
const int64_t diagonal, const T* x, const T fill, T* y) {
const int64_t matrix_size = num_rows * num_cols;
CUDA_1D_KERNEL_LOOP_T(int64_t, k, elem_cnt) {
// Decompose the flat index into (row i, column j) within its matrix.
const int64_t offset_in_matrix = k % matrix_size;
const int64_t i = offset_in_matrix / num_cols;
const int64_t j = offset_in_matrix - num_cols * i;
y[k] = j > i + diagonal ? fill : x[k];
}
}
// Warp-per-row variant of tril: each warp owns row (i % num_rows) of one
// batched matrix and its lanes stride across the columns.
template<typename T>
__global__ void TrilWarpProcessRowGpu(const int64_t total_rows, const int64_t num_rows,
const int64_t num_cols, const int64_t diagonal, const T* x,
const T fill, T* y) {
// Global warp index, lane index within the warp, and total warp count.
const int64_t warp_id = (blockIdx.x * blockDim.x + threadIdx.x) / kCudaWarpSize;
const int64_t lan_id = threadIdx.x % kCudaWarpSize;
const int64_t num_warp = blockDim.x * gridDim.x / kCudaWarpSize;
for (int64_t i = warp_id; i < total_rows; i += num_warp) {
const int64_t row = i % num_rows;
for (int64_t col = lan_id; col < num_cols; col += kCudaWarpSize) {
const int64_t idx = i * num_cols + col;
// Entries above the shifted diagonal are replaced by fill.
y[idx] = col > row + diagonal ? fill : x[idx];
}
}
}
// half specialization: processes two adjacent columns per lane through
// half2 vector loads/stores. Valid only when num_cols is even; the host
// dispatch guarantees num_cols % (2 * kCudaWarpSize) == 0.
template<>
__global__ void TrilWarpProcessRowGpu<half>(const int64_t total_rows, const int64_t num_rows,
                                            const int64_t num_cols, const int64_t diagonal,
                                            const half* x, const half fill, half* y) {
  const int64_t h2_num_cols = num_cols / 2;
  const auto* x_h2 = reinterpret_cast<const half2*>(x);
  auto* y_h2 = reinterpret_cast<half2*>(y);
  const int64_t warp_id = (blockIdx.x * blockDim.x + threadIdx.x) / kCudaWarpSize;
  const int64_t lan_id = threadIdx.x % kCudaWarpSize;
  const int64_t num_warp = blockDim.x * gridDim.x / kCudaWarpSize;
  for (int64_t i = warp_id; i < total_rows; i += num_warp) {
    const int64_t row = i % num_rows;
    for (int64_t col = lan_id; col < h2_num_cols; col += kCudaWarpSize) {
      const int64_t idx = i * h2_num_cols + col;
      const half2 x_val = x_h2[idx];
      half2 y_val;
      // The two lanes of the half2 correspond to real columns 2*col and
      // 2*col+1, so each gets its own triangular test.
      y_val.x = (2 * col) > row + diagonal ? fill : x_val.x;
      y_val.y = (2 * col + 1) > row + diagonal ? fill : x_val.y;
      y_h2[idx] = y_val;
    }
  }
}
// Fused y = tril(scale * x): same indexing as TrilGpu, but kept entries
// are multiplied by `scale` on the way through.
template<typename T>
__global__ void FusedScaleTrilGpu(const int64_t elem_cnt, const int64_t num_rows,
                                  const int64_t num_cols, const int64_t diagonal, const T scale,
                                  const T* x, const T fill, T* y) {
  const int64_t matrix_size = num_rows * num_cols;
  CUDA_1D_KERNEL_LOOP_T(int64_t, k, elem_cnt) {
    const int64_t offset_in_matrix = k % matrix_size;
    const int64_t i = offset_in_matrix / num_cols;
    const int64_t j = offset_in_matrix - num_cols * i;
    y[k] = j > i + diagonal ? fill : (scale * x[k]);
  }
}
// Warp-per-row variant of FusedScaleTrilGpu (see TrilWarpProcessRowGpu
// for the warp/lane layout).
template<typename T>
__global__ void FusedScaleTrilWarpProcessRowGpu(const int64_t total_rows, const int64_t num_rows,
                                                const int64_t num_cols, const int64_t diagonal,
                                                const T scale, const T* x, const T fill, T* y) {
  const int64_t warp_id = (blockIdx.x * blockDim.x + threadIdx.x) / kCudaWarpSize;
  const int64_t lan_id = threadIdx.x % kCudaWarpSize;
  const int64_t num_warp = blockDim.x * gridDim.x / kCudaWarpSize;
  for (int64_t i = warp_id; i < total_rows; i += num_warp) {
    const int64_t row = i % num_rows;
    for (int64_t col = lan_id; col < num_cols; col += kCudaWarpSize) {
      const int64_t idx = i * num_cols + col;
      y[idx] = col > row + diagonal ? fill : (scale * x[idx]);
    }
  }
}
// half specialization of the fused kernel: half2 vectorized, scale applied
// with __hmul2 before masking. Requires even num_cols (host dispatch
// guarantees num_cols % (2 * kCudaWarpSize) == 0).
template<>
__global__ void FusedScaleTrilWarpProcessRowGpu<half>(const int64_t total_rows,
                                                      const int64_t num_rows,
                                                      const int64_t num_cols,
                                                      const int64_t diagonal, const half scale,
                                                      const half* x, const half fill, half* y) {
  const int64_t h2_num_cols = num_cols / 2;
  const auto* x_h2 = reinterpret_cast<const half2*>(x);
  auto* y_h2 = reinterpret_cast<half2*>(y);
  // Broadcast the scalar into both lanes of a half2.
  const half2 h2_scale = __half2half2(scale);
  const int64_t warp_id = (blockIdx.x * blockDim.x + threadIdx.x) / kCudaWarpSize;
  const int64_t lan_id = threadIdx.x % kCudaWarpSize;
  const int64_t num_warp = blockDim.x * gridDim.x / kCudaWarpSize;
  for (int64_t i = warp_id; i < total_rows; i += num_warp) {
    const int64_t row = i % num_rows;
    for (int64_t col = lan_id; col < h2_num_cols; col += kCudaWarpSize) {
      const int64_t idx = i * h2_num_cols + col;
      const half2 scaled_x = __hmul2(h2_scale, x_h2[idx]);
      half2 y_val;
      // Lanes map to real columns 2*col and 2*col+1.
      y_val.x = (2 * col) > row + diagonal ? fill : scaled_x.x;
      y_val.y = (2 * col + 1) > row + diagonal ? fill : scaled_x.y;
      y_h2[idx] = y_val;
    }
  }
}
// Pick the op attribute value: attributes arrive as either a double
// ("floating_*") or an int64 ("integer_*"), discriminated by the flag.
template<typename T>
T GetAttrVal(bool is_floating_val, double floating_value, int64_t integer_value) {
  return is_floating_val ? static_cast<T>(floating_value) : static_cast<T>(integer_value);
}
// half cannot be produced by static_cast in host code; go through float.
// NOTE(review): integer_value is narrowed int64 -> float before the half
// conversion — exact only for small magnitudes.
template<>
half GetAttrVal<half>(bool is_floating_val, double floating_value, int64_t integer_value) {
  return is_floating_val ? __float2half(floating_value) : __float2half(integer_value);
}
} // namespace
// GPU kernel for the "tril" user op: y = tril(x, diagonal), with masked-out
// entries set to a configurable fill value. The last two axes are the
// matrix dims; leading axes form the batch.
template<typename T>
class GpuTrilKernel final : public user_op::OpKernel {
 public:
  GpuTrilKernel() = default;
  ~GpuTrilKernel() override = default;

 private:
  void Compute(user_op::KernelComputeContext* ctx) const override {
    const user_op::Tensor* x = ctx->Tensor4ArgNameAndIndex("in", 0);
    const auto shape = x->shape();
    const auto diagonal = ctx->Attr<int64_t>("diagonal");
    const int64_t num_rows = shape.At(shape.NumAxes() - 2);
    const int64_t num_cols = shape.At(shape.NumAxes() - 1);
    user_op::Tensor* y = ctx->Tensor4ArgNameAndIndex("out", 0);
    // NOTE(review): elem_cnt is int32_t — overflows for tensors with more
    // than 2^31-1 elements; confirm upstream size limits.
    const int32_t elem_cnt = shape.elem_cnt();
    // The fill is supplied either as a floating or an integer attribute.
    const T fill = GetAttrVal<T>(ctx->Attr<bool>("is_floating_fill_value"),
                                 ctx->Attr<double>("floating_fill_value"),
                                 ctx->Attr<int64_t>("integer_fill_value"));
    if (num_cols % (kCudaWarpSize * 2) == 0) {
      // Warp-per-row path; also the precondition for the half2 specialization.
      const int64_t total_rows = elem_cnt / num_cols;
      TrilWarpProcessRowGpu<<<BlocksNum4ThreadsNum(total_rows * kCudaWarpSize),
                              kCudaThreadsNumPerBlock, 0, ctx->device_ctx()->cuda_stream()>>>(
          total_rows, num_rows, num_cols, diagonal, x->dptr<T>(), fill, y->mut_dptr<T>());
    } else {
      TrilGpu<<<BlocksNum4ThreadsNum(elem_cnt), kCudaThreadsNumPerBlock, 0,
                ctx->device_ctx()->cuda_stream()>>>(elem_cnt, num_rows, num_cols, diagonal,
                                                    x->dptr<T>(), fill, y->mut_dptr<T>());
    }
  }
  bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_GPU_TRIL_KERNEL(dtype) \
REGISTER_USER_KERNEL("tril") \
.SetCreateFn<GpuTrilKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("out", 0) == GetDataType<dtype>::value)) \
.SetInplaceProposalFn([](const user_op::InferContext&, \
user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> { \
OF_RETURN_IF_ERROR(AddInplaceArgPairFn("out", 0, "in", 0, true)); \
return Maybe<void>::Ok(); \
});
REGISTER_GPU_TRIL_KERNEL(float)
REGISTER_GPU_TRIL_KERNEL(double)
REGISTER_GPU_TRIL_KERNEL(int8_t)
REGISTER_GPU_TRIL_KERNEL(int32_t)
REGISTER_GPU_TRIL_KERNEL(int64_t)
REGISTER_GPU_TRIL_KERNEL(half)
// GPU kernel for the "fused_scale_tril" user op:
// y = tril(scale * x, diagonal), with masked-out entries set to a
// configurable fill value.
template<typename T>
class GpuFusedScaleTrilKernel final : public user_op::OpKernel {
 public:
  GpuFusedScaleTrilKernel() = default;
  ~GpuFusedScaleTrilKernel() override = default;

 private:
  void Compute(user_op::KernelComputeContext* ctx) const override {
    const user_op::Tensor* x = ctx->Tensor4ArgNameAndIndex("in", 0);
    const auto shape = x->shape();
    const auto diagonal = ctx->Attr<int64_t>("diagonal");
    // Use int64_t for the matrix dims, matching GpuTrilKernel and the
    // int64_t kernel parameters (the original int32_t silently narrowed).
    const int64_t num_rows = shape.At(shape.NumAxes() - 2);
    const int64_t num_cols = shape.At(shape.NumAxes() - 1);
    user_op::Tensor* y = ctx->Tensor4ArgNameAndIndex("out", 0);
    // NOTE(review): elem_cnt stays int32_t for consistency with the sibling
    // kernel; overflows past 2^31-1 elements.
    const int32_t elem_cnt = shape.elem_cnt();
    // fill and scale each arrive as either a floating or integer attribute.
    const T fill = GetAttrVal<T>(ctx->Attr<bool>("is_floating_fill_value"),
                                 ctx->Attr<double>("floating_fill_value"),
                                 ctx->Attr<int64_t>("integer_fill_value"));
    const T scale = GetAttrVal<T>(ctx->Attr<bool>("is_floating_scale_value"),
                                  ctx->Attr<double>("floating_scale_value"),
                                  ctx->Attr<int64_t>("integer_scale_value"));
    if (num_cols % (kCudaWarpSize * 2) == 0) {
      // Warp-per-row path; also the precondition for the half2 specialization.
      const int64_t total_rows = elem_cnt / num_cols;
      FusedScaleTrilWarpProcessRowGpu<<<BlocksNum4ThreadsNum(total_rows * kCudaWarpSize),
                                        kCudaThreadsNumPerBlock, 0,
                                        ctx->device_ctx()->cuda_stream()>>>(
          total_rows, num_rows, num_cols, diagonal, scale, x->dptr<T>(), fill, y->mut_dptr<T>());
    } else {
      FusedScaleTrilGpu<<<BlocksNum4ThreadsNum(elem_cnt), kCudaThreadsNumPerBlock, 0,
                          ctx->device_ctx()->cuda_stream()>>>(
          elem_cnt, num_rows, num_cols, diagonal, scale, x->dptr<T>(), fill, y->mut_dptr<T>());
    }
  }
  bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_GPU_FUSED_SCALE_TRIL_KERNEL(dtype) \
REGISTER_USER_KERNEL("fused_scale_tril") \
.SetCreateFn<GpuFusedScaleTrilKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("out", 0) == GetDataType<dtype>::value)) \
.SetInplaceProposalFn([](const user_op::InferContext&, \
user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> { \
OF_RETURN_IF_ERROR(AddInplaceArgPairFn("out", 0, "in", 0, true)); \
return Maybe<void>::Ok(); \
});
REGISTER_GPU_FUSED_SCALE_TRIL_KERNEL(float)
REGISTER_GPU_FUSED_SCALE_TRIL_KERNEL(double)
REGISTER_GPU_FUSED_SCALE_TRIL_KERNEL(int8_t)
REGISTER_GPU_FUSED_SCALE_TRIL_KERNEL(int32_t)
REGISTER_GPU_FUSED_SCALE_TRIL_KERNEL(int64_t)
REGISTER_GPU_FUSED_SCALE_TRIL_KERNEL(half)
} // namespace oneflow
|
2991d964207449d45f69e581c7838628e51de023.hip | // !!! This is a file automatically generated by hipify!!!
//
// srt.cu - a single relaxation time, LB solution to Navier-Stokes
//
//
#include <hip/hip_runtime_api.h>
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <values.h>
#include <sys/time.h>
#include <signal.h>
#include <stdint.h>
#include <unistd.h>
#include "srt.h"
#include "srt_kernels.cu"
// CUDA global memory pointers:
float *f[2];
rrnode *dev_bounce;
unsigned char *dev_nclass;
// Large host arrays:
float *host_f, *vfield;
int *host_rindex;
rrnode *host_bounce = NULL;
// Class array:
unsigned char nclass[NODES];
// This holds the Maxwell-Boltzmann equilibrium for rho=1, u=(0.1,0,0).
double eqvnode[DIRECTIONS];
// This holds the Maxwell-Boltzmann equilibrium for rho=1, u=(0,0,0), which
// is just the link weights.
double eqznode[DIRECTIONS];
// Parameters
double max_target_dim = 40.0;
struct rvector override_scale = {0.0, 0.0, 102.4};
struct rvector target_xlate = {200.5, 112.5, -128.5};
/* Euclidean inner product of two 3-vectors. */
double dot(struct rvector u, struct rvector v)
{
    double sum = u.x * v.x;
    sum += u.y * v.y;
    sum += u.z * v.z;
    return sum;
}
/* Euclidean norm: square root of the vector dotted with itself. */
double vlength(struct rvector u)
{
    double sq = dot(u, u);
    return sqrt(sq);
}
struct rvector cross(struct rvector a, struct rvector b)
{
struct rvector c;
c.x = a.y*b.z-a.z*b.y;
c.y = -a.x*b.z+a.z*b.x;
c.z = a.x*b.y-a.y*b.x;
return(c);
}
// Dot product of the i-th lattice direction vector host_ci[i] (integer
// components, promoted to double) with velocity u.
double host_dotci(int i,struct rvector u)
{
    struct rvector ci;
    ci.x = (double)(host_ci[i].x);
    ci.y = (double)(host_ci[i].y);
    ci.z = (double)(host_ci[i].z);
    return(dot(ci,u));
}
/* Uniform deviate strictly inside (0,1): random() yields [0, 2^31-1],
   so random()+1 lies in [1, 2^31] and dividing by 2^31+1 keeps the
   result away from both endpoints. */
double genrand()
{
    long r = random() + 1;
    return (double)r / 2147483649.;
}
// This is not called until node classes are set by set_initial_nclass( ) and
// find_boundary_nodes( ).
void init_lattice()
{
    // Fill the host distribution array host_f with equilibrium values:
    // SOLID and BOUNDARY nodes get the rest-state weights (eqznode), all
    // other nodes get the moving-state equilibrium (eqvnode) plus a small
    // random perturbation to break symmetry.
    int i,j,k,m,nc;
    for(i=0;i<=WIDTH;i++){
        for(j=0;j<=HEIGHT;j++){
            for(k=0;k<=DEPTH;k++){
                nc = nclass[cstore(i,j,k)];
                if(nc==SOLID || nc==BOUNDARY){
                    for(m=0;m<DIRECTIONS;m++){
                        host_f[store(i,j,k,m)] = eqznode[m];
                    }
                }
                else {
                    for(m=0;m<DIRECTIONS;m++){
                        // Uniform perturbation in (-NOISE, NOISE).
                        host_f[store(i,j,k,m)] = eqvnode[m]
                            + NOISE*(2.0*genrand()-1.0);
                    }
                }
            }
        }
    }
}
/*
 * Classify every lattice node. Everything starts FREE; the six domain
 * faces are then stamped in this order (later stamps win on shared edges
 * and corners): back/front, floor/roof, input/exit.
 */
void set_initial_nclass()
{
    int x, y, z;
    for(x=0;x<=WIDTH;x++){
        for(y=0;y<=HEIGHT;y++){
            for(z=0;z<=DEPTH;z++){
                nclass[cstore(x,y,z)] = FREE;
            }
        }
    }
    /* z extremes */
    for(x=0;x<=WIDTH;x++){
        for(y=0;y<=HEIGHT;y++){
            nclass[cstore(x,y,0)] = BACK;
            nclass[cstore(x,y,DEPTH)] = FRONT;
        }
    }
    /* y extremes */
    for(x=0;x<=WIDTH;x++){
        for(z=0;z<=DEPTH;z++){
            nclass[cstore(x,0,z)] = FLOOR;
            nclass[cstore(x,HEIGHT,z)] = ROOF;
        }
    }
    /* x extremes */
    for(y=0;y<=HEIGHT;y++){
        for(z=0;z<=DEPTH;z++){
            nclass[cstore(0,y,z)] = INPUT;
            nclass[cstore(WIDTH,y,z)] = EXIT;
        }
    }
}
// Maxwell-Boltzmann equilibrium distribution for direction m at density
// rho and velocity u (standard second-order LBGK expansion).
double get_equilibrium_f(double rho, struct rvector u, int m)
{
    double ddot;
    ddot = host_dotci(m,u);
    return(host_link_weight[m]*rho*(1.0+3.0*ddot+4.5*ddot*ddot-1.5*dot(u,u)));
}
// Note that the .obj file must specify triangles.
struct tri *trilist;
struct rvector *vlist;
// Intersect the segment sp->dp with triangle trilist[idx].
// Returns 1 on a hit, with:
//   *tptr  - hit parameter t along the segment (t=1 is the far end;
//            callers reject t>1.0 to restrict hits to the segment),
//   *pnrml - unit normal of the triangle,
//   *insp  - sign of the denominator (which side of the triangle the
//            segment approaches from; assumed to mean "entering the
//            object" given the .obj winding — TODO confirm).
int hits(struct rvector sp, struct rvector dp, int idx, double *tptr,int *insp,
         struct rvector *pnrml)
{
    struct rvector a,b,c,d,nrml;
    double t, u, v, base, vsize;
    // a: segment direction; b: triangle vertex v2 relative to the origin;
    // c,d: triangle edges from v0 and v1 to v2.
    a.x = dp.x - sp.x;
    a.y = dp.y - sp.y;
    a.z = dp.z - sp.z;
    b.x = trilist[idx].v2.x - sp.x;
    b.y = trilist[idx].v2.y - sp.y;
    b.z = trilist[idx].v2.z - sp.z;
    c.x = trilist[idx].v2.x - trilist[idx].v0.x;
    c.y = trilist[idx].v2.y - trilist[idx].v0.y;
    c.z = trilist[idx].v2.z - trilist[idx].v0.z;
    d.x = trilist[idx].v2.x - trilist[idx].v1.x;
    d.y = trilist[idx].v2.y - trilist[idx].v1.y;
    d.z = trilist[idx].v2.z - trilist[idx].v1.z;
    nrml = cross(c,d);
    base = dot(a,nrml);
    // Segment (nearly) parallel to the triangle plane: no hit.
    if(fabs(base)<SMALL) return(0);
    if(base > 0.0) *insp=1;
    else *insp=0;
    t = dot(b,nrml)/base;
    // Hit behind (or essentially at) the segment origin: reject.
    if(t<SMALL) return(0);
    // Barycentric coordinates; the point must lie inside the triangle.
    u = dot(a,cross(b,d))/base;
    v = dot(a,cross(c,b))/base;
    if((u<0.0) || (v<0.0) || ((u+v)>1.0)) return(0);
    // Return a unit-length normal.
    vsize = vlength(nrml);
    nrml.x /= vsize;
    nrml.y /= vsize;
    nrml.z /= vsize;
    *pnrml = nrml;
    *tptr = t;
    return(1);
}
// Rasterize the triangle mesh onto the lattice and build the compressed
// bounce-back table host_bounce[]. Returns the number of boundary records
// (revcount). Side effects: fills host_rindex[] (node -> slot map),
// allocates host_bounce, and reclassifies hit nodes BOUNDARY (inside) or
// FFLOW (outside).
int find_boundary_nodes(int fcount)
{
    int i, j, k, m, itri, nc, slot, xslot, new_winner;
    int tx, ty, tz;
    struct rvector sp, dp, nrml;
    double del;
    double minx, maxx, miny, maxy, minz, maxz;
    int imin, imax, jmin, jmax, kmin, kmax;
    int inside, revcount, rindex;
    // Now determine, for each triangle, which links hit it, and mark those
    // links as reverse directions. Note that node classification can change
    // because a given node may be included in the rasterization of more than
    // one triangle. The closest hit determines it.
    //
    // Since objects can push through walls, we also need to prevent
    // rewriting the labels on wall nodes. It hoses the flood fill.
    //
    // Since we can't afford the space to store normals for every grid node,
    // we have to do the compression here. This means we have to do two passes,
    // where the first counts how many slots we'll need.
    fprintf(stderr,"finding object boundary\n");
    fprintf(stderr,"PASS 1\n");
    slot = 0;
    for(itri=0;itri<fcount;itri++){
        if(itri%1000==0) fprintf(stderr,"%d/%d done\n",itri,fcount);
        // Axis-aligned bounding box of this triangle.
        maxx = minx = trilist[itri].v0.x;
        maxy = miny = trilist[itri].v0.y;
        maxz = minz = trilist[itri].v0.z;
        if(trilist[itri].v1.x > maxx) maxx = trilist[itri].v1.x;
        if(trilist[itri].v1.x < minx) minx = trilist[itri].v1.x;
        if(trilist[itri].v1.y > maxy) maxy = trilist[itri].v1.y;
        if(trilist[itri].v1.y < miny) miny = trilist[itri].v1.y;
        if(trilist[itri].v1.z > maxz) maxz = trilist[itri].v1.z;
        if(trilist[itri].v1.z < minz) minz = trilist[itri].v1.z;
        if(trilist[itri].v2.x > maxx) maxx = trilist[itri].v2.x;
        if(trilist[itri].v2.x < minx) minx = trilist[itri].v2.x;
        if(trilist[itri].v2.y > maxy) maxy = trilist[itri].v2.y;
        if(trilist[itri].v2.y < miny) miny = trilist[itri].v2.y;
        if(trilist[itri].v2.z > maxz) maxz = trilist[itri].v2.z;
        if(trilist[itri].v2.z < minz) minz = trilist[itri].v2.z;
        // Pad by SR3 (longest link length, sqrt(3)) plus one cell so every
        // node whose links could cross the triangle is scanned.
        imax = (int)(maxx+SR3+1);
        jmax = (int)(maxy+SR3+1);
        kmax = (int)(maxz+SR3+1);
        imin = (int)(minx-SR3-1);
        jmin = (int)(miny-SR3-1);
        kmin = (int)(minz-SR3-1);
        for(i=imin;i<=imax;i++){
            for(j=jmin;j<=jmax;j++){
                for(k=kmin;k<=kmax;k++){
                    if(!LEGAL(i,j,k)) continue;
                    // Most classes are not set yet.
                    // Here we want only class FREE.
                    nc = nclass[cstore(i,j,k)];
                    if(nc!=FREE) continue;
                    sp.x = (double)(i);
                    sp.y = (double)(j);
                    sp.z = (double)(k);
                    for(m=1;m<DIRECTIONS;m++){
                        tx = i+host_ci[m].x;
                        ty = j+host_ci[m].y;
                        tz = k+host_ci[m].z;
                        dp.x = (double)(tx);
                        dp.y = (double)(ty);
                        dp.z = (double)(tz);
                        if(hits(sp,dp,itri,&del,&inside,&nrml)){
                            if(del>1.0) continue; // Hit beyond link.
                            if((rindex=host_rindex[cstore(i,j,k)])==-1){
                                // New slot needed.
                                host_rindex[cstore(i,j,k)] = slot++;
                                break;
                            }
                        }
                    }
                }
            }
        }
    }
    revcount = slot;
    host_bounce = (rrnode *)calloc(revcount,sizeof(rrnode));
    // Set all links to non-broken.
    for(slot=0;slot<revcount;slot++) {
        for(m=0;m<DIRECTIONS;m++){
            host_bounce[slot].del[m] = -1.0;
        }
    }
    // Reset reverse index.
    for(i=0;i<NODES;i++) host_rindex[i] = -1;
    fprintf(stderr,"PASS 2\n");
    // Pass 2 repeats the scan, now recording hit distances/normals into
    // host_bounce and reclassifying nodes by the closest hit so far.
    slot = 0;
    for(itri=0;itri<fcount;itri++){
        if(itri%1000==0) fprintf(stderr,"%d/%d done\n",itri,fcount);
        maxx = minx = trilist[itri].v0.x;
        maxy = miny = trilist[itri].v0.y;
        maxz = minz = trilist[itri].v0.z;
        if(trilist[itri].v1.x > maxx) maxx = trilist[itri].v1.x;
        if(trilist[itri].v1.x < minx) minx = trilist[itri].v1.x;
        if(trilist[itri].v1.y > maxy) maxy = trilist[itri].v1.y;
        if(trilist[itri].v1.y < miny) miny = trilist[itri].v1.y;
        if(trilist[itri].v1.z > maxz) maxz = trilist[itri].v1.z;
        if(trilist[itri].v1.z < minz) minz = trilist[itri].v1.z;
        if(trilist[itri].v2.x > maxx) maxx = trilist[itri].v2.x;
        if(trilist[itri].v2.x < minx) minx = trilist[itri].v2.x;
        if(trilist[itri].v2.y > maxy) maxy = trilist[itri].v2.y;
        if(trilist[itri].v2.y < miny) miny = trilist[itri].v2.y;
        if(trilist[itri].v2.z > maxz) maxz = trilist[itri].v2.z;
        if(trilist[itri].v2.z < minz) minz = trilist[itri].v2.z;
        imax = (int)(maxx+SR3+1);
        jmax = (int)(maxy+SR3+1);
        kmax = (int)(maxz+SR3+1);
        imin = (int)(minx-SR3-1);
        jmin = (int)(miny-SR3-1);
        kmin = (int)(minz-SR3-1);
        for(i=imin;i<=imax;i++){
            for(j=jmin;j<=jmax;j++){
                for(k=kmin;k<=kmax;k++){
                    if(!LEGAL(i,j,k)) continue;
                    nc = nclass[cstore(i,j,k)];
                    if((nc!=FREE)&&(nc!=BOUNDARY)&&(nc!=FFLOW)) continue;
                    sp.x = (double)(i);
                    sp.y = (double)(j);
                    sp.z = (double)(k);
                    for(m=1;m<DIRECTIONS;m++){
                        tx = i+host_ci[m].x;
                        ty = j+host_ci[m].y;
                        tz = k+host_ci[m].z;
                        dp.x = (double)(tx);
                        dp.y = (double)(ty);
                        dp.z = (double)(tz);
                        new_winner = 0;
                        if(hits(sp,dp,itri,&del,&inside,&nrml)){
                            if(del>1.0) continue;
                            // 1 ray unit != 1 grid unit. Convert to grid distance.
                            del *= host_link_length[m];
                            if((rindex=host_rindex[cstore(i,j,k)])==-1){
                                // We've not seen this node.
                                host_bounce[slot].i = i;
                                host_bounce[slot].j = j;
                                host_bounce[slot].k = k;
                                host_bounce[slot].del[m] = del;
                                host_bounce[slot].nrml[m] = nrml;
                                // Overall winner in direction slot 0.
                                host_bounce[slot].del[0] = del;
                                host_rindex[cstore(i,j,k)] = slot++;
                                new_winner = 1;
                            }
                            else {
                                // We have seen this node, but maybe not
                                // this direction.
                                if (host_bounce[rindex].del[m]<0.0 ||
                                    del<host_bounce[rindex].del[m]){
                                    host_bounce[rindex].del[m] = del;
                                    host_bounce[rindex].nrml[m] = nrml;
                                    if (del<host_bounce[rindex].del[0]){
                                        host_bounce[rindex].del[0] = del;
                                        new_winner = 1;
                                    }
                                }
                            }
                            if(new_winner){
                                // We have to reclassify (perhaps) on
                                // each hit.
                                if(inside) nclass[cstore(i,j,k)] = BOUNDARY;
                                else nclass[cstore(i,j,k)] = FFLOW;
                            }
                        }
                    }
                }
            }
        }
    }
    // Now go back and remove broken links from BOUNDARY or FFLOW
    // nodes that don't reach FFLOW nodes, since such links are not used
    // in the bounce( ) kernel.
    for(slot=0;slot<revcount;slot++){
        i = host_bounce[slot].i;
        j = host_bounce[slot].j;
        k = host_bounce[slot].k;
        nc = nclass[cstore(i,j,k)];
        for(m=1;m<DIRECTIONS;m++){
            tx = i+host_ci[m].x;
            ty = j+host_ci[m].y;
            tz = k+host_ci[m].z;
            if((nc==BOUNDARY || nc==FFLOW) &&
               nclass[cstore(tx,ty,tz)]!=FFLOW){
                host_bounce[slot].del[m] = -1.0;
            }
        }
    }
    // Do we have any broken links left?
    // Recompute each record's overall minimum (del[0]); -1 marks records
    // with no surviving broken link.
    for(slot=0;slot<revcount;slot++){
        minx = LARGE;
        for(m=1;m<DIRECTIONS;m++){
            if((del=host_bounce[slot].del[m])>0.0 && del<minx) minx=del;
        }
        if(minx<LARGE) host_bounce[slot].del[0] = minx;
        else host_bounce[slot].del[0] = -1.0;
    }
    // Finally, squeeze host_bounce[ ] list down.
    slot=0; xslot=0;
    while(xslot<revcount){
        if(host_bounce[xslot].del[0]>0.0){
            host_bounce[slot].i = host_bounce[xslot].i;
            host_bounce[slot].j = host_bounce[xslot].j;
            host_bounce[slot].k = host_bounce[xslot].k;
            for(m=0;m<DIRECTIONS;m++){
                host_bounce[slot].del[m] = host_bounce[xslot].del[m];
                host_bounce[slot].nrml[m] = host_bounce[xslot].nrml[m];
            }
            slot++;
        }
        xslot++;
    }
    revcount = slot;
    return(revcount);
}
// Recover macroscopic quantities at node (i,j,k) from the distribution
// array hf: *rptr = density (zeroth moment), *uptr = velocity
// (first moment / density).
void host_get_u(float *hf,int i,int j,int k,double *rptr, struct rvector *uptr)
{
    int m;
    double rho = 0.0;
    struct rvector mo, u;
    for(m=0;m<DIRECTIONS;m++) rho += (double)(hf[store(i,j,k,m)]);
    // Momentum: sum over directions of c_i[m] * f_m.
    mo.x = mo.y = mo.z = 0.0;
    for(m=0;m<DIRECTIONS;m++){
        mo.x += host_ci[m].x*(double)(hf[store(i,j,k,m)]);
        mo.y += host_ci[m].y*(double)(hf[store(i,j,k,m)]);
        mo.z += host_ci[m].z*(double)(hf[store(i,j,k,m)]);
    }
    u.x = mo.x/rho;
    u.y = mo.y/rho;
    u.z = mo.z/rho;
    *rptr = rho;
    *uptr = u;
}
float ReverseEndian(float val)
{
uint32_t ivalue = htobe32(*(uint32_t *)(&val));
return(*(float *)&ivalue);
}
// Emit the ASCII header of a binary STRUCTURED_POINTS VTK file describing
// a single z-slice (WIDTH+1 x HEIGHT+1 x 1) of float vectors; `buf` is
// used as the dataset title line.
void write_vtk_header(FILE* fptr, char* buf)
{
    fprintf(fptr,"# vtk DataFile Version 2.0\n");
    fprintf(fptr,"%s\n", buf);
    fprintf(fptr,"BINARY\n");
    fprintf(fptr,"DATASET STRUCTURED_POINTS\n");
    // Full-volume variant kept for reference; only one slice is dumped.
    // fprintf(fptr,"DIMENSIONS %d %d %d\n", WIDTH+1, HEIGHT+1, DEPTH+1);
    fprintf(fptr,"DIMENSIONS %d %d %d\n", WIDTH+1, HEIGHT+1, 1);
    fprintf(fptr,"SPACING 1 1 1\n");
    fprintf(fptr,"ORIGIN 0 0 0\n");
    fprintf(fptr,"POINT_DATA %d\n", SLICENODES);
    fprintf(fptr,"VECTORS vfield float\n");
}
// Copy the current device distribution buffer back to the host and write
// the velocity field of the z=SLICE plane to "vdir/vfield.<iteration>.vtk"
// as a big-endian binary VTK file.
void save_velocity_field(int iteration)
{
    int i,j;
    FILE *fptr;
    char buf[256];
    double rho;
    struct rvector u;
    float *vptr;
    // f[iteration%2] is presumably the buffer last written at this point
    // in the step sequence — verify against go( ). hipMemcpy blocks, so
    // the extra synchronize is belt-and-braces.
    hipMemcpy(&host_f[0],&f[iteration%2][0],FLOWS*sizeof(float),hipMemcpyDefault);
    hipDeviceSynchronize();
    sprintf(buf,"%s/vfield.%d.vtk","vdir",iteration);
    fptr=fopen(buf,"w");
    write_vtk_header(fptr,buf);
    vptr = vfield;
    for(j=0;j<=HEIGHT;j++){
        for(i=0;i<=WIDTH;i++){
            host_get_u(host_f,i,j,SLICE,&rho,&u);
            // VTK binary data is big-endian; swap on little-endian hosts.
            *vptr++ = ReverseEndian((float)(u.x));
            *vptr++ = ReverseEndian((float)(u.y));
            *vptr++ = ReverseEndian((float)(u.z));
        }
    }
    // NOTE(review): size/nmemb look transposed (SLICENODES items of
    // 3*sizeof(float) bytes each); the total byte count is right either way.
    fwrite(vfield,SLICENODES,3*sizeof(float),fptr);
    fclose(fptr);
}
/* Shutdown path (also installed as the SIGUSR1 handler): release every
   device allocation, reset the device, and exit. */
void cleanup(int signum)
{
    int b;
    (void)signum;   /* unused; required by the signal-handler signature */
    for(b=0;b<2;b++) hipFree(f[b]);
    hipFree(dev_bounce);
    hipFree(dev_nclass);
    hipDeviceReset();
    exit(0);
}
// Main time-stepping loop: per step, run the collision ("cascade"),
// bounce-back and streaming kernels (defined in srt_kernels.cu) over the
// two device distribution buffers, dumping the velocity slice periodically.
void go(int rvcount)
{
    int t, from, to, pad;
    dim3 stream_lws(1,4,32);
    dim3 stream_ws(WIDTH,HEIGHT/4,DEPTH/32);
    dim3 cascade_lws(1,4,32);
    dim3 cascade_ws(WIDTH,HEIGHT/4,DEPTH/32);
    // 1D grid for bounce( ): enough LWS-wide blocks to cover rvcount.
    pad = (rvcount/LWS+1);
    to = 0;
    from = 1;
    for(t=1;t<=FINAL_TIME;t++){
        hipDeviceSynchronize();
        hipLaunchKernelGGL(( cascade), dim3(cascade_ws),dim3(cascade_lws), 0, 0, f[to],f[from],dev_nclass);
        // Full device-to-device copy so both buffers hold post-collision
        // values before bounce/stream. NOTE(review): copying FLOWS floats
        // every step is costly — presumably deliberate; verify against
        // the kernel read/write sets.
        hipMemcpy(f[from],f[to],FLOWS*sizeof(float),hipMemcpyDefault);
        // This one has a 1D grid:
        hipLaunchKernelGGL(( bounce), dim3(pad),dim3(LWS), 0, 0, f[to],f[from],dev_bounce,dev_nclass,rvcount);
        hipLaunchKernelGGL(( stream), dim3(stream_ws),dim3(stream_lws), 0, 0, f[from],f[to],dev_nclass);
        if(t>=V_DUMP_START && (t%V_DUMP_INTERVAL)==0){
            save_velocity_field(t);
        }
    }
}
/* Precompute the per-direction equilibrium tables: eqvnode for the moving
   state (rho, u) and eqznode for the rest state (just the link weights). */
void set_eqvalues(double rho, struct rvector u)
{
    int dir;
    for(dir=0;dir<DIRECTIONS;dir++){
        eqvnode[dir] = get_equilibrium_f(rho,u,dir);
        eqznode[dir] = host_link_weight[dir];
    }
}
// Allocate the device buffers and upload the initial host state.
// Exits on any allocation failure: the original only printed a message
// and carried on, after which hipMemcpy and the kernels would operate on
// invalid pointers.
void buffers(int revcount)
{
    long long bytes = 0;
    hipError_t err;
    err = hipMalloc(&f[0],FLOWS*sizeof(float));
    if(!(err==hipSuccess)){ fprintf(stderr,"hipMalloc f[0] failed\n"); exit(EXIT_FAILURE); }
    hipMemcpy(f[0],&host_f[0],FLOWS*sizeof(float),hipMemcpyDefault);
    err = hipMalloc(&f[1],FLOWS*sizeof(float));
    if(!(err==hipSuccess)){ fprintf(stderr,"hipMalloc f[1] failed\n"); exit(EXIT_FAILURE); }
    hipMemcpy(f[1],&host_f[0],FLOWS*sizeof(float), hipMemcpyDefault);
    bytes += 2*((long long)(FLOWS*sizeof(float)));
    err = hipMalloc(&dev_bounce,revcount*sizeof(rrnode));
    if(!(err==hipSuccess)){ fprintf(stderr,"hipMalloc dev_bounce failed\n"); exit(EXIT_FAILURE); }
    hipMemcpy(dev_bounce,host_bounce,revcount*sizeof(rrnode),hipMemcpyDefault);
    bytes += (revcount)*sizeof(rrnode);
    err = hipMalloc(&dev_nclass,NODES*sizeof(unsigned char));
    if(!(err==hipSuccess)){ fprintf(stderr,"hipMalloc dev_nclass failed\n"); exit(EXIT_FAILURE); }
    hipMemcpy(dev_nclass,nclass,NODES*sizeof(unsigned char),hipMemcpyDefault);
    bytes += NODES*sizeof(unsigned char);
    fprintf(stderr,"total allocated card memory: %lld\n",bytes);
}
// Allocate the large host-side arrays and initialize the reverse index.
// Exits on allocation failure: the original printed "oops" and continued,
// guaranteeing a later null-pointer dereference.
void host_arrays()
{
    int i;
    host_f = (float *)calloc(FLOWS,sizeof(float));
    if(host_f == NULL){ fprintf(stderr,"oops\n"); exit(1); }
    host_rindex = (int *)calloc(NODES,sizeof(int));
    if(host_rindex == NULL){ fprintf(stderr,"oops\n"); exit(1); }
    // -1 marks "no bounce record yet" for every node.
    for(i=0;i<NODES;i++) host_rindex[i] = -1;
    // NOTE(review): vfield is sized for the full volume (NODES*3) but only
    // SLICENODES*3 entries are ever written — harmless overallocation.
    vfield = (float *)calloc(NODES*3,sizeof(float));
    if(vfield == NULL){ fprintf(stderr,"oops\n"); exit(1); }
}
// This loads an obj file, computes a bounding box, and places the bounding box
// within the grid using a scale and a translate. The .obj file is of the
// stripped-down variety, i.e., no material library,
// no normal indices, no texture indices, just vertices and faces.
// Load a stripped-down Wavefront .obj file (lines "v x y z" and triangular
// faces "f i j k" only — no materials, normals, or texture indices),
// compute its bounding box, and map the model into the grid using the
// global scale/translate parameters. Fills the global vlist/trilist and
// returns the triangle count.
// Fixes vs. original: fopen result is checked, and fgets is compared
// against NULL (the original used the invalid pointer comparison `> 0`).
int parse_geometry(char *filename)
{
    char buf[512];
    int vcount, fcount, iv0, iv1, iv2;
    double minx, maxx, miny, maxy, minz, maxz;
    double mdatadim, mtargetdim, x, y, z;
    double xscale, yscale, zscale;
    FILE *fptr;
    fptr = fopen(filename,"r");
    if(fptr == NULL){
        fprintf(stderr,"cannot open geometry file %s\n",filename);
        exit(1);
    }
    vcount = fcount = 0;
    minx = miny = minz = 1000000000.0;
    maxx = maxy = maxz = -1000000000.0;
    // Pass 1: count vertices/faces and accumulate the bounding box.
    while(fgets(buf,512,fptr) != NULL){
        if(buf[0]=='v'){
            vcount++;
            // Caution: engineers think z (last coordinate) is up.
            sscanf(buf,"v %lf %lf %lf",&x,&y,&z);
            if(x<minx) minx = x;
            if(y<miny) miny = y;
            if(z<minz) minz = z;
            if(x>maxx) maxx = x;
            if(y>maxy) maxy = y;
            if(z>maxz) maxz = z;
        }
        else {
            if(buf[0]=='f') fcount++;
        }
    }
    fprintf(stderr,"min %f %f %f\n",minx,miny,minz);
    fprintf(stderr,"max %f %f %f\n",maxx,maxy,maxz);
    // Enforce non-zero dimensions.
    if(minx>=maxx) maxx += SMALL;
    if(miny>=maxy) maxy += SMALL;
    if(minz>=maxz) maxz += SMALL;
    // Now scale and translate so that the data fits into the grid.
    // An axis with override_scale == 0 participates in the uniform
    // max_target_dim fit; a positive override replaces its scale outright.
    xscale = yscale = zscale = -1.0;
    if(override_scale.x==0.0) xscale = maxx-minx;
    if(override_scale.y==0.0) yscale = maxy-miny;
    if(override_scale.z==0.0) zscale = maxz-minz;
    mdatadim = xscale;
    if(yscale>mdatadim) mdatadim = yscale;
    if(zscale>mdatadim) mdatadim = zscale;
    mtargetdim = max_target_dim;
    xscale = yscale = zscale = mtargetdim/mdatadim;
    if(override_scale.x>0.0) xscale = override_scale.x;
    if(override_scale.y>0.0) yscale = override_scale.y;
    if(override_scale.z>0.0) zscale = override_scale.z;
    fprintf(stderr,"scales are %f %f %f\n",xscale,yscale,zscale);
    // .obj vertex indices are 1-based, hence vcount+1 slots.
    vlist = (struct rvector *)calloc(vcount+1,sizeof(struct rvector));
    trilist = (struct tri *)calloc(fcount,sizeof(struct tri));
    rewind(fptr);
    vcount = 0;
    fcount = 0;
    // Pass 2: read the geometry for real.
    // This assumes all vertices appear before all faces.
    while(fgets(buf,512,fptr) != NULL){
        if(buf[0]=='v'){
            vcount++;
            sscanf(buf,"v %lf %lf %lf",&x,&y,&z);
            vlist[vcount].x = xscale*(x - minx) + target_xlate.x;
            vlist[vcount].y = yscale*(y - miny) + target_xlate.y;
            vlist[vcount].z = zscale*(z - minz) + target_xlate.z;
        }
        else {
            if(buf[0]=='f'){
                sscanf(buf,"f %d %d %d",&iv0,&iv1,&iv2);
                trilist[fcount].v0 = vlist[iv0];
                trilist[fcount].v1 = vlist[iv1];
                trilist[fcount].v2 = vlist[iv2];
                fcount++;
            }
        }
    }
    fclose(fptr);
    return(fcount);
}
// Flood-fill work stack: qtop indexes the top element (-1 == empty).
int qtop;
struct ivector *ffq;
// Capacity of the flood-fill stack, in entries.
#define FFQSIZE 10000000
/* Push a grid coordinate onto the flood-fill stack; abort on overflow. */
void qpush(int i, int j, int k)
{
    if(qtop==FFQSIZE-1){
        fprintf(stderr,"queue overflow\n");
        exit(1);
    }
    qtop++;
    ffq[qtop].x = i;
    ffq[qtop].y = j;
    ffq[qtop].z = k;
}
struct ivector qpop()
{
return(ffq[qtop--]);
}
// Drain the flood-fill stack: every popped node that is still FREE is
// relabeled SOLID and its six face neighbors are pushed in turn.
// Fix: the original looped while(qtop>0), which left the element at stack
// index 0 unprocessed on every drain (qtop is -1 when empty, so index 0
// holds a real entry).
void qprocess()
{
    int nc,i,j,k;
    struct ivector val;
    while(qtop>=0){
        val = qpop();
        i = val.x;
        j = val.y;
        k = val.z;
        if(!LEGAL(i,j,k)) {
            fprintf(stderr,"eek %d %d %d\n",i,j,k);
            exit(1);
        }
        nc = nclass[cstore(i,j,k)];
        // Only FREE nodes are filled; FFLOW/BOUNDARY halt the fill.
        if(nc!=FREE) continue;
        nclass[cstore(i,j,k)] = SOLID;
        qpush(i+1,j,k);
        qpush(i,j+1,k);
        qpush(i,j,k+1);
        qpush(i-1,j,k);
        qpush(i,j-1,k);
        qpush(i,j,k-1);
    }
}
void flood_fill()
{
    // Mark all nodes inside BOUNDARY nodes as SOLID nodes.
    // We need to find one to start.
    // The fill is seeded from the 6-neighborhood of every BOUNDARY node;
    // the FFLOW/BOUNDARY shell built by find_boundary_nodes( ) is what
    // stops it from leaking outward.
    int i,j,k;
    qtop = -1;
    ffq = (struct ivector *)calloc(FFQSIZE,sizeof(struct ivector));
    for(i=0;i<WIDTH;i++){
        for(j=0;j<HEIGHT;j++){
            for(k=0;k<DEPTH;k++){
                if(nclass[cstore(i,j,k)]==BOUNDARY){
                    qpush(i+1,j,k);
                    qpush(i,j+1,k);
                    qpush(i,j,k+1);
                    qpush(i-1,j,k);
                    qpush(i,j-1,k);
                    qpush(i,j,k-1);
                    qprocess();
                }
            }
        }
    }
    free(ffq);
    return;
}
// Entry point: load the .obj geometry named on the command line, classify
// the lattice, build the bounce-back tables, and run the simulation.
// Fix: the original dereferenced argv[1] without checking argc.
int main(int argc, char **argv)
{
    struct rvector u = {0.1,0.0,0.0};   // initial/inflow velocity
    double rho = 1.0;                   // initial density
    int revcount, tricount;
    if(argc < 2){
        fprintf(stderr,"usage: %s <geometry.obj>\n",argv[0]);
        return(1);
    }
    srandom(123456789);                 // fixed seed for reproducible noise
    signal(SIGUSR1,cleanup);            // allow clean external shutdown
    host_arrays();
    set_eqvalues(rho,u);
    set_initial_nclass();
    tricount = parse_geometry(argv[1]);
    fprintf(stderr,"tricount is %d\n",tricount);
    revcount = find_boundary_nodes(tricount);
    flood_fill();
    init_lattice();
    fprintf(stderr,"allocating buffers\n");
    buffers(revcount);
    go(revcount);
    cleanup(SIGUSR1);
    return(0);
}
| 2991d964207449d45f69e581c7838628e51de023.cu | //
// srt.cu - a single relaxation time, LB solution to Navier-Stokes
//
//
#include <cuda_runtime_api.h>
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <values.h>
#include <sys/time.h>
#include <signal.h>
#include <stdint.h>
#include <unistd.h>
#include "srt.h"
#include "srt_kernels.cu"
// CUDA global memory pointers:
float *f[2];
rrnode *dev_bounce;
unsigned char *dev_nclass;
// Large host arrays:
float *host_f, *vfield;
int *host_rindex;
rrnode *host_bounce = NULL;
// Class array:
unsigned char nclass[NODES];
// This holds the Maxwell-Boltzmann equilibrium for rho=1, u=(0.1,0,0).
double eqvnode[DIRECTIONS];
// This holds the Maxwell-Boltzmann equilibrium for rho=1, u=(0,0,0), which
// is just the link weights.
double eqznode[DIRECTIONS];
// Parameters
double max_target_dim = 40.0;
struct rvector override_scale = {0.0, 0.0, 102.4};
struct rvector target_xlate = {200.5, 112.5, -128.5};
// Euclidean inner product of two 3-vectors.
double dot(struct rvector u, struct rvector v)
{
    return(u.x*v.x + u.y*v.y + u.z*v.z);
}
// Euclidean norm of a 3-vector.
double vlength(struct rvector u)
{
    return(sqrt(dot(u,u)));
}
// Right-handed cross product a x b.
struct rvector cross(struct rvector a, struct rvector b)
{
    struct rvector c;
    c.x = a.y*b.z-a.z*b.y;
    c.y = -a.x*b.z+a.z*b.x;
    c.z = a.x*b.y-a.y*b.x;
    return(c);
}
// Dot product of the i-th lattice direction vector host_ci[i] (integer
// components, promoted to double) with velocity u.
double host_dotci(int i,struct rvector u)
{
    struct rvector ci;
    ci.x = (double)(host_ci[i].x);
    ci.y = (double)(host_ci[i].y);
    ci.z = (double)(host_ci[i].z);
    return(dot(ci,u));
}
// Uniform deviate strictly inside (0,1): random() is in [0, 2^31-1], so
// (random()+1)/(2^31+1) avoids both endpoints.
double genrand()
{
    return(((double)(random()+1))/2147483649.);
}
// This is not called until node classes are set by set_initial_nclass( ) and
// find_boundary_nodes( ).
void init_lattice()
{
int i,j,k,m,nc;
for(i=0;i<=WIDTH;i++){
for(j=0;j<=HEIGHT;j++){
for(k=0;k<=DEPTH;k++){
nc = nclass[cstore(i,j,k)];
if(nc==SOLID || nc==BOUNDARY){
for(m=0;m<DIRECTIONS;m++){
host_f[store(i,j,k,m)] = eqznode[m];
}
}
else {
for(m=0;m<DIRECTIONS;m++){
host_f[store(i,j,k,m)] = eqvnode[m]
+ NOISE*(2.0*genrand()-1.0);
}
}
}
}
}
}
void set_initial_nclass()
{
int i,j,k;
for(i=0;i<=WIDTH;i++){
for(j=0;j<=HEIGHT;j++){
for(k=0;k<=DEPTH;k++){
nclass[cstore(i,j,k)] = FREE;
}
}
}
// Overwrite front and back.
for(i=0;i<=WIDTH;i++){
for(j=0;j<=HEIGHT;j++){
nclass[cstore(i,j,0)] = BACK;
nclass[cstore(i,j,DEPTH)] = FRONT;
}
}
// Overwrite floor and roof.
for(i=0;i<=WIDTH;i++){
for(k=0;k<=DEPTH;k++){
nclass[cstore(i,0,k)] = FLOOR;
nclass[cstore(i,HEIGHT,k)] = ROOF;
}
}
// Overwrite input and exit.
for(j=0;j<=HEIGHT;j++){
for(k=0;k<=DEPTH;k++){
nclass[cstore(0,j,k)] = INPUT;
nclass[cstore(WIDTH,j,k)] = EXIT;
}
}
}
// Maxwell-Boltzmann equilibrium distribution for direction m at density
// rho and velocity u (standard second-order LBGK expansion).
double get_equilibrium_f(double rho, struct rvector u, int m)
{
    double ddot;
    ddot = host_dotci(m,u);
    return(host_link_weight[m]*rho*(1.0+3.0*ddot+4.5*ddot*ddot-1.5*dot(u,u)));
}
// Note that the .obj file must specify triangles.
struct tri *trilist;
struct rvector *vlist;
int hits(struct rvector sp, struct rvector dp, int idx, double *tptr,int *insp,
struct rvector *pnrml)
{
struct rvector a,b,c,d,nrml;
double t, u, v, base, vsize;
a.x = dp.x - sp.x;
a.y = dp.y - sp.y;
a.z = dp.z - sp.z;
b.x = trilist[idx].v2.x - sp.x;
b.y = trilist[idx].v2.y - sp.y;
b.z = trilist[idx].v2.z - sp.z;
c.x = trilist[idx].v2.x - trilist[idx].v0.x;
c.y = trilist[idx].v2.y - trilist[idx].v0.y;
c.z = trilist[idx].v2.z - trilist[idx].v0.z;
d.x = trilist[idx].v2.x - trilist[idx].v1.x;
d.y = trilist[idx].v2.y - trilist[idx].v1.y;
d.z = trilist[idx].v2.z - trilist[idx].v1.z;
nrml = cross(c,d);
base = dot(a,nrml);
if(fabs(base)<SMALL) return(0);
if(base > 0.0) *insp=1;
else *insp=0;
t = dot(b,nrml)/base;
if(t<SMALL) return(0);
u = dot(a,cross(b,d))/base;
v = dot(a,cross(c,b))/base;
if((u<0.0) || (v<0.0) || ((u+v)>1.0)) return(0);
vsize = vlength(nrml);
nrml.x /= vsize;
nrml.y /= vsize;
nrml.z /= vsize;
*pnrml = nrml;
*tptr = t;
return(1);
}
int find_boundary_nodes(int fcount)
{
int i, j, k, m, itri, nc, slot, xslot, new_winner;
int tx, ty, tz;
struct rvector sp, dp, nrml;
double del;
double minx, maxx, miny, maxy, minz, maxz;
int imin, imax, jmin, jmax, kmin, kmax;
int inside, revcount, rindex;
// Now determine, for each triangle, which links hit it, and mark those
// links as reverse directions. Note that node classification can change
// because a given node may be included in the rasterization of more than
// one triangle. The closest hit determines it.
//
// Since objects can push through walls, we also need to prevent
// rewriting the labels on wall nodes. It hoses the flood fill.
//
// Since we can't afford the space to store normals for every grid node,
// we have to do the compression here. This means we have to do two passes,
// where the first counts how many slots we'll need.
fprintf(stderr,"finding object boundary\n");
fprintf(stderr,"PASS 1\n");
slot = 0;
for(itri=0;itri<fcount;itri++){
if(itri%1000==0) fprintf(stderr,"%d/%d done\n",itri,fcount);
maxx = minx = trilist[itri].v0.x;
maxy = miny = trilist[itri].v0.y;
maxz = minz = trilist[itri].v0.z;
if(trilist[itri].v1.x > maxx) maxx = trilist[itri].v1.x;
if(trilist[itri].v1.x < minx) minx = trilist[itri].v1.x;
if(trilist[itri].v1.y > maxy) maxy = trilist[itri].v1.y;
if(trilist[itri].v1.y < miny) miny = trilist[itri].v1.y;
if(trilist[itri].v1.z > maxz) maxz = trilist[itri].v1.z;
if(trilist[itri].v1.z < minz) minz = trilist[itri].v1.z;
if(trilist[itri].v2.x > maxx) maxx = trilist[itri].v2.x;
if(trilist[itri].v2.x < minx) minx = trilist[itri].v2.x;
if(trilist[itri].v2.y > maxy) maxy = trilist[itri].v2.y;
if(trilist[itri].v2.y < miny) miny = trilist[itri].v2.y;
if(trilist[itri].v2.z > maxz) maxz = trilist[itri].v2.z;
if(trilist[itri].v2.z < minz) minz = trilist[itri].v2.z;
imax = (int)(maxx+SR3+1);
jmax = (int)(maxy+SR3+1);
kmax = (int)(maxz+SR3+1);
imin = (int)(minx-SR3-1);
jmin = (int)(miny-SR3-1);
kmin = (int)(minz-SR3-1);
for(i=imin;i<=imax;i++){
for(j=jmin;j<=jmax;j++){
for(k=kmin;k<=kmax;k++){
if(!LEGAL(i,j,k)) continue;
// Most classes are not set yet.
// Here we want only class FREE.
nc = nclass[cstore(i,j,k)];
if(nc!=FREE) continue;
sp.x = (double)(i);
sp.y = (double)(j);
sp.z = (double)(k);
for(m=1;m<DIRECTIONS;m++){
tx = i+host_ci[m].x;
ty = j+host_ci[m].y;
tz = k+host_ci[m].z;
dp.x = (double)(tx);
dp.y = (double)(ty);
dp.z = (double)(tz);
if(hits(sp,dp,itri,&del,&inside,&nrml)){
if(del>1.0) continue; // Hit beyond link.
if((rindex=host_rindex[cstore(i,j,k)])==-1){
// New slot needed.
host_rindex[cstore(i,j,k)] = slot++;
break;
}
}
}
}
}
}
}
revcount = slot;
host_bounce = (rrnode *)calloc(revcount,sizeof(rrnode));
// Set all links to non-broken.
for(slot=0;slot<revcount;slot++) {
for(m=0;m<DIRECTIONS;m++){
host_bounce[slot].del[m] = -1.0;
}
}
// Reset reverse index.
for(i=0;i<NODES;i++) host_rindex[i] = -1;
fprintf(stderr,"PASS 2\n");
slot = 0;
for(itri=0;itri<fcount;itri++){
if(itri%1000==0) fprintf(stderr,"%d/%d done\n",itri,fcount);
maxx = minx = trilist[itri].v0.x;
maxy = miny = trilist[itri].v0.y;
maxz = minz = trilist[itri].v0.z;
if(trilist[itri].v1.x > maxx) maxx = trilist[itri].v1.x;
if(trilist[itri].v1.x < minx) minx = trilist[itri].v1.x;
if(trilist[itri].v1.y > maxy) maxy = trilist[itri].v1.y;
if(trilist[itri].v1.y < miny) miny = trilist[itri].v1.y;
if(trilist[itri].v1.z > maxz) maxz = trilist[itri].v1.z;
if(trilist[itri].v1.z < minz) minz = trilist[itri].v1.z;
if(trilist[itri].v2.x > maxx) maxx = trilist[itri].v2.x;
if(trilist[itri].v2.x < minx) minx = trilist[itri].v2.x;
if(trilist[itri].v2.y > maxy) maxy = trilist[itri].v2.y;
if(trilist[itri].v2.y < miny) miny = trilist[itri].v2.y;
if(trilist[itri].v2.z > maxz) maxz = trilist[itri].v2.z;
if(trilist[itri].v2.z < minz) minz = trilist[itri].v2.z;
imax = (int)(maxx+SR3+1);
jmax = (int)(maxy+SR3+1);
kmax = (int)(maxz+SR3+1);
imin = (int)(minx-SR3-1);
jmin = (int)(miny-SR3-1);
kmin = (int)(minz-SR3-1);
for(i=imin;i<=imax;i++){
for(j=jmin;j<=jmax;j++){
for(k=kmin;k<=kmax;k++){
if(!LEGAL(i,j,k)) continue;
nc = nclass[cstore(i,j,k)];
if((nc!=FREE)&&(nc!=BOUNDARY)&&(nc!=FFLOW)) continue;
sp.x = (double)(i);
sp.y = (double)(j);
sp.z = (double)(k);
for(m=1;m<DIRECTIONS;m++){
tx = i+host_ci[m].x;
ty = j+host_ci[m].y;
tz = k+host_ci[m].z;
dp.x = (double)(tx);
dp.y = (double)(ty);
dp.z = (double)(tz);
new_winner = 0;
if(hits(sp,dp,itri,&del,&inside,&nrml)){
if(del>1.0) continue;
// 1 ray unit != 1 grid unit. Convert to grid distance.
del *= host_link_length[m];
if((rindex=host_rindex[cstore(i,j,k)])==-1){
// We've not seen this node.
host_bounce[slot].i = i;
host_bounce[slot].j = j;
host_bounce[slot].k = k;
host_bounce[slot].del[m] = del;
host_bounce[slot].nrml[m] = nrml;
// Overall winner in direction slot 0.
host_bounce[slot].del[0] = del;
host_rindex[cstore(i,j,k)] = slot++;
new_winner = 1;
}
else {
// We have seen this node, but maybe not
// this direction.
if (host_bounce[rindex].del[m]<0.0 ||
del<host_bounce[rindex].del[m]){
host_bounce[rindex].del[m] = del;
host_bounce[rindex].nrml[m] = nrml;
if (del<host_bounce[rindex].del[0]){
host_bounce[rindex].del[0] = del;
new_winner = 1;
}
}
}
if(new_winner){
// We have to reclassify (perhaps) on
// each hit.
if(inside) nclass[cstore(i,j,k)] = BOUNDARY;
else nclass[cstore(i,j,k)] = FFLOW;
}
}
}
}
}
}
}
// Now go back and remove broken links from BOUNDARY or FFLOW
// nodes that don't reach FFLOW nodes, since such links are not used
// in the bounce( ) kernel.
for(slot=0;slot<revcount;slot++){
i = host_bounce[slot].i;
j = host_bounce[slot].j;
k = host_bounce[slot].k;
nc = nclass[cstore(i,j,k)];
for(m=1;m<DIRECTIONS;m++){
tx = i+host_ci[m].x;
ty = j+host_ci[m].y;
tz = k+host_ci[m].z;
if((nc==BOUNDARY || nc==FFLOW) &&
nclass[cstore(tx,ty,tz)]!=FFLOW){
host_bounce[slot].del[m] = -1.0;
}
}
}
// Do we have any broken links left?
for(slot=0;slot<revcount;slot++){
minx = LARGE;
for(m=1;m<DIRECTIONS;m++){
if((del=host_bounce[slot].del[m])>0.0 && del<minx) minx=del;
}
if(minx<LARGE) host_bounce[slot].del[0] = minx;
else host_bounce[slot].del[0] = -1.0;
}
// Finally, squeeze host_bounce[ ] list down.
slot=0; xslot=0;
while(xslot<revcount){
if(host_bounce[xslot].del[0]>0.0){
host_bounce[slot].i = host_bounce[xslot].i;
host_bounce[slot].j = host_bounce[xslot].j;
host_bounce[slot].k = host_bounce[xslot].k;
for(m=0;m<DIRECTIONS;m++){
host_bounce[slot].del[m] = host_bounce[xslot].del[m];
host_bounce[slot].nrml[m] = host_bounce[xslot].nrml[m];
}
slot++;
}
xslot++;
}
revcount = slot;
return(revcount);
}
void host_get_u(float *hf,int i,int j,int k,double *rptr, struct rvector *uptr)
{
int m;
double rho = 0.0;
struct rvector mo, u;
for(m=0;m<DIRECTIONS;m++) rho += (double)(hf[store(i,j,k,m)]);
mo.x = mo.y = mo.z = 0.0;
for(m=0;m<DIRECTIONS;m++){
mo.x += host_ci[m].x*(double)(hf[store(i,j,k,m)]);
mo.y += host_ci[m].y*(double)(hf[store(i,j,k,m)]);
mo.z += host_ci[m].z*(double)(hf[store(i,j,k,m)]);
}
u.x = mo.x/rho;
u.y = mo.y/rho;
u.z = mo.z/rho;
*rptr = rho;
*uptr = u;
}
float ReverseEndian(float val)
{
uint32_t ivalue = htobe32(*(uint32_t *)(&val));
return(*(float *)&ivalue);
}
void write_vtk_header(FILE* fptr, char* buf)
{
fprintf(fptr,"# vtk DataFile Version 2.0\n");
fprintf(fptr,"%s\n", buf);
fprintf(fptr,"BINARY\n");
fprintf(fptr,"DATASET STRUCTURED_POINTS\n");
// fprintf(fptr,"DIMENSIONS %d %d %d\n", WIDTH+1, HEIGHT+1, DEPTH+1);
fprintf(fptr,"DIMENSIONS %d %d %d\n", WIDTH+1, HEIGHT+1, 1);
fprintf(fptr,"SPACING 1 1 1\n");
fprintf(fptr,"ORIGIN 0 0 0\n");
fprintf(fptr,"POINT_DATA %d\n", SLICENODES);
fprintf(fptr,"VECTORS vfield float\n");
}
void save_velocity_field(int iteration)
{
int i,j;
FILE *fptr;
char buf[256];
double rho;
struct rvector u;
float *vptr;
cudaMemcpy(&host_f[0],&f[iteration%2][0],FLOWS*sizeof(float),cudaMemcpyDefault);
cudaDeviceSynchronize();
sprintf(buf,"%s/vfield.%d.vtk","vdir",iteration);
fptr=fopen(buf,"w");
write_vtk_header(fptr,buf);
vptr = vfield;
for(j=0;j<=HEIGHT;j++){
for(i=0;i<=WIDTH;i++){
host_get_u(host_f,i,j,SLICE,&rho,&u);
*vptr++ = ReverseEndian((float)(u.x));
*vptr++ = ReverseEndian((float)(u.y));
*vptr++ = ReverseEndian((float)(u.z));
}
}
fwrite(vfield,SLICENODES,3*sizeof(float),fptr);
fclose(fptr);
}
void cleanup(int signum)
{
cudaFree(f[0]);
cudaFree(f[1]);
cudaFree(dev_bounce);
cudaFree(dev_nclass);
cudaDeviceReset();
exit(0);
}
void go(int rvcount)
{
int t, from, to, pad;
dim3 stream_lws(1,4,32);
dim3 stream_ws(WIDTH,HEIGHT/4,DEPTH/32);
dim3 cascade_lws(1,4,32);
dim3 cascade_ws(WIDTH,HEIGHT/4,DEPTH/32);
pad = (rvcount/LWS+1);
to = 0;
from = 1;
for(t=1;t<=FINAL_TIME;t++){
cudaDeviceSynchronize();
cascade<<<cascade_ws,cascade_lws>>>(f[to],f[from],dev_nclass);
cudaMemcpy(f[from],f[to],FLOWS*sizeof(float),cudaMemcpyDefault);
// This one has a 1D grid:
bounce<<<pad,LWS>>>(f[to],f[from],dev_bounce,dev_nclass,rvcount);
stream<<<stream_ws,stream_lws>>>(f[from],f[to],dev_nclass);
if(t>=V_DUMP_START && (t%V_DUMP_INTERVAL)==0){
save_velocity_field(t);
}
}
}
void set_eqvalues(double rho, struct rvector u)
{
int m;
for(m=0;m<DIRECTIONS;m++) {
eqvnode[m] = get_equilibrium_f(rho,u,m);
eqznode[m] = host_link_weight[m];
}
}
void buffers(int revcount)
{
long long bytes = 0;
cudaError_t err;
err = cudaMalloc(&f[0],FLOWS*sizeof(float));
if(!(err==cudaSuccess)) fprintf(stderr,"cudaMalloc f[0] failed\n");
cudaMemcpy(f[0],&host_f[0],FLOWS*sizeof(float),cudaMemcpyDefault);
err = cudaMalloc(&f[1],FLOWS*sizeof(float));
if(!(err==cudaSuccess)) fprintf(stderr,"cudaMalloc f[1] failed\n");
cudaMemcpy(f[1],&host_f[0],FLOWS*sizeof(float), cudaMemcpyDefault);
bytes += 2*((long long)(FLOWS*sizeof(float)));
err = cudaMalloc(&dev_bounce,revcount*sizeof(rrnode));
if(!(err==cudaSuccess)) fprintf(stderr,"cudaMalloc dev_bounce failed\n");
cudaMemcpy(dev_bounce,host_bounce,revcount*sizeof(rrnode),cudaMemcpyDefault);
bytes += (revcount)*sizeof(rrnode);
err = cudaMalloc(&dev_nclass,NODES*sizeof(unsigned char));
if(!(err==cudaSuccess)) fprintf(stderr,"cudaMalloc dev_nclass failed\n");
cudaMemcpy(dev_nclass,nclass,NODES*sizeof(unsigned char),cudaMemcpyDefault);
bytes += NODES*sizeof(unsigned char);
fprintf(stderr,"total allocated card memory: %lld\n",bytes);
}
void host_arrays()
{
int i;
host_f = (float *)calloc(FLOWS,sizeof(float));
if(host_f == NULL) fprintf(stderr,"oops\n");
host_rindex = (int *)calloc(NODES,sizeof(int));
if(host_rindex == NULL) fprintf(stderr,"oops\n");
for(i=0;i<NODES;i++) host_rindex[i] = -1;
vfield = (float *)calloc(NODES*3,sizeof(float));
if(vfield == NULL) fprintf(stderr,"oops\n");
}
// This loads an obj file, computes a bounding box, and places the bounding box
// within the grid using a scale and a translate. The .obj file is of the
// stripped-down variety, i.e., no material library,
// no normal indices, no texture indices, just vertices and faces.
int parse_geometry(char *filename)
{
char buf[512];
int vcount, fcount, iv0, iv1, iv2;
double minx, maxx, miny, maxy, minz, maxz;
double mdatadim, mtargetdim, x, y, z;
double xscale, yscale, zscale;
FILE *fptr;
fptr = fopen(filename,"r");
vcount = fcount = 0;
minx = miny = minz = 1000000000.0;
maxx = maxy = maxz = -1000000000.0;
while(fgets(buf,512,fptr)>0){
if(buf[0]=='v'){
vcount++;
// Caution: engineers think z (last coordinate) is up.
sscanf(buf,"v %lf %lf %lf",&x,&y,&z);
if(x<minx) minx = x;
if(y<miny) miny = y;
if(z<minz) minz = z;
if(x>maxx) maxx = x;
if(y>maxy) maxy = y;
if(z>maxz) maxz = z;
}
else {
if(buf[0]=='f') fcount++;
}
}
fprintf(stderr,"min %f %f %f\n",minx,miny,minz);
fprintf(stderr,"max %f %f %f\n",maxx,maxy,maxz);
// Enforce non-zero dimensions.
if(minx>=maxx) maxx += SMALL;
if(miny>=maxy) maxy += SMALL;
if(minz>=maxz) maxz += SMALL;
// Now scale and translate so that the data fits into the grid.
xscale = yscale = zscale = -1.0;
if(override_scale.x==0.0) xscale = maxx-minx;
if(override_scale.y==0.0) yscale = maxy-miny;
if(override_scale.z==0.0) zscale = maxz-minz;
mdatadim = xscale;
if(yscale>mdatadim) mdatadim = yscale;
if(zscale>mdatadim) mdatadim = zscale;
mtargetdim = max_target_dim;
xscale = yscale = zscale = mtargetdim/mdatadim;
if(override_scale.x>0.0) xscale = override_scale.x;
if(override_scale.y>0.0) yscale = override_scale.y;
if(override_scale.z>0.0) zscale = override_scale.z;
fprintf(stderr,"scales are %f %f %f\n",xscale,yscale,zscale);
vlist = (struct rvector *)calloc(vcount+1,sizeof(struct rvector));
trilist = (struct tri *)calloc(fcount,sizeof(struct tri));
rewind(fptr);
vcount = 0;
fcount = 0;
// This assumes all vertices appear before all faces.
while(fgets(buf,512,fptr)>0){
if(buf[0]=='v'){
vcount++;
sscanf(buf,"v %lf %lf %lf",&x,&y,&z);
vlist[vcount].x = xscale*(x - minx) + target_xlate.x;
vlist[vcount].y = yscale*(y - miny) + target_xlate.y;
vlist[vcount].z = zscale*(z - minz) + target_xlate.z;
}
else {
if(buf[0]=='f'){
sscanf(buf,"f %d %d %d",&iv0,&iv1,&iv2);
trilist[fcount].v0 = vlist[iv0];
trilist[fcount].v1 = vlist[iv1];
trilist[fcount].v2 = vlist[iv2];
fcount++;
}
}
}
fclose(fptr);
return(fcount);
}
int qtop;
struct ivector *ffq;
#define FFQSIZE 10000000
void qpush(int i, int j, int k)
{
struct ivector val;
val.x = i;
val.y = j;
val.z = k;
if(qtop==FFQSIZE-1){
fprintf(stderr,"queue overflow\n");
exit(1);
}
ffq[++qtop] = val;
}
struct ivector qpop()
{
return(ffq[qtop--]);
}
void qprocess()
{
int nc,i,j,k;
struct ivector val;
while(qtop>0){
val = qpop();
i = val.x;
j = val.y;
k = val.z;
if(!LEGAL(i,j,k)) {
fprintf(stderr,"eek %d %d %d\n",i,j,k);
exit(1);
}
nc = nclass[cstore(i,j,k)];
if(nc!=FREE) continue;
nclass[cstore(i,j,k)] = SOLID;
qpush(i+1,j,k);
qpush(i,j+1,k);
qpush(i,j,k+1);
qpush(i-1,j,k);
qpush(i,j-1,k);
qpush(i,j,k-1);
}
}
void flood_fill()
{
// Mark all nodes inside BOUNDARY nodes as SOLID nodes.
// We need to find one to start.
int i,j,k;
qtop = -1;
ffq = (struct ivector *)calloc(FFQSIZE,sizeof(struct ivector));
for(i=0;i<WIDTH;i++){
for(j=0;j<HEIGHT;j++){
for(k=0;k<DEPTH;k++){
if(nclass[cstore(i,j,k)]==BOUNDARY){
qpush(i+1,j,k);
qpush(i,j+1,k);
qpush(i,j,k+1);
qpush(i-1,j,k);
qpush(i,j-1,k);
qpush(i,j,k-1);
qprocess();
}
}
}
}
free(ffq);
return;
}
int main(int argc, char **argv)
{
struct rvector u = {0.1,0.0,0.0};
double rho = 1.0;
int revcount, tricount;
srandom(123456789);
signal(SIGUSR1,cleanup);
host_arrays();
set_eqvalues(rho,u);
set_initial_nclass();
tricount = parse_geometry(argv[1]);
fprintf(stderr,"tricount is %d\n",tricount);
revcount = find_boundary_nodes(tricount);
flood_fill();
init_lattice();
fprintf(stderr,"allocating buffers\n");
buffers(revcount);
go(revcount);
cleanup(SIGUSR1);
return(0);
}
|
e36a74dc65d9d980fa0ee0f5c50c73934db9047e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include "hip/hip_runtime.h"
//pass
//--blockDim=10 --gridDim=64 --no-inline
__global__ void foo() {
__shared__ int A[10][10];
A[threadIdx.y][threadIdx.x] = 2;
assert(A[threadIdx.y][threadIdx.x]==2);
}
int main(){
dim3 dimBlock(2,2);
//foo<<<1, dimBlock>>>();
ESBMC_verify_kernel(foo, 1, dimBlock);
hipDeviceSynchronize();
}
| e36a74dc65d9d980fa0ee0f5c50c73934db9047e.cu | #include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include "cuda.h"
//pass
//--blockDim=10 --gridDim=64 --no-inline
__global__ void foo() {
__shared__ int A[10][10];
A[threadIdx.y][threadIdx.x] = 2;
assert(A[threadIdx.y][threadIdx.x]==2);
}
int main(){
dim3 dimBlock(2,2);
//foo<<<1, dimBlock>>>();
ESBMC_verify_kernel(foo, 1, dimBlock);
cudaThreadSynchronize();
}
|
14ea6f16735b7b2de2d8a8740392a8f0682a197b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Grean and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "reference_calc.cpp"
#include "utils.h"
#include <stdio.h>
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
int xpos = threadIdx.x + blockIdx.x * blockDim.x;
int ypos = threadIdx.y + blockIdx.y * blockDim.y;
uchar4 rgba = rgbaImage[xpos * numCols + ypos];
float channelSum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
greyImage[xpos * numCols + ypos] = channelSum;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
printf("numrows %d, numcols %d\n", numRows, numCols);
int blkX = numRows/100;
int blkY = numCols/100;
int gridX = 100;
int gridY = 100;
printf("blkx %d, blky %d, gridx %d, gridy %d\n", blkX, blkY, gridX, gridY);
const dim3 blockSize(1, 1, 1); //TODO
const dim3 gridSize(313, 557, 1); //TODO
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
} | 14ea6f16735b7b2de2d8a8740392a8f0682a197b.cu | // Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Grean and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "reference_calc.cpp"
#include "utils.h"
#include <stdio.h>
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
int xpos = threadIdx.x + blockIdx.x * blockDim.x;
int ypos = threadIdx.y + blockIdx.y * blockDim.y;
uchar4 rgba = rgbaImage[xpos * numCols + ypos];
float channelSum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
greyImage[xpos * numCols + ypos] = channelSum;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
printf("numrows %d, numcols %d\n", numRows, numCols);
int blkX = numRows/100;
int blkY = numCols/100;
int gridX = 100;
int gridY = 100;
printf("blkx %d, blky %d, gridx %d, gridy %d\n", blkX, blkY, gridX, gridY);
const dim3 blockSize(1, 1, 1); //TODO
const dim3 gridSize(313, 557, 1); //TODO
rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
} |
88e3fc80a9de57078ff872cd7e0997c3c57833d5.hip | // !!! This is a file automatically generated by hipify!!!
#include "CUAPI.h"
void Aux_GetCPUInfo( const char *FileName );
#ifdef GPU
//-------------------------------------------------------------------------------------------------------
// Function : CUAPI_DiagnoseDevice
// Description : Take a diagnosis of each GPU
//-------------------------------------------------------------------------------------------------------
void CUAPI_DiagnoseDevice()
{
if ( MPI_Rank == 0 ) Aux_Message( stdout, "%s ...\n", __FUNCTION__ );
// get the hostname and PID of each process
const int PID = getpid();
char Host[1024];
gethostname( Host, 1024 );
// get the number of devices
int DeviceCount;
CUDA_CHECK_ERROR( hipGetDeviceCount( &DeviceCount ) );
if ( DeviceCount == 0 )
Aux_Error( ERROR_INFO, "no devices supporting CUDA at MPI_Rank %2d (host = %8s) !!\n", MPI_Rank, Host );
// get the device ID
int GetDeviceID = 999;
CUDA_CHECK_ERROR( hipGetDevice( &GetDeviceID ) );
// load the device properties
hipDeviceProp_t DeviceProp;
CUDA_CHECK_ERROR( hipGetDeviceProperties( &DeviceProp, GetDeviceID ) );
// get the number of cores per multiprocessor
int NCorePerMP;
if ( DeviceProp.major == 2 && DeviceProp.minor == 0 ) NCorePerMP = 32;
else if ( DeviceProp.major == 2 && DeviceProp.minor == 1 ) NCorePerMP = 48;
else if ( DeviceProp.major == 3 ) NCorePerMP = 192;
else if ( DeviceProp.major == 5 ) NCorePerMP = 128;
else if ( DeviceProp.major == 6 ) NCorePerMP = 64;
else if ( DeviceProp.major == 7 ) NCorePerMP = 64;
else
fprintf( stderr, "WARNING : unable to determine the number of cores per multiprocessor for version %d.%d ...\n",
DeviceProp.major, DeviceProp.minor );
// record the device properties
const char FileName[] = "Record__Note";
if ( MPI_Rank == 0 )
{
FILE *Note = fopen( FileName, "a" );
fprintf( Note, "Device Diagnosis\n" );
fprintf( Note, "***********************************************************************************\n" );
fclose( Note );
}
for (int YourTurn=0; YourTurn<MPI_NRank; YourTurn++)
{
if ( MPI_Rank == YourTurn )
{
int DriverVersion = 0, RuntimeVersion = 0;
CUDA_CHECK_ERROR( hipDriverGetVersion( &DriverVersion ) );
CUDA_CHECK_ERROR( hipRuntimeGetVersion( &RuntimeVersion ) );
FILE *Note = fopen( FileName, "a" );
if ( MPI_Rank != 0 ) fprintf( Note, "\n\n" );
fprintf( Note, "MPI_Rank = %3d, hostname = %10s, PID = %5d\n\n", MPI_Rank, Host, PID );
fprintf( Note, "CPU Info :\n" );
fflush( Note );
Aux_GetCPUInfo( FileName );
fprintf( Note, "\n" );
fprintf( Note, "GPU Info :\n" );
fprintf( Note, "Number of GPUs : %d\n" , DeviceCount );
fprintf( Note, "GPU ID : %d\n" , GetDeviceID );
fprintf( Note, "GPU Name : %s\n" , DeviceProp.name );
fprintf( Note, "CUDA Driver Version : %d.%d\n" , DriverVersion/1000, DriverVersion%100 );
fprintf( Note, "CUDA Runtime Version : %d.%d\n" , RuntimeVersion/1000, RuntimeVersion%100 );
fprintf( Note, "CUDA Major Revision Number : %d\n" , DeviceProp.major );
fprintf( Note, "CUDA Minor Revision Number : %d\n" , DeviceProp.minor );
fprintf( Note, "Clock Rate : %f GHz\n" , DeviceProp.clockRate/1.0e6 );
fprintf( Note, "Global Memory Size : %ld MB\n" , (long)DeviceProp.totalGlobalMem/1024/1024 );
fprintf( Note, "Constant Memory Size : %ld KB\n" , (long)DeviceProp.totalConstMem/1024 );
fprintf( Note, "Shared Memory Size per Block : %ld KB\n" , (long)DeviceProp.sharedMemPerBlock/1024 );
fprintf( Note, "Number of Registers per Block : %d\n" , DeviceProp.regsPerBlock );
fprintf( Note, "Warp Size : %d\n" , DeviceProp.warpSize );
fprintf( Note, "Number of Multiprocessors: : %d\n" , DeviceProp.multiProcessorCount );
fprintf( Note, "Number of Cores per Multiprocessor: %d\n" , NCorePerMP );
fprintf( Note, "Total Number of Cores: : %d\n" , DeviceProp.multiProcessorCount * NCorePerMP );
fprintf( Note, "Max Number of Threads per Block : %d\n" , DeviceProp.maxThreadsPerBlock );
fprintf( Note, "Max Size of the Block X-Dimension : %d\n" , DeviceProp.maxThreadsDim[0] );
fprintf( Note, "Max Size of the Grid X-Dimension : %d\n" , DeviceProp.maxGridSize[0] );
fprintf( Note, "Concurrent Copy and Execution : %s\n" , DeviceProp.asyncEngineCount>0 ? "Yes" : "No" );
fprintf( Note, "Concurrent Up/Downstream Copies : %s\n" , DeviceProp.asyncEngineCount==2 ? "Yes" : "No" );
# if ( CUDART_VERSION >= 3000 )
fprintf( Note, "Concurrent Kernel Execution : %s\n" , DeviceProp.concurrentKernels ? "Yes" : "No" );
# endif
# if ( CUDART_VERSION >= 3010 )
fprintf( Note, "GPU has ECC Support Enabled : %s\n" , DeviceProp.ECCEnabled ? "Yes" : "No" );
# endif
fclose( Note );
}
MPI_Barrier( MPI_COMM_WORLD );
} // for (int YourTurn=0; YourTurn<NGPU; YourTurn++)
if ( MPI_Rank == 0 )
{
FILE *Note = fopen( FileName, "a" );
fprintf( Note, "***********************************************************************************\n" );
fclose( Note );
Aux_Message( stdout, "%s ... done\n", __FUNCTION__ );
}
} // FUNCTION : CUAPI_DiagnoseDevice
#endif // #ifdef GPU
| 88e3fc80a9de57078ff872cd7e0997c3c57833d5.cu | #include "CUAPI.h"
void Aux_GetCPUInfo( const char *FileName );
#ifdef GPU
//-------------------------------------------------------------------------------------------------------
// Function : CUAPI_DiagnoseDevice
// Description : Take a diagnosis of each GPU
//-------------------------------------------------------------------------------------------------------
void CUAPI_DiagnoseDevice()
{
if ( MPI_Rank == 0 ) Aux_Message( stdout, "%s ...\n", __FUNCTION__ );
// get the hostname and PID of each process
const int PID = getpid();
char Host[1024];
gethostname( Host, 1024 );
// get the number of devices
int DeviceCount;
CUDA_CHECK_ERROR( cudaGetDeviceCount( &DeviceCount ) );
if ( DeviceCount == 0 )
Aux_Error( ERROR_INFO, "no devices supporting CUDA at MPI_Rank %2d (host = %8s) !!\n", MPI_Rank, Host );
// get the device ID
int GetDeviceID = 999;
CUDA_CHECK_ERROR( cudaGetDevice( &GetDeviceID ) );
// load the device properties
cudaDeviceProp DeviceProp;
CUDA_CHECK_ERROR( cudaGetDeviceProperties( &DeviceProp, GetDeviceID ) );
// get the number of cores per multiprocessor
int NCorePerMP;
if ( DeviceProp.major == 2 && DeviceProp.minor == 0 ) NCorePerMP = 32;
else if ( DeviceProp.major == 2 && DeviceProp.minor == 1 ) NCorePerMP = 48;
else if ( DeviceProp.major == 3 ) NCorePerMP = 192;
else if ( DeviceProp.major == 5 ) NCorePerMP = 128;
else if ( DeviceProp.major == 6 ) NCorePerMP = 64;
else if ( DeviceProp.major == 7 ) NCorePerMP = 64;
else
fprintf( stderr, "WARNING : unable to determine the number of cores per multiprocessor for version %d.%d ...\n",
DeviceProp.major, DeviceProp.minor );
// record the device properties
const char FileName[] = "Record__Note";
if ( MPI_Rank == 0 )
{
FILE *Note = fopen( FileName, "a" );
fprintf( Note, "Device Diagnosis\n" );
fprintf( Note, "***********************************************************************************\n" );
fclose( Note );
}
for (int YourTurn=0; YourTurn<MPI_NRank; YourTurn++)
{
if ( MPI_Rank == YourTurn )
{
int DriverVersion = 0, RuntimeVersion = 0;
CUDA_CHECK_ERROR( cudaDriverGetVersion( &DriverVersion ) );
CUDA_CHECK_ERROR( cudaRuntimeGetVersion( &RuntimeVersion ) );
FILE *Note = fopen( FileName, "a" );
if ( MPI_Rank != 0 ) fprintf( Note, "\n\n" );
fprintf( Note, "MPI_Rank = %3d, hostname = %10s, PID = %5d\n\n", MPI_Rank, Host, PID );
fprintf( Note, "CPU Info :\n" );
fflush( Note );
Aux_GetCPUInfo( FileName );
fprintf( Note, "\n" );
fprintf( Note, "GPU Info :\n" );
fprintf( Note, "Number of GPUs : %d\n" , DeviceCount );
fprintf( Note, "GPU ID : %d\n" , GetDeviceID );
fprintf( Note, "GPU Name : %s\n" , DeviceProp.name );
fprintf( Note, "CUDA Driver Version : %d.%d\n" , DriverVersion/1000, DriverVersion%100 );
fprintf( Note, "CUDA Runtime Version : %d.%d\n" , RuntimeVersion/1000, RuntimeVersion%100 );
fprintf( Note, "CUDA Major Revision Number : %d\n" , DeviceProp.major );
fprintf( Note, "CUDA Minor Revision Number : %d\n" , DeviceProp.minor );
fprintf( Note, "Clock Rate : %f GHz\n" , DeviceProp.clockRate/1.0e6 );
fprintf( Note, "Global Memory Size : %ld MB\n" , (long)DeviceProp.totalGlobalMem/1024/1024 );
fprintf( Note, "Constant Memory Size : %ld KB\n" , (long)DeviceProp.totalConstMem/1024 );
fprintf( Note, "Shared Memory Size per Block : %ld KB\n" , (long)DeviceProp.sharedMemPerBlock/1024 );
fprintf( Note, "Number of Registers per Block : %d\n" , DeviceProp.regsPerBlock );
fprintf( Note, "Warp Size : %d\n" , DeviceProp.warpSize );
fprintf( Note, "Number of Multiprocessors: : %d\n" , DeviceProp.multiProcessorCount );
fprintf( Note, "Number of Cores per Multiprocessor: %d\n" , NCorePerMP );
fprintf( Note, "Total Number of Cores: : %d\n" , DeviceProp.multiProcessorCount * NCorePerMP );
fprintf( Note, "Max Number of Threads per Block : %d\n" , DeviceProp.maxThreadsPerBlock );
fprintf( Note, "Max Size of the Block X-Dimension : %d\n" , DeviceProp.maxThreadsDim[0] );
fprintf( Note, "Max Size of the Grid X-Dimension : %d\n" , DeviceProp.maxGridSize[0] );
fprintf( Note, "Concurrent Copy and Execution : %s\n" , DeviceProp.asyncEngineCount>0 ? "Yes" : "No" );
fprintf( Note, "Concurrent Up/Downstream Copies : %s\n" , DeviceProp.asyncEngineCount==2 ? "Yes" : "No" );
# if ( CUDART_VERSION >= 3000 )
fprintf( Note, "Concurrent Kernel Execution : %s\n" , DeviceProp.concurrentKernels ? "Yes" : "No" );
# endif
# if ( CUDART_VERSION >= 3010 )
fprintf( Note, "GPU has ECC Support Enabled : %s\n" , DeviceProp.ECCEnabled ? "Yes" : "No" );
# endif
fclose( Note );
}
MPI_Barrier( MPI_COMM_WORLD );
} // for (int YourTurn=0; YourTurn<NGPU; YourTurn++)
if ( MPI_Rank == 0 )
{
FILE *Note = fopen( FileName, "a" );
fprintf( Note, "***********************************************************************************\n" );
fclose( Note );
Aux_Message( stdout, "%s ... done\n", __FUNCTION__ );
}
} // FUNCTION : CUAPI_DiagnoseDevice
#endif // #ifdef GPU
|
ab915a2355221c7b91a6ffab204b7a4c7104e7bd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void sd_t_s1_8_kernel(int h1d,int h2d,int h3d,int p4d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int p4, int total_x) {
int h1,h2,h3,p6;
__shared__ double t2_shm[T1*2*Tcomm];
for(int i=threadIdx.x;i<h1d*p4d;i+=blockDim.x)
if(i<h1d*p4d)
t2_shm[i] = t2_d[i];
int rest_x=blockIdx.x;
int thread_x = T2*T1 * rest_x + threadIdx.x;
rest_x = thread_x;
__syncthreads();
/* the following computation may need to happen inside the loop */
for(int i=0;i<total_x;i+=gridDim.x*blockDim.x)
{
rest_x += i;
h3=rest_x%h3d;
rest_x=rest_x/h3d;
h2=rest_x%h2d;
rest_x=rest_x/h2d;
p6=rest_x%p6d;
if((thread_x+i)<total_x)
for(h1=0;h1<h1d;h1++)
for(p4=0;p4<p4d;p4++)
{
t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p4*p4ld_t3]-=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2];
}
}
__syncthreads();
} | ab915a2355221c7b91a6ffab204b7a4c7104e7bd.cu | #include "includes.h"
__global__ void sd_t_s1_8_kernel(int h1d,int h2d,int h3d,int p4d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int p4, int total_x) {
int h1,h2,h3,p6;
__shared__ double t2_shm[T1*2*Tcomm];
for(int i=threadIdx.x;i<h1d*p4d;i+=blockDim.x)
if(i<h1d*p4d)
t2_shm[i] = t2_d[i];
int rest_x=blockIdx.x;
int thread_x = T2*T1 * rest_x + threadIdx.x;
rest_x = thread_x;
__syncthreads();
/* the following computation may need to happen inside the loop */
for(int i=0;i<total_x;i+=gridDim.x*blockDim.x)
{
rest_x += i;
h3=rest_x%h3d;
rest_x=rest_x/h3d;
h2=rest_x%h2d;
rest_x=rest_x/h2d;
p6=rest_x%p6d;
if((thread_x+i)<total_x)
for(h1=0;h1<h1d;h1++)
for(p4=0;p4<p4d;p4++)
{
t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p4*p4ld_t3]-=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2];
}
}
__syncthreads();
} |
827506ae44ff43ffe22231b89806011ced072d6c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
PROGRAMMING ASSIGNMENT 1
JESSICA SMITH
CS791V
*/
#include <iostream>
#include "stdio.h"
#include "kernel.h"
#include <fstream>
// Interactive driver for a parallel sum reduction over n ones.
// Version a: host loops over kernel launches; b: kernel relaunches itself;
// c: one kernel pass, CPU adds the per-block partial sums.
int main() {
    int n, T, B;                    // problem size, threads per block, blocks
    int check = 0;                  // CPU reference sum
    int result = 0;                 // GPU-computed sum
    float calcTime, memTransTime;   // kernel-only / end-to-end times (ms)
    char version;
    // Choose the reduction strategy.
    printf("Input program version (a, b, c):\na: Multiple cpu kernel calls\nb: Kernel calls kernel\nc: CPU finishes the sum\n");
    scanf(" %c", &version);
    if(version != 'a' && version != 'b' && version != 'c'){
        printf("Error: Invalid input.\n");
        return 0;
    }
    // Read and clamp N, T and B.  NOTE(review): a non-positive entry only
    // prints a warning and keeps the bad value, as in the original.
    printf("Input Size of N (0 < N < 100,000,000): ");
    scanf(" %d", &n);
    if(n <= 0)
        printf("Error: Wrong input value for N \n");
    else if( n > 100000000){
        printf("Error: Size too large, setting N = 100,000,000 \n");
        n = 100000000;
    }
    printf("Input Size of T (0 < T < 1024): ");
    scanf(" %d", &T);
    if(T <= 0)
        printf("Error: Wrong input value for T \n");
    else if( T > 1024){
        printf("Error: Size too large, setting T = 1024 \n");
        T = 1024;
    }
    // Fix: the prompt claimed an upper bound of 1024 while the code actually
    // caps B at 56,000.
    printf("Input Size of B (0 < B <= 56,000): ");
    scanf(" %d", &B);
    if(B <= 0){
        printf("Error: Wrong input value for B \n");
    }
    else if( B > 56000){
        printf("Error: Size too large");
        B = 56000;
    }
    // Shared-memory element count per block, at least one full warp pair.
    int memorySize = B*2;
    if(B <= 32)
        memorySize = 64;
    int *input, *output;
    input = (int*) malloc(n*sizeof(int));
    output = (int*) malloc(B*sizeof(int));
    int *g_in, *g_out;
    // Fix: check each allocation individually — the original overwrote the
    // first hipMalloc status before ever inspecting it.
    hipError_t err = hipMalloc( (void**) &g_in, n * sizeof(int));
    if (err != hipSuccess) {
        std::cerr << "Error: " << hipGetErrorString(err) << std::endl;
        exit(1);
    }
    err = hipMalloc( (void**) &g_out, B * sizeof(int));
    if (err != hipSuccess) {
        std::cerr << "Error: " << hipGetErrorString(err) << std::endl;
        exit(1);
    }
    // All-ones input: the expected sum equals n.
    for (int i = 0; i < n; ++i) {
        input[i] = 1;
    }
    // Zero the output so no garbage survives a partial reduction.
    for(int i = 0; i < B; i++){
        output[i] = 0;
    }
    // start/end time the kernels only; m_start/m_end include the transfers.
    hipEvent_t start, end, m_start, m_end;
    hipEventCreate(&start);
    hipEventCreate(&end);
    hipEventCreate(&m_start);
    hipEventCreate(&m_end);
    hipEventRecord( m_start, 0 );
    // Fix: check both copies — the original only tested the second status.
    err = hipMemcpy(g_out, output, B * sizeof(int), hipMemcpyHostToDevice);
    if (err != hipSuccess) {
        std::cerr << "Error: " << hipGetErrorString(err) << std::endl;
        exit(1);
    }
    err = hipMemcpy(g_in, input, n * sizeof(int), hipMemcpyHostToDevice);
    if (err != hipSuccess) {
        std::cerr << "Error: " << hipGetErrorString(err) << std::endl;
        exit(1);
    }
    if(version == 'a'){
        // Host-driven multi-pass reduction, ping-ponging g_in/g_out.
        int s = n;   // elements still to be reduced
        hipEventRecord(start,0);
        while( s > 1){
            std::cout << "S IS: " << s << std::endl;
            // NOTE(review): this pass requests B*T*sizeof(int) bytes of
            // shared memory, unlike memorySize used by versions b/c —
            // confirm against reduce()'s expectations in kernel.h.
            hipLaunchKernelGGL(( reduce), dim3(B),dim3(T),B*T*sizeof(int), 0, g_in, g_out, s);
            // Each block folds T*2 elements, so the next pass must process
            // ceil(s / (T*2)) partial sums.  Fix: the original wrote
            // ceil(s / (T*2)) on an *integer* quotient, which truncates
            // before ceil() runs and could drop the last partial block.
            s = (s + (T * 2 - 1)) / (T * 2);
            if(s > B)
                s = B;
            // Swap buffers: the partial sums become the next pass's input.
            int *temp = g_in;
            g_in = g_out;
            g_out = temp;
            // Debug dump of the current partial sums.
            err = hipMemcpy(output, g_in, B * sizeof(int), hipMemcpyDeviceToHost);
            if (err != hipSuccess) {
                std::cerr << "Out Copy Error: " << hipGetErrorString(err) << std::endl;
                exit(1);
            }
            std::cout << "OUTPUT VECTOR: " << std::endl;
            for(int i = 0; i < B; i++){
                std::cout << output[i] << std::endl;
            }
        }
        hipEventRecord(end, 0);
        hipEventSynchronize(end);
        err = hipMemcpy(output, g_in, B * sizeof(int), hipMemcpyDeviceToHost);
        if (err != hipSuccess) {
            std::cerr << "Out Copy Error: " << hipGetErrorString(err) << std::endl;
            exit(1);
        }
        hipEventElapsedTime( &calcTime, start, end );
        hipEventRecord( m_end, 0 );
        hipEventSynchronize( m_end );
        hipEventElapsedTime( &memTransTime, m_start, m_end );
        result = output[0] + output[1];
        for(int i = 0; i < B; i++)
            std::cout << "output: " << output[i] << '\n';
    }
    else if(version == 'c'){
        // Single kernel pass; the CPU adds the B per-block partial sums.
        hipEventRecord(start,0);
        hipLaunchKernelGGL(( reduce), dim3(B),dim3(T),memorySize*sizeof(int), 0, g_in, g_out, n);
        hipEventRecord(end, 0);
        hipEventSynchronize(end);
        err = hipMemcpy(output, g_out, B * sizeof(int), hipMemcpyDeviceToHost);
        if (err != hipSuccess) {
            std::cerr << "Out Copy Error: " << hipGetErrorString(err) << std::endl;
            exit(1);
        }
        hipEventElapsedTime( &calcTime, start, end );
        hipEventRecord( m_end, 0 );
        hipEventSynchronize( m_end );
        hipEventElapsedTime( &memTransTime, m_start, m_end );
        result = 0;
        for(int i = 0; i < B; i++){
            result += output[i];
            std::cout << output[i] << std::endl;
        }
    }
    else if(version == 'b'){
        // Recursive kernel: the device relaunches the reduction itself.
        hipEventRecord(start,0);
        hipLaunchKernelGGL(( rReduce), dim3(B),dim3(T),memorySize*sizeof(int), 0, g_in, g_out, n);
        hipEventRecord(end, 0);
        hipEventSynchronize(end);
        // NOTE(review): reads g_in, not g_out — confirm rReduce's output
        // buffer convention in kernel.h.
        err = hipMemcpy(output, g_in, B * sizeof(int), hipMemcpyDeviceToHost);
        if (err != hipSuccess) {
            std::cerr << "Out Copy Error: " << hipGetErrorString(err) << std::endl;
            exit(1);
        }
        hipEventElapsedTime( &calcTime, start, end );
        hipEventRecord( m_end, 0 );
        hipEventSynchronize( m_end );
        hipEventElapsedTime( &memTransTime, m_start, m_end );
        result = output[0] + output[1];
        for(int i = 0; i < B; i++)
            std::cout << "output: " << output[i] << '\n';
    }
    // CPU reference sum for the correctness check.
    check = 0;
    for(int i = 0; i < n; i++){
        check += input[i];
    }
    if(check != result){
        std::cerr << "Oh no! Something went wrong. You Suck. :(" << std::endl;
        std::cerr << result << " " << check << std::endl;
        hipFree(g_out);
        hipFree(g_in);
        free(input);
        free(output);
        exit(1);
    }
    std::cout << std::endl;
    std::cout << "Your program took: " << memTransTime/1000 << " seconds With Memory Transfer on " << n << " inputs" << std::endl;
    std::cout << "Your program took: " << calcTime/1000 << " seconds Without Memory Transfer on " << n << " inputs" << std::endl;
    // Cleanup in the event of success.
    hipEventDestroy( start );
    hipEventDestroy( end );
    hipEventDestroy( m_start );
    hipEventDestroy( m_end );
    hipFree(g_in);
    hipFree(g_out);
    free(input);
    free(output);
    return 0;
}
| 827506ae44ff43ffe22231b89806011ced072d6c.cu | /*
PROGRAMMING ASSIGNMENT 1
JESSICA SMITH
CS791V
*/
#include <iostream>
#include "stdio.h"
#include "kernel.h"
#include <fstream>
// Interactive driver for a parallel sum reduction over n ones.
// Version a: host loops over kernel launches; b: kernel relaunches itself;
// c: one kernel pass, CPU adds the per-block partial sums.
int main() {
    int n, T, B;                    // problem size, threads per block, blocks
    int check = 0;                  // CPU reference sum
    int result = 0;                 // GPU-computed sum
    float calcTime, memTransTime;   // kernel-only / end-to-end times (ms)
    char version;
    // Choose the reduction strategy.
    printf("Input program version (a, b, c):\na: Multiple cpu kernel calls\nb: Kernel calls kernel\nc: CPU finishes the sum\n");
    scanf(" %c", &version);
    if(version != 'a' && version != 'b' && version != 'c'){
        printf("Error: Invalid input.\n");
        return 0;
    }
    // Read and clamp N, T and B.  NOTE(review): a non-positive entry only
    // prints a warning and keeps the bad value, as in the original.
    printf("Input Size of N (0 < N < 100,000,000): ");
    scanf(" %d", &n);
    if(n <= 0)
        printf("Error: Wrong input value for N \n");
    else if( n > 100000000){
        printf("Error: Size too large, setting N = 100,000,000 \n");
        n = 100000000;
    }
    printf("Input Size of T (0 < T < 1024): ");
    scanf(" %d", &T);
    if(T <= 0)
        printf("Error: Wrong input value for T \n");
    else if( T > 1024){
        printf("Error: Size too large, setting T = 1024 \n");
        T = 1024;
    }
    // Fix: the prompt claimed an upper bound of 1024 while the code actually
    // caps B at 56,000.
    printf("Input Size of B (0 < B <= 56,000): ");
    scanf(" %d", &B);
    if(B <= 0){
        printf("Error: Wrong input value for B \n");
    }
    else if( B > 56000){
        printf("Error: Size too large");
        B = 56000;
    }
    // Shared-memory element count per block, at least one full warp pair.
    int memorySize = B*2;
    if(B <= 32)
        memorySize = 64;
    int *input, *output;
    input = (int*) malloc(n*sizeof(int));
    output = (int*) malloc(B*sizeof(int));
    int *g_in, *g_out;
    // Fix: check each allocation individually — the original overwrote the
    // first cudaMalloc status before ever inspecting it.
    cudaError_t err = cudaMalloc( (void**) &g_in, n * sizeof(int));
    if (err != cudaSuccess) {
        std::cerr << "Error: " << cudaGetErrorString(err) << std::endl;
        exit(1);
    }
    err = cudaMalloc( (void**) &g_out, B * sizeof(int));
    if (err != cudaSuccess) {
        std::cerr << "Error: " << cudaGetErrorString(err) << std::endl;
        exit(1);
    }
    // All-ones input: the expected sum equals n.
    for (int i = 0; i < n; ++i) {
        input[i] = 1;
    }
    // Zero the output so no garbage survives a partial reduction.
    for(int i = 0; i < B; i++){
        output[i] = 0;
    }
    // start/end time the kernels only; m_start/m_end include the transfers.
    cudaEvent_t start, end, m_start, m_end;
    cudaEventCreate(&start);
    cudaEventCreate(&end);
    cudaEventCreate(&m_start);
    cudaEventCreate(&m_end);
    cudaEventRecord( m_start, 0 );
    // Fix: check both copies — the original only tested the second status.
    err = cudaMemcpy(g_out, output, B * sizeof(int), cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        std::cerr << "Error: " << cudaGetErrorString(err) << std::endl;
        exit(1);
    }
    err = cudaMemcpy(g_in, input, n * sizeof(int), cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        std::cerr << "Error: " << cudaGetErrorString(err) << std::endl;
        exit(1);
    }
    if(version == 'a'){
        // Host-driven multi-pass reduction, ping-ponging g_in/g_out.
        int s = n;   // elements still to be reduced
        cudaEventRecord(start,0);
        while( s > 1){
            std::cout << "S IS: " << s << std::endl;
            // NOTE(review): this pass requests B*T*sizeof(int) bytes of
            // shared memory, unlike memorySize used by versions b/c —
            // confirm against reduce()'s expectations in kernel.h.
            reduce<<<B,T,B*T*sizeof(int)>>>(g_in, g_out, s);
            // Each block folds T*2 elements, so the next pass must process
            // ceil(s / (T*2)) partial sums.  Fix: the original wrote
            // ceil(s / (T*2)) on an *integer* quotient, which truncates
            // before ceil() runs and could drop the last partial block.
            s = (s + (T * 2 - 1)) / (T * 2);
            if(s > B)
                s = B;
            // Swap buffers: the partial sums become the next pass's input.
            int *temp = g_in;
            g_in = g_out;
            g_out = temp;
            // Debug dump of the current partial sums.
            err = cudaMemcpy(output, g_in, B * sizeof(int), cudaMemcpyDeviceToHost);
            if (err != cudaSuccess) {
                std::cerr << "Out Copy Error: " << cudaGetErrorString(err) << std::endl;
                exit(1);
            }
            std::cout << "OUTPUT VECTOR: " << std::endl;
            for(int i = 0; i < B; i++){
                std::cout << output[i] << std::endl;
            }
        }
        cudaEventRecord(end, 0);
        cudaEventSynchronize(end);
        err = cudaMemcpy(output, g_in, B * sizeof(int), cudaMemcpyDeviceToHost);
        if (err != cudaSuccess) {
            std::cerr << "Out Copy Error: " << cudaGetErrorString(err) << std::endl;
            exit(1);
        }
        cudaEventElapsedTime( &calcTime, start, end );
        cudaEventRecord( m_end, 0 );
        cudaEventSynchronize( m_end );
        cudaEventElapsedTime( &memTransTime, m_start, m_end );
        result = output[0] + output[1];
        for(int i = 0; i < B; i++)
            std::cout << "output: " << output[i] << '\n';
    }
    else if(version == 'c'){
        // Single kernel pass; the CPU adds the B per-block partial sums.
        cudaEventRecord(start,0);
        reduce<<<B,T,memorySize*sizeof(int)>>>(g_in, g_out, n);
        cudaEventRecord(end, 0);
        cudaEventSynchronize(end);
        err = cudaMemcpy(output, g_out, B * sizeof(int), cudaMemcpyDeviceToHost);
        if (err != cudaSuccess) {
            std::cerr << "Out Copy Error: " << cudaGetErrorString(err) << std::endl;
            exit(1);
        }
        cudaEventElapsedTime( &calcTime, start, end );
        cudaEventRecord( m_end, 0 );
        cudaEventSynchronize( m_end );
        cudaEventElapsedTime( &memTransTime, m_start, m_end );
        result = 0;
        for(int i = 0; i < B; i++){
            result += output[i];
            std::cout << output[i] << std::endl;
        }
    }
    else if(version == 'b'){
        // Recursive kernel: the device relaunches the reduction itself.
        cudaEventRecord(start,0);
        rReduce<<<B,T,memorySize*sizeof(int)>>>(g_in, g_out, n);
        cudaEventRecord(end, 0);
        cudaEventSynchronize(end);
        // NOTE(review): reads g_in, not g_out — confirm rReduce's output
        // buffer convention in kernel.h.
        err = cudaMemcpy(output, g_in, B * sizeof(int), cudaMemcpyDeviceToHost);
        if (err != cudaSuccess) {
            std::cerr << "Out Copy Error: " << cudaGetErrorString(err) << std::endl;
            exit(1);
        }
        cudaEventElapsedTime( &calcTime, start, end );
        cudaEventRecord( m_end, 0 );
        cudaEventSynchronize( m_end );
        cudaEventElapsedTime( &memTransTime, m_start, m_end );
        result = output[0] + output[1];
        for(int i = 0; i < B; i++)
            std::cout << "output: " << output[i] << '\n';
    }
    // CPU reference sum for the correctness check.
    check = 0;
    for(int i = 0; i < n; i++){
        check += input[i];
    }
    if(check != result){
        std::cerr << "Oh no! Something went wrong. You Suck. :(" << std::endl;
        std::cerr << result << " " << check << std::endl;
        cudaFree(g_out);
        cudaFree(g_in);
        free(input);
        free(output);
        exit(1);
    }
    std::cout << std::endl;
    std::cout << "Your program took: " << memTransTime/1000 << " seconds With Memory Transfer on " << n << " inputs" << std::endl;
    std::cout << "Your program took: " << calcTime/1000 << " seconds Without Memory Transfer on " << n << " inputs" << std::endl;
    // Cleanup in the event of success.
    cudaEventDestroy( start );
    cudaEventDestroy( end );
    cudaEventDestroy( m_start );
    cudaEventDestroy( m_end );
    cudaFree(g_in);
    cudaFree(g_out);
    free(input);
    free(output);
    return 0;
}
|
fb131ae989cbd190a3c6fdf776ce8d1861083634.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <aggregation/aggregation_amg_level.h>
#include <matrix_analysis.h>
#ifdef _WIN32
#pragma warning (push)
#pragma warning (disable : 4244 4267 4521)
#endif
#ifdef _WIN32
#pragma warning (pop)
#endif
#include <basic_types.h>
#include <util.h>
#include <fstream>
#include <cutil.h>
#include <multiply.h>
#include <transpose.h>
#include <blas.h>
#include <string>
#include <string.h>
#include <iostream>
#include <algorithm>
#include <amgx_timer.h>
#include <amgx_types/util.h>
#include <thrust/sort.h>
#include <thrust/remove.h>
#include <thrust/transform.h>
#include <thrust/binary_search.h>
#include <thrust/unique.h>
#include <thrust/inner_product.h>
#include <thrust/iterator/constant_iterator.h>
namespace amgx
{
namespace aggregation
{
// ----------------------
// Kernels
// ----------------------
// Scatter kernel: for every position t in [start, end), set v[ind[t]] to one.
// Grid-stride traversal, so any launch configuration covers the whole range.
template <typename IndexType, typename ValueType>
__global__
void set_to_one_kernel(IndexType start, IndexType end, IndexType *ind, ValueType *v)
{
    const int stride = gridDim.x * blockDim.x;
    int tid = start + blockDim.x * blockIdx.x + threadIdx.x;
    while (tid < end)
    {
        v[ind[tid]] = types::util<ValueType>::get_one();
        tid += stride;
    }
}
// Remap every aggregate id through an optional renumbering table and shift it
// by interior_offset or bdy_offset depending on whether it falls before or
// after n_interior.  In-place update of `aggregates`; one thread per entry,
// grid-stride loop.
template <typename IndexType>
__global__
void renumberAggregatesKernel(const IndexType *renumbering, const int interior_offset, const int bdy_offset, IndexType *aggregates, const int num_aggregates, const int n_interior, const int renumbering_size)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    while (tid < num_aggregates)
    {
        IndexType new_agg_id;
        // renumbering_size == 0 means "identity renumbering": keep the id.
        if (renumbering_size == 0)
        {
            new_agg_id = aggregates[tid];
        }
        else
        {
            new_agg_id = renumbering[aggregates[tid]];
        }
        //if (aggregates[tid] > num_aggregates)
        //{
        //printf("ID %d old %d + %d = %d\n", tid, new_agg_id, ((new_agg_id >= n_interior) ? bdy_offset : interior_offset), new_agg_id + ((new_agg_id >= n_interior) ? bdy_offset : interior_offset));
        //}
        // Interior ids shift by interior_offset, boundary ids by bdy_offset.
        new_agg_id += ((new_agg_id >= n_interior) ? bdy_offset : interior_offset);
        aggregates[tid] = new_agg_id;
        tid += gridDim.x * blockDim.x;
    }
}
// Kernel to restrict residual using csr_format
// Restrict the fine residual r to rr through R (CSR layout): rr[i] is the
// plain sum of r over the column indices stored in row i.  One thread per
// aggregate row, grid-stride loop.
template <typename IndexType, typename ValueType>
__global__
void restrictResidualKernel(const IndexType *row_offsets, const IndexType *column_indices, const ValueType *r, ValueType *rr, const int num_aggregates)
{
    const int stride = gridDim.x * blockDim.x;
    for (int row = blockDim.x * blockIdx.x + threadIdx.x; row < num_aggregates; row += stride)
    {
        ValueType acc(types::util<ValueType>::get_zero());
        const int row_begin = row_offsets[row];
        const int row_end = row_offsets[row + 1];
        for (int j = row_begin; j < row_end; j++)
        {
            acc = acc + r[column_indices[j]];
        }
        rr[row] = acc;
    }
}
// Kernel to restrict residual using block_dia_csr_format
// Block variant of the restriction above: for each aggregate (one thread per
// aggregate, grid-stride), sum the bsize-vectors of all fine rows listed in
// that aggregate's R row.  bsize is a compile-time template parameter so the
// accumulator array can stay in registers.
template <typename IndexType, typename ValueType, int bsize>
__global__
void restrictResidualBlockDiaCsrKernel(const IndexType *row_offsets, const IndexType *column_indices, const ValueType *r, ValueType *rr, const int num_aggregates)
{
    // Per-thread accumulator for one restricted block.
    ValueType rr_temp[bsize];
    int offset, jmin, jmax;
    for (int tid = blockDim.x * blockIdx.x + threadIdx.x; tid < num_aggregates; tid += gridDim.x * blockDim.x)
    {
        // Initialize to zero
        #pragma unroll
        for (int m = 0; m < bsize; m++)
        {
            rr_temp[m] = types::util<ValueType>::get_zero();
        }
        jmin = row_offsets[tid];
        jmax = row_offsets[tid + 1];
        // Accumulate the block of every fine row belonging to this aggregate.
        for (int j = jmin; j < jmax; j++)
        {
            int jcol = column_indices[j];
            offset = jcol * bsize;
            #pragma unroll
            for (int m = 0; m < bsize; m++)
            {
                rr_temp[m] = rr_temp[m] + r[offset + m];
            }
        }
        // Write the restricted block for this aggregate.
        offset = tid * bsize;
        #pragma unroll
        for (int m = 0; m < bsize; m++)
        {
            rr[offset + m] = rr_temp[m];
        };
    }
}
// Prolongate the coarse correction and apply it (scalar case):
// x[i] += alpha * e[aggregates[i]] — every fine row picks up the scaled
// correction of the aggregate it belongs to.  Grid-stride loop.
template <typename IndexType, typename ValueType>
__global__
void prolongateAndApplyCorrectionKernel(const ValueType alpha, const int num_rows, ValueType *x, const ValueType *e, const IndexType *aggregates, IndexType num_aggregates)
{
    const int stride = gridDim.x * blockDim.x;
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < num_rows; i += stride)
    {
        x[i] = x[i] + alpha * e[aggregates[i]];
    }
}
// Block (bsize-vector) variant of the kernel above: the whole block of fine
// row i receives the coarse correction block of its aggregate.
template <typename IndexType, typename ValueType>
__global__
void prolongateAndApplyCorrectionBlockDiaCsrKernel(const ValueType alpha, const int num_block_rows, ValueType *x, const ValueType *e, const IndexType *aggregates, IndexType num_aggregates, const int bsize)
{
    const int stride = gridDim.x * blockDim.x;
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < num_block_rows; i += stride)
    {
        const IndexType I = aggregates[i];
        for (int m = 0; m < bsize; m++)
        {
            x[i * bsize + m] = x[i * bsize + m] + alpha * e[I * bsize + m];
        }
    }
}
// Copy the coarse vector onto the fine grid, component-wise:
// out[i*blocksize + e] = in[aggregates[i]*blocksize + e].  One thread per
// fine scalar entry, grid-stride loop.
template <typename IndexType, typename ValueType>
__global__
void prolongateVector(const IndexType *aggregates, const ValueType *in, ValueType *out, IndexType fine_rows, IndexType coarse_rows, int blocksize)
{
    const int stride = gridDim.x * blockDim.x;
    for (int tid = threadIdx.x + blockDim.x * blockIdx.x; tid < fine_rows * blocksize; tid += stride)
    {
        const int row = tid / blocksize;    // fine block row
        const int elem = tid % blocksize;   // component inside the block
        out[tid] = in[ aggregates[row] * blocksize + elem ];
    }
}
// AXPY-style update x += lambda * e over numRows scalar entries.
template <typename IndexType, typename ValueType>
__global__
void applyCorrection(ValueType lambda, const ValueType *e, ValueType *x, IndexType numRows )
{
    const int stride = gridDim.x * blockDim.x;
    for (int tid = threadIdx.x + blockDim.x * blockIdx.x; tid < numRows; tid += stride)
    {
        x[tid] = x[tid] + lambda * e[tid];
    }
}
// -------------------------------
// Methods
// ------------------------------
// Copy the aggregation state (scaling info, restriction operator and
// aggregate maps) of a reference level into this level.
template <class T_Config>
void Aggregation_AMG_Level_Base<T_Config>::transfer_level(AMG_Level<TConfig1> *ref_lvl)
{
    // NOTE(review): the dynamic_cast result is not checked — a non-aggregation
    // reference level would dereference null below; callers presumably
    // guarantee the type.
    Aggregation_AMG_Level_Base<TConfig1> *ref_agg_lvl = dynamic_cast<Aggregation_AMG_Level_Base<TConfig1>*>(ref_lvl);
    this->scale_counter = ref_agg_lvl->scale_counter;
    this->scale = ref_agg_lvl->scale;
    this->m_R_row_offsets.copy(ref_agg_lvl->m_R_row_offsets);
    this->m_R_column_indices.copy(ref_agg_lvl->m_R_column_indices);
    this->m_aggregates.copy(ref_agg_lvl->m_aggregates);
    this->m_aggregates_fine_idx.copy(ref_agg_lvl->m_aggregates_fine_idx);
    this->m_num_aggregates = ref_agg_lvl->m_num_aggregates;
    this->m_num_all_aggregates = ref_agg_lvl->m_num_all_aggregates;
}
// Key/index pair: .first holds the aggregate id (sort key), .second the
// originating fine row index.
typedef std::pair<int, int> mypair;
// Strict weak ordering on the key only; used with std::stable_sort so that
// fine rows of the same aggregate keep their original relative order.
bool comparator ( const mypair &l, const mypair &r)
{
    return r.first > l.first;
}
// Method to compute R
// General path
// TODO: this could be merged with selector to save some computations
template <typename T_Config>
void Aggregation_AMG_Level_Base<T_Config>::computeRestrictionOperator_common()
{
    // Build R in CSR form from the aggregate map: row I of R lists every fine
    // row i with m_aggregates[i] == I.  Strategy: stable-sort the fine row
    // indices by aggregate id, then derive the row offsets via lower_bound.
    m_R_row_offsets.resize(m_num_all_aggregates + 1); //create one more row for the pseudo aggregate
    IVector R_row_indices(m_aggregates);
#if AMGX_ASYNCCPU_PROOF_OF_CONCEPT
    // Small problems are sorted on the CPU inside an async task object.
    bool use_cpu = m_aggregates.size() < 4096;
    if (use_cpu)
    {
        struct computeRestrictionTask : public task
        {
            Aggregation_AMG_Level_Base<T_Config> *self;
            IVector *R_row_indices;
            void run()
            {
                int N = self->m_aggregates.size();
                IVector_h R_row_indices_host(self->m_aggregates);
                // (aggregate id, fine row) pairs; stable sort keeps fine rows
                // of the same aggregate in their original order.
                std::vector<mypair> pairs(N);
                for (int i = 0; i < N; i++)
                {
                    pairs[i].first = R_row_indices_host[i];
                    pairs[i].second = i;
                }
                std::stable_sort(pairs.begin(), pairs.end(), comparator);
                IVector_h R_column_indices(self->A->get_num_rows());
                for (int i = 0; i < N; i++)
                {
                    R_column_indices[i] = pairs[i].second;
                    R_row_indices_host[i] = pairs[i].first;
                }
                self->m_R_column_indices = R_column_indices;
                *R_row_indices = R_row_indices_host;
            }
        };
        computeRestrictionTask *t = new computeRestrictionTask();
        t->self = this;
        t->R_row_indices = &R_row_indices;
        t->run();
        delete t;
    }
    else
#endif
    {
        // Device path: sort fine row indices (values) by aggregate id (keys).
        m_R_column_indices.resize(this->A->get_num_rows());
        amgx::thrust::sequence(m_R_column_indices.begin(), m_R_column_indices.end());
        cudaCheckError();
        amgx::thrust::sort_by_key(R_row_indices.begin(), R_row_indices.end(), m_R_column_indices.begin());
        cudaCheckError();
    }
    // For each aggregate id I in [0, num_rows_of_R), the first position of I
    // in the sorted key array is exactly R's row offset for row I.
    amgx::thrust::lower_bound(R_row_indices.begin(),
                              R_row_indices.end(),
                              amgx::thrust::counting_iterator<typename IVector::value_type>(0),
                              amgx::thrust::counting_iterator<typename IVector::value_type>(m_R_row_offsets.size()),
                              m_R_row_offsets.begin());
    cudaCheckError();
}
// two methods below could be merged
// Method to compute R on HOST using csr format
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Aggregation_AMG_Level<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::computeRestrictionOperator_1x1()
{
    // Size the CSR arrays of R (one row per aggregate plus one extra row for
    // the pseudo aggregate; one column entry per fine matrix row), then let
    // fillRowOffsetsAndColIndices() populate them.
    this->m_R_row_offsets.resize(this->m_num_all_aggregates + 1);
    this->m_R_column_indices.resize(this->A->get_num_rows());
    this->fillRowOffsetsAndColIndices(this->A->get_num_rows());
}
// Method to compute R on HOST using block dia-csr format
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Aggregation_AMG_Level<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::computeRestrictionOperator_4x4()
{
    // Identical to the 1x1 path: R is stored per block row, so the same
    // offsets/columns fill applies.
    this->m_R_row_offsets.resize(this->m_num_all_aggregates + 1);
    this->m_R_column_indices.resize(this->A->get_num_rows());
    this->fillRowOffsetsAndColIndices(this->A->get_num_rows());
}
// Method to create R_row_offsest and R_column_indices array on HOST using csr or block dia-csr format
template <typename T_Config>
void Aggregation_AMG_Level_Base<T_Config>::fillRowOffsetsAndColIndices(const int R_num_cols)
{
    // Counting sort over the aggregate ids: afterwards R row I lists every
    // fine row i with m_aggregates[i] == I, in increasing i.
    for (int i = 0; i < m_num_all_aggregates + 1; i++)
    {
        m_R_row_offsets[i] = 0;
    }
    // Count number of neighbors (fine rows) for each aggregate row
    for (int i = 0; i < R_num_cols; i++)
    {
        int I = m_aggregates[i];
        m_R_row_offsets[I]++;
    }
    m_R_row_offsets[m_num_all_aggregates] = R_num_cols;
    // Convert counts into row start offsets by a backward difference scan.
    for (int i = m_num_all_aggregates - 1; i >= 0; i--)
    {
        m_R_row_offsets[i] = m_R_row_offsets[i + 1] - m_R_row_offsets[i];
    }
    /* Set column indices. */
    for (int i = 0; i < R_num_cols; i++)
    {
        int I = m_aggregates[i];
        // Post-increment doubles as the per-row write cursor.
        int Ip = m_R_row_offsets[I]++;
        m_R_column_indices[Ip] = i;
    }
    /* Reset r[i] to start of row memory. */
    // The cursor of row i now points to the start of row i+1; shift back.
    for (int i = m_num_all_aggregates - 1; i > 0; i--)
    {
        m_R_row_offsets[i] = m_R_row_offsets[i - 1];
    }
    m_R_row_offsets[0] = 0;
}
// Method to compute R on DEVICE using block dia-csr format
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Aggregation_AMG_Level<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::computeRestrictionOperator_4x4()
{
    // Block case: R is laid out per block row, so the generic sort-based
    // device path applies unchanged.
    this->computeRestrictionOperator_common();
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Aggregation_AMG_Level<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::computeRestrictionOperator_1x1()
{
    // Scalar case: same generic device path as above.
    this->computeRestrictionOperator_common();
}
// Method to restrict Residual on host using csr_matrix format
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Aggregation_AMG_Level<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::restrictResidual_1x1(const VVector &r, VVector &rr)
{
    // rr[i] = sum of the fine residual r over the rows listed in row i of R
    // (scalar host path).
    for (int i = 0; i < this->m_num_aggregates; i++)
    {
        ValueTypeB acc = types::util<ValueTypeB>::get_zero();
        const int row_begin = this->m_R_row_offsets[i];
        const int row_end = this->m_R_row_offsets[i + 1];
        for (int j = row_begin; j < row_end; j++)
        {
            acc = acc + r[this->m_R_column_indices[j]];
        }
        rr[i] = acc;
    }
}
// Method to restrict Residual on host using block_dia_csr_matrix format
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Aggregation_AMG_Level<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::restrictResidual_4x4(const VVector &r, VVector &rr)
{
    // Block host path: rr's block i is the sum of r's blocks over the fine
    // rows listed in row i of R.
    IndexType bsize = this->A->get_block_dimy();
    // Per-aggregate accumulator, released below (the original leaked this
    // buffer on every call).
    ValueTypeB *temp = new ValueTypeB[bsize];
    for (int i = 0; i < this->m_num_aggregates; i++)
    {
        // Initialize temp to 0
        for (int k = 0; k < bsize; k++)
        {
            temp[k] = types::util<ValueTypeB>::get_zero();
        }
        // Add contributions from each fine point of this aggregate
        for (int j = this->m_R_row_offsets[i]; j < this->m_R_row_offsets[i + 1]; j++)
        {
            int j_col = this->m_R_column_indices[j];
            for (int k = 0; k < bsize; k++)
            {
                temp[k] = temp[k] + r[j_col * bsize + k];
            }
        }
        // Store the restricted block
        for (int k = 0; k < bsize; k++)
        {
            rr[i * bsize + k] = temp[k];
        }
    }
    // Fix: the buffer allocated above was never freed.
    delete[] temp;
}
// Method to restrict Residual on device using csr_matrix format
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Aggregation_AMG_Level<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::restrictResidual_1x1(const VVector &r, VVector &rr)
{
    // Device scalar restriction: one kernel thread per aggregate row.
    int block_size = 64;
    int max_threads;  // fix: stray second semicolon removed
    if (!this->isConsolidationLevel())
    {
        max_threads = this->m_num_aggregates;
    }
    else
    {
        // Consolidation levels presumably restrict the extra (all) aggregates
        // too — matches the block-variant method below.
        max_threads = this->m_num_all_aggregates;
    }
    // Grid capped at the maximum supported size; the kernel is grid-strided.
    int num_blocks = min( AMGX_GRID_MAX_SIZE, (max_threads - 1) / block_size + 1);
    const IndexType *R_row_offsets_ptr = this->m_R_row_offsets.raw();
    const IndexType *R_column_indices_ptr = this->m_R_column_indices.raw();
    const ValueTypeB *r_ptr = r.raw();
    ValueTypeB *rr_ptr = rr.raw();
    hipLaunchKernelGGL(( restrictResidualKernel) , dim3(num_blocks), dim3(block_size), 0, 0, R_row_offsets_ptr, R_column_indices_ptr, r_ptr, rr_ptr, max_threads);
    cudaCheckError();
}
// Method to restrict Residual on device using block_dia_csr_matrix format
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Aggregation_AMG_Level<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::restrictResidual_4x4(const VVector &r, VVector &rr)
{
    // Device block restriction: one kernel thread per aggregate row.
    int block_size = 64;
    int max_threads;
    if (!this->isConsolidationLevel())
    {
        max_threads = this->m_num_aggregates;
    }
    else
    {
        // Consolidation levels restrict all aggregates, mirroring the scalar
        // method above.
        max_threads = this->m_num_all_aggregates;
    };
    // Grid capped at the maximum supported size; the kernel is grid-strided.
    const int num_blocks = min( AMGX_GRID_MAX_SIZE, (max_threads + block_size - 1) / block_size);
    const IndexType *R_row_offsets_ptr = this->m_R_row_offsets.raw();
    const IndexType *R_column_indices_ptr = this->m_R_column_indices.raw();
    const ValueTypeB *r_ptr = r.raw();
    ValueTypeB *rr_ptr = rr.raw();
    cudaCheckError();
    // Dispatch the runtime block dimension to a kernel instantiated with a
    // compile-time bsize so the accumulator loops can be unrolled.
    switch ( this->getA().get_block_dimy() )
    {
        case 2:
            hipLaunchKernelGGL(( restrictResidualBlockDiaCsrKernel<IndexType, ValueTypeB, 2>) , dim3(num_blocks), dim3(block_size), 0, 0, R_row_offsets_ptr, R_column_indices_ptr, r_ptr, rr_ptr, max_threads);
            break;
        case 3:
            hipLaunchKernelGGL(( restrictResidualBlockDiaCsrKernel<IndexType, ValueTypeB, 3>) , dim3(num_blocks), dim3(block_size), 0, 0, R_row_offsets_ptr, R_column_indices_ptr, r_ptr, rr_ptr, max_threads);
            break;
        case 4:
            hipLaunchKernelGGL(( restrictResidualBlockDiaCsrKernel<IndexType, ValueTypeB, 4>) , dim3(num_blocks), dim3(block_size), 0, 0, R_row_offsets_ptr, R_column_indices_ptr, r_ptr, rr_ptr, max_threads);
            break;
        case 5:
            hipLaunchKernelGGL(( restrictResidualBlockDiaCsrKernel<IndexType, ValueTypeB, 5>) , dim3(num_blocks), dim3(block_size), 0, 0, R_row_offsets_ptr, R_column_indices_ptr, r_ptr, rr_ptr, max_threads);
            break;
        case 8:
            hipLaunchKernelGGL(( restrictResidualBlockDiaCsrKernel<IndexType, ValueTypeB, 8>) , dim3(num_blocks), dim3(block_size), 0, 0, R_row_offsets_ptr, R_column_indices_ptr, r_ptr, rr_ptr, max_threads);
            break;
        case 10:
            hipLaunchKernelGGL(( restrictResidualBlockDiaCsrKernel<IndexType, ValueTypeB, 10>) , dim3(num_blocks), dim3(block_size), 0, 0, R_row_offsets_ptr, R_column_indices_ptr, r_ptr, rr_ptr, max_threads);
            break;
        default:
            FatalError( "Unsupported block size in restrictResidual_4x4!!!", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE );
    }
    cudaCheckError();
}
// Damping factor for the coarse-grid correction (single precision).
// Returns nom/denom clamped into [1, 2]; opposite signs (or a zero product)
// fall back to 1 so the correction is never reversed or annihilated.
__inline__ float getAlpha(float &nom, float &denom)
{
    if (nom * denom <= 0. || std::abs(nom) < std::abs(denom))
    {
        return 1.;
    }
    if (std::abs(nom) > 2.*std::abs(denom))
    {
        return 2.;
    }
    return nom / denom;
}
// Double-precision overload of the damping-factor heuristic above.
__inline__ double getAlpha(double &nom, double &denom)
{
    if (nom * denom <= 0. || std::abs(nom) < std::abs(denom))
    {
        return 1.;
    }
    if (std::abs(nom) > 2.*std::abs(denom))
    {
        return 2.;
    }
    return nom / denom;
}
// Complex overloads of the damping heuristic.  Unlike the real versions
// above there is no sign/product test (complex values have no ordering);
// only the magnitudes are compared: |nom| < |denom| -> 1, |nom| > 2|denom|
// -> 2, otherwise nom/denom.
__inline__ hipComplex getAlpha(hipComplex &nom, hipComplex &denom)
{
    hipComplex alpha;
    if (types::util<hipComplex>::abs(nom) < types::util<hipComplex>::abs(denom))
    {
        alpha = make_cuComplex(1.f, 0.f);
    }
    else if (types::util<hipComplex>::abs(nom) > 2.*types::util<hipComplex>::abs(denom))
    {
        alpha = make_cuComplex(2.f, 0.f);
    }
    else
    {
        // Relies on the project's operator/ overload for complex values.
        alpha = nom / denom;
    }
    return alpha;
}
// Double-precision complex variant of the overload above.
__inline__ hipDoubleComplex getAlpha(hipDoubleComplex &nom, hipDoubleComplex &denom)
{
    hipDoubleComplex alpha;
    if (types::util<hipDoubleComplex>::abs(nom) < types::util<hipDoubleComplex>::abs(denom))
    {
        alpha = make_cuDoubleComplex(1., 0.);
    }
    else if (types::util<hipDoubleComplex>::abs(nom) > 2.*types::util<hipDoubleComplex>::abs(denom))
    {
        alpha = make_cuDoubleComplex(2., 0.);
    }
    else
    {
        alpha = nom / denom;
    }
    return alpha;
}
template< class T_Config>
typename T_Config::VecPrec Aggregation_AMG_Level_Base<T_Config>::computeAlpha(const Vector<T_Config> &e, const Vector<T_Config> &bc, const Vector<T_Config> &tmp)
{
    // Compute the error-scaling factor from two inner products over the coarse rows,
    // clamped into [1, 2] by getAlpha(): alpha = clamp(<e, bc> / <e, tmp>).
    // (Removed an unused local 'alpha' that was initialized but never read.)
    Matrix<TConfig> &Ac = this->getNextLevel( MemorySpace( ) )->getA();
    int size = Ac.get_num_rows();
    VVector v(2, types::util<ValueTypeB>::get_zero());
    v[0] = amgx::thrust::inner_product(e.begin(), e.begin() + size, bc.begin(), types::util<ValueTypeB>::get_zero());
    v[1] = amgx::thrust::inner_product(e.begin(), e.begin() + size, tmp.begin(), types::util<ValueTypeB>::get_zero());
    cudaCheckError();
    return getAlpha(v[0], v[1]);
}
// Method to prolongate the error on HOST using csr format:
// x[i] += alpha * e[aggregate(i)], where alpha is 1 unless error scaling is enabled.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Aggregation_AMG_Level<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::prolongateAndApplyCorrection_1x1(Vector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &e, Vector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &bc, Vector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &x, Vector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &tmp)
{
    Matrix<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &A = this->getA();
    // (Removed an unused local reference 'C' to the coarse-level matrix.)
    if ( this->m_error_scaling >= 2 )
    {
        FatalError("error_scaling=2,3 is not implemented on host", AMGX_ERR_NOT_IMPLEMENTED );
    }
    ValueTypeB alpha = types::util<ValueTypeB>::get_one();
    if (this->m_error_scaling)
    {
        // alpha = clamp(<e, bc> / <e, A_c e>) -- see computeAlpha().
        multiply(this->next_h->getA(), e, tmp);
        alpha = this->computeAlpha (e, bc, tmp);
    }
    // Apply correction on all (interior and exterior) equations.
    for (int i = 0; i < A.get_num_cols(); i++)
    {
        int I = this->m_aggregates[i];
        x[i] = x[i] + alpha * e[I];
    }
}
// Method to prolongate the error on HOST using block_dia_csr format:
// block variant of the host prolongation; requires square blocks.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Aggregation_AMG_Level<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::prolongateAndApplyCorrection_4x4(Vector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &e, Vector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &bc, Vector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &x, Vector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &tmp)
{
    if (this->A->get_block_dimy() != this->A->get_block_dimx())
    {
        FatalError("Aggregation_AMG_Level not implemented for non square blocks, exiting", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
    }
    if ( this->m_error_scaling >= 2 )
    {
        FatalError("error_scaling=2,3 is not implemented on host", AMGX_ERR_NOT_IMPLEMENTED );
    }
    // (Removed an unused local reference 'C' to the coarse-level matrix.)
    ValueTypeB alpha = types::util<ValueTypeB>::get_one();
    if (this->m_error_scaling)
    {
        multiply(this->next_h->getA(), e, tmp);
        alpha = this->computeAlpha (e, bc, tmp);
    }
    // Apply correction on all equations, one block-row (block_dimy entries) at a time.
    for (int i = 0; i < this->A->get_num_rows(); i++)
    {
        int I = this->m_aggregates[i];
        for (int k = 0; k < this->A->get_block_dimy(); k++)
        {
            x[i * this->A->get_block_dimy() + k] = x[i * this->A->get_block_dimy() + k] + alpha * e[I * this->A->get_block_dimy() + k];
        }
    }
}
// Prolongate the error on DEVICE using csr format:
// launches one thread per fine row; x[i] += e[aggregate(i)].
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Aggregation_AMG_Level<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::prolongateAndApplyCorrection_1x1(Vector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > &e, Vector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > &bc, Vector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > &x, Vector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > &tmp)
{
    // Unscaled correction only: the error_scaling==1 path was removed.
    if (this->m_error_scaling)
    {
        FatalError("error_scaling=1 is deprecated", AMGX_ERR_NOT_IMPLEMENTED );
    }
    const int numRows = (int) this->A->get_num_rows();
    const int threadsPerBlock = 64;
    const int numBlocks = min( AMGX_GRID_MAX_SIZE, (numRows + threadsPerBlock - 1) / threadsPerBlock );
    const ValueTypeB one = types::util<ValueTypeB>::get_one();
    hipLaunchKernelGGL(( prolongateAndApplyCorrectionKernel) , dim3(numBlocks), dim3(threadsPerBlock), 0, 0, one, numRows, x.raw(), e.raw(), this->m_aggregates.raw(), this->m_num_aggregates);
    cudaCheckError();
}
// Prolongate the error on DEVICE using block dia-csr format.
// For m_error_scaling >= 2 this also implements the general scaled-correction
// path (smoothed prolongation + lambda scaling); otherwise it applies the plain
// unscaled correction x <- x + e[aggregate(i)].
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Aggregation_AMG_Level<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::prolongateAndApplyCorrection_4x4(Vector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > &ec,
Vector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > &bf,
Vector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > &xf,
Vector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > &rf)
{
if ( this->m_error_scaling >= 2 )
{
// Reuse path: a previously computed scale factor is still valid for
// scale_counter more cycles -- apply it directly without recomputing.
if ( this->scale_counter > 0 )
{
const IndexType *aggregates_ptr = this->m_aggregates.raw();
ValueTypeB *x_ptr = xf.raw();
const ValueTypeB *e_ptr = ec.raw();
const int block_size = 64;
const int num_blocks = min( AMGX_GRID_MAX_SIZE, (int) ((this->A->get_num_rows() - 1) / block_size + 1));
hipLaunchKernelGGL(( prolongateAndApplyCorrectionBlockDiaCsrKernel) , dim3(num_blocks), dim3(block_size), 0, 0, this->scale, (int)this->getA().get_num_rows(), x_ptr, e_ptr, aggregates_ptr, this->m_num_aggregates, this->getA().get_block_dimy());
cudaCheckError();
this->scale_counter--;
return;
}
bool vanek_scaling = this->m_error_scaling > 3;
IndexType numRowsCoarse = this->next_d->getA().get_num_rows();
IndexType numRowsFine = this->A->get_num_rows();
IndexType blockdim = this->A->get_block_dimx();
if ( blockdim != this->A->get_block_dimy() )
{
FatalError("Unsupported dimension for aggregation amg level", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
}
// ef: prolongated (then smoothed) error; Aef: A * ef for the lambda computation.
VVector ef( rf.size() );
VVector Aef( rf.size() );
ef.set_block_dimy( blockdim );
Aef.set_block_dimy( blockdim );
// prolongate e
const int threads_per_block = 256;
const int num_block_values = min( AMGX_GRID_MAX_SIZE, (numRowsFine * blockdim - 1) / threads_per_block + 1);
const hipStream_t stream = nullptr;
hipLaunchKernelGGL(( prolongateVector) , dim3(num_block_values), dim3(threads_per_block), 0, stream, this->m_aggregates.raw(), ec.raw(), ef.raw(), numRowsFine, numRowsCoarse, blockdim );
ef.dirtybit = 1;
hipStreamSynchronize(stream);
cudaCheckError();
int preSmooth;
if ( vanek_scaling )
{
preSmooth = this->amg->getNumPostsweeps();
}
else
{
preSmooth = this->scaling_smoother_steps;
}
//smooth error
this->smoother->setTolerance( 0.0 );
this->smoother->set_max_iters( preSmooth );
if ( vanek_scaling )
{
amgx::thrust::fill( Aef.begin(), Aef.end(), types::util<ValueTypeB>::get_zero() );
cudaCheckError();
this->smoother->solve( Aef, ef, false ); //smooth correction with rhs 0
this->smoother->solve( bf, xf, false ); // smooth x with rhs residual
//recompute residual
int offset, size;
this->getA().getOffsetAndSizeForView(OWNED, &offset, &size);
axmb( this->getA(), xf, bf, rf, offset, size );
}
else
{
this->smoother->solve( rf, ef, false ); //smooth correction with rhs residual
}
// multiply for lambda computation
multiply(this->getA(), ef, Aef, OWNED);
// NOTE(review): nominator/denominator are only assigned below for
// m_error_scaling in {2,3,4,5}; other values >= 2 would leave them
// uninitialized -- confirm the config validator excludes those.
ValueTypeB nominator, denominator;
int offset = 0, size = 0;
this->A->getOffsetAndSizeForView(OWNED, &offset, &size);
if ( this->m_error_scaling == 2 || this->m_error_scaling == 4 )
{
// compute lambda=<rf,Aef>/<Aef,Aef>
nominator = amgx::thrust::inner_product( rf.begin(), rf.end(), Aef.begin(), types::util<ValueTypeB>::get_zero() );
denominator = amgx::thrust::inner_product( Aef.begin(), Aef.end(), Aef.begin(), types::util<ValueTypeB>::get_zero() );
cudaCheckError();
}
if ( this->m_error_scaling == 3 || this->m_error_scaling == 5)
{
// compute lambda=<rf,ef>/<ef,Aef>
nominator = amgx::thrust::inner_product( rf.begin(), rf.begin() + size * blockdim, ef.begin(), types::util<ValueTypeB>::get_zero() );
denominator = amgx::thrust::inner_product( ef.begin(), ef.begin() + size * blockdim, Aef.begin(), types::util<ValueTypeB>::get_zero() );
if (!this->A->is_matrix_singleGPU())
{
this->A->getManager()->global_reduce_sum(&nominator);
this->A->getManager()->global_reduce_sum(&denominator);
}
cudaCheckError();
}
// Degenerate case: fall back to alpha = 1 rather than dividing by zero.
if (types::util<ValueTypeB>::abs(denominator) == 0.0)
{
nominator = denominator = types::util<ValueTypeB>::get_one();
}
// apply correction x <- x + lambda*e
const int num_block_fine = min( AMGX_GRID_MAX_SIZE, (numRowsFine * blockdim - 1) / threads_per_block + 1 );
ValueTypeB alpha = nominator / denominator;
// Clamp |alpha| into [0.3, 10] while preserving its sign/phase.
if ( types::util<ValueTypeB>::abs(alpha) < .3 )
{
alpha = (alpha / types::util<ValueTypeB>::abs(alpha)) * .3; // it was this before: alpha = .3, which is not 100% equal
}
if ( types::util<ValueTypeB>::abs(alpha) > 10 )
{
alpha = (alpha / types::util<ValueTypeB>::abs(alpha)) * 10.; // it was this before: alpha = 10., which is not 100% equal
}
hipLaunchKernelGGL(( applyCorrection) , dim3(num_block_fine), dim3(threads_per_block), 0, stream, alpha, ef.raw(), xf.raw(), numRowsFine * blockdim );
cudaCheckError();
this->scale_counter = this->reuse_scale; //reuse this scale scale_counter times
this->scale = alpha;
return;
}
// Plain (unscaled) correction path.
ValueTypeB alpha = types::util<ValueTypeB>::get_one();
const int block_size = 64;
const int num_blocks = min( AMGX_GRID_MAX_SIZE, (int) ((this->A->get_num_rows() - 1) / block_size + 1));
const IndexType *aggregates_ptr = this->m_aggregates.raw();
ValueTypeB *x_ptr = xf.raw();
const ValueTypeB *e_ptr = ec.raw();
if (this->m_error_scaling == 1)
{
FatalError("error_scaling=1 is deprecated", AMGX_ERR_NOT_IMPLEMENTED );
}
hipLaunchKernelGGL(( prolongateAndApplyCorrectionBlockDiaCsrKernel) , dim3(num_blocks), dim3(block_size), 0, 0, alpha, (int)this->A->get_num_rows(), x_ptr, e_ptr, aggregates_ptr, this->m_num_aggregates, this->A->get_block_dimy());
cudaCheckError();
}
template <class T_Config>
void Aggregation_AMG_Level_Base<T_Config >::prolongateAndApplyCorrection(VVector &e, VVector &bf, VVector &x, VVector &tmp)
{
    // Dispatch the prolongation/correction to the specialization that matches
    // the matrix block shape, then kick off the halo exchange of x for multi-GPU.
    // (Removed an unused local reference 'Ac' to the next-level matrix.)
    //this is dirty, but error scaling 2 and 3 do not have a specialized version. Instead, the general version sits in the 4x4 function
    if ( this->m_error_scaling >= 2 )
    {
        prolongateAndApplyCorrection_4x4(e, bf, x, tmp);
    }
    else if (this->A->get_block_size() == 1)
    {
        prolongateAndApplyCorrection_1x1(e, bf, x, tmp);
    }
    else if (this->A->get_block_dimx() == this->A->get_block_dimy() )
    {
        prolongateAndApplyCorrection_4x4(e, bf, x, tmp);
    }
    else
    {
        FatalError("Unsupported dimension for aggregation amg level", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
    }
    x.dirtybit = 1;
    if (!this->A->is_matrix_singleGPU() && x.delayed_send == 0)
    {
        // Finish any in-flight receive on x before starting a new exchange.
        if (x.in_transfer & RECEIVING) { this->A->manager->exchange_halo_wait(x, x.tag); }
        this->A->manager->exchange_halo_async(x, x.tag);
    }
}
template <class T_Config>
void Aggregation_AMG_Level_Base<T_Config>::restrictResidual(VVector &r, VVector &rr)
{
    // Restrict the fine residual r to the coarse vector rr, then exchange the
    // coarse halo for multi-GPU runs.
    if (this->A->get_block_size() == 1)
    {
        restrictResidual_1x1(r, rr);
    }
    else if (this->A->get_block_dimx() == this->A->get_block_dimy() )
    {
        restrictResidual_4x4(r, rr);
    }
    else
    {
        FatalError("Unsupported dimension for aggregation amg level", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
    }
    //TODO: check level transfer between host and device for multiGPU
    if (!this->A->is_matrix_singleGPU())
    {
        // Fixed: 'Ac' was previously declared twice (the inner declaration
        // shadowed an identical outer one); a single declaration suffices.
        Matrix<TConfig> &Ac = this->getNextLevel( MemorySpace( ) )->getA(); //TODO problem in memoryspace transfer is here
        rr.dirtybit = 1;
        if (!Ac.is_matrix_singleGPU() && !this->isConsolidationLevel() && rr.delayed_send == 0)
        {
            if (rr.in_transfer & RECEIVING) { Ac.manager->exchange_halo_wait(rr, rr.tag); }
            Ac.manager->exchange_halo_async(rr, rr.tag);
        }
    }
}
template <class T_Config>
void Aggregation_AMG_Level_Base<T_Config>::computeRestrictionOperator()
{
    // Build the restriction operator with the specialization matching the
    // matrix block shape; guard-clause dispatch, most specific case first.
    if (this->A->get_block_size() == 1)
    {
        computeRestrictionOperator_1x1();
        return;
    }
    if (this->A->get_block_dimx() == 4 && this->A->get_block_dimy() == 4)
    {
        computeRestrictionOperator_4x4();
        return;
    }
    // Generic fallback for every other block shape.
    this->computeRestrictionOperator_common();
}
template <typename IndexType>
__global__ void coarse_to_global(IndexType *aggregates, IndexType *aggregates_global, IndexType *renumbering, IndexType num_elements, int64_t offset)
{
    // For each fine element, store its (offset-shifted) global index into the
    // renumbering slot of its aggregate. Grid-stride loop over the elements.
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_elements; i += stride)
    {
        // Fine elements of the same aggregate all write the same value, so the race is benign.
        renumbering[aggregates[i]] = aggregates_global[i] + offset;
    }
}
// Copy the rows listed in 'maps' from the (row_offsets, col_indices, values)
// matrix into the compacted (new_row_offsets, new_col_indices, new_values)
// output, remapping column indices through 'renumbering'. One warp (32 threads)
// cooperates on each exported row: lanes stride over the row's values and
// column indices. 'bsize' multiplies the value count per nonzero
// (presumably the matrix block size -- TODO confirm against callers).
template <typename T, typename IndexType>
__global__ void export_matrix_elements(IndexType *row_offsets, IndexType *col_indices, T *values, IndexType *maps, IndexType *renumbering, IndexType *new_row_offsets, IndexType *new_col_indices, T *new_values, IndexType bsize, IndexType size)
{
// idx: which exported row this warp handles; coopIdx: lane within the warp.
int idx = blockIdx.x * blockDim.x / 32 + threadIdx.x / 32;
int coopIdx = threadIdx.x % 32;
while (idx < size)
{
int row = maps[idx];
INDEX_TYPE src_base = row_offsets[row];
INDEX_TYPE dst_base = new_row_offsets[idx];
// Copy all bsize values of every nonzero in the row, 32 at a time.
for (int m = coopIdx; m < row_offsets[row + 1]*bsize - src_base * bsize; m += 32)
{
new_values[dst_base * bsize + m] = values[src_base * bsize + m];
}
// Copy and renumber the column indices of the row.
for (int m = coopIdx; m < row_offsets[row + 1] - src_base; m += 32)
{
new_col_indices[dst_base + m] = renumbering[col_indices[src_base + m]];
}
idx += gridDim.x * blockDim.x / 32;
}
}
template <class T>
__global__ void export_matrix_diagonal(T *values, INDEX_TYPE bsize, INDEX_TYPE *maps, T *output, INDEX_TYPE size)
{
    // Gather the bsize-wide diagonal entries of the rows listed in 'maps'
    // into the contiguous 'output' array. One thread per exported row.
    const int stride = gridDim.x * blockDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += stride)
    {
        const int srcRow = maps[i];
        for (int m = 0; m < bsize; m++)
        {
            output[i * bsize + m] = values[srcRow * bsize + m];
        }
    }
}
__global__ void remove_boundary(INDEX_TYPE *flags, INDEX_TYPE *maps, INDEX_TYPE size)
{
    // Clear the flag of every node listed in 'maps'. Grid-stride loop.
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += stride)
    {
        // Duplicate map entries all write the same 0, so the race is benign.
        flags[maps[i]] = 0;
    }
}
__global__ void calc_inverse_renumbering(INDEX_TYPE *renum, INDEX_TYPE *irenum, INDEX_TYPE *renum_gbl, INDEX_TYPE base_index, INDEX_TYPE max_element)
{
    // irenum[renum[i]] = renum_gbl[i] - base_index: invert the local
    // renumbering while shifting global indices down by base_index.
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < max_element; i += stride)
    {
        irenum[renum[i]] = renum_gbl[i] - base_index;
    }
}
__global__ void create_halo_mapping(INDEX_TYPE *mapping, INDEX_TYPE *node_list, INDEX_TYPE base_index, INDEX_TYPE map_offset, INDEX_TYPE size)
{
    // For each node in 'node_list', record its destination slot
    // (map_offset + position) in 'mapping', keyed by base-relative index.
    const int stride = blockDim.x * gridDim.x;
    for (int r = blockIdx.x * blockDim.x + threadIdx.x; r < size; r += stride)
    {
        mapping[node_list[r] - base_index] = map_offset + r;
    }
}
// Remap the column indices of a received halo matrix into local numbering and
// count the surviving (valid) entries per row into row_length. Groups of 4
// threads cooperate on one row. Columns owned by this rank go through
// 'renumbering'; columns owned by a neighbor go through 'mapping' (offset per
// neighbor by 'map_offsets'); unresolvable columns are marked -1 and dropped
// from the count. Requires blockDim.x ints of dynamic shared memory.
__global__ void map_col_indices_and_count_rowlen(INDEX_TYPE *row_offsets, INDEX_TYPE *col_indices, INDEX_TYPE *row_length, INDEX_TYPE *renumbering, INDEX_TYPE *mapping, INDEX_TYPE *map_offsets, int64_t *index_ranges, INDEX_TYPE part_id, INDEX_TYPE my_id, INDEX_TYPE base_index, INDEX_TYPE my_range, INDEX_TYPE num_neighbors, INDEX_TYPE num_rows)
{
extern __shared__ volatile int reduction[];
int row = blockIdx.x * blockDim.x / 4 + threadIdx.x / 4;
int coopIdx = threadIdx.x % 4;
while (row < num_rows)
{
int valid = 0;
for (int idx = row_offsets[row] + coopIdx; idx < row_offsets[row + 1]; idx += 4) //this may look horrible, but I expect low branch divergence, because col indices in a row usually belong to the same partition (or at most one more)
{
int colIdx = col_indices[idx];
// part: -2 = unresolved, -1 = owned by me, >= 0 = neighbor index.
int part = -2;
if (colIdx >= index_ranges[2 * part_id] && colIdx < index_ranges[2 * part_id + 1]) //the col index probably belongs to the partition I am working on
{
part = part_id;
}
else if (colIdx >= base_index && colIdx < base_index + my_range) //or points back to the owned partition
{
part = -1;
}
else //or else it points to a third partition
{
for (int i = 0; i < num_neighbors; i++)
{
if (colIdx >= index_ranges[2 * i] && colIdx < index_ranges[2 * i + 1])
{
part = i;
}
}
}
if (part == -2)
{
col_indices[idx] = -1;
#ifdef DEBUG
printf("Column index encountered that does not belong to any of my neighbors!! %d\n", colIdx);
#endif
}
else
{
if (part == -1)
{
col_indices[idx] = renumbering[colIdx - base_index];
valid++;
}
else
{
int new_col_idx = mapping[map_offsets[part] + colIdx - index_ranges[2 * part]];
if (new_col_idx >= 0)
{
valid++;
col_indices[idx] = new_col_idx;
}
else
{
col_indices[idx] = -1;
}
}
}
}
// Sum the 4 per-lane 'valid' counters of this row's group in shared memory.
reduction[threadIdx.x] = valid;
for (int s = 2; s > 0; s >>= 1)
{
if (coopIdx < s)
{
reduction[threadIdx.x] += reduction[threadIdx.x + s];
}
// NOTE(review): this __syncthreads() sits inside the outer while loop,
// whose exit iteration can differ across threads when num_rows is not a
// multiple of the per-iteration stride -- a barrier in divergent control
// flow is undefined behavior; confirm launch sizes always avoid this.
__syncthreads();
}
if (coopIdx == 0)
{
row_length[row] = reduction[threadIdx.x];
}
row += gridDim.x * blockDim.x / 4;
}
}
// Remap outward-pointing column indices of the owned matrix to local halo
// numbering. Groups of 4 threads cooperate on one row. Columns below num_rows
// are already local and are left as-is; others are translated to global via
// 'global_renumbering', matched to the owning neighbor through 'halo_ranges',
// and remapped through that neighbor's section of 'halo_renumbering'.
__global__ void map_col_indices(INDEX_TYPE *row_offsets, INDEX_TYPE *col_indices, int64_t *halo_ranges, INDEX_TYPE *halo_renumbering, INDEX_TYPE *halo_rows, INDEX_TYPE *global_renumbering, INDEX_TYPE num_neighbors, INDEX_TYPE num_rows, INDEX_TYPE num_rows_processed)
{
int row = blockIdx.x * blockDim.x / 4 + threadIdx.x / 4;
int coopIdx = threadIdx.x % 4;
while (row < num_rows_processed)
{
for (int idx = row_offsets[row] + coopIdx; idx < row_offsets[row + 1]; idx += 4)
{
int colIdx = col_indices[idx];
// part: -1 = already local, >= 0 = index of the owning neighbor.
int part = 0;
if (colIdx < num_rows)
{
part = -1;
}
else
{
colIdx = global_renumbering[colIdx];
for (int i = 0; i < num_neighbors; i++)
{
if (colIdx >= halo_ranges[2 * i] && colIdx < halo_ranges[2 * i + 1])
{
part = i;
break;
}
}
}
if (part == -1)
{
col_indices[idx] = colIdx;
}
else
{
col_indices[idx] = halo_renumbering[halo_rows[part] + colIdx - halo_ranges[2 * part]];
}
}
row += gridDim.x * blockDim.x / 4;
}
}
template <class T>
__global__ void reorder_whole_matrix(INDEX_TYPE *old_rows, INDEX_TYPE *old_cols, T *old_vals, INDEX_TYPE *rows, INDEX_TYPE *cols, T *vals, INDEX_TYPE bsize, INDEX_TYPE num_rows)
{
    // Compact each row into its destination slot, skipping entries whose
    // column index was marked invalid (-1). One thread per row.
    const int stride = blockDim.x * gridDim.x;
    for (int r = blockIdx.x * blockDim.x + threadIdx.x; r < num_rows; r += stride)
    {
        const INDEX_TYPE src_begin = old_rows[r];
        const INDEX_TYPE row_len = old_rows[r + 1] - src_begin;
        INDEX_TYPE dst = rows[r];
        for (int i = 0; i < row_len; i++)
        {
            const INDEX_TYPE colIdx = old_cols[src_begin + i];
            if (colIdx < 0) { continue; } // dropped entry
            cols[dst] = colIdx;
            for (int j = 0; j < bsize; j++)
            {
                vals[dst * bsize + j] = old_vals[(src_begin + i) * bsize + j];
            }
            dst++;
        }
    }
}
__global__ void calc_gbl_renumbering(INDEX_TYPE *inv_renum, INDEX_TYPE *gbl_renum, INDEX_TYPE size)
{
    // Invert a renumbering table: gbl_renum[inv_renum[i]] = i.
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < size; i += stride)
    {
        gbl_renum[inv_renum[i]] = i;
    }
}
template <typename ValueType>
__global__ void write_diagonals(ValueType *values, INDEX_TYPE *diag, INDEX_TYPE *map, ValueType *output, INDEX_TYPE bsize, INDEX_TYPE size)
{
    // Gather the diagonal values of the rows listed in 'map' into 'output'.
    // bsize consecutive threads cooperate on one row's diagonal entries.
    const int rowsPerBlock = blockDim.x / bsize;
    const int lane = threadIdx.x % bsize;
    // Retire the trailing threads that do not form a complete bsize group.
    if (threadIdx.x >= rowsPerBlock * bsize) { return; }
    for (int r = blockIdx.x * rowsPerBlock + threadIdx.x / bsize; r < size; r += gridDim.x * rowsPerBlock)
    {
        output[r * bsize + lane] = values[diag[map[r]] * bsize + lane];
    }
}
template <typename ValueType>
__global__ void write_diagonals_back(ValueType *values, INDEX_TYPE *diag, ValueType *source, INDEX_TYPE bsize, INDEX_TYPE size)
{
    // Scatter diagonal values from 'source' back into the matrix's value
    // array at the positions given by 'diag'. bsize consecutive threads
    // cooperate on one row's diagonal entries.
    const int rowsPerBlock = blockDim.x / bsize;
    const int lane = threadIdx.x % bsize;
    // Retire the trailing threads that do not form a complete bsize group.
    if (threadIdx.x >= rowsPerBlock * bsize) { return; }
    for (int r = blockIdx.x * rowsPerBlock + threadIdx.x / bsize; r < size; r += gridDim.x * rowsPerBlock)
    {
        values[diag[r]*bsize + lane] = source[r * bsize + lane];
    }
}
// Assemble the distributed coarse matrix Ac with full (matrix) halo exchange:
// export boundary rows to each neighbor, exchange them, then renumber and
// append the received halo rows to Ac. No-op for single-GPU matrices.
template <class T_Config>
void Aggregation_AMG_Level_Base<T_Config>::prepareNextLevelMatrix_full(const Matrix<TConfig> &A, Matrix<TConfig> &Ac)
{
if (A.is_matrix_singleGPU()) { return; }
int num_neighbors = A.manager->neighbors.size();
if (TConfig::memSpace == AMGX_host)
{
FatalError("Aggregation AMG Not implemented for host", AMGX_ERR_NOT_IMPLEMENTED);
}
else
{
int c_size = Ac.get_num_rows();
int f_size = A.get_num_rows();
int diag = Ac.hasProps(DIAG);
if (A.manager->B2L_rings[0].size() > 2) { FatalError("Aggregation_AMG_Level prepareNextLevelMatrix not implemented >1 halo rings", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); }
//get coarse -> fine global renumbering
IVector renumbering(c_size);
int num_blocks = min(4096, (c_size + 127) / 128);
hipLaunchKernelGGL(( coarse_to_global) , dim3(num_blocks), dim3(128), 0, 0, this->m_aggregates.raw(), this->m_aggregates_fine_idx.raw(), renumbering.raw(), f_size, 0);
cudaCheckError();
//
// Step 0 - form halo matrices that are exported to neighbors
//
std::vector<Matrix<TConfig> > halo_rows(num_neighbors);
std::vector<DistributedManager<TConfig> > halo_btl(num_neighbors);
for (int i = 0; i < num_neighbors; i++ )
{
int num_unique = Ac.manager->B2L_rings[i][1];
//prepare export halo matrices
halo_btl[i].resize(1, 1);
halo_btl[i].set_global_id(Ac.manager->global_id());
halo_btl[i].B2L_maps[0].resize(num_unique);
halo_btl[i].B2L_rings[0].resize(2);
halo_btl[i].B2L_rings[0][0] = 0;
halo_btl[i].B2L_rings[0][1] = num_unique;
halo_btl[i].set_index_range(A.manager->index_range());
halo_btl[i].set_base_index(A.manager->base_index());
//global indices of rows of the halo matrix
amgx::thrust::copy(amgx::thrust::make_permutation_iterator( renumbering.begin(), Ac.manager->B2L_maps[i].begin()),
amgx::thrust::make_permutation_iterator( renumbering.begin(), Ac.manager->B2L_maps[i].begin() + num_unique),
halo_btl[i].B2L_maps[0].begin());
cudaCheckError();
halo_rows[i].addProps(CSR);
if (diag) { halo_rows[i].addProps(DIAG); }
//calculate row length and row_offsets
halo_rows[i].row_offsets.resize(num_unique + 1);
amgx::thrust::transform(amgx::thrust::make_permutation_iterator(Ac.row_offsets.begin() + 1, Ac.manager->B2L_maps[i].begin()),
amgx::thrust::make_permutation_iterator(Ac.row_offsets.begin() + 1, Ac.manager->B2L_maps[i].end()),
amgx::thrust::make_permutation_iterator(Ac.row_offsets.begin(), Ac.manager->B2L_maps[i].begin()),
halo_rows[i].row_offsets.begin(),
amgx::thrust::minus<IndexType>());
cudaCheckError();
amgx::thrust::exclusive_scan(halo_rows[i].row_offsets.begin(), halo_rows[i].row_offsets.end(), halo_rows[i].row_offsets.begin());
cudaCheckError();
//resize halo matrix
IndexType num_nz = halo_rows[i].row_offsets[num_unique];
halo_rows[i].resize(num_unique, num_unique, num_nz, Ac.get_block_dimy(), Ac.get_block_dimx(), 1);
//copy relevant rows and renumber their column indices
num_blocks = min(4096, (num_unique + 127) / 128);
hipLaunchKernelGGL(( export_matrix_elements) , dim3(num_blocks), dim3(128), 0, 0, Ac.row_offsets.raw(), Ac.col_indices.raw(), Ac.values.raw(), Ac.manager->B2L_maps[i].raw(), renumbering.raw(), halo_rows[i].row_offsets.raw(), halo_rows[i].col_indices.raw(), halo_rows[i].values.raw(), A.get_block_size(), num_unique);
cudaCheckError();
if (diag)
{
// External diagonal is stored after the off-diagonal values; copy it separately.
hipLaunchKernelGGL(( export_matrix_diagonal) , dim3(num_blocks), dim3(128), 0, 0, Ac.values.raw() + Ac.row_offsets[Ac.get_num_rows()]*Ac.get_block_size(), Ac.get_block_size(), Ac.manager->B2L_maps[i].raw(), halo_rows[i].values.raw() + halo_rows[i].row_offsets[halo_rows[i].get_num_rows()]*Ac.get_block_size(), num_unique);
cudaCheckError();
}
}
Ac.manager->getComms()->exchange_matrix_halo(halo_rows, halo_btl, Ac);
//--------------------- renumbering/reordering matrix, integrating halo -----------------------------
Ac.set_initialized(0);
//number of owned rows
c_size = Ac.manager->halo_offsets[0];
f_size = A.manager->halo_offsets[0];
num_blocks = min(4096, (c_size + 511) / 512);
int rings = 1;
//
// Step 1 - calculate inverse renumbering (to global indices - base_index)
//
Ac.manager->inverse_renumbering.resize(c_size);
amgx::thrust::transform(renumbering.begin(),
renumbering.begin() + c_size,
amgx::thrust::constant_iterator<IndexType>(A.manager->base_index()),
Ac.manager->inverse_renumbering.begin(),
amgx::thrust::minus<IndexType>());
cudaCheckError();
//big renumbering table for going from global index to owned local index
IVector global_to_coarse_local(Ac.manager->index_range());
amgx::thrust::fill(global_to_coarse_local.begin(), global_to_coarse_local.begin() + Ac.manager->index_range(), -1);
cudaCheckError();
hipLaunchKernelGGL(( calc_gbl_renumbering) , dim3(num_blocks), dim3(512), 0, 0, Ac.manager->inverse_renumbering.raw(), global_to_coarse_local.raw(), c_size);
cudaCheckError();
Ac.manager->set_num_halo_rows(Ac.manager->halo_offsets[Ac.manager->halo_offsets.size() - 1] - c_size);
cudaCheckError();
//
// Step 2 - create big mapping table of all halo indices we received (this may use a little too much memory sum(fine nodes per neighbor)
//
amgx::thrust::host_vector<INDEX_TYPE> neighbor_rows(num_neighbors + 1);
int max_num_rows = 0;
for (int i = 0; i < num_neighbors; i++)
{
neighbor_rows[i] = halo_rows[i].manager->index_range();
max_num_rows = max_num_rows > halo_rows[i].get_num_rows() ? max_num_rows : halo_rows[i].get_num_rows();
}
amgx::thrust::exclusive_scan(neighbor_rows.begin(), neighbor_rows.end(), neighbor_rows.begin());
cudaCheckError();
int total_rows_of_neighbors = neighbor_rows[num_neighbors];
IVector halo_mapping(total_rows_of_neighbors);
amgx::thrust::fill(halo_mapping.begin(), halo_mapping.end(), -1);
cudaCheckError();
for (int ring = 0; ring < rings; ring++)
{
for (int i = 0; i < num_neighbors; i++)
{
int size = halo_btl[i].B2L_rings[0][ring + 1] - halo_btl[i].B2L_rings[0][ring];
int num_blocks = min(4096, (size + 127) / 128);
hipLaunchKernelGGL(( create_halo_mapping) , dim3(num_blocks), dim3(128), 0, 0, halo_mapping.raw() + neighbor_rows[i],
halo_btl[i].B2L_maps[0].raw() + halo_btl[i].B2L_rings[0][ring],
halo_btl[i].base_index(),
Ac.manager->halo_offsets[ring * num_neighbors + i], size);
}
}
cudaCheckError();
//
// Step 3 - renumber halo matrices and calculate row length (to eventually append to the big matrix)
//
INDEX_TYPE owned_nnz = Ac.row_offsets[c_size];
IVector neighbor_rows_d(num_neighbors + 1);
amgx::thrust::copy(neighbor_rows.begin(), neighbor_rows.end(), neighbor_rows_d.begin());
cudaCheckError();
//map column indices of my own matrix (the ones that point outward)
hipLaunchKernelGGL(( map_col_indices) , dim3(num_blocks), dim3(512), 0, 0, Ac.row_offsets.raw() + Ac.manager->num_interior_nodes(),
Ac.col_indices.raw(),
Ac.manager->halo_ranges.raw(),
halo_mapping.raw(),
neighbor_rows_d.raw(),
renumbering.raw(),
num_neighbors, c_size, c_size - Ac.manager->num_interior_nodes());
cudaCheckError();
IVector temp_row_len(max_num_rows);
for (int i = 0; i < num_neighbors; i++)
{
//map column indices of halo matrices
int size = halo_rows[i].get_num_rows();
int num_blocks = min(4096, (size + 127) / 128);
hipLaunchKernelGGL(( map_col_indices_and_count_rowlen) , dim3(num_blocks), dim3(128), 128 * sizeof(INDEX_TYPE), 0,
halo_rows[i].row_offsets.raw(),
halo_rows[i].col_indices.raw(),
temp_row_len.raw(),
global_to_coarse_local.raw(),
halo_mapping.raw(),
neighbor_rows_d.raw(),
Ac.manager->halo_ranges.raw(),
i,
Ac.manager->global_id(),
Ac.manager->base_index(),
Ac.manager->index_range(),
num_neighbors,
size);
for (int ring = 0; ring < rings; ring++)
{
amgx::thrust::copy(temp_row_len.begin() + halo_btl[i].B2L_rings[0][ring], temp_row_len.begin() + halo_btl[i].B2L_rings[0][ring + 1], Ac.row_offsets.begin() + Ac.manager->halo_offsets[ring * num_neighbors + i]);
}
}
cudaCheckError();
INDEX_TYPE old_nnz = Ac.row_offsets[Ac.row_offsets.size() - 1];
// Turn the appended per-row lengths into offsets, continuing from owned_nnz.
amgx::thrust::exclusive_scan(Ac.row_offsets.begin() + c_size, Ac.row_offsets.end(), Ac.row_offsets.begin() + c_size, owned_nnz);
cudaCheckError();
//
// Step 4 - consolidate column indices and values
//
int new_nnz = Ac.row_offsets[Ac.row_offsets.size() - 1];
Ac.col_indices.resize(new_nnz);
Ac.values.resize((new_nnz + 1 + diag * (Ac.row_offsets.size() - 2)) * A.get_block_size());
if (diag)
{
// Relocate the owned external-diagonal values to their new position
// after the enlarged value array.
MVector diags(c_size * Ac.get_block_size());
amgx::thrust::copy(Ac.values.begin() + old_nnz * Ac.get_block_size(),
Ac.values.begin() + old_nnz * Ac.get_block_size() + c_size * Ac.get_block_size(),
diags.begin());
amgx::thrust::copy(diags.begin(), diags.begin() + c_size * Ac.get_block_size(),
Ac.values.begin() + Ac.row_offsets[Ac.get_num_rows()]*Ac.get_block_size());
cudaCheckError();
}
int cumulative_num_rows = c_size;
for (int i = 0; i < num_neighbors; i++)
{
for (int ring = 0; ring < rings; ring++)
{
int num_rows = halo_btl[i].B2L_rings[0][ring + 1] - halo_btl[i].B2L_rings[0][ring];
int num_blocks = min(4096, (num_rows + 127) / 128);
hipLaunchKernelGGL(( reorder_whole_matrix) , dim3(num_blocks), dim3(128), 0, 0, halo_rows[i].row_offsets.raw() + halo_btl[i].B2L_rings[0][ring], halo_rows[i].col_indices.raw(), halo_rows[i].values.raw(), Ac.row_offsets.raw() + Ac.manager->halo_offsets[ring * num_neighbors + i], Ac.col_indices.raw(), Ac.values.raw(), Ac.get_block_size(), num_rows);
if (diag)
{
amgx::thrust::copy(halo_rows[i].values.begin() + (halo_rows[i].row_offsets[halo_rows[i].get_num_rows()] + halo_btl[i].B2L_rings[0][ring])*Ac.get_block_size(),
halo_rows[i].values.begin() + (halo_rows[i].row_offsets[halo_rows[i].get_num_rows()] + halo_btl[i].B2L_rings[0][ring + 1])*Ac.get_block_size(),
Ac.values.begin() + (Ac.row_offsets[Ac.get_num_rows()] + cumulative_num_rows)*Ac.get_block_size());
cumulative_num_rows += num_rows;
}
}
}
cudaCheckError();
Ac.set_num_cols(Ac.manager->halo_offsets[Ac.manager->halo_offsets.size() - 1]);
Ac.set_num_rows(Ac.get_num_cols());
Ac.set_num_nz(new_nnz);
Ac.delProps(COO);
Ac.set_initialized(1);
Ac.computeDiagonal();
}
}
// Prepare the distributed coarse matrix when only the diagonal is exchanged
// with neighbors (matrix_halo_exchange == 1): build the coarse->fine
// renumbering, send each neighbor the diagonals of my boundary rows, and
// write the received diagonals into the halo section of Ac.
template <class T_Config>
void Aggregation_AMG_Level_Base<T_Config>::prepareNextLevelMatrix_diag(const Matrix<TConfig> &A, Matrix<TConfig> &Ac)
{
if (A.is_matrix_singleGPU()) { return; }
int num_neighbors = A.manager->neighbors.size();
if (TConfig::memSpace == AMGX_host)
{
FatalError("Aggregation AMG Not implemented for host", AMGX_ERR_NOT_IMPLEMENTED);
}
else
{
int c_size = Ac.manager->halo_offsets[0];
int f_size = A.manager->halo_offsets[0];
int diag = Ac.hasProps(DIAG);
Ac.manager->inverse_renumbering.resize(c_size);
//get coarse -> fine renumbering
int num_blocks = min(4096, (c_size + 127) / 128);
hipLaunchKernelGGL(( coarse_to_global) , dim3(num_blocks), dim3(128), 0, 0, this->m_aggregates.raw(), this->m_aggregates_fine_idx.raw(), Ac.manager->inverse_renumbering.raw(), f_size, -1 * A.manager->base_index());
cudaCheckError();
Ac.manager->set_num_halo_rows(Ac.manager->halo_offsets[Ac.manager->halo_offsets.size() - 1] - c_size);
if (!diag) { Ac.computeDiagonal(); }
Ac.set_initialized(1);
// Gather my boundary-row diagonals into per-neighbor send buffers.
std::vector<MVector> diagonals(num_neighbors);
for (int i = 0; i < num_neighbors; i++)
{
// NOTE(review): the ring index here is B2L_rings.size() - 1 (the number
// of neighbors), not B2L_rings[i].size() - 1 -- looks suspicious; confirm
// the intent against the ring layout.
int size = Ac.manager->B2L_rings[i][Ac.manager->B2L_rings.size() - 1];
diagonals[i].resize(Ac.get_block_size()*size);
int num_blocks = min(4096, (size + 127) / 128);
hipLaunchKernelGGL(( write_diagonals) , dim3(num_blocks), dim3(128), 0, 0, Ac.values.raw(), Ac.diag.raw(), Ac.manager->B2L_maps[i].raw(), diagonals[i].raw(), Ac.get_block_size(), size);
}
cudaCheckError();
Ac.manager->getComms()->exchange_vectors(diagonals, Ac, this->tag * 100 + 10 + 2);
// Store received diagonals: external-diagonal matrices copy into the DIAG
// section directly; otherwise scatter through the diag index array.
for (int i = 0; i < num_neighbors; i++)
{
int size = Ac.manager->halo_offsets[i + 1] - Ac.manager->halo_offsets[i];
if (Ac.hasProps(DIAG)) { amgx::thrust::copy(diagonals[i].begin(), diagonals[i].begin() + Ac.get_block_size()*size, Ac.values.begin() + Ac.get_block_size() * (Ac.diagOffset() + Ac.manager->halo_offsets[i])); }
else
{
int num_blocks = min(4096, (size + 127) / 128);
hipLaunchKernelGGL(( write_diagonals_back) , dim3(num_blocks), dim3(128), 0, 0, Ac.values.raw(), Ac.diag.raw() + Ac.manager->halo_offsets[i], diagonals[i].raw(), Ac.get_block_size(), size);
}
}
cudaCheckError();
}
}
// Prepare the distributed coarse matrix with no halo exchange
// (matrix_halo_exchange == 0): only build the coarse->fine renumbering and
// bookkeeping; no matrix or diagonal data is sent to neighbors.
template <class T_Config>
void Aggregation_AMG_Level_Base<T_Config>::prepareNextLevelMatrix_none(const Matrix<TConfig> &A, Matrix<TConfig> &Ac)
{
if (A.is_matrix_singleGPU()) { return; }
int num_neighbors = A.manager->neighbors.size();
if (TConfig::memSpace == AMGX_host)
{
FatalError("Aggregation AMG Not implemented for host", AMGX_ERR_NOT_IMPLEMENTED);
}
else
{
int c_size = Ac.manager->halo_offsets[0];
int f_size = A.manager->halo_offsets[0];
int diag = Ac.hasProps(DIAG);
Ac.manager->inverse_renumbering.resize(c_size);
//get coarse -> fine renumbering
int num_blocks = min(4096, (c_size + 127) / 128);
hipLaunchKernelGGL(( coarse_to_global) , dim3(num_blocks), dim3(128), 0, 0, this->m_aggregates.raw(), this->m_aggregates_fine_idx.raw(), Ac.manager->inverse_renumbering.raw(), f_size, 0);
cudaCheckError();
Ac.manager->set_num_halo_rows(Ac.manager->halo_offsets[Ac.manager->halo_offsets.size() - 1] - c_size);
Ac.set_initialized(1);
// Inside-diagonal matrices need the diagonal index recomputed here.
if (!diag) { Ac.computeDiagonal(); }
}
}
// Dispatch to the coarse-matrix preparation routine matching the configured
// halo exchange strategy (0 = none, 1 = diagonal only, 2 = full rows).
template <class T_Config>
void Aggregation_AMG_Level_Base<T_Config>::prepareNextLevelMatrix(const Matrix<TConfig> &A, Matrix<TConfig> &Ac)
{
    switch (m_matrix_halo_exchange)
    {
        case 0:
            this->prepareNextLevelMatrix_none(A, Ac);
            break;

        case 1:
            this->prepareNextLevelMatrix_diag(A, Ac);
            break;

        case 2:
            this->prepareNextLevelMatrix_full(A, Ac);
            break;

        default:
            FatalError("Invalid Aggregation matrix_halo_exchange parameter", AMGX_ERR_NOT_IMPLEMENTED);
    }
}
// Accumulate halo row lengths into `output`. `work` is a row-offset style
// array describing `size` rows; for each non-empty row its nonzero count is
// added to output[row]. When diag == 0 one entry of the row is discounted
// (presumably the in-band diagonal slot — accounted for elsewhere).
__global__ void set_halo_rowlen(INDEX_TYPE *work, INDEX_TYPE *output, INDEX_TYPE size, INDEX_TYPE diag)
{
    // Grid-stride loop over the rows.
    for (int row = blockDim.x * blockIdx.x + threadIdx.x; row < size; row += blockDim.x * gridDim.x)
    {
        INDEX_TYPE len = work[row + 1] - work[row];

        if (len > 0)
        {
            output[row] += len - (1 - diag);
        }
    }
}
// Append received halo-row nonzeros (row_offsets/col_indices/values) into the
// merged CSR structure (new_row_offsets/new_col_indices/new_values).
// One thread handles one halo row (grid-stride loop over `size` rows):
// it locates the first free slot in the row (marked -1 in new_col_indices)
// and copies the received entries there, block by block (block_size values
// per nonzero).
// Diagonal handling depends on `diag`:
//   diag != 0 (diagonal stored outside the CSR values): the first received
//             nonzero of a non-empty row overwrites the external diagonal
//             value, stored past the nnz region at index
//             (new_row_offsets[size] + halo_offset + idx).
//   diag == 0 (diagonal in-band): the diagonal entry (col == halo_offset+idx)
//             is appended only if the row was previously empty (add_diag).
// NOTE(review): assumes new_col_indices rows were pre-filled with -1 for the
// slots reserved for the appended entries — TODO confirm against the caller.
template <typename T>
__global__ void append_halo_nz(INDEX_TYPE *row_offsets, INDEX_TYPE *new_row_offsets, INDEX_TYPE *col_indices, INDEX_TYPE *new_col_indices, T *values, T *new_values, INDEX_TYPE size, INDEX_TYPE diag, INDEX_TYPE halo_offset, INDEX_TYPE block_size)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
while (idx < size)
{
int add_diag = !diag;
if (!diag && new_col_indices[new_row_offsets[idx]] != -1) { add_diag = 0; } //if diag or there is already something in the row, then don't add diagonal nonzero (inside diag)
// Find the first free (-1) slot in the merged row; stays -1 if the row is full.
int append_offset = -1;
for (int i = new_row_offsets[idx]; i < new_row_offsets[idx + 1]; i++)
{
if (new_col_indices[i] == -1) {append_offset = i; break;}
}
// Walk the received row and copy its entries into the free slots.
for (int i = row_offsets[idx]; i < row_offsets[idx + 1]; i++)
{
if (diag && i == row_offsets[idx]) //if outside diag and this is the first nonzero in a non-empty row, overwrite diagonal value
{
for (int j = 0; j < block_size; j++)
{
new_values[(new_row_offsets[size] + halo_offset + idx)*block_size + j] = values[(row_offsets[size] + halo_offset + idx) * block_size + j];
}
}
int col_idx = col_indices[i];
// Sanity check: a row with entries to append must have had free slots reserved.
if (append_offset == -1 && (col_idx != halo_offset + idx)) {printf("ERROR: append offset is -1 but row has nonzeros in it old %d to %d new %d to %d\n", row_offsets[idx], row_offsets[idx + 1], new_row_offsets[idx], new_row_offsets[idx + 1]); append_offset = 0;}
// Copy off-diagonal entries always; the diagonal entry only when add_diag is set.
if (col_idx != halo_offset + idx || add_diag)
{
new_col_indices[append_offset] = col_idx;
for (int j = 0; j < block_size; j++)
{
new_values[append_offset * block_size + j] = values[i * block_size + j];
}
append_offset++;
}
}
idx += blockDim.x * gridDim.x;
}
}
// Build the coarse-level boundary-to-local (B2L) maps from the fine-level
// one-ring B2L maps: every fine boundary node is replaced by its aggregate
// index and duplicate aggregates are removed. The relative ordering of the
// retained entries is restored by sorting back on the saved fine indices.
// Results are written into in_coarse_B2L_maps, one map per neighbor.
template <class T_Config>
void Aggregation_AMG_Level_Base<T_Config>::createCoarseB2LMaps(std::vector<IVector> &in_coarse_B2L_maps)
{
Matrix<TConfig> &A = this->getA();
m_num_all_aggregates = m_num_aggregates;
int num_neighbors = A.manager->neighbors.size();
// Size the scratch vectors for the largest one-ring B2L map.
IndexType max_b2l = 0;
for (int i = 0; i < num_neighbors; i++ ) { max_b2l = max_b2l > A.manager->B2L_rings[i][1] ? max_b2l : A.manager->B2L_rings[i][1]; }
IVector B2L_aggregates(max_b2l);
IVector indices(max_b2l);
for (int i = 0; i < num_neighbors; i++ )
{
int size = A.manager->B2L_rings[i][1];
amgx::thrust::fill(B2L_aggregates.begin(), B2L_aggregates.begin() + size, 0);
amgx::thrust::sequence(indices.begin(), indices.begin() + size);
//substitute coarse aggregate indices for fine boundary nodes
amgx::thrust::copy(amgx::thrust::make_permutation_iterator(this->m_aggregates.begin(), A.manager->B2L_maps[i].begin()),
amgx::thrust::make_permutation_iterator(this->m_aggregates.begin(), A.manager->B2L_maps[i].begin() + size),
B2L_aggregates.begin());
//find the unique ones
amgx::thrust::sort_by_key(B2L_aggregates.begin(), B2L_aggregates.begin() + size, indices.begin());
IndexType num_unique = amgx::thrust::unique_by_key(B2L_aggregates.begin(), B2L_aggregates.begin() + size, indices.begin()).first - B2L_aggregates.begin();
in_coarse_B2L_maps[i].resize(num_unique);
//sort it back so we have the original ordering
amgx::thrust::sort_by_key(indices.begin(), indices.begin() + num_unique, B2L_aggregates.begin());
amgx::thrust::copy(B2L_aggregates.begin(), B2L_aggregates.begin() + num_unique, in_coarse_B2L_maps[i].begin());
}
cudaCheckError();
}
// Scatter the boundary aggregates into their compacted positions.
// `flags` holds an exclusive scan of the boundary markers, so flags[agg] is
// the output slot of aggregate `agg`; `maps` is the fine->aggregate mapping,
// `indices` the fine boundary node list of length `size`.
__global__ void populate_coarse_boundary(INDEX_TYPE *flags, INDEX_TYPE *indices, INDEX_TYPE *maps, INDEX_TYPE *output, INDEX_TYPE size)
{
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < size; i += blockDim.x * gridDim.x)
    {
        INDEX_TYPE agg = maps[indices[i]];
        output[flags[agg]] = agg;
    }
}
// Mark every aggregate that contains at least one fine boundary node:
// for each fine boundary node indices[i], set flags[maps[indices[i]]] = 1.
// Concurrent writes of the same value 1 to the same flag are benign.
__global__ void flag_coarse_boundary(INDEX_TYPE *flags, INDEX_TYPE *indices, INDEX_TYPE *maps, INDEX_TYPE size)
{
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < size; i += blockDim.x * gridDim.x)
    {
        flags[maps[indices[i]]] = 1;
    }
}
// Mark which entries of the [offset, offset + flags-length) index window are
// referenced by `indices`: flags[indices[i] - offset] = 1 for each of the
// `size` halo indices. Concurrent identical writes are benign.
__global__ void flag_halo_indices(INDEX_TYPE *flags, INDEX_TYPE *indices, INDEX_TYPE offset, INDEX_TYPE size)
{
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < size; i += blockDim.x * gridDim.x)
    {
        flags[indices[i] - offset] = 1;
    }
}
// Rewrite halo aggregate indices to their local compacted numbering.
// `flags` holds an exclusive scan of the flag array built by
// flag_halo_indices, so flags[indices[i] - offset] is the compacted index;
// `aggregates_offset` shifts it past the already-assigned local aggregates.
__global__ void apply_halo_aggregate_indices(INDEX_TYPE *flags, INDEX_TYPE *indices, INDEX_TYPE *output, INDEX_TYPE offset, INDEX_TYPE aggregates_offset, INDEX_TYPE size)
{
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < size; i += blockDim.x * gridDim.x)
    {
        output[i] = flags[indices[i] - offset] + aggregates_offset;
    }
}
// Renumbering the aggregates / communicating with neighbors
// Renumber the aggregates consistently with the neighbors and set up the
// coarse-level distributed bookkeeping: coarse B2L maps, interior/boundary
// renumbering, halo exchange of aggregate ids, and coarse halo offsets.
// On exit m_num_all_aggregates counts owned plus received halo aggregates.
template <class T_Config>
void Aggregation_AMG_Level_Base<T_Config>::setNeighborAggregates()
{
Matrix<TConfig> &A = this->getA();
Matrix<TConfig> &Ac = this->getNextLevel( MemorySpace( ) )->getA();
m_num_all_aggregates = m_num_aggregates;
/* WARNING: the matrix reordering always happens inside createRenumbering routine. There are three ways to get to this routine
1. matrix_upload_all -> uploadMatrix -> initializeUploadReorderAll -> reorder_matrix -> createRenumbering
2. read_system_distributed -> renumberMatrixOneRing -> reorder_matrix_owned -> createRenumbering
3. solver_setup -> ... -> AMG_Level::setup -> createCoarseMatrices -> setNeighborAggregates -> createRenumbering
If you are reading the renumbering from file you might need to add intercept code in if statement below,
otherwise this routine will exit before calling createRenumbering routine (in case of single or disjoint partitions).
*/
// Single-GPU runs need no neighbor bookkeeping.
if (this->getA().is_matrix_singleGPU()) { return; }
int num_neighbors = A.manager->neighbors.size();
//
// Step 0 - set up coarse matrix metadata
//
if (Ac.manager == NULL) { Ac.manager = new DistributedManager<T_Config>(); }
Ac.manager->resize(A.manager->neighbors.size(), 1);
Ac.manager->A = &Ac;
int f_size = A.get_num_rows();
// The coarse level inherits communicator, id, neighbors and global index
// layout from the fine level.
Ac.manager->setComms(A.manager->getComms());
Ac.manager->set_global_id(A.manager->global_id());
Ac.manager->neighbors = A.manager->neighbors;
Ac.manager->set_base_index(A.manager->base_index());
Ac.manager->halo_ranges = A.manager->halo_ranges;
Ac.manager->set_index_range(A.manager->index_range());
//-------------------------------------- Section 1 - renumbering -----------------------------------------------------------
//
// Step 1 - calculate coarse level B2L maps - any aggregate that has a fine boundary node, becomes a coarse boundary node
//
m_num_all_aggregates = m_num_aggregates;
int vec_size = m_num_aggregates + 1; //A.manager->num_boundary_nodes()+1;
IVector B2L_aggregates(vec_size);
for (int i = 0; i < A.manager->neighbors.size(); i++)
{
// Flag aggregates containing boundary nodes, scan to compact, then
// scatter the flagged aggregates into the coarse B2L map.
amgx::thrust::fill(B2L_aggregates.begin(), B2L_aggregates.begin() + vec_size, 0);
int size = A.manager->B2L_rings[i][1];
int block_size = 128;
int grid_size = ::min( 4096, ( size + block_size - 1 ) / block_size);
hipLaunchKernelGGL(( flag_coarse_boundary) , dim3(grid_size), dim3(block_size), 0, 0, B2L_aggregates.raw(), A.manager->B2L_maps[i].raw(), this->m_aggregates.raw(), size);
amgx::thrust::exclusive_scan(B2L_aggregates.begin(), B2L_aggregates.begin() + vec_size, B2L_aggregates.begin());
// After the exclusive scan the last element is the number of flagged aggregates.
(Ac.manager->B2L_maps)[i].resize(B2L_aggregates[vec_size - 1]);
hipLaunchKernelGGL(( populate_coarse_boundary) , dim3(grid_size), dim3(block_size), 0, 0, B2L_aggregates.raw(), A.manager->B2L_maps[i].raw(), this->m_aggregates.raw(), Ac.manager->B2L_maps[i].raw(), size);
}
cudaCheckError();
// Coarse level has a single B2L ring per neighbor.
for (int i = 0; i < num_neighbors; i++)
{
Ac.manager->B2L_rings[i].resize(2);
Ac.manager->B2L_rings[i][0] = 0;
Ac.manager->B2L_rings[i][1] = Ac.manager->B2L_maps[i].size();
}
DistributedArranger<T_Config> *prep = new DistributedArranger<T_Config>;
prep->initialize_B2L_maps_offsets(Ac, 1);
delete prep;
Ac.set_num_rows(m_num_aggregates);
IVector renumbering(m_num_aggregates + 1); /* +1 is actually not needed, it will be resized in createRenumbering */
Ac.manager->createRenumbering(renumbering);
//
// Step 2 - renumber aggregates, so boundary nodes will have higher index than interior ones (based on the renumbering we have been calculating)
//
/* WARNING: 1. Thrust scatter and gather routines seem more appropriate here, but they implicitly assume that the input
and output have certain size correlation, which is not matched by vectors in our case. The only remaining option
is to use make_permutation as is done below. Example of Thrust scatter and gather calls
IVector ttt(f_size,-1);
amgx::thrust::scatter(this->m_aggregates.begin(), this->m_aggregates.begin()+f_size, renumbering.begin(), ttt.begin());
amgx::thrust::gather(renumbering.begin(), renumbering.end(), this->m_aggregates.begin(), ttt.begin());
amgx::thrust::copy(ttt.begin(), ttt.end(), this->m_aggregates.begin());
2. The original thrust composite call is illegal because it uses the same array (m_aggregates) for input and output.
amgx::thrust::copy(amgx::thrust::make_permutation_iterator(renumbering.begin(), this->m_aggregates.begin()),
amgx::thrust::make_permutation_iterator(renumbering.begin(), this->m_aggregates.begin()+f_size),
this->m_aggregates.begin());
Although it somehow still works, it is much safer to use explicit temporary storage for the intermediate result.
*/
/* WARNING: must save unreordered aggregates for later use before reordering them. */
IVector unreordered_aggregates(this->m_aggregates);
/* WARNING: change Thrust call to explicitly use temporary storage for the intermediate result. The earlier version is illegal, but somehow still works. */
IVector ttt(f_size, -1);
amgx::thrust::copy(amgx::thrust::make_permutation_iterator(renumbering.begin(), this->m_aggregates.begin()),
amgx::thrust::make_permutation_iterator(renumbering.begin(), this->m_aggregates.begin() + f_size),
ttt.begin());
amgx::thrust::copy(ttt.begin(), ttt.end(), this->m_aggregates.begin());
cudaCheckError();
//we don't need renumbering anymore, it will be identity on the coarse level
//-------------------------------------- Section 2 - communication -----------------------------------------------------------
//
// Step 3 - populate aggregates_fine_idx, which stores for every fine node the original global index of the aggregate (which is lowest global index of nodes aggregated together)
//
//
// These are different when we do /don't do matrix halo exchanges - when we do we need global indices to match nodes,
// and in this case Ac after computeA will not have the same ordering of halo nodes as after prepareNextLevel_full.
// However when we do not do matrix halo exchange we are only interested in the ordering of halo nodes on the coarse level,
// and we can get that by exchanging the (already renumbered) aggregates vector.
//
if (m_matrix_halo_exchange == 2)
{
//Find original global indices of nodes that have the minimum id in the aggregates.
amgx::thrust::copy(amgx::thrust::make_permutation_iterator(A.manager->inverse_renumbering.begin(), this->m_aggregates_fine_idx.begin()),
amgx::thrust::make_permutation_iterator(A.manager->inverse_renumbering.begin(), this->m_aggregates_fine_idx.begin() + f_size),
this->m_aggregates_fine_idx.begin());
// Shift local indices to global by adding the partition's base index.
amgx::thrust::transform(this->m_aggregates_fine_idx.begin(),
this->m_aggregates_fine_idx.begin() + f_size,
amgx::thrust::constant_iterator<IndexType>(A.manager->base_index()),
this->m_aggregates_fine_idx.begin(),
amgx::thrust::plus<IndexType>());
//communicate
this->m_aggregates_fine_idx.set_block_dimx(1);
this->m_aggregates_fine_idx.set_block_dimy(1);
m_aggregates_fine_idx.dirtybit = 1;
A.manager->exchange_halo(m_aggregates_fine_idx, this->tag * 100 + 1 * 10 + 0);
}
else
{
//communicate
this->m_aggregates.set_block_dimx(1);
this->m_aggregates.set_block_dimy(1);
m_aggregates.dirtybit = 1;
/* WARNING: you should exchange unreordered aggregates, and append them to your own reordered aggregates, to conform to assumptions made by distributed_manager. */
//A.manager->exchange_halo(m_aggregates, this->tag*100+1*10+0); //wrong
A.manager->exchange_halo(unreordered_aggregates, this->tag * 100 + 1 * 10 + 0);
// Keep own (reordered) aggregates, append the received halo part only.
amgx::thrust::copy(unreordered_aggregates.begin() + f_size, unreordered_aggregates.end(), this->m_aggregates.begin() + f_size);
}
cudaCheckError();
//
// Step 4 - consolidate neighbors' aggregates into own list to be able to perform Galerkin product with the n-ring halo
//
IVector &exchanged_aggregates = m_matrix_halo_exchange == 2 ? this->m_aggregates_fine_idx : this->m_aggregates;
// Range of aggregate ids received in the halo section.
int min_index = amgx::thrust::reduce(exchanged_aggregates.begin() + A.manager->halo_offsets[0], exchanged_aggregates.begin() + A.manager->halo_offsets[num_neighbors], (int)0xFFFFFFF, amgx::thrust::minimum<int>());
int max_index = amgx::thrust::reduce(exchanged_aggregates.begin() + A.manager->halo_offsets[0], exchanged_aggregates.begin() + A.manager->halo_offsets[num_neighbors], (int)0, amgx::thrust::maximum<int>());
cudaCheckError();
int s_size = max_index - min_index + 2;
IVector scratch(s_size);
for (int i = 0; i < num_neighbors; i++)
{
int size = A.manager->halo_offsets[i + 1] - A.manager->halo_offsets[i];
//Could also use local minimums to perform the same operation. The results are the same.
//int min_local = amgx::thrust::reduce(exchanged_aggregates.begin()+A.manager->halo_offsets[i], exchanged_aggregates.begin()+A.manager->halo_offsets[i+1], (int)0xFFFFFFF, amgx::thrust::minimum<int>());
// Flag + scan + apply: compact this neighbor's distinct halo aggregates and
// assign them local indices right after the current m_num_all_aggregates.
amgx::thrust::fill(scratch.begin(), scratch.begin() + s_size, 0);
int block_size = 128;
int grid_size = ::min( 4096, ( size + block_size - 1 ) / block_size);
hipLaunchKernelGGL(( flag_halo_indices) , dim3(grid_size), dim3(block_size), 0, 0, scratch.raw(), exchanged_aggregates.raw() + A.manager->halo_offsets[i], min_index /*min_local*/, size);
amgx::thrust::exclusive_scan(scratch.begin(), scratch.begin() + s_size, scratch.begin());
hipLaunchKernelGGL(( apply_halo_aggregate_indices) , dim3(grid_size), dim3(block_size), 0, 0, scratch.raw(), exchanged_aggregates.raw() + A.manager->halo_offsets[i], this->m_aggregates.raw() + A.manager->halo_offsets[i], min_index /*min_local*/, m_num_all_aggregates, size);
Ac.manager->halo_offsets[i] = m_num_all_aggregates;
// scratch[s_size - 1] holds the count of distinct aggregates for this neighbor.
m_num_all_aggregates += scratch[s_size - 1];
}
cudaCheckError();
Ac.manager->halo_offsets[num_neighbors] = m_num_all_aggregates;
}
//TODO: The consolidate and unconsolidate parts could be made more efficient by only sending the
// nonzero values
// Gather the pieces of vector x from the partitions being merged onto their
// root: the root receives the interior (tag 10000+part) and boundary
// (tag 20000+part) sections from every merged partition; a non-root sends
// its own two sections to its destination partition. All partitions being
// consolidated are assumed to use the same vector layout (see TODO above).
template <class T_Config>
void Aggregation_AMG_Level_Base<T_Config>::consolidateVector(VVector &x)
{
    Matrix<TConfig> &A = this->getA();
    int my_id = A.manager->global_id();
    int bsize = x.get_block_size();

    if (A.manager->isRootPartition())
    {
        INDEX_TYPE num_parts = A.manager->getNumPartsToConsolidate();

        for (int i = 0; i < num_parts; i++)
        {
            int current_part = A.manager->getPartsToConsolidate()[i];

            // Own data is already in place; x has been sized for all parts.
            if (current_part == my_id) { continue; }

            // Interior section of partition i.
            A.manager->getComms()->recv_vector(x, current_part, 10000 + current_part, bsize * A.manager->getConsolidationArrayOffsets()[i], bsize * (A.manager->getConsolidationArrayOffsets()[i + 1] - A.manager->getConsolidationArrayOffsets()[i]));
            // Boundary section of partition i (offsets stored after the interiors).
            A.manager->getComms()->recv_vector(x, current_part, 20000 + current_part, bsize * A.manager->getConsolidationArrayOffsets()[num_parts + i], bsize * (A.manager->getConsolidationArrayOffsets()[num_parts + i + 1] - A.manager->getConsolidationArrayOffsets()[num_parts + i]));
        }
    }
    else
    {
        int my_destination_part = A.manager->getMyDestinationPartition();
        int i_off, i_size, b_off, b_size;
        A.manager->getConsolidationOffsets(&i_off, &i_size, &b_off, &b_size);
        // Send own interior and boundary sections to the root partition.
        A.manager->getComms()->send_vector_async(x, my_destination_part, 10000 + my_id, i_off * bsize, i_size * bsize);
        A.manager->getComms()->send_vector_async(x, my_destination_part, 20000 + my_id, b_off * bsize, b_size * bsize);
    }
}
//TODO: The consolidate and unconsolidate parts could be made more efficient by only sending the
// nonzero values
// Inverse of consolidateVector: scatter the consolidated vector x back from
// the root to the merged partitions. The root sends each partition its
// interior (tag 30000+part) and boundary (tag 40000+part) sections; a
// non-root receives its own two sections from its destination partition.
template <class T_Config>
void Aggregation_AMG_Level_Base<T_Config>::unconsolidateVector(VVector &x)
{
    Matrix<TConfig> &A = this->getA();
    int my_id = A.manager->global_id();
    int bsize = x.get_block_size();

    if (A.manager->isRootPartition())
    {
        INDEX_TYPE num_parts = A.manager->getNumPartsToConsolidate();

        for (int i = 0; i < num_parts; i++)
        {
            int current_part = A.manager->getPartsToConsolidate()[i];

            // Own data stays where it is; x has been sized for all parts.
            if (current_part == my_id) { continue; }

            // Interior section of partition i.
            A.manager->getComms()->send_vector_async(x, current_part, 30000 + current_part, bsize * A.manager->getConsolidationArrayOffsets()[i], bsize * (A.manager->getConsolidationArrayOffsets()[i + 1] - A.manager->getConsolidationArrayOffsets()[i]));
            // Boundary section of partition i (offsets stored after the interiors).
            A.manager->getComms()->send_vector_async(x, current_part, 40000 + current_part, bsize * A.manager->getConsolidationArrayOffsets()[num_parts + i], bsize * (A.manager->getConsolidationArrayOffsets()[num_parts + i + 1] - A.manager->getConsolidationArrayOffsets()[num_parts + i]));
        }
    }
    else
    {
        int my_destination_part = A.manager->getMyDestinationPartition();
        // Receive own interior and boundary sections back from the root.
        int i_off, i_size, b_off, b_size;
        A.manager->getConsolidationOffsets(&i_off, &i_size, &b_off, &b_size);
        A.manager->getComms()->recv_vector(x, my_destination_part, 30000 + my_id, i_off * bsize, i_size * bsize);
        A.manager->getComms()->recv_vector(x, my_destination_part, 40000 + my_id, b_off * bsize, b_size * bsize);
    }
}
// Run the selector to compute the aggregates (the implicit prolongation)
// for the current level's matrix and publish the aggregate count.
template <class T_Config>
void Aggregation_AMG_Level_Base<T_Config>::createCoarseVertices()
{
    // Compute the fine-node -> aggregate assignment and the aggregate count.
    this->m_selector->setAggregates(this->getA(), this->m_aggregates, this->m_aggregates_fine_idx, this->m_num_aggregates);

    if (this->m_print_aggregation_info)
    {
        this->m_selector->printAggregationInfo(this->m_aggregates, this->m_aggregates_fine_idx, this->m_num_aggregates);
    }

    // Expose the number of aggregates as a matrix parameter for downstream code.
    this->getA().template setParameter< int > ("aggregates_num", this->m_num_aggregates);
}
// Creating the next level
// Build the next (coarse) level matrix Ac from the current matrix A:
// bookkeeping (renumbering or consolidation), restriction operator,
// Galerkin product, and per-path post-processing of Ac.
template <class T_Config>
void Aggregation_AMG_Level_Base<T_Config>::createCoarseMatrices()
{
Matrix<TConfig> &A = this->getA();
Matrix<TConfig> &Ac = this->getNextLevel( MemorySpace( ) )->getA();
/* WARNING: do not recompute prolongation (P) and restriction (R) when you
are reusing the level structure (structure_reuse_levels > 0).
Notice that in aggregation path, prolongation P is implicit,
and is used through the aggregates array. */
bool const consolidation_level = !A.is_matrix_singleGPU() && this->isConsolidationLevel();
// bookkeeping for the coarse grid: renumber aggregates,
// if consolidation compute consolidated halo-offsets, etc
if (!this->isReuseLevel())
{
if (consolidation_level)
{
// Consolidation-path steps 1-9
this->consolidationBookKeeping();
}
else
{
this->setNeighborAggregates();
}
}
this->getA().setView(ALL);
// Compute restriction operator
// TODO: computing the restriction operator could be merged with the selector to save some work
// If we reuse the level we keep the previous restriction operator
if (this->isReuseLevel() == false)
{
computeRestrictionOperator();
}
// Galerkin product: Ac = R * A * P (P implicit via the aggregates array).
Ac.set_initialized(0);
Ac.copyAuxData(&A);
this->m_coarseAGenerator->computeAOperator(A, Ac, this->m_aggregates, this->m_R_row_offsets, this->m_R_column_indices, this->m_num_all_aggregates);
Ac.setColsReorderedByColor(false);
Ac.setView(FULL);
if (consolidation_level)
{
// Consolidation-path Steps 11-12, send matrices to root, consolidate, final bookkeeping
this->consolidateCoarseGridMatrix();
}
else
{
this->prepareNextLevelMatrix(A, Ac);
}
// Restrict the views back to owned rows for the solve phase.
A.setView(OWNED);
Ac.setView(OWNED);
this->m_next_level_size = this->m_num_all_aggregates * Ac.get_block_dimy();
if (this->m_print_aggregation_info)
{
// Optional diagnostic: report the quality of the aggregation.
MatrixAnalysis<TConfig> ana(&Ac);
ana.aggregatesQuality2(this->m_aggregates, this->m_num_aggregates, A);
}
}
template <class T_Config>
void Aggregation_AMG_Level_Base<T_Config>::consolidationBookKeeping()
{
Matrix<TConfig> &A = this->getA();
Matrix<TConfig> &Ac = this->getNextLevel( MemorySpace( ) )->getA();
int num_parts, num_fine_neighbors, my_id;
if (!A.is_matrix_singleGPU())
{
num_parts = A.manager->getComms()->get_num_partitions();
num_fine_neighbors = A.manager->neighbors.size();
my_id = A.manager->global_id();
}
else
{
num_parts = 1;
num_fine_neighbors = 0;
my_id = 0;
}
// ----------------------------------------------------
// Consolidate multiple fine matrices into one coarse matrix
// ----------------------------------------------------
// ----------------
// Step 1
// Decide which partitions should be merged together, store in destination_partitions vector
// ---------------
IVector_h &destination_part = A.manager->getDestinationPartitions();
int my_destination_part = A.manager->getMyDestinationPartition();
if (my_destination_part >= num_parts)
{
FatalError("During consolidation, sending data to partition that doesn't exist", AMGX_ERR_NOT_IMPLEMENTED);
}
// Create mapping from coarse partition indices (ranks on the coarse consolidated level) to partition indices on the fine level (ranks on the fine level)
IVector_h coarse_part_to_fine_part = destination_part;
thrust::sort(coarse_part_to_fine_part.begin(), coarse_part_to_fine_part.end());
cudaCheckError();
coarse_part_to_fine_part.erase(thrust::unique(coarse_part_to_fine_part.begin(), coarse_part_to_fine_part.end()), coarse_part_to_fine_part.end());
cudaCheckError();
//Then, the number of coarse partitions is simply the size of this vector
int num_coarse_partitions = coarse_part_to_fine_part.size();
// Create mapping from fine partition indices to coarse partition indices, with fine partitions that are merging together having the same coarse indices
IVector_h fine_part_to_coarse_part(num_parts);
thrust::lower_bound(coarse_part_to_fine_part.begin(), coarse_part_to_fine_part.end(), destination_part.begin(), destination_part.end(), fine_part_to_coarse_part.begin());
cudaCheckError();
// Create mapping from this specific partition's neighbors to consolidated coarse neighbors, but using their fine index (aka. destination partition indices for my neighbors)
IVector_h fine_neigh_to_fine_part;
A.manager->createNeighToDestPartMap(fine_neigh_to_fine_part, A.manager->neighbors, destination_part, num_fine_neighbors);
// Create mapping from consolidated coarse neighbors to fine partition indices (even if the current partition is not going to be a root)
IVector_h coarse_neigh_to_fine_part;
int num_coarse_neighbors;
A.manager->createConsolidatedNeighToPartMap(coarse_neigh_to_fine_part, fine_neigh_to_fine_part, my_destination_part, destination_part, num_coarse_neighbors);
// Create mapping from fine neighbors to coarse neighbors, with fine neighbors this partition is merging with labeled with -1
IVector_h fine_neigh_to_coarse_neigh;
A.manager->createNeighToConsNeigh(fine_neigh_to_coarse_neigh, coarse_neigh_to_fine_part, fine_neigh_to_fine_part, my_destination_part, num_fine_neighbors);
/*
EXAMPLE
Take the following partition graph (that describes connections between partitions, vertices are the partitions themselves), this is the same graph that is used in the setup example
number of partitions num_parts=12
CSR row_offsets [0 4 8 13 21 25 32 36 41 46 50 57 61]
CSR col_indices [0 1 3 8
0 1 2 3
1 2 3 4 5
0 1 2 3 4 5 8 10
2 4 5 6
2 3 4 5 6 7 10
4 5 6 7
5 6 7 9 10
0 3 8 10 11
7 9 10 11
3 5 7 8 9 10 11
8 9 10 11]
destination_part = [0 0 0 0 4 4 4 4 8 8 8 8]
coarse_part_to_fine_part = [0 4 8] num_coarse_partitions = 3
fine_part_to_coarse_part = [0 0 0 0 1 1 1 1 2 2 2 2]
original neighbor lists correspond to the rows of the matrix, minus the diagonal elements: (part 0)[1 3 8] (part 3)[0 1 2 4 5 8 10] (part 10)[3 5 7 8 9 11]
fine_neigh_to_fine_part (part 0)[0 0 2] (part 3)[0 0 0 0 1 2 2] (part 10)[0 1 1 2 2 2]
coarse_neigh_to_fine_part (part 0)[8] (part 3)[4 8] (part 10)[0 4]
fine_neigh_to_coarse_neigh (part 0)[-1 -1 0] (part 3)[-1 -1 -1 0 0 1 1] (part 10)[0 1 1 -1 -1 -1]
*/
// --------------------------
// Step 2
// Create coarse B2L_maps, by mapping fine B2L maps to coarse indices using this->m_aggregates and eliminating duplicates
// --------------------------
std::vector<IVector> coarse_B2L_maps(num_fine_neighbors);
m_num_all_aggregates = m_num_aggregates;
int num_neighbors_temp = A.manager->neighbors.size();
int num_rings = A.manager->B2L_rings[0].size() - 1;
if (num_rings != 1)
{
FatalError("num_rings > 1 not supported in consolidation\n", AMGX_ERR_NOT_IMPLEMENTED);
}
IndexType max_b2l = 0;
for (int i = 0; i < num_neighbors_temp; i++ ) { max_b2l = max_b2l > A.manager->B2L_rings[i][1] ? max_b2l : A.manager->B2L_rings[i][1]; }
IVector B2L_aggregates(max_b2l);
IVector indices(max_b2l);
//TODO: use the algorithm from setNeighborAggregates()
for (int i = 0; i < num_neighbors_temp; i++ )
{
int size = A.manager->B2L_rings[i][1];
thrust::fill(B2L_aggregates.begin(), B2L_aggregates.begin() + size, 0);
thrust::sequence(indices.begin(), indices.begin() + size);
//substitute coarse aggregate indices for fine boundary nodes
thrust::copy(thrust::make_permutation_iterator(this->m_aggregates.begin(), A.manager->B2L_maps[i].begin()),
thrust::make_permutation_iterator(this->m_aggregates.begin(), A.manager->B2L_maps[i].begin() + size),
B2L_aggregates.begin());
//find the unique ones
thrust::sort_by_key(B2L_aggregates.begin(), B2L_aggregates.begin() + size, indices.begin());
IndexType num_unique = thrust::unique_by_key(B2L_aggregates.begin(), B2L_aggregates.begin() + size, indices.begin()).first - B2L_aggregates.begin();
coarse_B2L_maps[i].resize(num_unique);
//sort it back so we have the original ordering
thrust::sort_by_key(indices.begin(), indices.begin() + num_unique, B2L_aggregates.begin());
thrust::copy(B2L_aggregates.begin(), B2L_aggregates.begin() + num_unique, coarse_B2L_maps[i].begin());
}
cudaCheckError();
/*
* EXAMPLE
say, partition 3 has the following coarse B2L_maps:
neighbors [0 1 2 4 5 8 10]
B2L_maps[0(=0)] = [6 7 8]
B2L_maps[1(=1)] = [8 9 10]
B2L_maps[2(=2)] = [10 11 12 13]
B2L_maps[3(=4)] = [13 14 15]
B2L_maps[4(=5)] = [15 16 17]
B2L_maps[5(=8)] = [6 18 19]
B2L_maps[6(=10)] = [17 20 19]
*/
// ---------------------------------------------------
// Step 3
// create new B2L maps for each merged destination neighbor and drop B2L maps to neighbors we are merging with
// ---------------------------------------------------
std::vector<IVector> dest_coarse_B2L_maps;
A.manager->consolidateB2Lmaps(dest_coarse_B2L_maps, coarse_B2L_maps, fine_neigh_to_coarse_neigh, num_coarse_neighbors, num_fine_neighbors);
/*
* EXAMPLE
Then, merging the coarse B2L maps on partition 3, we get:
coarse_neigh_to_fine_part [4 8]
dest_coarse_B2L_maps[0(=4)] = [13 14 15 16 17]
dest_coarse_B2L_maps[1(=8)] = [6 17 18 19 20]
*/
// -----------------------
// Step 4
// Create interior-boundary renumbering of aggregates according to dest_coarse_B2L_maps
// -----------------------
// Now renumber the aggregates with all interior aggregates first, boundary aggregates second
int num_interior_aggregates; //returned by createAggregatesRenumbering
int num_boundary_aggregates; //returned by createAggregatesRenumbering
IVector renumbering; //returned by createAggregatesRenumbering
// Following calls create renumbering array and modifies B2L_maps
A.manager->createAggregatesRenumbering(renumbering, dest_coarse_B2L_maps, this->m_num_aggregates, num_coarse_neighbors, num_interior_aggregates, num_boundary_aggregates, num_rings);
/*
* EXAMPLE
Partition 3 will get a renumbering vector of size 21, for the 21 owned agggregates:
[0 1 2 3 4 5 17 6 7 8 9 10 11 12 13 14 15 16 18 19 20]
num_interior_aggregates = 12
num_boundary_aggregates = 9
*/
// -------------------------------------------------
// Step 5
// Determine whether root partition, make list of partitions merged into one
// ------------------------------------------------
// Check if I'm root partition and how fine partitions (including myself) are merging into me
// bool is_root_partition = false;
bool &is_root_partition = this->m_is_root_partition;
is_root_partition = false;
int num_fine_parts_to_consolidate = 0;
// IVector_h fine_parts_to_consolidate;
IVector_h &fine_parts_to_consolidate = this->m_fine_parts_to_consolidate;
for (int i = 0; i < num_parts; i++)
{
if (destination_part[i] == my_id)
{
is_root_partition = true;
num_fine_parts_to_consolidate++;
}
}
fine_parts_to_consolidate.resize(num_fine_parts_to_consolidate);
int count = 0;
for (int i = 0; i < num_parts; i++)
{
if (destination_part[i] == my_id)
{
fine_parts_to_consolidate[count] = i;
count++;
}
}
//save this information as state, as this will also be required during solve for restriction/prolongation
A.manager->setIsRootPartition(is_root_partition);
A.manager->setNumPartsToConsolidate(num_fine_parts_to_consolidate);
A.manager->setPartsToConsolidate(fine_parts_to_consolidate);
// Create a new distributed communicator for coarse levels that only contains active partitions
if (Ac.manager == NULL)
{
Ac.manager = new DistributedManager<TConfig>();
}
Ac.manager->setComms(A.manager->getComms()->Clone());
Ac.manager->getComms()->createSubComm(coarse_part_to_fine_part, is_root_partition);
/*
* EXAMPLE
isRootPartition is true for partitions 0,4,8 false for others
num_fine_parts_to_consolidate = 4 for partitions 0,4,8
fine_parts_to_consolidate (part 0)[0 1 2 3] (part 4)[4 5 6 7] (part 8)[8 9 10 11]
*/
// ----------------------
// Step 6
// Compute number of interior, boundary and total nodes in the consolidated coarse matrix. Create offsets so that partitions being merged together will have their aggregate indices ordered like this:
// [num_interior(fine_parts_to_consolidate[0]] num_interior(fine_parts_to_consolidate[1]] ... num_interior(fine_parts_to_consolidate[num_fine_parts_to_consolidate]
// num_boundary(fine_parts_to_consolidate[0]] num_boundary(fine_parts_to_consolidate[1]] ... num_boundary(fine_parts_to_consolidate[num_fine_parts_to_consolidate] ]
// ----------------------
// Gather to get number of interior/boundary aggregates of neighbors I will merge with
// std::vector<IVector_h> vertex_counts;
std::vector<IVector_h> &vertex_counts = this->m_vertex_counts;
// int interior_offset, boundary_offset, total_interior_rows_in_merged, total_boundary_rows_in_merged;
int interior_offset, boundary_offset;
int &total_interior_rows_in_merged = this->m_total_interior_rows_in_merged;
int &total_boundary_rows_in_merged = this->m_total_boundary_rows_in_merged;
int total_rows_in_merged;
//Computes these offsets on the root, sends them back
A.manager->computeConsolidatedOffsets(my_id, my_destination_part, is_root_partition, num_interior_aggregates, num_boundary_aggregates, vertex_counts, fine_parts_to_consolidate, num_fine_parts_to_consolidate, interior_offset, boundary_offset, total_interior_rows_in_merged, total_boundary_rows_in_merged, total_rows_in_merged, A.manager->getComms());
//Partitions save these offsets, as it will be required during solve restriction/prolongation
A.manager->setConsolidationOffsets(interior_offset, num_interior_aggregates, boundary_offset + num_interior_aggregates, num_boundary_aggregates);
/*
* EXAMPLE
For root partition 0, say we have the following interior/boundary counts (note that partition 1 has 0 boundary, as it is only connected to partitions it is merging with)
part 0 - interior: 10 boundary 3
part 1 - interior: 18
part 2 - interior: 10 boundary 16
part 3 - interior: 12 boundary 9
interior_offset for partitions 0,1,2,3: 0 10 28 38 (total_interior_rows_in_merged 50)
boundary_offset for partitions 0,1,2,3: 0 3 3 19 (total_boundary_rows_in_merged 28)
*/
// ----------------------
// Step 7
// Each partition renumbers its aggregates and dest_coarse_B2L_maps using offsets computed in Step 6 and permutation in Step 4
// ----------------------
// Kernel to renumber the aggregates
int block_size = 128;
int grid_size = ::min( 4096, ( A.manager->halo_offsets[0] + block_size - 1 ) / block_size);
hipLaunchKernelGGL(( renumberAggregatesKernel) , dim3(grid_size), dim3(block_size) , 0, 0, renumbering.raw(), interior_offset, boundary_offset, this->m_aggregates.raw(), A.manager->halo_offsets[0], num_interior_aggregates, renumbering.size());
cudaCheckError();
for (int i = 0; i < num_coarse_neighbors; i++)
{
thrust::transform(dest_coarse_B2L_maps[i].begin(),
dest_coarse_B2L_maps[i].end(),
thrust::constant_iterator<IndexType>(boundary_offset),
dest_coarse_B2L_maps[i].begin(),
thrust::plus<IndexType>());
}
cudaCheckError();
/*
* EXAMPLE
Partition 3 had a renumbering vector:
[0 1 2 3 4 5 17 6 7 8 9 10 11 12 13 14 15 16 18 19 20]
which is now adjusted to account for the consolidated coarse matrices' indices:
[38 39 40 41 42 43 74 44 45 46 47 48 49 69 70 71 72 73 75 76 77]
And the dest_coarse_B2L_maps, which looked like:
dest_coarse_B2L_maps[0(=4)] = [13 14 15 16 17]
dest_coarse_B2L_maps[1(=8)] = [6 17 18 19 20]
is now:
dest_coarse_B2L_maps[0(=4)] = [69 70 71 72 73]
dest_coarse_B2L_maps[1(=8)] = [74 73 75 76 77]
*/
// -------------------------------------------------
// Step 8
// Send dest_coarse_B2L_maps to root partitions
// ------------------------------------------------
// Each fine partition sends to its root the number of coarse neighbors it has, their ids, and the number of boundary nodes for each coarse neighbor
IVector_h num_bdy_per_coarse_neigh(num_coarse_neighbors);
for (int i = 0; i < num_coarse_neighbors; i++)
{
num_bdy_per_coarse_neigh[i] = dest_coarse_B2L_maps[i].size();
}
IVector_h consolidated_coarse_neigh_to_fine_part; //consolidated list of coarse neighbors for the root partition, using fine partition indices
int num_consolidated_neighbors = 0;
// std::vector<IVector> consolidated_B2L_maps; //concatenates dest_coarse_B2L_maps received from partitions that are merging into the same root and pointing to the same destination coarse neighbor
std::vector<IVector> &consolidated_B2L_maps = this->m_consolidated_B2L_maps;
A.manager->consolidateB2LmapsOnRoot(num_consolidated_neighbors, consolidated_B2L_maps, consolidated_coarse_neigh_to_fine_part, dest_coarse_B2L_maps, coarse_neigh_to_fine_part, num_bdy_per_coarse_neigh, fine_parts_to_consolidate, num_fine_parts_to_consolidate, my_id, my_destination_part, is_root_partition, num_coarse_neighbors, A.manager->getComms());
//
// Step 9 - figuring out halo aggregate IDs
//
//Now we need to update halo aggregate IDs - this is just a halo exchange on this->m_aggregates between partitions
//that are being merged together, but we need to send other halos to the root to come up with the halo renumbering
//TODO: separate transactions, send "real halo" to the root nodes (coarse neighbors) immediately
//Step 9.1: takes care of synchronizing the aggregate IDs between partitions we are merging together and got consistent halo aggregate IDs for neighbor we are not merging with (which are going to be sent to the root in 9.2)
A.manager->exchange_halo(this->m_aggregates, 6666);
/*
* EXAMPLE 2
This example is independent from the previous ones.
Say partition 0 and 1 are merging (into 0) partition 0 is neighbors with 1,2,3 and partition 1 is neighbors with 0,3,4
Partitions 3 and 4 are merging (into partition 3) and partition 2 is not merging with anyone.
This example details the renumbering of halo indices on partition 0 and partition 1.
After the exchange halo, we have:
this->m_aggregates on partition 0:
[(fine interior nodes) (fine boundary nodes) (fine halo from part 1) (fine halo from part 2) (fine halo from part 3)]
[(fine interior nodes) (fine boundary nodes) (13 13 15) (12 15 17) (14 16 18)]
aggregates on partition 1:
[(fine interior nodes) (fine boundary nodes) (fine halo from part 0) (fine halo from part 3) (fine halo from part 4)]
[(fine interior nodes) (fine boundary nodes) (14 16 17) (18 19 19) (15 15 17)]
indices in (fine halo from part 0) and (fine halo from part 1) actually contain interior aggregate indices (if they are not connected to partitions 2,3 or 4), because the boundary is disappearing there.
Indices in halo regions contain remote-local indices.
This example is used throughout consolidateAndRenumberHalos
*/
//Step 9.2 - 9.5
// IVector_h halo_offsets(num_consolidated_neighbors + 1, 0);
IVector_h &halo_offsets = this->m_consolidated_halo_offsets;
halo_offsets = IVector_h(num_consolidated_neighbors + 1, 0);
A.manager->consolidateAndRenumberHalos(this->m_aggregates, A.manager->halo_offsets, halo_offsets, A.manager->neighbors, num_fine_neighbors, consolidated_coarse_neigh_to_fine_part, num_consolidated_neighbors, destination_part, my_destination_part, is_root_partition, fine_parts_to_consolidate, num_fine_parts_to_consolidate, num_parts, my_id, total_rows_in_merged, this->m_num_all_aggregates, A.manager->getComms());
if (is_root_partition)
{
for (int i = 0; i < consolidated_B2L_maps.size(); i++)
{
thrust::sort(consolidated_B2L_maps[i].begin(), consolidated_B2L_maps[i].end());
}
this->m_consolidated_neighbors.resize(num_consolidated_neighbors);
for (int i = 0; i < num_consolidated_neighbors; i++)
{
this->m_consolidated_neighbors[i] = fine_part_to_coarse_part[consolidated_coarse_neigh_to_fine_part[i]];
}
cudaCheckError();
}
}
// Consolidates the coarse-grid matrix Ac across the partitions being merged.
// Non-root partitions asynchronously ship their local Ac (row offsets, column
// indices, values) to their root partition; each root receives those pieces
// and stitches them into one matrix whose rows are ordered
// [interior rows of all merged parts | boundary rows of all merged parts | halo rows],
// then finalizes Ac's DistributedManager. Relies on bookkeeping prepared by
// the preceding consolidation step (m_vertex_counts, m_fine_parts_to_consolidate,
// consolidated halo offsets / B2L maps).
template <class T_Config>
void Aggregation_AMG_Level_Base<T_Config>::consolidateCoarseGridMatrix()
{
    Matrix<TConfig> &A = this->getA();
    Matrix<TConfig> &Ac = this->getNextLevel( MemorySpace( ) )->getA();
    int num_parts, num_fine_neighbors, my_id;
    num_parts = A.manager->getComms()->get_num_partitions();
    num_fine_neighbors = A.manager->neighbors.size();
    my_id = A.manager->global_id();
    IVector_h &destination_part = A.manager->getDestinationPartitions();
    int my_destination_part = A.manager->getMyDestinationPartition();
    // bookkeeping stored in AMG_Level_Base
    std::vector<IVector_h> &vertex_counts = this->m_vertex_counts;
    IVector_h &fine_parts_to_consolidate = this->m_fine_parts_to_consolidate;
    // bookkeeping stored in either AMG_Level_Base or Acs' DistributedManager
    // (on level reuse the manager already owns these arrays)
    IVector_h &halo_offsets = this->isReuseLevel() ? Ac.manager->getHaloOffsets() : this->m_consolidated_halo_offsets;
    std::vector<IVector> &consolidated_B2L_maps = this->isReuseLevel() ? Ac.manager->getB2Lmaps() : this->m_consolidated_B2L_maps;
    int num_consolidated_neighbors = this->isRootPartition() ? this->m_consolidated_neighbors.size() : 0;
    if (!this->isRootPartition())
    {
        // Non-root: send the three CSR arrays of the local coarse matrix to the
        // root (tags 1111/1112/1113); the sends are awaited at the end of this
        // function before Ac is resized away.
        A.manager->getComms()->send_vector_async(Ac.row_offsets, my_destination_part, 1111);
        A.manager->getComms()->send_vector_async(Ac.col_indices, my_destination_part, 1112);
        A.manager->getComms()->send_vector_async(Ac.values, my_destination_part, 1113);
    }
    else
    {
        int num_fine_parts_to_consolidate = fine_parts_to_consolidate.size();
        int total_num_rows = this->m_num_all_aggregates;
        // Row-length array for the consolidated matrix (exclusive-scanned later).
        IVector new_row_offsets(total_num_rows + 1, 0);
        //if diags are inside then we won't be counting those twice when computing halo row length
        if (!Ac.hasProps(DIAG))
        {
            thrust::fill(new_row_offsets.begin() + halo_offsets[0], new_row_offsets.begin() + halo_offsets[num_consolidated_neighbors], 1);
            cudaCheckError();
        }
        std::vector<IVector> recv_row_offsets(num_fine_parts_to_consolidate);
        std::vector<VecInt_t> num_nz(num_fine_parts_to_consolidate);
        IVector *work_row_offsets;
        // Layout: [interior offsets per part ... | boundary offsets per part ...]
        std::vector<VecInt_t> index_offset_array(2 * num_fine_parts_to_consolidate + 1);
        int interior_offset = 0;
        // Boundary rows start after the interior rows of every merged part.
        int boundary_offset = 0;
        for (int i = 0; i < num_fine_parts_to_consolidate; i++)
        {
            boundary_offset += vertex_counts[i][0];
        }
        int max_num_nz = 0;
        // Pass 1: receive row offsets from each merged part and scatter row
        // lengths into new_row_offsets.
        for (int i = 0; i < num_fine_parts_to_consolidate; i++)
        {
            int current_part = fine_parts_to_consolidate[i];
            //receive row offsets
            if (current_part != my_id)
            {
                recv_row_offsets[i].resize(total_num_rows + 1);
                A.manager->getComms()->recv_vector(recv_row_offsets[i], current_part, 1111);
                work_row_offsets = &(recv_row_offsets[i]);
                num_nz[i] = (*work_row_offsets)[work_row_offsets->size() - 1];
                max_num_nz = max_num_nz > num_nz[i] ? max_num_nz : num_nz[i];
            }
            else
            {
                // The root's own contribution is read in place.
                work_row_offsets = &(Ac.row_offsets);
                num_nz[i] = Ac.get_num_nz();
            }
            //Get interior row length
            thrust::transform(work_row_offsets->begin() + interior_offset + 1,
                              work_row_offsets->begin() + interior_offset + vertex_counts[i][0] + 1,
                              work_row_offsets->begin() + interior_offset,
                              new_row_offsets.begin() + interior_offset,
                              thrust::minus<IndexType>());
            cudaCheckError();
            //Get boundary row length
            thrust::transform(work_row_offsets->begin() + boundary_offset + 1,
                              work_row_offsets->begin() + boundary_offset + vertex_counts[i][1] + 1,
                              work_row_offsets->begin() + boundary_offset,
                              new_row_offsets.begin() + boundary_offset,
                              thrust::minus<IndexType>());
            cudaCheckError();
            //Increment halo row length by one for every nonzero that is an edge from the halo into this partition
            int size = halo_offsets[num_consolidated_neighbors] - halo_offsets[0];
            const int block_size = 128;
            const int num_blocks = min( AMGX_GRID_MAX_SIZE, (size - 1) / block_size + 1);
            hipLaunchKernelGGL(( set_halo_rowlen) , dim3(num_blocks), dim3(block_size), 0, 0, work_row_offsets->raw() + halo_offsets[0], new_row_offsets.raw() + halo_offsets[0], size, Ac.hasProps(DIAG));
            cudaCheckError();
            index_offset_array[i] = interior_offset;
            index_offset_array[num_fine_parts_to_consolidate + i] = boundary_offset;
            interior_offset += vertex_counts[i][0];
            boundary_offset += vertex_counts[i][1];
            index_offset_array[i + 1] = interior_offset;
            index_offset_array[num_fine_parts_to_consolidate + i + 1] = boundary_offset;
        }
        A.manager->setConsolidationArrayOffsets(index_offset_array);
        //Exclusive scan row length array to get row offsets
        thrust::exclusive_scan(new_row_offsets.begin(), new_row_offsets.end(), new_row_offsets.begin());
        cudaCheckError();
        //Prepare to receive column indices and values
        int num_nz_consolidated = new_row_offsets[new_row_offsets.size() - 1];
        IVector recv_col_indices(max_num_nz);
        IVector new_col_indices(num_nz_consolidated);
        // Extra space past num_nz holds the external diagonal blocks when DIAG is set.
        MVector recv_values((max_num_nz + 1 + Ac.hasProps(DIAG) * (halo_offsets[num_consolidated_neighbors] - 1))*Ac.get_block_size());
        MVector new_values((num_nz_consolidated + 1 + Ac.hasProps(DIAG) * (halo_offsets[num_consolidated_neighbors] - 1))*Ac.get_block_size());
        thrust::fill(new_col_indices.begin() + new_row_offsets[halo_offsets[0]], new_col_indices.end(), -1); //Set all the halo col indices to -1
        if (!Ac.hasProps(DIAG)) { thrust::fill(new_values.begin() + num_nz_consolidated * Ac.get_block_size(), new_values.end(), types::util<ValueTypeA>::get_zero()); }
        cudaCheckError();
        IVector *work_col_indices;
        MVector *work_values;
        interior_offset = 0;
        boundary_offset = 0;
        for (int i = 0; i < num_fine_parts_to_consolidate; i++)
        {
            // NOTE(review): current_part is unused in this loop; only the
            // boundary_offset accumulation has an effect here.
            int current_part = fine_parts_to_consolidate[i];
            boundary_offset += vertex_counts[i][0];
        }
        // Pass 2: receive column indices and values and copy/merge them into
        // the consolidated arrays.
        for (int i = 0; i < num_fine_parts_to_consolidate; i++)
        {
            int current_part = fine_parts_to_consolidate[i];
            if (current_part != my_id)
            {
                A.manager->getComms()->recv_vector(recv_col_indices, current_part, 1112, 0, num_nz[i]);
                A.manager->getComms()->recv_vector(recv_values, current_part, 1113, 0, (num_nz[i] + 1 + Ac.hasProps(DIAG) * (halo_offsets[num_consolidated_neighbors] - 1))*Ac.get_block_size());
                work_col_indices = &(recv_col_indices);
                work_row_offsets = &(recv_row_offsets[i]);
                work_values = &(recv_values);
            }
            else
            {
                work_row_offsets = &(Ac.row_offsets);
                work_col_indices = &(Ac.col_indices);
                work_values = &(Ac.values);
            }
            //Put interior rows in place
            thrust::copy(work_col_indices->begin() + (*work_row_offsets)[interior_offset],
                         work_col_indices->begin() + (*work_row_offsets)[interior_offset + vertex_counts[i][0]],
                         new_col_indices.begin() + new_row_offsets[interior_offset]);
            cudaCheckError();
            thrust::copy(work_values->begin() + (*work_row_offsets)[interior_offset]*Ac.get_block_size(),
                         work_values->begin() + ((*work_row_offsets)[interior_offset + vertex_counts[i][0]])*Ac.get_block_size(),
                         new_values.begin() + new_row_offsets[interior_offset]*Ac.get_block_size());
            cudaCheckError();
            //Put boundary rows in place
            thrust::copy(work_col_indices->begin() + (*work_row_offsets)[boundary_offset],
                         work_col_indices->begin() + (*work_row_offsets)[boundary_offset + vertex_counts[i][1]],
                         new_col_indices.begin() + new_row_offsets[boundary_offset]);
            cudaCheckError();
            thrust::copy(work_values->begin() + (*work_row_offsets)[boundary_offset]*Ac.get_block_size(),
                         work_values->begin() + ((*work_row_offsets)[boundary_offset + vertex_counts[i][1]])*Ac.get_block_size(),
                         new_values.begin() + new_row_offsets[boundary_offset]*Ac.get_block_size());
            cudaCheckError();
            //Process halo rows (merge)
            int size = halo_offsets[num_consolidated_neighbors] - halo_offsets[0];
            const int block_size = 128;
            const int num_blocks = min( AMGX_GRID_MAX_SIZE, (size - 1) / block_size + 1);
            //TODO: vectorise this kernel, will be inefficient for larger block sizes
            hipLaunchKernelGGL(( append_halo_nz) , dim3(num_blocks), dim3(block_size), 0, 0, work_row_offsets->raw() + halo_offsets[0],
                               new_row_offsets.raw() + halo_offsets[0],
                               work_col_indices->raw(),
                               new_col_indices.raw(),
                               work_values->raw(),
                               new_values.raw(),
                               size, Ac.hasProps(DIAG), halo_offsets[0], Ac.get_block_size());
            cudaCheckError();
            // Diagonals
            if (Ac.hasProps(DIAG))
            {
                // Diagonal corresponding to interior rows
                thrust::copy(work_values->begin() + (num_nz[i] + interior_offset)*Ac.get_block_size(),
                             work_values->begin() + (num_nz[i] + interior_offset + vertex_counts[i][0])*Ac.get_block_size(),
                             new_values.begin() + (new_row_offsets[halo_offsets[halo_offsets.size() - 1]] + interior_offset)*Ac.get_block_size());
                // Diagonal corresponding to boundary rows
                thrust::copy(work_values->begin() + (num_nz[i] + boundary_offset)*Ac.get_block_size(),
                             work_values->begin() + (num_nz[i] + boundary_offset + vertex_counts[i][1])*Ac.get_block_size(),
                             new_values.begin() + (new_row_offsets[halo_offsets[halo_offsets.size() - 1]] + boundary_offset)*Ac.get_block_size());
                cudaCheckError();
            }
            interior_offset += vertex_counts[i][0];
            boundary_offset += vertex_counts[i][1];
        }
        // Swap the consolidated arrays into Ac.
        Ac.set_initialized(0);
        Ac.row_offsets = new_row_offsets;
        Ac.col_indices = new_col_indices;
        Ac.values = new_values;
    }
    // A new distributed communicator for coarse levels that only contains active partitions
    // has already been created in consolidatedBookKeeping!
    //
    // Step 12 - finalizing, bookkeping
    //
    if (this->isRootPartition())
    {
        // int my_consolidated_id = fine_part_to_coarse_part[my_id];
        int my_consolidated_id = Ac.manager->getComms()->get_global_id();
        if (!this->isReuseLevel())
        {
            // Hand the consolidated bookkeeping over to Ac's manager.
            Ac.manager->initializeAfterConsolidation(
                my_consolidated_id,
                Ac,
                this->m_consolidated_neighbors,
                this->m_total_interior_rows_in_merged,
                this->m_total_boundary_rows_in_merged,
                this->m_num_all_aggregates,
                this->m_consolidated_halo_offsets,
                this->m_consolidated_B2L_maps,
                1,
                true);
            // this is now stored in Acs DistributedManager
            this->m_consolidated_neighbors.resize(0);
            this->m_consolidated_halo_offsets.resize(0);
            this->m_consolidated_B2L_maps.resize(0);
            Ac.manager->B2L_rings.resize(num_consolidated_neighbors + 1);
            for (int i = 0; i < num_consolidated_neighbors; i++)
            {
                // Single-ring B2L: ring 0 spans the whole map.
                Ac.manager->B2L_rings[i].resize(2);
                Ac.manager->B2L_rings[i][0] = 0;
                Ac.manager->B2L_rings[i][1] = consolidated_B2L_maps[i].size();
            }
        }
        Ac.manager->set_initialized(Ac.row_offsets);
        Ac.manager->getComms()->set_neighbors(num_consolidated_neighbors);
        int new_nnz = Ac.row_offsets[Ac.row_offsets.size() - 1];
        Ac.set_num_nz(new_nnz);
        Ac.set_num_cols(Ac.manager->halo_offsets[Ac.manager->halo_offsets.size() - 1]);
        Ac.set_num_rows(Ac.get_num_cols());
        if (A.hasProps(DIAG)) { Ac.addProps(DIAG); }
        Ac.computeDiagonal();
        Ac.set_initialized(1);
    }
    else
    {
        // Non-root: wait for the async sends to complete, then shrink Ac to an
        // empty matrix — this partition no longer owns coarse-level data.
        this->getA().manager->getComms()->send_vector_wait_all(Ac.row_offsets);
        this->getA().manager->getComms()->send_vector_wait_all(Ac.col_indices);
        this->getA().manager->getComms()->send_vector_wait_all(Ac.values);
        Ac.set_initialized(0);
        // set size of Ac to be zero
        Ac.resize(0, 0, 0, 1);
        Ac.set_initialized(1);
    }
}
// -------------------------------------------------------------
// Explicit instantiations
// -------------------------------------------------------------
#define AMGX_CASE_LINE(CASE) template class Aggregation_AMG_Level<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
}
}
| fb131ae989cbd190a3c6fdf776ce8d1861083634.cu | /* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <aggregation/aggregation_amg_level.h>
#include <matrix_analysis.h>
#ifdef _WIN32
#pragma warning (push)
#pragma warning (disable : 4244 4267 4521)
#endif
#ifdef _WIN32
#pragma warning (pop)
#endif
#include <basic_types.h>
#include <util.h>
#include <fstream>
#include <cutil.h>
#include <multiply.h>
#include <transpose.h>
#include <blas.h>
#include <string>
#include <string.h>
#include <iostream>
#include <algorithm>
#include <amgx_timer.h>
#include <amgx_types/util.h>
#include <thrust/sort.h>
#include <thrust/remove.h>
#include <thrust/transform.h>
#include <thrust/binary_search.h>
#include <thrust/unique.h>
#include <thrust/inner_product.h>
#include <thrust/iterator/constant_iterator.h>
namespace amgx
{
namespace aggregation
{
// ----------------------
// Kernels
// ----------------------
// Writes the multiplicative identity into v at every position listed in
// ind[start..end). Grid-stride traversal, so any launch configuration works.
template <typename IndexType, typename ValueType>
__global__
void set_to_one_kernel(IndexType start, IndexType end, IndexType *ind, ValueType *v)
{
    const int stride = gridDim.x * blockDim.x;
    int idx = start + blockDim.x * blockIdx.x + threadIdx.x;
    while (idx < end)
    {
        v[ind[idx]] = types::util<ValueType>::get_one();
        idx += stride;
    }
}
// Applies the consolidation renumbering to each aggregate id and then shifts
// it by the interior or boundary offset, depending on whether the renumbered
// id falls below or at/above n_interior. An empty renumbering vector
// (renumbering_size == 0) is treated as the identity permutation.
template <typename IndexType>
__global__
void renumberAggregatesKernel(const IndexType *renumbering, const int interior_offset, const int bdy_offset, IndexType *aggregates, const int num_aggregates, const int n_interior, const int renumbering_size)
{
    const int stride = gridDim.x * blockDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_aggregates; i += stride)
    {
        IndexType agg = (renumbering_size == 0) ? aggregates[i]
                                                : renumbering[aggregates[i]];
        // Interior and boundary aggregates live in separate index ranges of
        // the consolidated matrix and get different shifts.
        agg += (agg >= n_interior) ? bdy_offset : interior_offset;
        aggregates[i] = agg;
    }
}
// Kernel to restrict residual using csr_format
// Restriction rr = R * r in CSR form: each coarse row sums the fine residual
// entries of the fine points belonging to that aggregate.
template <typename IndexType, typename ValueType>
__global__
void restrictResidualKernel(const IndexType *row_offsets, const IndexType *column_indices, const ValueType *r, ValueType *rr, const int num_aggregates)
{
    const int stride = gridDim.x * blockDim.x;
    for (int row = blockDim.x * blockIdx.x + threadIdx.x; row < num_aggregates; row += stride)
    {
        ValueType acc(types::util<ValueType>::get_zero());
        const int row_begin = row_offsets[row];
        const int row_end = row_offsets[row + 1];
        for (int j = row_begin; j < row_end; j++)
        {
            acc = acc + r[column_indices[j]];
        }
        rr[row] = acc;
    }
}
// Kernel to restrict residual using block_dia_csr_format
// Block variant of residual restriction: for every aggregate, accumulates the
// bsize-wide residual blocks of its fine points into a per-thread register
// array and writes the sum to rr. bsize is a compile-time constant so the
// inner loops fully unroll.
template <typename IndexType, typename ValueType, int bsize>
__global__
void restrictResidualBlockDiaCsrKernel(const IndexType *row_offsets, const IndexType *column_indices, const ValueType *r, ValueType *rr, const int num_aggregates)
{
    const int stride = gridDim.x * blockDim.x;
    for (int row = blockDim.x * blockIdx.x + threadIdx.x; row < num_aggregates; row += stride)
    {
        ValueType acc[bsize];
#pragma unroll
        for (int m = 0; m < bsize; m++)
        {
            acc[m] = types::util<ValueType>::get_zero();
        }
        const int row_begin = row_offsets[row];
        const int row_end = row_offsets[row + 1];
        for (int j = row_begin; j < row_end; j++)
        {
            const int src = column_indices[j] * bsize;
#pragma unroll
            for (int m = 0; m < bsize; m++)
            {
                acc[m] = acc[m] + r[src + m];
            }
        }
        const int dst = row * bsize;
#pragma unroll
        for (int m = 0; m < bsize; m++)
        {
            rr[dst + m] = acc[m];
        }
    }
}
// Kernel to prolongate and apply the correction for csr format
// Prolongates the coarse correction e and applies it to x (scalar case):
// x[i] += alpha * e[aggregates[i]].
template <typename IndexType, typename ValueType>
__global__
void prolongateAndApplyCorrectionKernel(const ValueType alpha, const int num_rows, ValueType *x, const ValueType *e, const IndexType *aggregates, IndexType num_aggregates)
{
    const int stride = gridDim.x * blockDim.x;
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < num_rows; i += stride)
    {
        x[i] = x[i] + alpha * e[aggregates[i]];
    }
}
// Kernel to prolongate and apply the correction for block-dia-csr format
// Block variant of prolongation + correction: every component of a fine block
// row receives the matching component of its aggregate's coarse correction,
// scaled by alpha.
template <typename IndexType, typename ValueType>
__global__
void prolongateAndApplyCorrectionBlockDiaCsrKernel(const ValueType alpha, const int num_block_rows, ValueType *x, const ValueType *e, const IndexType *aggregates, IndexType num_aggregates, const int bsize)
{
    const int stride = gridDim.x * blockDim.x;
    for (int row = blockDim.x * blockIdx.x + threadIdx.x; row < num_block_rows; row += stride)
    {
        const IndexType I = aggregates[row];
        for (int m = 0; m < bsize; m++)
        {
            x[row * bsize + m] = x[row * bsize + m] + alpha * e[I * bsize + m];
        }
    }
}
// Piecewise-constant prolongation: out (fine vector) takes, component-wise,
// the value of its aggregate's entry in the coarse vector 'in'. One thread
// handles one (row, component) element of the fine vector.
template <typename IndexType, typename ValueType>
__global__
void prolongateVector(const IndexType *aggregates, const ValueType *in, ValueType *out, IndexType fine_rows, IndexType coarse_rows, int blocksize)
{
    const int stride = gridDim.x * blockDim.x;
    for (int tid = threadIdx.x + blockDim.x * blockIdx.x; tid < fine_rows * blocksize; tid += stride)
    {
        const int row = tid / blocksize;
        const int comp = tid % blocksize;
        out[tid] = in[aggregates[row] * blocksize + comp];
    }
}
// Scaled vector update x += lambda * e over numRows entries (grid-stride).
template <typename IndexType, typename ValueType>
__global__
void applyCorrection(ValueType lambda, const ValueType *e, ValueType *x, IndexType numRows )
{
    const int stride = gridDim.x * blockDim.x;
    for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < numRows; i += stride)
    {
        x[i] = x[i] + lambda * e[i];
    }
}
// -------------------------------
// Methods
// ------------------------------
// Copies the aggregation state of a reference level into this level:
// scaling bookkeeping, aggregate counts, the restriction operator (R) and the
// aggregate index maps.
template <class T_Config>
void Aggregation_AMG_Level_Base<T_Config>::transfer_level(AMG_Level<TConfig1> *ref_lvl)
{
    Aggregation_AMG_Level_Base<TConfig1> *src = dynamic_cast<Aggregation_AMG_Level_Base<TConfig1>*>(ref_lvl);
    // scalar bookkeeping
    this->scale_counter = src->scale_counter;
    this->scale = src->scale;
    this->m_num_aggregates = src->m_num_aggregates;
    this->m_num_all_aggregates = src->m_num_all_aggregates;
    // restriction operator and aggregate maps
    this->m_R_row_offsets.copy(src->m_R_row_offsets);
    this->m_R_column_indices.copy(src->m_R_column_indices);
    this->m_aggregates.copy(src->m_aggregates);
    this->m_aggregates_fine_idx.copy(src->m_aggregates_fine_idx);
}
// (aggregate id, original fine index) pair used by the CPU restriction path.
typedef std::pair<int, int> mypair;
// Orders pairs by aggregate id only; used with std::stable_sort so the
// original fine-index order is preserved within an aggregate.
bool comparator ( const mypair &l, const mypair &r)
{
    return l.first < r.first;
}
// Method to compute R
// General path
// TODO: this could be merged with selector to save some computations
// Builds the restriction operator R (CSR pattern only; all values are
// implicitly 1) from the aggregates vector. Row i of R lists the fine points
// assigned to aggregate i. The column indices are obtained by stable-sorting
// the fine indices by aggregate id; the row offsets by a lower_bound search
// over the sorted aggregate ids.
template <typename T_Config>
void Aggregation_AMG_Level_Base<T_Config>::computeRestrictionOperator_common()
{
    m_R_row_offsets.resize(m_num_all_aggregates + 1); //create one more row for the pseudo aggregate
    // Working copy of the aggregate ids; sorted below as the sort key.
    IVector R_row_indices(m_aggregates);
#if AMGX_ASYNCCPU_PROOF_OF_CONCEPT
    // Small problems: do the sort on the CPU (proof-of-concept async path).
    bool use_cpu = m_aggregates.size() < 4096;
    if (use_cpu)
    {
        struct computeRestrictionTask : public task
        {
            Aggregation_AMG_Level_Base<T_Config> *self;
            IVector *R_row_indices;
            void run()
            {
                int N = self->m_aggregates.size();
                IVector_h R_row_indices_host(self->m_aggregates);
                // Pair each aggregate id with its fine index, stable-sort by
                // id, then split the pairs back out.
                std::vector<mypair> pairs(N);
                for (int i = 0; i < N; i++)
                {
                    pairs[i].first = R_row_indices_host[i];
                    pairs[i].second = i;
                }
                std::stable_sort(pairs.begin(), pairs.end(), comparator);
                IVector_h R_column_indices(self->A->get_num_rows());
                for (int i = 0; i < N; i++)
                {
                    R_column_indices[i] = pairs[i].second;
                    R_row_indices_host[i] = pairs[i].first;
                }
                self->m_R_column_indices = R_column_indices;
                *R_row_indices = R_row_indices_host;
            }
        };
        computeRestrictionTask *t = new computeRestrictionTask();
        t->self = this;
        t->R_row_indices = &R_row_indices;
        t->run();
        delete t;
    }
    else
#endif
    {
        // Device path: column indices start as the identity permutation and
        // are co-sorted with the aggregate ids.
        m_R_column_indices.resize(this->A->get_num_rows());
        amgx::thrust::sequence(m_R_column_indices.begin(), m_R_column_indices.end());
        cudaCheckError();
        amgx::thrust::sort_by_key(R_row_indices.begin(), R_row_indices.end(), m_R_column_indices.begin());
        cudaCheckError();
    }
    // Row offsets: first position of each aggregate id in the sorted keys.
    amgx::thrust::lower_bound(R_row_indices.begin(),
                              R_row_indices.end(),
                              amgx::thrust::counting_iterator<typename IVector::value_type>(0),
                              amgx::thrust::counting_iterator<typename IVector::value_type>(m_R_row_offsets.size()),
                              m_R_row_offsets.begin());
    cudaCheckError();
}
// two methods below could be merged
// Method to compute R on HOST using csr format
// Host path, scalar matrices: size the CSR arrays of R, then fill them.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Aggregation_AMG_Level<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::computeRestrictionOperator_1x1()
{
    const int num_fine_rows = this->A->get_num_rows();
    this->m_R_row_offsets.resize(this->m_num_all_aggregates + 1);
    this->m_R_column_indices.resize(num_fine_rows);
    this->fillRowOffsetsAndColIndices(num_fine_rows);
}
// Method to compute R on HOST using block dia-csr format
// Host path, block matrices: R's pattern is identical to the scalar case
// (one entry per fine block row), so the same fill routine is used.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Aggregation_AMG_Level<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::computeRestrictionOperator_4x4()
{
    const int num_fine_rows = this->A->get_num_rows();
    this->m_R_row_offsets.resize(this->m_num_all_aggregates + 1);
    this->m_R_column_indices.resize(num_fine_rows);
    this->fillRowOffsetsAndColIndices(num_fine_rows);
}
// Method to create R_row_offsest and R_column_indices array on HOST using csr or block dia-csr format
// Builds R's CSR pattern on the host by counting sort: counts fine points per
// aggregate, converts counts to offsets, scatters the fine indices, then
// rewinds the offsets in place. The in-place rewind makes the statement order
// below load-bearing — do not reorder.
template <typename T_Config>
void Aggregation_AMG_Level_Base<T_Config>::fillRowOffsetsAndColIndices(const int R_num_cols)
{
    for (int i = 0; i < m_num_all_aggregates + 1; i++)
    {
        m_R_row_offsets[i] = 0;
    }
    // Count number of neighbors for each row
    for (int i = 0; i < R_num_cols; i++)
    {
        int I = m_aggregates[i];
        m_R_row_offsets[I]++;
    }
    m_R_row_offsets[m_num_all_aggregates] = R_num_cols;
    // Suffix-difference pass: turn per-row counts into start offsets
    // (m_R_row_offsets[i] becomes the first slot of row i).
    for (int i = m_num_all_aggregates - 1; i >= 0; i--)
    {
        m_R_row_offsets[i] = m_R_row_offsets[i + 1] - m_R_row_offsets[i];
    }
    /* Set column indices. */
    // Each insertion advances the row's write cursor, so after this loop
    // m_R_row_offsets[I] points one past row I's last slot.
    for (int i = 0; i < R_num_cols; i++)
    {
        int I = m_aggregates[i];
        int Ip = m_R_row_offsets[I]++;
        m_R_column_indices[Ip] = i;
    }
    /* Reset r[i] to start of row memory. */
    for (int i = m_num_all_aggregates - 1; i > 0; i--)
    {
        m_R_row_offsets[i] = m_R_row_offsets[i - 1];
    }
    m_R_row_offsets[0] = 0;
}
// Method to compute R on DEVICE using block dia-csr format
// Device path, block matrices: the generic thrust-based implementation
// handles any block size, so simply delegate.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Aggregation_AMG_Level<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::computeRestrictionOperator_4x4()
{
    this->computeRestrictionOperator_common();
}
// Device path, scalar matrices: same generic implementation as the block case.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Aggregation_AMG_Level<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::computeRestrictionOperator_1x1()
{
    this->computeRestrictionOperator_common();
}
// Method to restrict Residual on host using csr_matrix format
// Host residual restriction (scalar case): rr[i] is the sum of the fine
// residual entries of the fine points in aggregate i, as listed by R's row i.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Aggregation_AMG_Level<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::restrictResidual_1x1(const VVector &r, VVector &rr)
{
    for (int i = 0; i < this->m_num_aggregates; i++)
    {
        ValueTypeB acc = types::util<ValueTypeB>::get_zero();
        const int row_begin = this->m_R_row_offsets[i];
        const int row_end = this->m_R_row_offsets[i + 1];
        for (int j = row_begin; j < row_end; j++)
        {
            acc = acc + r[this->m_R_column_indices[j]];
        }
        rr[i] = acc;
    }
}
// Method to restrict Residual on host using block_dia_csr_matrix format
// Host residual restriction (block case): for every aggregate i, accumulates
// the bsize-wide residual blocks of its fine points (per R's row i) into a
// scratch buffer and writes the sum to rr[i*bsize .. i*bsize+bsize-1].
// r  : fine-level residual, bsize values per fine row
// rr : coarse-level restricted residual, written for m_num_aggregates rows
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Aggregation_AMG_Level<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::restrictResidual_4x4(const VVector &r, VVector &rr)
{
    IndexType bsize = this->A->get_block_dimy();
    ValueTypeB *temp = new ValueTypeB[bsize];
    for (int i = 0; i < this->m_num_aggregates; i++)
    {
        // Initialize temp to 0
        for (int k = 0; k < bsize; k++)
        {
            temp[k] = types::util<ValueTypeB>::get_zero();
        }
        // Add contributions from each fine point
        for (int j = this->m_R_row_offsets[i]; j < this->m_R_row_offsets[i + 1]; j++)
        {
            int j_col = this->m_R_column_indices[j];
            for (int k = 0; k < bsize; k++)
            {
                temp[k] = temp[k] + r[j_col * bsize + k];
            }
        }
        // Store result
        for (int k = 0; k < bsize; k++)
        {
            rr[i * bsize + k] = temp[k];
        }
    }
    // Fix: the scratch buffer was allocated with new[] but never released,
    // leaking bsize elements on every call.
    delete[] temp;
}
// Method to restrict Residual on device using csr_matrix format
// Device residual restriction (scalar case): launches restrictResidualKernel
// over R's rows. On consolidation levels the restriction must also cover
// aggregates merged in from other partitions, hence the larger row count.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Aggregation_AMG_Level<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::restrictResidual_1x1(const VVector &r, VVector &rr)
{
    const int num_rows = this->isConsolidationLevel() ? this->m_num_all_aggregates
                                                      : this->m_num_aggregates;
    const int block_size = 64;
    const int num_blocks = min( AMGX_GRID_MAX_SIZE, (num_rows - 1) / block_size + 1);
    restrictResidualKernel <<< num_blocks, block_size>>>(this->m_R_row_offsets.raw(),
            this->m_R_column_indices.raw(),
            r.raw(),
            rr.raw(),
            num_rows);
    cudaCheckError();
}
// Method to restrict Residual on device using block_dia_csr_matrix format
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Aggregation_AMG_Level<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::restrictResidual_4x4(const VVector &r, VVector &rr)
{
    // Device restriction for block matrices: the runtime block dimension
    // selects a compile-time instantiation of the kernel. Consolidation
    // levels also cover aggregates of the consolidated partitions.
    const int threads_per_block = 64;
    const int work_items = this->isConsolidationLevel() ? this->m_num_all_aggregates : this->m_num_aggregates;
    const int grid_size = min( AMGX_GRID_MAX_SIZE, (work_items + threads_per_block - 1) / threads_per_block);
    const IndexType *R_rows = this->m_R_row_offsets.raw();
    const IndexType *R_cols = this->m_R_column_indices.raw();
    const ValueTypeB *fine = r.raw();
    ValueTypeB *coarse = rr.raw();
    cudaCheckError();
    // Local helper macro: launch the instantiation for block size BSZ.
#define AMGX_RESTRICT_RESIDUAL_LAUNCH(BSZ) \
    restrictResidualBlockDiaCsrKernel<IndexType, ValueTypeB, BSZ> <<< grid_size, threads_per_block>>>(R_rows, R_cols, fine, coarse, work_items)
    switch ( this->getA().get_block_dimy() )
    {
        case 2:
            AMGX_RESTRICT_RESIDUAL_LAUNCH(2);
            break;
        case 3:
            AMGX_RESTRICT_RESIDUAL_LAUNCH(3);
            break;
        case 4:
            AMGX_RESTRICT_RESIDUAL_LAUNCH(4);
            break;
        case 5:
            AMGX_RESTRICT_RESIDUAL_LAUNCH(5);
            break;
        case 8:
            AMGX_RESTRICT_RESIDUAL_LAUNCH(8);
            break;
        case 10:
            AMGX_RESTRICT_RESIDUAL_LAUNCH(10);
            break;
        default:
            FatalError( "Unsupported block size in restrictResidual_4x4!!!", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE );
    }
#undef AMGX_RESTRICT_RESIDUAL_LAUNCH
    cudaCheckError();
}
__inline__ float getAlpha(float &nom, float &denom)
{
    // Damping factor for the scaled coarse-grid correction: the ratio
    // nom/denom clamped to [1, 2]. A non-positive product (opposite signs or
    // a zero factor) or |nom| < |denom| disables scaling (factor 1).
    if (nom * denom <= 0. || std::abs(nom) < std::abs(denom))
    {
        return 1.;
    }
    if (std::abs(nom) > 2.*std::abs(denom))
    {
        return 2.;
    }
    return nom / denom;
}
__inline__ double getAlpha(double &nom, double &denom)
{
    // Double-precision overload of the correction damping factor: the ratio
    // nom/denom clamped to [1, 2]; sign conflicts or |nom| < |denom| fall
    // back to no scaling (1).
    if (nom * denom <= 0. || std::abs(nom) < std::abs(denom))
    {
        return 1.;
    }
    if (std::abs(nom) > 2.*std::abs(denom))
    {
        return 2.;
    }
    return nom / denom;
}
__inline__ cuComplex getAlpha(cuComplex &nom, cuComplex &denom)
{
    // Complex overload: clamps by magnitude only (no sign test, unlike the
    // real overloads) and returns a real-valued 1 or 2 at the bounds.
    if (types::util<cuComplex>::abs(nom) < types::util<cuComplex>::abs(denom))
    {
        return make_cuComplex(1.f, 0.f);
    }
    if (types::util<cuComplex>::abs(nom) > 2.*types::util<cuComplex>::abs(denom))
    {
        return make_cuComplex(2.f, 0.f);
    }
    return nom / denom;
}
__inline__ cuDoubleComplex getAlpha(cuDoubleComplex &nom, cuDoubleComplex &denom)
{
    // Double-complex overload: clamps by magnitude only (no sign test, unlike
    // the real overloads) and returns a real-valued 1 or 2 at the bounds.
    if (types::util<cuDoubleComplex>::abs(nom) < types::util<cuDoubleComplex>::abs(denom))
    {
        return make_cuDoubleComplex(1., 0.);
    }
    if (types::util<cuDoubleComplex>::abs(nom) > 2.*types::util<cuDoubleComplex>::abs(denom))
    {
        return make_cuDoubleComplex(2., 0.);
    }
    return nom / denom;
}
template< class T_Config>
typename T_Config::VecPrec Aggregation_AMG_Level_Base<T_Config>::computeAlpha(const Vector<T_Config> &e, const Vector<T_Config> &bc, const Vector<T_Config> &tmp)
{
    // Computes the error-scaling factor from two inner products over the
    // coarse rows: v[0] = <e, bc>, v[1] = <e, tmp>. The ratio is clamped by
    // the getAlpha overloads above. (Fix: removed a local `alpha` that was
    // initialized to one and never used.)
    Matrix<TConfig> &Ac = this->getNextLevel( MemorySpace( ) )->getA();
    int size = Ac.get_num_rows();
    VVector v(2, types::util<ValueTypeB>::get_zero());
    v[0] = amgx::thrust::inner_product(e.begin(), e.begin() + size, bc.begin(), types::util<ValueTypeB>::get_zero());
    v[1] = amgx::thrust::inner_product(e.begin(), e.begin() + size, tmp.begin(), types::util<ValueTypeB>::get_zero());
    cudaCheckError();
    return getAlpha(v[0], v[1]);
}
// Method to prolongate the error on HOST using csr format
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Aggregation_AMG_Level<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::prolongateAndApplyCorrection_1x1(Vector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &e, Vector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &bc, Vector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &x, Vector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &tmp)
{
    // Applies the prolongated coarse correction x_i += alpha * e[agg(i)] on
    // the host for scalar matrices. alpha is one unless error_scaling==1, in
    // which case it is derived from the coarse system.
    // (Fix: removed an unused local reference `C` to the next level's matrix.)
    Matrix<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &A = this->getA();
    if ( this->m_error_scaling >= 2 )
    {
        FatalError("error_scaling=2,3 is not implemented on host", AMGX_ERR_NOT_IMPLEMENTED );
    }
    ValueTypeB alpha = types::util<ValueTypeB>::get_one();
    if (this->m_error_scaling)
    {
        // tmp = Ac * e; alpha = clamped <e,bc>/<e,tmp> (see computeAlpha).
        multiply(this->next_h->getA(), e, tmp);
        alpha = this->computeAlpha (e, bc, tmp);
    }
    // Apply correction on all (interior and exterior) equations.
    for (int i = 0; i < A.get_num_cols(); i++)
    {
        int I = this->m_aggregates[i];
        x[i] = x[i] + alpha * e[I];
    }
}
// Method to prolongate the error on HOST using block_dia_csr format
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Aggregation_AMG_Level<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::prolongateAndApplyCorrection_4x4(Vector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &e, Vector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &bc, Vector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &x, Vector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &tmp)
{
    // Applies the prolongated coarse correction for block matrices: every
    // component k of row i receives alpha * e[agg(i)*bsize + k].
    // (Fix: removed an unused local reference `C`; hoisted the repeated
    // get_block_dimy() calls out of the loops.)
    if (this->A->get_block_dimy() != this->A->get_block_dimx())
    {
        FatalError("Aggregation_AMG_Level not implemented for non square blocks, exiting", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
    }
    if ( this->m_error_scaling >= 2 )
    {
        FatalError("error_scaling=2,3 is not implemented on host", AMGX_ERR_NOT_IMPLEMENTED );
    }
    ValueTypeB alpha = types::util<ValueTypeB>::get_one();
    if (this->m_error_scaling)
    {
        // tmp = Ac * e; alpha = clamped <e,bc>/<e,tmp> (see computeAlpha).
        multiply(this->next_h->getA(), e, tmp);
        alpha = this->computeAlpha (e, bc, tmp);
    }
    const int bsize = this->A->get_block_dimy();
    // Apply correction on all equations.
    for (int i = 0; i < this->A->get_num_rows(); i++)
    {
        int I = this->m_aggregates[i];
        for (int k = 0; k < bsize; k++)
        {
            x[i * bsize + k] = x[i * bsize + k] + alpha * e[I * bsize + k];
        }
    }
}
// Prolongate the error on DEVICE using csr format
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Aggregation_AMG_Level<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::prolongateAndApplyCorrection_1x1(Vector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > &e, Vector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > &bc, Vector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > &x, Vector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > &tmp)
{
    // Device path for scalar matrices: x_i += e[agg(i)] with unit weight,
    // one thread per fine row. error_scaling==1 is no longer supported.
    const ValueTypeB unit_weight = types::util<ValueTypeB>::get_one();
    const int threads_per_block = 64;
    const int grid_size = min( AMGX_GRID_MAX_SIZE, (int) ( (this->A->get_num_rows() + threads_per_block - 1) / threads_per_block ) );
    const IndexType *agg = this->m_aggregates.raw();
    ValueTypeB *x_data = x.raw();
    const ValueTypeB *e_data = e.raw();
    if (this->m_error_scaling)
    {
        FatalError("error_scaling=1 is deprecated", AMGX_ERR_NOT_IMPLEMENTED );
    }
    prolongateAndApplyCorrectionKernel <<< grid_size, threads_per_block>>>(unit_weight, (int)this->A->get_num_rows(), x_data, e_data, agg, this->m_num_aggregates);
    cudaCheckError();
}
// Prolongate the error on DEVICE using block dia-csr format
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Aggregation_AMG_Level<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::prolongateAndApplyCorrection_4x4(Vector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > &ec,
        Vector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > &bf,
        Vector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > &xf,
        Vector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > &rf)
{
    // Prolongates the coarse error ec to the fine level and applies the
    // correction to xf. For m_error_scaling >= 2 the correction is damped by
    // a scalar alpha computed from inner products of the (smoothed) prolonged
    // error and the fine residual rf; alpha is cached in this->scale and
    // reused for the next reuse_scale calls. Otherwise the correction is
    // applied with unit weight.
    if ( this->m_error_scaling >= 2 )
    {
        if ( this->scale_counter > 0 )
        {
            // A previously computed scale is still valid: apply it directly.
            const IndexType *aggregates_ptr = this->m_aggregates.raw();
            ValueTypeB *x_ptr = xf.raw();
            const ValueTypeB *e_ptr = ec.raw();
            const int block_size = 64;
            const int num_blocks = min( AMGX_GRID_MAX_SIZE, (int) ((this->A->get_num_rows() - 1) / block_size + 1));
            prolongateAndApplyCorrectionBlockDiaCsrKernel <<< num_blocks, block_size>>>(this->scale, (int)this->getA().get_num_rows(), x_ptr, e_ptr, aggregates_ptr, this->m_num_aggregates, this->getA().get_block_dimy());
            cudaCheckError();
            this->scale_counter--;
            return;
        }
        // error_scaling > 3 selects the "Vanek" variant: smooth both the
        // correction and the solution, and recompute the residual.
        bool vanek_scaling = this->m_error_scaling > 3;
        IndexType numRowsCoarse = this->next_d->getA().get_num_rows();
        IndexType numRowsFine = this->A->get_num_rows();
        IndexType blockdim = this->A->get_block_dimx();
        if ( blockdim != this->A->get_block_dimy() )
        {
            FatalError("Unsupported dimension for aggregation amg level", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
        }
        // ef  = prolonged (and later smoothed) error, Aef = A*ef.
        VVector ef( rf.size() );
        VVector Aef( rf.size() );
        ef.set_block_dimy( blockdim );
        Aef.set_block_dimy( blockdim );
        // prolongate e
        const int threads_per_block = 256;
        const int num_block_values = min( AMGX_GRID_MAX_SIZE, (numRowsFine * blockdim - 1) / threads_per_block + 1);
        const cudaStream_t stream = nullptr;
        prolongateVector <<< num_block_values, threads_per_block, 0, stream>>>( this->m_aggregates.raw(), ec.raw(), ef.raw(), numRowsFine, numRowsCoarse, blockdim );
        ef.dirtybit = 1;
        cudaStreamSynchronize(stream);
        cudaCheckError();
        int preSmooth;
        if ( vanek_scaling )
        {
            preSmooth = this->amg->getNumPostsweeps();
        }
        else
        {
            preSmooth = this->scaling_smoother_steps;
        }
        //smooth error
        this->smoother->setTolerance( 0.0 );
        this->smoother->set_max_iters( preSmooth );
        if ( vanek_scaling )
        {
            amgx::thrust::fill( Aef.begin(), Aef.end(), types::util<ValueTypeB>::get_zero() );
            cudaCheckError();
            this->smoother->solve( Aef, ef, false ); //smooth correction with rhs 0
            this->smoother->solve( bf, xf, false ); // smooth x with rhs residual
            //recompute residual
            int offset, size;
            this->getA().getOffsetAndSizeForView(OWNED, &offset, &size);
            axmb( this->getA(), xf, bf, rf, offset, size );
        }
        else
        {
            this->smoother->solve( rf, ef, false ); //smooth correction with rhs residual
        }
        // multiply for lambda computation
        multiply(this->getA(), ef, Aef, OWNED);
        ValueTypeB nominator, denominator;
        int offset = 0, size = 0;
        this->A->getOffsetAndSizeForView(OWNED, &offset, &size);
        if ( this->m_error_scaling == 2 || this->m_error_scaling == 4 )
        {
            // compute lambda=<rf,Aef>/<Aef,Aef>
            nominator = amgx::thrust::inner_product( rf.begin(), rf.end(), Aef.begin(), types::util<ValueTypeB>::get_zero() );
            denominator = amgx::thrust::inner_product( Aef.begin(), Aef.end(), Aef.begin(), types::util<ValueTypeB>::get_zero() );
            cudaCheckError();
        }
        if ( this->m_error_scaling == 3 || this->m_error_scaling == 5)
        {
            // compute lambda=<rf,ef>/<ef,Aef>  (restricted to the owned rows,
            // globally reduced across ranks in the distributed case)
            nominator = amgx::thrust::inner_product( rf.begin(), rf.begin() + size * blockdim, ef.begin(), types::util<ValueTypeB>::get_zero() );
            denominator = amgx::thrust::inner_product( ef.begin(), ef.begin() + size * blockdim, Aef.begin(), types::util<ValueTypeB>::get_zero() );
            if (!this->A->is_matrix_singleGPU())
            {
                this->A->getManager()->global_reduce_sum(&nominator);
                this->A->getManager()->global_reduce_sum(&denominator);
            }
            cudaCheckError();
        }
        // Guard against a zero denominator: fall back to alpha = 1.
        if (types::util<ValueTypeB>::abs(denominator) == 0.0)
        {
            nominator = denominator = types::util<ValueTypeB>::get_one();
        }
        // apply correction x <- x + lambda*e, with |lambda| clamped to [0.3, 10]
        // while preserving its phase/sign.
        const int num_block_fine = min( AMGX_GRID_MAX_SIZE, (numRowsFine * blockdim - 1) / threads_per_block + 1 );
        ValueTypeB alpha = nominator / denominator;
        if ( types::util<ValueTypeB>::abs(alpha) < .3 )
        {
            alpha = (alpha / types::util<ValueTypeB>::abs(alpha)) * .3; // it was this before: alpha = .3, which is not 100% equal
        }
        if ( types::util<ValueTypeB>::abs(alpha) > 10 )
        {
            alpha = (alpha / types::util<ValueTypeB>::abs(alpha)) * 10.; // it was this before: alpha = 10., which is not 100% equal
        }
        applyCorrection <<< num_block_fine, threads_per_block, 0, stream>>>( alpha, ef.raw(), xf.raw(), numRowsFine * blockdim );
        cudaCheckError();
        this->scale_counter = this->reuse_scale; //reuse this scale scale_counter times
        this->scale = alpha;
        return;
    }
    // No error scaling (or the deprecated mode 1): apply the plain correction
    // with unit weight, one thread per fine row.
    ValueTypeB alpha = types::util<ValueTypeB>::get_one();
    const int block_size = 64;
    const int num_blocks = min( AMGX_GRID_MAX_SIZE, (int) ((this->A->get_num_rows() - 1) / block_size + 1));
    const IndexType *aggregates_ptr = this->m_aggregates.raw();
    ValueTypeB *x_ptr = xf.raw();
    const ValueTypeB *e_ptr = ec.raw();
    if (this->m_error_scaling == 1)
    {
        FatalError("error_scaling=1 is deprecated", AMGX_ERR_NOT_IMPLEMENTED );
    }
    prolongateAndApplyCorrectionBlockDiaCsrKernel <<< num_blocks, block_size>>>(alpha, (int)this->A->get_num_rows(), x_ptr, e_ptr, aggregates_ptr, this->m_num_aggregates, this->A->get_block_dimy());
    cudaCheckError();
}
template <class T_Config>
void Aggregation_AMG_Level_Base<T_Config >::prolongateAndApplyCorrection(VVector &e, VVector &bf, VVector &x, VVector &tmp)
{
    // Dispatches to the scalar or block implementation, then starts the
    // asynchronous halo exchange of the corrected solution x.
    // (Fix: removed an unused local reference `Ac` to the next level's matrix.)
    //this is dirty, but error scaling 2 and 3 do not have a specialized version. Instead, the general version sits in the 4x4 function
    if ( this->m_error_scaling >= 2 )
    {
        prolongateAndApplyCorrection_4x4(e, bf, x, tmp);
    }
    else if (this->A->get_block_size() == 1)
    {
        prolongateAndApplyCorrection_1x1(e, bf, x, tmp);
    }
    else if (this->A->get_block_dimx() == this->A->get_block_dimy() )
    {
        prolongateAndApplyCorrection_4x4(e, bf, x, tmp);
    }
    else
    {
        FatalError("Unsupported dimension for aggregation amg level", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
    }
    // Mark x as modified and overlap its halo exchange with subsequent work.
    x.dirtybit = 1;
    if (!this->A->is_matrix_singleGPU() && x.delayed_send == 0)
    {
        if (x.in_transfer & RECEIVING) { this->A->manager->exchange_halo_wait(x, x.tag); }
        this->A->manager->exchange_halo_async(x, x.tag);
    }
}
template <class T_Config>
void Aggregation_AMG_Level_Base<T_Config>::restrictResidual(VVector &r, VVector &rr)
{
    // Dispatches the residual restriction rr = R*r to the scalar or block
    // implementation, then starts the halo exchange of the coarse residual
    // for distributed runs.
    if (this->A->get_block_size() == 1)
    {
        restrictResidual_1x1(r, rr);
    }
    else if (this->A->get_block_dimx() == this->A->get_block_dimy() )
    {
        restrictResidual_4x4(r, rr);
    }
    else
    {
        FatalError("Unsupported dimension for aggregation amg level", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
    }
    //TODO: check level transfer between host and device for multiGPU
    if (!this->A->is_matrix_singleGPU())
    {
        // Fix: the inner block used to re-declare an identical `Ac` reference,
        // shadowing this one; a single reference suffices.
        Matrix<TConfig> &Ac = this->getNextLevel( MemorySpace( ) )->getA(); //TODO problem in memoryspace transfer is here
        rr.dirtybit = 1;
        if (!Ac.is_matrix_singleGPU() && !this->isConsolidationLevel() && rr.delayed_send == 0)
        {
            if (rr.in_transfer & RECEIVING) { Ac.manager->exchange_halo_wait(rr, rr.tag); }
            Ac.manager->exchange_halo_async(rr, rr.tag);
        }
    }
}
template <class T_Config>
void Aggregation_AMG_Level_Base<T_Config>::computeRestrictionOperator()
{
    // Builds the restriction operator in the specialization matching the
    // block size of A; anything that is neither scalar nor 4x4 takes the
    // generic path.
    const int dimx = this->A->get_block_dimx();
    const int dimy = this->A->get_block_dimy();
    if (this->A->get_block_size() == 1)
    {
        computeRestrictionOperator_1x1();
    }
    else if (dimx == 4 && dimy == 4)
    {
        computeRestrictionOperator_4x4();
    }
    else
    {
        this->computeRestrictionOperator_common();
    }
}
template <typename IndexType>
__global__ void coarse_to_global(IndexType *aggregates, IndexType *aggregates_global, IndexType *renumbering, IndexType num_elements, int64_t offset)
{
    // For every fine element, records the (offset-shifted) global index of
    // its aggregate: renumbering[aggregates[i]] = aggregates_global[i] + offset.
    // Concurrent writes to the same aggregate store the same value, so the
    // race is benign (as the original comment noted).
    for (int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < num_elements; tid += blockDim.x * gridDim.x)
    {
        renumbering[aggregates[tid]] = aggregates_global[tid] + offset;
    }
}
template <typename T, typename IndexType>
__global__ void export_matrix_elements(IndexType *row_offsets, IndexType *col_indices, T *values, IndexType *maps, IndexType *renumbering, IndexType *new_row_offsets, IndexType *new_col_indices, T *new_values, IndexType bsize, IndexType size)
{
    // Copies the rows listed in maps into a compacted export matrix: values
    // are copied verbatim, column indices are translated through renumbering.
    // A group of 32 consecutive threads cooperates on one row.
    const int lane = threadIdx.x % 32;
    for (int row_id = blockIdx.x * blockDim.x / 32 + threadIdx.x / 32; row_id < size; row_id += gridDim.x * blockDim.x / 32)
    {
        const int src_row = maps[row_id];
        const INDEX_TYPE src_begin = row_offsets[src_row];
        const INDEX_TYPE dst_begin = new_row_offsets[row_id];
        const INDEX_TYPE row_len = row_offsets[src_row + 1] - src_begin;
        for (int m = lane; m < row_len * bsize; m += 32)
        {
            new_values[dst_begin * bsize + m] = values[src_begin * bsize + m];
        }
        for (int m = lane; m < row_len; m += 32)
        {
            new_col_indices[dst_begin + m] = renumbering[col_indices[src_begin + m]];
        }
    }
}
template <class T>
__global__ void export_matrix_diagonal(T *values, INDEX_TYPE bsize, INDEX_TYPE *maps, T *output, INDEX_TYPE size)
{
    // Copies the diagonal block of every row listed in maps into the output
    // array; one thread handles one row (bsize values each).
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x)
    {
        const int src_row = maps[i];
        for (int m = 0; m < bsize; m++)
        {
            output[i * bsize + m] = values[src_row * bsize + m];
        }
    }
}
__global__ void remove_boundary(INDEX_TYPE *flags, INDEX_TYPE *maps, INDEX_TYPE size)
{
    // Clears the flag of every node listed in maps. Concurrent writes all
    // store zero, so the race is benign (as the original comment noted).
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += blockDim.x * gridDim.x)
    {
        flags[maps[i]] = 0;
    }
}
__global__ void calc_inverse_renumbering(INDEX_TYPE *renum, INDEX_TYPE *irenum, INDEX_TYPE *renum_gbl, INDEX_TYPE base_index, INDEX_TYPE max_element)
{
    // Builds the inverse table: for each local index i,
    // irenum[renum[i]] = renum_gbl[i] - base_index.
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < max_element; i += blockDim.x * gridDim.x)
    {
        irenum[renum[i]] = renum_gbl[i] - base_index;
    }
}
__global__ void create_halo_mapping(INDEX_TYPE *mapping, INDEX_TYPE *node_list, INDEX_TYPE base_index, INDEX_TYPE map_offset, INDEX_TYPE size)
{
    // Records, for every node in node_list, its local position shifted by
    // map_offset: mapping[node_list[i] - base_index] = map_offset + i.
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += blockDim.x * gridDim.x)
    {
        mapping[node_list[i] - base_index] = map_offset + i;
    }
}
__global__ void map_col_indices_and_count_rowlen(INDEX_TYPE *row_offsets, INDEX_TYPE *col_indices, INDEX_TYPE *row_length, INDEX_TYPE *renumbering, INDEX_TYPE *mapping, INDEX_TYPE *map_offsets, int64_t *index_ranges, INDEX_TYPE part_id, INDEX_TYPE my_id, INDEX_TYPE base_index, INDEX_TYPE my_range, INDEX_TYPE num_neighbors, INDEX_TYPE num_rows)
{
    // Translates the column indices of a halo matrix received from neighbor
    // part_id into the local coarse numbering, dropping (setting to -1) any
    // entry that maps neither to an owned row nor to a known neighbor, and
    // writes the count of surviving entries per row into row_length.
    // Four threads cooperate on each row; the per-thread counts are combined
    // through dynamic shared memory (one int per thread of the block).
    extern __shared__ volatile int reduction[];
    int row = blockIdx.x * blockDim.x / 4 + threadIdx.x / 4;
    int coopIdx = threadIdx.x % 4;
    while (row < num_rows)
    {
        int valid = 0;
        for (int idx = row_offsets[row] + coopIdx; idx < row_offsets[row + 1]; idx += 4) //this may look horrible, but I expect low branch divergence, because col indices in a row usually belong to the same partition (or at most one more)
        {
            int colIdx = col_indices[idx];
            // part: owner of colIdx. -1 = my owned partition, -2 = unknown
            // (entry is dropped), otherwise the neighbor's index.
            int part = -2;
            if (colIdx >= index_ranges[2 * part_id] && colIdx < index_ranges[2 * part_id + 1]) //the col index probably belongs to the partition I am working on
            {
                part = part_id;
            }
            else if (colIdx >= base_index && colIdx < base_index + my_range) //or points back to the owned partition
            {
                part = -1;
            }
            else //or else it points to a third partition
            {
                for (int i = 0; i < num_neighbors; i++)
                {
                    if (colIdx >= index_ranges[2 * i] && colIdx < index_ranges[2 * i + 1])
                    {
                        part = i;
                    }
                }
            }
            if (part == -2)
            {
                // Unknown owner: invalidate the entry (compacted away later).
                col_indices[idx] = -1;
#ifdef DEBUG
                printf("Column index encountered that does not belong to any of my neighbors!! %d\n", colIdx);
#endif
            }
            else
            {
                if (part == -1)
                {
                    // Owned column: renumber into the local coarse index.
                    col_indices[idx] = renumbering[colIdx - base_index];
                    valid++;
                }
                else
                {
                    // Neighbor-owned column: look up the halo mapping table;
                    // a negative mapping entry means "not present", drop it.
                    int new_col_idx = mapping[map_offsets[part] + colIdx - index_ranges[2 * part]];
                    if (new_col_idx >= 0)
                    {
                        valid++;
                        col_indices[idx] = new_col_idx;
                    }
                    else
                    {
                        col_indices[idx] = -1;
                    }
                }
            }
        }
        // Tree-reduce the four per-thread counts of this row group.
        reduction[threadIdx.x] = valid;
        for (int s = 2; s > 0; s >>= 1)
        {
            if (coopIdx < s)
            {
                reduction[threadIdx.x] += reduction[threadIdx.x + s];
            }
            // NOTE(review): this __syncthreads() sits inside the outer
            // while(row < num_rows) loop, which threads may exit at different
            // iterations near the end of the range; that is only safe if every
            // thread of a block performs the same number of trips — verify the
            // launch configuration guarantees this.
            __syncthreads();
        }
        if (coopIdx == 0)
        {
            row_length[row] = reduction[threadIdx.x];
        }
        row += gridDim.x * blockDim.x / 4;
    }
}
__global__ void map_col_indices(INDEX_TYPE *row_offsets, INDEX_TYPE *col_indices, int64_t *halo_ranges, INDEX_TYPE *halo_renumbering, INDEX_TYPE *halo_rows, INDEX_TYPE *global_renumbering, INDEX_TYPE num_neighbors, INDEX_TYPE num_rows, INDEX_TYPE num_rows_processed)
{
    // Translates column indices in place: indices below num_rows are already
    // local and kept as-is; anything else is converted to its global index
    // and mapped into the halo section of the neighbor whose range contains
    // it. Four threads cooperate on each row.
    const int lane = threadIdx.x % 4;
    for (int row = blockIdx.x * blockDim.x / 4 + threadIdx.x / 4; row < num_rows_processed; row += gridDim.x * blockDim.x / 4)
    {
        for (int idx = row_offsets[row] + lane; idx < row_offsets[row + 1]; idx += 4)
        {
            int col = col_indices[idx];
            if (col < num_rows)
            {
                // Interior column: already a valid local index.
                col_indices[idx] = col;
            }
            else
            {
                // Halo column: go through the global numbering, then find the
                // owning neighbor by range search (defaults to neighbor 0 if
                // no range matches, as in the original).
                col = global_renumbering[col];
                int owner = 0;
                for (int i = 0; i < num_neighbors; i++)
                {
                    if (col >= halo_ranges[2 * i] && col < halo_ranges[2 * i + 1])
                    {
                        owner = i;
                        break;
                    }
                }
                col_indices[idx] = halo_renumbering[halo_rows[owner] + col - halo_ranges[2 * owner]];
            }
        }
    }
}
template <class T>
__global__ void reorder_whole_matrix(INDEX_TYPE *old_rows, INDEX_TYPE *old_cols, T *old_vals, INDEX_TYPE *rows, INDEX_TYPE *cols, T *vals, INDEX_TYPE bsize, INDEX_TYPE num_rows)
{
    // Compacts rows into the destination matrix: entries whose column index
    // was marked invalid (negative) are skipped, everything else is appended
    // in order starting at the row's destination offset.
    for (int row = blockIdx.x * blockDim.x + threadIdx.x; row < num_rows; row += blockDim.x * gridDim.x)
    {
        const INDEX_TYPE src_begin = old_rows[row];
        const INDEX_TYPE src_len = old_rows[row + 1] - src_begin;
        INDEX_TYPE dst = rows[row];
        for (int i = 0; i < src_len; i++)
        {
            const INDEX_TYPE colIdx = old_cols[src_begin + i];
            if (colIdx < 0) { continue; }
            cols[dst] = colIdx;
            for (int j = 0; j < bsize; j++)
            {
                vals[dst * bsize + j] = old_vals[(src_begin + i) * bsize + j];
            }
            dst++;
        }
    }
}
__global__ void calc_gbl_renumbering(INDEX_TYPE *inv_renum, INDEX_TYPE *gbl_renum, INDEX_TYPE size)
{
    // Inverts a renumbering table: gbl_renum[inv_renum[i]] = i.
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < size; i += blockDim.x * gridDim.x)
    {
        gbl_renum[inv_renum[i]] = i;
    }
}
template <typename ValueType>
__global__ void write_diagonals(ValueType *values, INDEX_TYPE *diag, INDEX_TYPE *map, ValueType *output, INDEX_TYPE bsize, INDEX_TYPE size)
{
    // Gathers the diagonal blocks of the rows listed in map into a dense
    // output array; bsize consecutive threads cooperate on one row.
    const int rowsPerBlock = blockDim.x / bsize;
    const int lane = threadIdx.x % bsize;
    // Trailing threads that do not form a complete bsize-group sit out.
    if (threadIdx.x >= rowsPerBlock * bsize) { return; }
    for (int row = blockIdx.x * rowsPerBlock + threadIdx.x / bsize; row < size; row += gridDim.x * rowsPerBlock)
    {
        output[row * bsize + lane] = values[diag[map[row]] * bsize + lane];
    }
}
template <typename ValueType>
__global__ void write_diagonals_back(ValueType *values, INDEX_TYPE *diag, ValueType *source, INDEX_TYPE bsize, INDEX_TYPE size)
{
    // Scatters dense diagonal blocks from source back into the matrix's
    // diagonal storage; bsize consecutive threads cooperate on one row.
    const int rowsPerBlock = blockDim.x / bsize;
    const int lane = threadIdx.x % bsize;
    // Trailing threads that do not form a complete bsize-group sit out.
    if (threadIdx.x >= rowsPerBlock * bsize) { return; }
    for (int row = blockIdx.x * rowsPerBlock + threadIdx.x / bsize; row < size; row += gridDim.x * rowsPerBlock)
    {
        values[diag[row]*bsize + lane] = source[row * bsize + lane];
    }
}
template <class T_Config>
void Aggregation_AMG_Level_Base<T_Config>::prepareNextLevelMatrix_full(const Matrix<TConfig> &A, Matrix<TConfig> &Ac)
{
    // Builds the distributed (halo) structure of the coarse matrix Ac:
    // each rank exports the coarse rows its neighbors need, exchanges them,
    // renumbers all column indices into the local coarse numbering, and
    // appends the received halo rows to Ac. Single-GPU runs return at once.
    if (A.is_matrix_singleGPU()) { return; }
    int num_neighbors = A.manager->neighbors.size();
    if (TConfig::memSpace == AMGX_host)
    {
        FatalError("Aggregation AMG Not implemented for host", AMGX_ERR_NOT_IMPLEMENTED);
    }
    else
    {
        int c_size = Ac.get_num_rows();
        int f_size = A.get_num_rows();
        int diag = Ac.hasProps(DIAG);  // nonzero if the diagonal is stored separately
        if (A.manager->B2L_rings[0].size() > 2) { FatalError("Aggregation_AMG_Level prepareNextLevelMatrix not implemented >1 halo rings", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); }
        //get coarse -> fine global renumbering
        IVector renumbering(c_size);
        int num_blocks = min(4096, (c_size + 127) / 128);
        coarse_to_global <<< num_blocks, 128>>>(this->m_aggregates.raw(), this->m_aggregates_fine_idx.raw(), renumbering.raw(), f_size, 0);
        cudaCheckError();
        //
        // Step 0 - form halo matrices that are exported to neighbors
        //
        std::vector<Matrix<TConfig> > halo_rows(num_neighbors);
        std::vector<DistributedManager<TConfig> > halo_btl(num_neighbors);
        for (int i = 0; i < num_neighbors; i++ )
        {
            // Rows this neighbor needs = the first boundary-to-local ring.
            int num_unique = Ac.manager->B2L_rings[i][1];
            //prepare export halo matrices
            halo_btl[i].resize(1, 1);
            halo_btl[i].set_global_id(Ac.manager->global_id());
            halo_btl[i].B2L_maps[0].resize(num_unique);
            halo_btl[i].B2L_rings[0].resize(2);
            halo_btl[i].B2L_rings[0][0] = 0;
            halo_btl[i].B2L_rings[0][1] = num_unique;
            halo_btl[i].set_index_range(A.manager->index_range());
            halo_btl[i].set_base_index(A.manager->base_index());
            //global indices of rows of the halo matrix
            amgx::thrust::copy(amgx::thrust::make_permutation_iterator( renumbering.begin(), Ac.manager->B2L_maps[i].begin()),
                               amgx::thrust::make_permutation_iterator( renumbering.begin(), Ac.manager->B2L_maps[i].begin() + num_unique),
                               halo_btl[i].B2L_maps[0].begin());
            cudaCheckError();
            halo_rows[i].addProps(CSR);
            if (diag) { halo_rows[i].addProps(DIAG); }
            //calculate row length and row_offsets
            halo_rows[i].row_offsets.resize(num_unique + 1);
            // Row lengths = row_offsets[r+1] - row_offsets[r] of the exported rows.
            amgx::thrust::transform(amgx::thrust::make_permutation_iterator(Ac.row_offsets.begin() + 1, Ac.manager->B2L_maps[i].begin()),
                                    amgx::thrust::make_permutation_iterator(Ac.row_offsets.begin() + 1, Ac.manager->B2L_maps[i].end()),
                                    amgx::thrust::make_permutation_iterator(Ac.row_offsets.begin(), Ac.manager->B2L_maps[i].begin()),
                                    halo_rows[i].row_offsets.begin(),
                                    amgx::thrust::minus<IndexType>());
            cudaCheckError();
            amgx::thrust::exclusive_scan(halo_rows[i].row_offsets.begin(), halo_rows[i].row_offsets.end(), halo_rows[i].row_offsets.begin());
            cudaCheckError();
            //resize halo matrix
            IndexType num_nz = halo_rows[i].row_offsets[num_unique];
            halo_rows[i].resize(num_unique, num_unique, num_nz, Ac.get_block_dimy(), Ac.get_block_dimx(), 1);
            //copy relevant rows and renumber their column indices
            num_blocks = min(4096, (num_unique + 127) / 128);
            export_matrix_elements <<< num_blocks, 128>>>(Ac.row_offsets.raw(), Ac.col_indices.raw(), Ac.values.raw(), Ac.manager->B2L_maps[i].raw(), renumbering.raw(), halo_rows[i].row_offsets.raw(), halo_rows[i].col_indices.raw(), halo_rows[i].values.raw(), A.get_block_size(), num_unique);
            cudaCheckError();
            if (diag)
            {
                // Export the separately-stored diagonal blocks as well.
                export_matrix_diagonal <<< num_blocks, 128>>>(Ac.values.raw() + Ac.row_offsets[Ac.get_num_rows()]*Ac.get_block_size(), Ac.get_block_size(), Ac.manager->B2L_maps[i].raw(), halo_rows[i].values.raw() + halo_rows[i].row_offsets[halo_rows[i].get_num_rows()]*Ac.get_block_size(), num_unique);
                cudaCheckError();
            }
        }
        Ac.manager->getComms()->exchange_matrix_halo(halo_rows, halo_btl, Ac);
        //--------------------- renumbering/reordering matrix, integrating halo -----------------------------
        Ac.set_initialized(0);
        //number of owned rows
        c_size = Ac.manager->halo_offsets[0];
        f_size = A.manager->halo_offsets[0];
        num_blocks = min(4096, (c_size + 511) / 512);
        int rings = 1;
        //
        // Step 1 - calculate inverse renumbering (to global indices - base_index)
        //
        Ac.manager->inverse_renumbering.resize(c_size);
        amgx::thrust::transform(renumbering.begin(),
                                renumbering.begin() + c_size,
                                amgx::thrust::constant_iterator<IndexType>(A.manager->base_index()),
                                Ac.manager->inverse_renumbering.begin(),
                                amgx::thrust::minus<IndexType>());
        cudaCheckError();
        //big renumbering table for going from global index to owned local index
        IVector global_to_coarse_local(Ac.manager->index_range());
        amgx::thrust::fill(global_to_coarse_local.begin(), global_to_coarse_local.begin() + Ac.manager->index_range(), -1);
        cudaCheckError();
        calc_gbl_renumbering <<< num_blocks, 512>>>(Ac.manager->inverse_renumbering.raw(), global_to_coarse_local.raw(), c_size);
        cudaCheckError();
        Ac.manager->set_num_halo_rows(Ac.manager->halo_offsets[Ac.manager->halo_offsets.size() - 1] - c_size);
        cudaCheckError();
        //
        // Step 2 - create big mapping table of all halo indices we received (this may use a little too much memory sum(fine nodes per neighbor)
        //
        amgx::thrust::host_vector<INDEX_TYPE> neighbor_rows(num_neighbors + 1);
        int max_num_rows = 0;
        for (int i = 0; i < num_neighbors; i++)
        {
            neighbor_rows[i] = halo_rows[i].manager->index_range();
            max_num_rows = max_num_rows > halo_rows[i].get_num_rows() ? max_num_rows : halo_rows[i].get_num_rows();
        }
        amgx::thrust::exclusive_scan(neighbor_rows.begin(), neighbor_rows.end(), neighbor_rows.begin());
        cudaCheckError();
        int total_rows_of_neighbors = neighbor_rows[num_neighbors];
        // halo_mapping: concatenated per-neighbor tables, -1 marks "not present".
        IVector halo_mapping(total_rows_of_neighbors);
        amgx::thrust::fill(halo_mapping.begin(), halo_mapping.end(), -1);
        cudaCheckError();
        for (int ring = 0; ring < rings; ring++)
        {
            for (int i = 0; i < num_neighbors; i++)
            {
                int size = halo_btl[i].B2L_rings[0][ring + 1] - halo_btl[i].B2L_rings[0][ring];
                int num_blocks = min(4096, (size + 127) / 128);
                create_halo_mapping <<< num_blocks, 128>>>(halo_mapping.raw() + neighbor_rows[i],
                        halo_btl[i].B2L_maps[0].raw() + halo_btl[i].B2L_rings[0][ring],
                        halo_btl[i].base_index(),
                        Ac.manager->halo_offsets[ring * num_neighbors + i], size);
            }
        }
        cudaCheckError();
        //
        // Step 3 - renumber halo matrices and calculate row length (to eventually append to the big matrix)
        //
        INDEX_TYPE owned_nnz = Ac.row_offsets[c_size];
        IVector neighbor_rows_d(num_neighbors + 1);
        amgx::thrust::copy(neighbor_rows.begin(), neighbor_rows.end(), neighbor_rows_d.begin());
        cudaCheckError();
        //map column indices of my own matrix (the ones that point outward)
        // Note: only the boundary rows (past num_interior_nodes) are processed.
        map_col_indices <<< num_blocks, 512>>>(Ac.row_offsets.raw() + Ac.manager->num_interior_nodes(),
                                               Ac.col_indices.raw(),
                                               Ac.manager->halo_ranges.raw(),
                                               halo_mapping.raw(),
                                               neighbor_rows_d.raw(),
                                               renumbering.raw(),
                                               num_neighbors, c_size, c_size - Ac.manager->num_interior_nodes());
        cudaCheckError();
        IVector temp_row_len(max_num_rows);
        for (int i = 0; i < num_neighbors; i++)
        {
            //map column indices of halo matrices
            int size = halo_rows[i].get_num_rows();
            int num_blocks = min(4096, (size + 127) / 128);
            map_col_indices_and_count_rowlen <<< num_blocks, 128, 128 * sizeof(INDEX_TYPE)>>>(
                halo_rows[i].row_offsets.raw(),
                halo_rows[i].col_indices.raw(),
                temp_row_len.raw(),
                global_to_coarse_local.raw(),
                halo_mapping.raw(),
                neighbor_rows_d.raw(),
                Ac.manager->halo_ranges.raw(),
                i,
                Ac.manager->global_id(),
                Ac.manager->base_index(),
                Ac.manager->index_range(),
                num_neighbors,
                size);
            for (int ring = 0; ring < rings; ring++)
            {
                // Scatter the computed halo row lengths into Ac's row_offsets
                // at this neighbor's halo section (scanned below).
                amgx::thrust::copy(temp_row_len.begin() + halo_btl[i].B2L_rings[0][ring], temp_row_len.begin() + halo_btl[i].B2L_rings[0][ring + 1], Ac.row_offsets.begin() + Ac.manager->halo_offsets[ring * num_neighbors + i]);
            }
        }
        cudaCheckError();
        INDEX_TYPE old_nnz = Ac.row_offsets[Ac.row_offsets.size() - 1];
        // Turn the halo row lengths into offsets, continuing after owned_nnz.
        amgx::thrust::exclusive_scan(Ac.row_offsets.begin() + c_size, Ac.row_offsets.end(), Ac.row_offsets.begin() + c_size, owned_nnz);
        cudaCheckError();
        //
        // Step 4 - consolidate column indices and values
        //
        int new_nnz = Ac.row_offsets[Ac.row_offsets.size() - 1];
        Ac.col_indices.resize(new_nnz);
        Ac.values.resize((new_nnz + 1 + diag * (Ac.row_offsets.size() - 2)) * A.get_block_size());
        if (diag)
        {
            // Relocate the owned diagonal blocks to their new position behind
            // the enlarged off-diagonal value array.
            MVector diags(c_size * Ac.get_block_size());
            amgx::thrust::copy(Ac.values.begin() + old_nnz * Ac.get_block_size(),
                               Ac.values.begin() + old_nnz * Ac.get_block_size() + c_size * Ac.get_block_size(),
                               diags.begin());
            amgx::thrust::copy(diags.begin(), diags.begin() + c_size * Ac.get_block_size(),
                               Ac.values.begin() + Ac.row_offsets[Ac.get_num_rows()]*Ac.get_block_size());
            cudaCheckError();
        }
        int cumulative_num_rows = c_size;
        for (int i = 0; i < num_neighbors; i++)
        {
            for (int ring = 0; ring < rings; ring++)
            {
                // Append the received halo rows (dropping invalidated columns)
                // into Ac at this neighbor's halo section.
                int num_rows = halo_btl[i].B2L_rings[0][ring + 1] - halo_btl[i].B2L_rings[0][ring];
                int num_blocks = min(4096, (num_rows + 127) / 128);
                reorder_whole_matrix <<< num_blocks, 128>>>(halo_rows[i].row_offsets.raw() + halo_btl[i].B2L_rings[0][ring], halo_rows[i].col_indices.raw(), halo_rows[i].values.raw(), Ac.row_offsets.raw() + Ac.manager->halo_offsets[ring * num_neighbors + i], Ac.col_indices.raw(), Ac.values.raw(), Ac.get_block_size(), num_rows);
                if (diag)
                {
                    // Append the received halo diagonal blocks as well.
                    amgx::thrust::copy(halo_rows[i].values.begin() + (halo_rows[i].row_offsets[halo_rows[i].get_num_rows()] + halo_btl[i].B2L_rings[0][ring])*Ac.get_block_size(),
                                       halo_rows[i].values.begin() + (halo_rows[i].row_offsets[halo_rows[i].get_num_rows()] + halo_btl[i].B2L_rings[0][ring + 1])*Ac.get_block_size(),
                                       Ac.values.begin() + (Ac.row_offsets[Ac.get_num_rows()] + cumulative_num_rows)*Ac.get_block_size());
                    cumulative_num_rows += num_rows;
                }
            }
        }
        cudaCheckError();
        // Finalize: Ac is square over owned + halo rows.
        Ac.set_num_cols(Ac.manager->halo_offsets[Ac.manager->halo_offsets.size() - 1]);
        Ac.set_num_rows(Ac.get_num_cols());
        Ac.set_num_nz(new_nnz);
        Ac.delProps(COO);
        Ac.set_initialized(1);
        Ac.computeDiagonal();
    }
}
template <class T_Config>
void Aggregation_AMG_Level_Base<T_Config>::prepareNextLevelMatrix_diag(const Matrix<TConfig> &A, Matrix<TConfig> &Ac)
{
    // Halo setup for the "diag" matrix-halo-exchange strategy: instead of full halo
    // rows, only the diagonal blocks of boundary rows are exchanged with neighbors.
    // Single-GPU matrices have no halos, nothing to do.
    if (A.is_matrix_singleGPU()) { return; }
    int num_neighbors = A.manager->neighbors.size();
    if (TConfig::memSpace == AMGX_host)
    {
        FatalError("Aggregation AMG Not implemented for host", AMGX_ERR_NOT_IMPLEMENTED);
    }
    else
    {
        int c_size = Ac.manager->halo_offsets[0];   // owned coarse rows (halo starts here)
        int f_size = A.manager->halo_offsets[0];    // owned fine rows
        int diag = Ac.hasProps(DIAG);               // nonzero: diagonal stored outside rows
        Ac.manager->inverse_renumbering.resize(c_size);
        //get coarse -> fine renumbering
        int num_blocks = min(4096, (c_size + 127) / 128);
        coarse_to_global <<< num_blocks, 128>>>(this->m_aggregates.raw(), this->m_aggregates_fine_idx.raw(), Ac.manager->inverse_renumbering.raw(), f_size, -1 * A.manager->base_index());
        cudaCheckError();
        Ac.manager->set_num_halo_rows(Ac.manager->halo_offsets[Ac.manager->halo_offsets.size() - 1] - c_size);
        if (!diag) { Ac.computeDiagonal(); }
        Ac.set_initialized(1);
        // Pack the diagonal blocks of our boundary rows, one buffer per neighbor.
        std::vector<MVector> diagonals(num_neighbors);
        for (int i = 0; i < num_neighbors; i++)
        {
            // BUGFIX: index neighbor i's ring array with its own last ring,
            // B2L_rings[i].size() - 1, not with B2L_rings.size() - 1 (the neighbor
            // count), which is unrelated to the ring count and reads the wrong entry
            // whenever num_neighbors != number of rings + 1. Matches the idiom used
            // elsewhere in this file (e.g. createCoarseB2LMaps / consolidation path).
            int size = Ac.manager->B2L_rings[i][Ac.manager->B2L_rings[i].size() - 1];
            diagonals[i].resize(Ac.get_block_size()*size);
            int num_blocks = min(4096, (size + 127) / 128);
            write_diagonals <<< num_blocks, 128>>>(Ac.values.raw(), Ac.diag.raw(), Ac.manager->B2L_maps[i].raw(), diagonals[i].raw(), Ac.get_block_size(), size);
        }
        cudaCheckError();
        Ac.manager->getComms()->exchange_vectors(diagonals, Ac, this->tag * 100 + 10 + 2);
        // Unpack the received diagonal blocks into our halo rows.
        for (int i = 0; i < num_neighbors; i++)
        {
            int size = Ac.manager->halo_offsets[i + 1] - Ac.manager->halo_offsets[i];
            if (Ac.hasProps(DIAG)) { amgx::thrust::copy(diagonals[i].begin(), diagonals[i].begin() + Ac.get_block_size()*size, Ac.values.begin() + Ac.get_block_size() * (Ac.diagOffset() + Ac.manager->halo_offsets[i])); }
            else
            {
                int num_blocks = min(4096, (size + 127) / 128);
                write_diagonals_back <<< num_blocks, 128>>>(Ac.values.raw(), Ac.diag.raw() + Ac.manager->halo_offsets[i], diagonals[i].raw(), Ac.get_block_size(), size);
            }
        }
        cudaCheckError();
    }
}
template <class T_Config>
void Aggregation_AMG_Level_Base<T_Config>::prepareNextLevelMatrix_none(const Matrix<TConfig> &A, Matrix<TConfig> &Ac)
{
    // Halo setup when no matrix halo exchange is performed: only the
    // coarse->fine renumbering and the halo-row bookkeeping are produced.
    if (A.is_matrix_singleGPU()) { return; }
    int num_neighbors = A.manager->neighbors.size();
    if (TConfig::memSpace == AMGX_host)
    {
        FatalError("Aggregation AMG Not implemented for host", AMGX_ERR_NOT_IMPLEMENTED);
    }
    else
    {
        int coarse_size = Ac.manager->halo_offsets[0];
        int fine_size = A.manager->halo_offsets[0];
        int has_external_diag = Ac.hasProps(DIAG);
        Ac.manager->inverse_renumbering.resize(coarse_size);
        // Build the coarse -> fine renumbering from the aggregates arrays.
        int grid_size = min(4096, (coarse_size + 127) / 128);
        coarse_to_global <<< grid_size, 128>>>(this->m_aggregates.raw(), this->m_aggregates_fine_idx.raw(), Ac.manager->inverse_renumbering.raw(), fine_size, 0);
        cudaCheckError();
        Ac.manager->set_num_halo_rows(Ac.manager->halo_offsets[Ac.manager->halo_offsets.size() - 1] - coarse_size);
        Ac.set_initialized(1);
        if (!has_external_diag) { Ac.computeDiagonal(); }
    }
}
template <class T_Config>
void Aggregation_AMG_Level_Base<T_Config>::prepareNextLevelMatrix(const Matrix<TConfig> &A, Matrix<TConfig> &Ac)
{
    // Dispatch to the configured halo-exchange strategy for the coarse matrix.
    switch (m_matrix_halo_exchange)
    {
        case 0:
            this->prepareNextLevelMatrix_none(A, Ac);
            break;
        case 1:
            this->prepareNextLevelMatrix_diag(A, Ac);
            break;
        case 2:
            this->prepareNextLevelMatrix_full(A, Ac);
            break;
        default:
            FatalError("Invalid Aggregation matrix_halo_exchange parameter", AMGX_ERR_NOT_IMPLEMENTED);
    }
}
// Accumulates, for each of `size` halo rows, the row length implied by the CSR
// offsets in `work` into `output`. Empty rows are skipped; when the diagonal is
// stored inside the row (diag == 0) one entry is discounted. Grid-stride loop.
__global__ void set_halo_rowlen(INDEX_TYPE *work, INDEX_TYPE *output, INDEX_TYPE size, INDEX_TYPE diag)
{
    const int stride = blockDim.x * gridDim.x;
    for (int row = blockDim.x * blockIdx.x + threadIdx.x; row < size; row += stride)
    {
        INDEX_TYPE len = work[row + 1] - work[row];
        if (len > 0)
        {
            output[row] += len - (1 - diag);
        }
    }
}
// Merges received halo-row nonzeros (row_offsets/col_indices/values) into an
// already-enlarged destination matrix (new_*). Unfilled destination slots are
// marked with column index -1; each source nonzero is appended at the first
// such slot of its row. Diagonal entries get special treatment depending on
// whether the diagonal is stored outside the rows (diag != 0).
// Grid-stride loop: each thread processes whole halo rows.
//   size        - number of halo rows handled by this launch
//   halo_offset - row/column offset of this halo block in the merged matrix
//   block_size  - scalars per matrix block
template <typename T>
__global__ void append_halo_nz(INDEX_TYPE *row_offsets, INDEX_TYPE *new_row_offsets, INDEX_TYPE *col_indices, INDEX_TYPE *new_col_indices, T *values, T *new_values, INDEX_TYPE size, INDEX_TYPE diag, INDEX_TYPE halo_offset, INDEX_TYPE block_size)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    while (idx < size)
    {
        int add_diag = !diag;
        if (!diag && new_col_indices[new_row_offsets[idx]] != -1) { add_diag = 0; } //if diag or there is already something in the row, then don't add diagonal nonzero (inside diag)
        // Locate the first unfilled (-1) slot in the destination row; stays -1 if none.
        int append_offset = -1;
        for (int i = new_row_offsets[idx]; i < new_row_offsets[idx + 1]; i++)
        {
            if (new_col_indices[i] == -1) {append_offset = i; break;}
        }
        // Walk the source row and append its nonzeros.
        for (int i = row_offsets[idx]; i < row_offsets[idx + 1]; i++)
        {
            if (diag && i == row_offsets[idx]) //if outside diag and this is the first nonzero in a non-empty row, overwrite diagonal value
            {
                // Copy the externally-stored diagonal block of this halo row.
                for (int j = 0; j < block_size; j++)
                {
                    new_values[(new_row_offsets[size] + halo_offset + idx)*block_size + j] = values[(row_offsets[size] + halo_offset + idx) * block_size + j];
                }
            }
            int col_idx = col_indices[i];
            // Sanity check: a row with off-diagonal nonzeros must have free slots.
            if (append_offset == -1 && (col_idx != halo_offset + idx)) {printf("ERROR: append offset is -1 but row has nonzeros in it old %d to %d new %d to %d\n", row_offsets[idx], row_offsets[idx + 1], new_row_offsets[idx], new_row_offsets[idx + 1]); append_offset = 0;}
            // Skip the in-row diagonal entry unless it must be materialized (add_diag).
            if (col_idx != halo_offset + idx || add_diag)
            {
                new_col_indices[append_offset] = col_idx;
                for (int j = 0; j < block_size; j++)
                {
                    new_values[append_offset * block_size + j] = values[i * block_size + j];
                }
                append_offset++;
            }
        }
        idx += blockDim.x * gridDim.x;
    }
}
// Builds the coarse-level boundary-to-local (B2L) maps from the fine-level ones:
// every fine boundary node is mapped to its aggregate, duplicates are removed,
// and the surviving aggregate indices are restored to their original relative
// order. Results are written into in_coarse_B2L_maps (one entry per neighbor).
template <class T_Config>
void Aggregation_AMG_Level_Base<T_Config>::createCoarseB2LMaps(std::vector<IVector> &in_coarse_B2L_maps)
{
    Matrix<TConfig> &A = this->getA();
    m_num_all_aggregates = m_num_aggregates;
    int num_neighbors = A.manager->neighbors.size();
    // Size scratch buffers for the largest 1-ring boundary among all neighbors.
    IndexType max_b2l = 0;
    for (int i = 0; i < num_neighbors; i++ ) { max_b2l = max_b2l > A.manager->B2L_rings[i][1] ? max_b2l : A.manager->B2L_rings[i][1]; }
    IVector B2L_aggregates(max_b2l);
    IVector indices(max_b2l);
    for (int i = 0; i < num_neighbors; i++ )
    {
        int size = A.manager->B2L_rings[i][1];
        amgx::thrust::fill(B2L_aggregates.begin(), B2L_aggregates.begin() + size, 0);
        amgx::thrust::sequence(indices.begin(), indices.begin() + size);
        //substitute coarse aggregate indices for fine boundary nodes
        amgx::thrust::copy(amgx::thrust::make_permutation_iterator(this->m_aggregates.begin(), A.manager->B2L_maps[i].begin()),
                           amgx::thrust::make_permutation_iterator(this->m_aggregates.begin(), A.manager->B2L_maps[i].begin() + size),
                           B2L_aggregates.begin());
        //find the unique ones
        amgx::thrust::sort_by_key(B2L_aggregates.begin(), B2L_aggregates.begin() + size, indices.begin());
        IndexType num_unique = amgx::thrust::unique_by_key(B2L_aggregates.begin(), B2L_aggregates.begin() + size, indices.begin()).first - B2L_aggregates.begin();
        in_coarse_B2L_maps[i].resize(num_unique);
        //sort it back so we have the original ordering
        amgx::thrust::sort_by_key(indices.begin(), indices.begin() + num_unique, B2L_aggregates.begin());
        amgx::thrust::copy(B2L_aggregates.begin(), B2L_aggregates.begin() + num_unique, in_coarse_B2L_maps[i].begin());
    }
    cudaCheckError();
}
// Scatters each boundary node's coarse aggregate id into its renumbered slot:
// flags holds exclusive-scan positions, maps[indices[i]] is the aggregate id.
// Grid-stride loop over the `size` fine boundary nodes.
__global__ void populate_coarse_boundary(INDEX_TYPE *flags, INDEX_TYPE *indices, INDEX_TYPE *maps, INDEX_TYPE *output, INDEX_TYPE size)
{
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < size; i += stride)
    {
        INDEX_TYPE aggregate = maps[indices[i]];
        output[flags[aggregate]] = aggregate;
    }
}
// Marks (sets to 1) every aggregate that contains at least one of the `size`
// fine boundary nodes listed in `indices`. Grid-stride loop.
__global__ void flag_coarse_boundary(INDEX_TYPE *flags, INDEX_TYPE *indices, INDEX_TYPE *maps, INDEX_TYPE size)
{
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < size; i += stride)
    {
        flags[maps[indices[i]]] = 1;
    }
}
// Sets a flag for each value appearing in `indices`, shifted down by `offset`
// so the flag array only spans the observed index range. Grid-stride loop.
__global__ void flag_halo_indices(INDEX_TYPE *flags, INDEX_TYPE *indices, INDEX_TYPE offset, INDEX_TYPE size)
{
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < size; i += stride)
    {
        flags[indices[i] - offset] = 1;
    }
}
// Rewrites each halo entry to its compacted local aggregate index:
// flags holds exclusive-scan positions over the flagged range (shifted by
// `offset`), and `aggregates_offset` relocates the result past the owned ones.
__global__ void apply_halo_aggregate_indices(INDEX_TYPE *flags, INDEX_TYPE *indices, INDEX_TYPE *output, INDEX_TYPE offset, INDEX_TYPE aggregates_offset, INDEX_TYPE size)
{
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < size; i += stride)
    {
        output[i] = flags[indices[i] - offset] + aggregates_offset;
    }
}
// renumbering the aggregates/communicationg with neighbors
// Renumbers the aggregates (interior first, boundary last), sets up the coarse
// DistributedManager metadata (neighbors, B2L maps/rings, halo offsets) and
// exchanges aggregate information with the neighbors so halo nodes receive
// consistent coarse indices. No-op on a single GPU.
template <class T_Config>
void Aggregation_AMG_Level_Base<T_Config>::setNeighborAggregates()
{
    Matrix<TConfig> &A = this->getA();
    Matrix<TConfig> &Ac = this->getNextLevel( MemorySpace( ) )->getA();
    m_num_all_aggregates = m_num_aggregates;
    /* WARNING: the matrix reordering always happens inside createRenumbering routine. There are three ways to get to this routine
       1. matrix_upload_all -> uploadMatrix -> initializeUploadReorderAll -> reorder_matrix -> createRenumbering
       2. read_system_distributed -> renumberMatrixOneRing -> reorder_matrix_owned -> createRenumbering
       3. solver_setup -> ... -> AMG_Level::setup -> createCoarseMatrices -> setNeighborAggregates -> createRenumbering
       If you are reading the renumbering from file you might need to add intercept code in if statement below,
       otherwise this routine will exit before calling createRenumbering routine (in case of single or disjoint partitions).
    */
    if (this->getA().is_matrix_singleGPU()) { return; }
    int num_neighbors = A.manager->neighbors.size();
    //
    // Step 0 - set up coarse matrix metadata
    //
    if (Ac.manager == NULL) { Ac.manager = new DistributedManager<T_Config>(); }
    Ac.manager->resize(A.manager->neighbors.size(), 1);
    Ac.manager->A = &Ac;
    int f_size = A.get_num_rows();
    // The coarse level inherits the fine level's communicator, id and partitioning info.
    Ac.manager->setComms(A.manager->getComms());
    Ac.manager->set_global_id(A.manager->global_id());
    Ac.manager->neighbors = A.manager->neighbors;
    Ac.manager->set_base_index(A.manager->base_index());
    Ac.manager->halo_ranges = A.manager->halo_ranges;
    Ac.manager->set_index_range(A.manager->index_range());
    //-------------------------------------- Section 1 - renumbering -----------------------------------------------------------
    //
    // Step 1 - calculate coarse level B2L maps - any aggregate that has a fine boundary node, becomes a coarse boundary node
    //
    m_num_all_aggregates = m_num_aggregates;
    int vec_size = m_num_aggregates + 1; //A.manager->num_boundary_nodes()+1;
    IVector B2L_aggregates(vec_size);
    for (int i = 0; i < A.manager->neighbors.size(); i++)
    {
        amgx::thrust::fill(B2L_aggregates.begin(), B2L_aggregates.begin() + vec_size, 0);
        int size = A.manager->B2L_rings[i][1];
        int block_size = 128;
        int grid_size = std::min( 4096, ( size + block_size - 1 ) / block_size);
        // Flag aggregates touching this neighbor's boundary, scan to compact them,
        // then write the compacted coarse B2L map.
        flag_coarse_boundary <<< grid_size, block_size>>>(B2L_aggregates.raw(), A.manager->B2L_maps[i].raw(), this->m_aggregates.raw(), size);
        amgx::thrust::exclusive_scan(B2L_aggregates.begin(), B2L_aggregates.begin() + vec_size, B2L_aggregates.begin());
        (Ac.manager->B2L_maps)[i].resize(B2L_aggregates[vec_size - 1]);
        populate_coarse_boundary <<< grid_size, block_size>>>(B2L_aggregates.raw(), A.manager->B2L_maps[i].raw(), this->m_aggregates.raw(), Ac.manager->B2L_maps[i].raw(), size);
    }
    cudaCheckError();
    // Single ring on the coarse level: [0, size-of-map].
    for (int i = 0; i < num_neighbors; i++)
    {
        Ac.manager->B2L_rings[i].resize(2);
        Ac.manager->B2L_rings[i][0] = 0;
        Ac.manager->B2L_rings[i][1] = Ac.manager->B2L_maps[i].size();
    }
    DistributedArranger<T_Config> *prep = new DistributedArranger<T_Config>;
    prep->initialize_B2L_maps_offsets(Ac, 1);
    delete prep;
    Ac.set_num_rows(m_num_aggregates);
    IVector renumbering(m_num_aggregates + 1); /* +1 is actually not needed, it will be resized in createRenumbering */
    Ac.manager->createRenumbering(renumbering);
    //
    // Step 2 - renumber aggregates, so boundary nodes will have higher index than interior ones (based on the renumbering we have been calculating)
    //
    /* WARNING: 1. Thrust scatter and gather routines seem more appropriate here, but they implicitly assume that the input
                and output have certain size correlation, which is not matched by vectors in our case. The only remaining option
                is to use make_permutation as is done below. Example of Thrust scatter and gather calls
                IVector ttt(f_size,-1);
                amgx::thrust::scatter(this->m_aggregates.begin(), this->m_aggregates.begin()+f_size, renumbering.begin(), ttt.begin());
                amgx::thrust::gather(renumbering.begin(), renumbering.end(), this->m_aggregates.begin(), ttt.begin());
                amgx::thrust::copy(ttt.begin(), ttt.end(), this->m_aggregates.begin());
                2. The original thrust composite call is illegal because it uses the same array (m_aggregates) for input and output.
                amgx::thrust::copy(amgx::thrust::make_permutation_iterator(renumbering.begin(), this->m_aggregates.begin()),
                                   amgx::thrust::make_permutation_iterator(renumbering.begin(), this->m_aggregates.begin()+f_size),
                                   this->m_aggregates.begin());
                Although it somehow still works, it is much safer to use explicit temporary storage for the intermediate result.
    */
    /* WARNING: must save unreordered aggregates for later use before reordering them. */
    IVector unreordered_aggregates(this->m_aggregates);
    /* WARNING: change Thrust call to explicitly use temporary storage for the intermediate result. The earlier version is illegal, but somehow still works. */
    IVector ttt(f_size, -1);
    amgx::thrust::copy(amgx::thrust::make_permutation_iterator(renumbering.begin(), this->m_aggregates.begin()),
                       amgx::thrust::make_permutation_iterator(renumbering.begin(), this->m_aggregates.begin() + f_size),
                       ttt.begin());
    amgx::thrust::copy(ttt.begin(), ttt.end(), this->m_aggregates.begin());
    cudaCheckError();
    //we don't need renumbering anymore, it will be identity on the coarse level
    //-------------------------------------- Section 2 - communication -----------------------------------------------------------
    //
    // Step 3 - populate aggregates_fine_idx, which stores for every fine node the original global index of the aggregate (which is lowest global index of nodes aggregated together)
    //
    //
    // These are different when we do /don't do matrix halo exchanges - when we do we need global indices to match nodes,
    // and in this case Ac after computeA will not have the same ordering of halo nodes as after prepareNextLevel_full.
    // However when we do not do matrix halo exchange we are only interested in the ordering of halo nodes on the coarse level,
    // and we can get that by exchanging the (already renumbered) aggregates vector.
    //
    if (m_matrix_halo_exchange == 2)
    {
        //Find original global indices of nodes that have the minimum id in the aggregates.
        amgx::thrust::copy(amgx::thrust::make_permutation_iterator(A.manager->inverse_renumbering.begin(), this->m_aggregates_fine_idx.begin()),
                           amgx::thrust::make_permutation_iterator(A.manager->inverse_renumbering.begin(), this->m_aggregates_fine_idx.begin() + f_size),
                           this->m_aggregates_fine_idx.begin());
        // Shift local indices to global ones by adding this partition's base index.
        amgx::thrust::transform(this->m_aggregates_fine_idx.begin(),
                                this->m_aggregates_fine_idx.begin() + f_size,
                                amgx::thrust::constant_iterator<IndexType>(A.manager->base_index()),
                                this->m_aggregates_fine_idx.begin(),
                                amgx::thrust::plus<IndexType>());
        //communicate
        this->m_aggregates_fine_idx.set_block_dimx(1);
        this->m_aggregates_fine_idx.set_block_dimy(1);
        m_aggregates_fine_idx.dirtybit = 1;
        A.manager->exchange_halo(m_aggregates_fine_idx, this->tag * 100 + 1 * 10 + 0);
    }
    else
    {
        //communicate
        this->m_aggregates.set_block_dimx(1);
        this->m_aggregates.set_block_dimy(1);
        m_aggregates.dirtybit = 1;
        /* WARNING: you should exchange unreordered aggregates, and append them to your own reordered aggregates, to conform to assumptions made by distributed_manager. */
        //A.manager->exchange_halo(m_aggregates, this->tag*100+1*10+0); //wrong
        A.manager->exchange_halo(unreordered_aggregates, this->tag * 100 + 1 * 10 + 0);
        amgx::thrust::copy(unreordered_aggregates.begin() + f_size, unreordered_aggregates.end(), this->m_aggregates.begin() + f_size);
    }
    cudaCheckError();
    //
    // Step 4 - consolidate neighbors' aggregates into own list to be able to perform Galerkin product with the n-ring halo
    //
    IVector &exchanged_aggregates = m_matrix_halo_exchange == 2 ? this->m_aggregates_fine_idx : this->m_aggregates;
    // Range of the received aggregate ids over the whole halo; used to size the
    // scratch/flag array below.
    int min_index = amgx::thrust::reduce(exchanged_aggregates.begin() + A.manager->halo_offsets[0], exchanged_aggregates.begin() + A.manager->halo_offsets[num_neighbors], (int)0xFFFFFFF, amgx::thrust::minimum<int>());
    int max_index = amgx::thrust::reduce(exchanged_aggregates.begin() + A.manager->halo_offsets[0], exchanged_aggregates.begin() + A.manager->halo_offsets[num_neighbors], (int)0, amgx::thrust::maximum<int>());
    cudaCheckError();
    int s_size = max_index - min_index + 2;
    IVector scratch(s_size);
    for (int i = 0; i < num_neighbors; i++)
    {
        int size = A.manager->halo_offsets[i + 1] - A.manager->halo_offsets[i];
        //Could also use local minimums to perform the same operation. The results are the same.
        //int min_local = amgx::thrust::reduce(exchanged_aggregates.begin()+A.manager->halo_offsets[i], exchanged_aggregates.begin()+A.manager->halo_offsets[i+1], (int)0xFFFFFFF, amgx::thrust::minimum<int>());
        amgx::thrust::fill(scratch.begin(), scratch.begin() + s_size, 0);
        int block_size = 128;
        int grid_size = std::min( 4096, ( size + block_size - 1 ) / block_size);
        // Flag the distinct aggregate ids seen in this neighbor's halo, compact them
        // with an exclusive scan, then map every halo node to its new local index.
        flag_halo_indices <<< grid_size, block_size>>>(scratch.raw(), exchanged_aggregates.raw() + A.manager->halo_offsets[i], min_index /*min_local*/, size);
        amgx::thrust::exclusive_scan(scratch.begin(), scratch.begin() + s_size, scratch.begin());
        apply_halo_aggregate_indices <<< grid_size, block_size>>>(scratch.raw(), exchanged_aggregates.raw() + A.manager->halo_offsets[i], this->m_aggregates.raw() + A.manager->halo_offsets[i], min_index /*min_local*/, m_num_all_aggregates, size);
        Ac.manager->halo_offsets[i] = m_num_all_aggregates;
        m_num_all_aggregates += scratch[s_size - 1];
    }
    cudaCheckError();
    Ac.manager->halo_offsets[num_neighbors] = m_num_all_aggregates;
}
//TODO: The consolidate and unconsolidate parts could be made more efficient by only sending the
// nonzero values
// Gathers the pieces of vector x from all partitions being consolidated onto the
// root partition. Two segments are transferred per partition — the (i_off, i_size)
// and (b_off, b_size) ranges from getConsolidationOffsets (presumably interior and
// boundary parts — TODO confirm) — using distinct tag bases (10000/20000) so the
// two messages from the same sender cannot be confused.
template <class T_Config>
void Aggregation_AMG_Level_Base<T_Config>::consolidateVector(VVector &x)
{
    int my_id = this->getA().manager->global_id();
    if (this->getA().manager->isRootPartition())
    {
        // Here all partitions being consolidated should have same vector size, see TODO above
        INDEX_TYPE num_parts = this->getA().manager->getNumPartsToConsolidate();
        for (int i = 0; i < num_parts; i++)
        {
            int current_part = this->getA().manager->getPartsToConsolidate()[i];
            // Vector has been set to correct size
            if (current_part != my_id)
            {
                // Receive both segments directly into their final positions, given by
                // the consolidation array offsets (scaled by the vector's block size).
                //printf("Root partition %d receiving %d -> %d and %d -> %d (total %d)\n", this->getA().manager->global_id(), this->getA().manager->getConsolidationArrayOffsets()[i], this->getA().manager->getConsolidationArrayOffsets()[i+1], this->getA().manager->getConsolidationArrayOffsets()[num_parts+i], this->getA().manager->getConsolidationArrayOffsets()[num_parts+i+1], (int)x.size()/x.get_block_size());
                this->getA().manager->getComms()->recv_vector(x, current_part, 10000 + current_part, x.get_block_size()*this->getA().manager->getConsolidationArrayOffsets()[i], x.get_block_size() * (this->getA().manager->getConsolidationArrayOffsets()[i + 1] - this->getA().manager->getConsolidationArrayOffsets()[i]));
                this->getA().manager->getComms()->recv_vector(x, current_part, 20000 + current_part, x.get_block_size()*this->getA().manager->getConsolidationArrayOffsets()[num_parts + i], x.get_block_size() * (this->getA().manager->getConsolidationArrayOffsets()[num_parts + i + 1] - this->getA().manager->getConsolidationArrayOffsets()[num_parts + i]));
            }
        }
    }
    else
    {
        // Non-root: asynchronously send our two segments to the destination root.
        int my_destination_part = this->getA().manager->getMyDestinationPartition();
        int i_off, i_size, b_off, b_size;
        this->getA().manager->getConsolidationOffsets(&i_off, &i_size, &b_off, &b_size);
        // Here all partitions being consolidated should have same vector size, see TODO above
        this->getA().manager->getComms()->send_vector_async(x, my_destination_part, 10000 + my_id, i_off * x.get_block_size(), i_size * x.get_block_size());
        this->getA().manager->getComms()->send_vector_async(x, my_destination_part, 20000 + my_id, b_off * x.get_block_size(), b_size * x.get_block_size());
    }
}
//TODO: The consolidate and unconsolidate parts could be made more efficient by only sending the
// nonzero values
// Inverse of consolidateVector: the root partition scatters the segments of x back
// to the partitions they came from, using tag bases 30000/40000 (distinct from the
// consolidation direction so the two phases cannot interfere).
template <class T_Config>
void Aggregation_AMG_Level_Base<T_Config>::unconsolidateVector(VVector &x)
{
    if (this->getA().manager->isRootPartition())
    {
        INDEX_TYPE num_parts = this->getA().manager->getNumPartsToConsolidate();
        for (int i = 0; i < num_parts; i++)
        {
            int current_part = this->getA().manager->getPartsToConsolidate()[i];
            // Vector has been set to correct size
            if (current_part != this->getA().manager->global_id())
            {
                // Send both segments from the positions given by the consolidation
                // array offsets (scaled by block size) back to their owner.
                this->getA().manager->getComms()->send_vector_async(x, current_part, 30000 + current_part, x.get_block_size()*this->getA().manager->getConsolidationArrayOffsets()[i], x.get_block_size() * (this->getA().manager->getConsolidationArrayOffsets()[i + 1] - this->getA().manager->getConsolidationArrayOffsets()[i]));
                this->getA().manager->getComms()->send_vector_async(x, current_part, 40000 + current_part, x.get_block_size()*this->getA().manager->getConsolidationArrayOffsets()[num_parts + i], x.get_block_size() * (this->getA().manager->getConsolidationArrayOffsets()[num_parts + i + 1] - this->getA().manager->getConsolidationArrayOffsets()[num_parts + i]));
            }
        }
    }
    else
    {
        // Non-root: receive our two segments from the root into local offsets.
        int my_destination_part = this->getA().manager->getMyDestinationPartition();
        // Vector x is of unknown size
        int i_off, i_size, b_off, b_size;
        this->getA().manager->getConsolidationOffsets(&i_off, &i_size, &b_off, &b_size);
        this->getA().manager->getComms()->recv_vector(x, my_destination_part, 30000 + this->getA().manager->global_id(), i_off * x.get_block_size(), i_size * x.get_block_size());
        this->getA().manager->getComms()->recv_vector(x, my_destination_part, 40000 + this->getA().manager->global_id(), b_off * x.get_block_size(), b_size * x.get_block_size());
    }
}
template <class T_Config>
void Aggregation_AMG_Level_Base<T_Config>::createCoarseVertices()
{
    // Let the selector compute the aggregates (the implicit prolongation) and
    // their count, optionally printing diagnostic information about them.
    Matrix<TConfig> &A = this->getA();
    this->m_selector->setAggregates(A, this->m_aggregates, this->m_aggregates_fine_idx, this->m_num_aggregates);
    if (this->m_print_aggregation_info)
    {
        this->m_selector->printAggregationInfo(this->m_aggregates, this->m_aggregates_fine_idx, this->m_num_aggregates);
    }
    // Record the number of aggregates on the matrix for downstream consumers.
    A.template setParameter< int > ("aggregates_num", this->m_num_aggregates);
}
// Creating the next level
// Drives the construction of the coarse-level matrix Ac: bookkeeping (either the
// consolidation path or plain neighbor-aggregate setup), restriction operator,
// Galerkin product via the coarse-A generator, and the per-strategy halo setup.
// Renumbering/restriction are skipped when the level structure is reused.
template <class T_Config>
void Aggregation_AMG_Level_Base<T_Config>::createCoarseMatrices()
{
    Matrix<TConfig> &A = this->getA();
    Matrix<TConfig> &Ac = this->getNextLevel( MemorySpace( ) )->getA();
    /* WARNING: do not recompute prolongation (P) and restriction (R) when you
       are reusing the level structure (structure_reuse_levels > 0).
       Notice that in aggregation path, prolongation P is implicit,
       and is used through the aggregates array. */
    bool const consolidation_level = !A.is_matrix_singleGPU() && this->isConsolidationLevel();
    // bookkeeping for the coarse grid: renumber aggregates,
    // if consolidation compute consolidated halo-offsets, etc
    if (!this->isReuseLevel())
    {
        if (consolidation_level)
        {
            // Consolidation-path steps 1-9
            this->consolidationBookKeeping();
        }
        else
        {
            this->setNeighborAggregates();
        }
    }
    this->getA().setView(ALL);
    // Compute restriction operator
    // TODO: computing the restriction operator could be merged with the selector to save some work
    // If we reuse the level we keep the previous restriction operator
    if (this->isReuseLevel() == false)
    {
        computeRestrictionOperator();
    }
    // Galerkin product: Ac = R * A * P (P implicit via the aggregates array).
    Ac.set_initialized(0);
    Ac.copyAuxData(&A);
    this->m_coarseAGenerator->computeAOperator(A, Ac, this->m_aggregates, this->m_R_row_offsets, this->m_R_column_indices, this->m_num_all_aggregates);
    Ac.setColsReorderedByColor(false);
    Ac.setView(FULL);
    if (consolidation_level)
    {
        // Consolidation-path Steps 11-12, send matrices to root, consolidate, final bookkeeping
        this->consolidateCoarseGridMatrix();
    }
    else
    {
        this->prepareNextLevelMatrix(A, Ac);
    }
    A.setView(OWNED);
    Ac.setView(OWNED);
    this->m_next_level_size = this->m_num_all_aggregates * Ac.get_block_dimy();
    if (this->m_print_aggregation_info)
    {
        // Optional quality report on the aggregation just performed.
        MatrixAnalysis<TConfig> ana(&Ac);
        ana.aggregatesQuality2(this->m_aggregates, this->m_num_aggregates, A);
    }
}
template <class T_Config>
void Aggregation_AMG_Level_Base<T_Config>::consolidationBookKeeping()
{
Matrix<TConfig> &A = this->getA();
Matrix<TConfig> &Ac = this->getNextLevel( MemorySpace( ) )->getA();
int num_parts, num_fine_neighbors, my_id;
if (!A.is_matrix_singleGPU())
{
num_parts = A.manager->getComms()->get_num_partitions();
num_fine_neighbors = A.manager->neighbors.size();
my_id = A.manager->global_id();
}
else
{
num_parts = 1;
num_fine_neighbors = 0;
my_id = 0;
}
// ----------------------------------------------------
// Consolidate multiple fine matrices into one coarse matrix
// ----------------------------------------------------
// ----------------
// Step 1
// Decide which partitions should be merged together, store in destination_partitions vector
// ---------------
IVector_h &destination_part = A.manager->getDestinationPartitions();
int my_destination_part = A.manager->getMyDestinationPartition();
if (my_destination_part >= num_parts)
{
FatalError("During consolidation, sending data to partition that doesn't exist", AMGX_ERR_NOT_IMPLEMENTED);
}
// Create mapping from coarse partition indices (ranks on the coarse consolidated level) to partition indices on the fine level (ranks on the fine level)
IVector_h coarse_part_to_fine_part = destination_part;
thrust::sort(coarse_part_to_fine_part.begin(), coarse_part_to_fine_part.end());
cudaCheckError();
coarse_part_to_fine_part.erase(thrust::unique(coarse_part_to_fine_part.begin(), coarse_part_to_fine_part.end()), coarse_part_to_fine_part.end());
cudaCheckError();
//Then, the number of coarse partitions is simply the size of this vector
int num_coarse_partitions = coarse_part_to_fine_part.size();
// Create mapping from fine partition indices to coarse partition indices, with fine partitions that are merging together having the same coarse indices
IVector_h fine_part_to_coarse_part(num_parts);
thrust::lower_bound(coarse_part_to_fine_part.begin(), coarse_part_to_fine_part.end(), destination_part.begin(), destination_part.end(), fine_part_to_coarse_part.begin());
cudaCheckError();
// Create mapping from this specific partition's neighbors to consolidated coarse neighbors, but using their fine index (aka. destination partition indices for my neighbors)
IVector_h fine_neigh_to_fine_part;
A.manager->createNeighToDestPartMap(fine_neigh_to_fine_part, A.manager->neighbors, destination_part, num_fine_neighbors);
// Create mapping from consolidated coarse neighbors to fine partition indices (even if the current partition is not going to be a root)
IVector_h coarse_neigh_to_fine_part;
int num_coarse_neighbors;
A.manager->createConsolidatedNeighToPartMap(coarse_neigh_to_fine_part, fine_neigh_to_fine_part, my_destination_part, destination_part, num_coarse_neighbors);
// Create mapping from fine neighbors to coarse neighbors, with fine neighbors this partition is merging with labeled with -1
IVector_h fine_neigh_to_coarse_neigh;
A.manager->createNeighToConsNeigh(fine_neigh_to_coarse_neigh, coarse_neigh_to_fine_part, fine_neigh_to_fine_part, my_destination_part, num_fine_neighbors);
/*
EXAMPLE
Take the following partition graph (that describes connections between partitions, vertices are the partitions themselves), this is the same graph that is used in the setup example
number of partitions num_parts=12
CSR row_offsets [0 4 8 13 21 25 32 36 41 46 50 57 61]
CSR col_indices [0 1 3 8
0 1 2 3
1 2 3 4 5
0 1 2 3 4 5 8 10
2 4 5 6
2 3 4 5 6 7 10
4 5 6 7
5 6 7 9 10
0 3 8 10 11
7 9 10 11
3 5 7 8 9 10 11
8 9 10 11]
destination_part = [0 0 0 0 4 4 4 4 8 8 8 8]
coarse_part_to_fine_part = [0 4 8] num_coarse_partitions = 3
fine_part_to_coarse_part = [0 0 0 0 1 1 1 1 2 2 2 2]
original neighbor lists correspond to the rows of the matrix, minus the diagonal elements: (part 0)[1 3 8] (part 3)[0 1 2 4 5 8 10] (part 10)[3 5 7 8 9 11]
fine_neigh_to_fine_part (part 0)[0 0 2] (part 3)[0 0 0 0 1 2 2] (part 10)[0 1 1 2 2 2]
coarse_neigh_to_fine_part (part 0)[8] (part 3)[4 8] (part 10)[0 4]
fine_neigh_to_coarse_neigh (part 0)[-1 -1 0] (part 3)[-1 -1 -1 0 0 1 1] (part 10)[0 1 1 -1 -1 -1]
*/
// --------------------------
// Step 2
// Create coarse B2L_maps, by mapping fine B2L maps to coarse indices using this->m_aggregates and eliminating duplicates
// --------------------------
std::vector<IVector> coarse_B2L_maps(num_fine_neighbors);
m_num_all_aggregates = m_num_aggregates;
int num_neighbors_temp = A.manager->neighbors.size();
int num_rings = A.manager->B2L_rings[0].size() - 1;
if (num_rings != 1)
{
FatalError("num_rings > 1 not supported in consolidation\n", AMGX_ERR_NOT_IMPLEMENTED);
}
IndexType max_b2l = 0;
for (int i = 0; i < num_neighbors_temp; i++ ) { max_b2l = max_b2l > A.manager->B2L_rings[i][1] ? max_b2l : A.manager->B2L_rings[i][1]; }
IVector B2L_aggregates(max_b2l);
IVector indices(max_b2l);
//TODO: use the algorithm from setNeighborAggregates()
for (int i = 0; i < num_neighbors_temp; i++ )
{
int size = A.manager->B2L_rings[i][1];
thrust::fill(B2L_aggregates.begin(), B2L_aggregates.begin() + size, 0);
thrust::sequence(indices.begin(), indices.begin() + size);
//substitute coarse aggregate indices for fine boundary nodes
thrust::copy(thrust::make_permutation_iterator(this->m_aggregates.begin(), A.manager->B2L_maps[i].begin()),
thrust::make_permutation_iterator(this->m_aggregates.begin(), A.manager->B2L_maps[i].begin() + size),
B2L_aggregates.begin());
//find the unique ones
thrust::sort_by_key(B2L_aggregates.begin(), B2L_aggregates.begin() + size, indices.begin());
IndexType num_unique = thrust::unique_by_key(B2L_aggregates.begin(), B2L_aggregates.begin() + size, indices.begin()).first - B2L_aggregates.begin();
coarse_B2L_maps[i].resize(num_unique);
//sort it back so we have the original ordering
thrust::sort_by_key(indices.begin(), indices.begin() + num_unique, B2L_aggregates.begin());
thrust::copy(B2L_aggregates.begin(), B2L_aggregates.begin() + num_unique, coarse_B2L_maps[i].begin());
}
cudaCheckError();
/*
* EXAMPLE
say, partition 3 has the following coarse B2L_maps:
neighbors [0 1 2 4 5 8 10]
B2L_maps[0(=0)] = [6 7 8]
B2L_maps[1(=1)] = [8 9 10]
B2L_maps[2(=2)] = [10 11 12 13]
B2L_maps[3(=4)] = [13 14 15]
B2L_maps[4(=5)] = [15 16 17]
B2L_maps[5(=8)] = [6 18 19]
B2L_maps[6(=10)] = [17 20 19]
*/
// ---------------------------------------------------
// Step 3
// create new B2L maps for each merged destination neighbor and drop B2L maps to neighbors we are merging with
// ---------------------------------------------------
std::vector<IVector> dest_coarse_B2L_maps;
A.manager->consolidateB2Lmaps(dest_coarse_B2L_maps, coarse_B2L_maps, fine_neigh_to_coarse_neigh, num_coarse_neighbors, num_fine_neighbors);
/*
* EXAMPLE
Then, merging the coarse B2L maps on partition 3, we get:
coarse_neigh_to_fine_part [4 8]
dest_coarse_B2L_maps[0(=4)] = [13 14 15 16 17]
dest_coarse_B2L_maps[1(=8)] = [6 17 18 19 20]
*/
// -----------------------
// Step 4
// Create interior-boundary renumbering of aggregates according to dest_coarse_B2L_maps
// -----------------------
// Now renumber the aggregates with all interior aggregates first, boundary aggregates second
int num_interior_aggregates; //returned by createAggregatesRenumbering
int num_boundary_aggregates; //returned by createAggregatesRenumbering
IVector renumbering; //returned by createAggregatesRenumbering
// Following calls create renumbering array and modifies B2L_maps
A.manager->createAggregatesRenumbering(renumbering, dest_coarse_B2L_maps, this->m_num_aggregates, num_coarse_neighbors, num_interior_aggregates, num_boundary_aggregates, num_rings);
/*
* EXAMPLE
Partition 3 will get a renumbering vector of size 21, for the 21 owned agggregates:
[0 1 2 3 4 5 17 6 7 8 9 10 11 12 13 14 15 16 18 19 20]
num_interior_aggregates = 12
num_boundary_aggregates = 9
*/
// -------------------------------------------------
// Step 5
// Determine whether root partition, make list of partitions merged into one
// ------------------------------------------------
// Check if I'm root partition and how fine partitions (including myself) are merging into me
// bool is_root_partition = false;
bool &is_root_partition = this->m_is_root_partition;
is_root_partition = false;
int num_fine_parts_to_consolidate = 0;
// IVector_h fine_parts_to_consolidate;
IVector_h &fine_parts_to_consolidate = this->m_fine_parts_to_consolidate;
for (int i = 0; i < num_parts; i++)
{
if (destination_part[i] == my_id)
{
is_root_partition = true;
num_fine_parts_to_consolidate++;
}
}
fine_parts_to_consolidate.resize(num_fine_parts_to_consolidate);
int count = 0;
for (int i = 0; i < num_parts; i++)
{
if (destination_part[i] == my_id)
{
fine_parts_to_consolidate[count] = i;
count++;
}
}
//save this information as state, as this will also be required during solve for restriction/prolongation
A.manager->setIsRootPartition(is_root_partition);
A.manager->setNumPartsToConsolidate(num_fine_parts_to_consolidate);
A.manager->setPartsToConsolidate(fine_parts_to_consolidate);
// Create a new distributed communicator for coarse levels that only contains active partitions
if (Ac.manager == NULL)
{
Ac.manager = new DistributedManager<TConfig>();
}
Ac.manager->setComms(A.manager->getComms()->Clone());
Ac.manager->getComms()->createSubComm(coarse_part_to_fine_part, is_root_partition);
/*
* EXAMPLE
isRootPartition is true for partitions 0,4,8 false for others
num_fine_parts_to_consolidate = 4 for partitions 0,4,8
fine_parts_to_consolidate (part 0)[0 1 2 3] (part 4)[4 5 6 7] (part 8)[8 9 10 11]
*/
// ----------------------
// Step 6
// Compute number of interior, boundary and total nodes in the consolidated coarse matrix. Create offsets so that partitions being merged together will have their aggregate indices ordered like this:
// [num_interior(fine_parts_to_consolidate[0]] num_interior(fine_parts_to_consolidate[1]] ... num_interior(fine_parts_to_consolidate[num_fine_parts_to_consolidate]
// num_boundary(fine_parts_to_consolidate[0]] num_boundary(fine_parts_to_consolidate[1]] ... num_boundary(fine_parts_to_consolidate[num_fine_parts_to_consolidate] ]
// ----------------------
// Gather to get number of interior/boundary aggregates of neighbors I will merge with
// std::vector<IVector_h> vertex_counts;
std::vector<IVector_h> &vertex_counts = this->m_vertex_counts;
// int interior_offset, boundary_offset, total_interior_rows_in_merged, total_boundary_rows_in_merged;
int interior_offset, boundary_offset;
int &total_interior_rows_in_merged = this->m_total_interior_rows_in_merged;
int &total_boundary_rows_in_merged = this->m_total_boundary_rows_in_merged;
int total_rows_in_merged;
//Computes these offsets on the root, sends them back
A.manager->computeConsolidatedOffsets(my_id, my_destination_part, is_root_partition, num_interior_aggregates, num_boundary_aggregates, vertex_counts, fine_parts_to_consolidate, num_fine_parts_to_consolidate, interior_offset, boundary_offset, total_interior_rows_in_merged, total_boundary_rows_in_merged, total_rows_in_merged, A.manager->getComms());
//Partitions save these offsets, as it will be required during solve restriction/prolongation
A.manager->setConsolidationOffsets(interior_offset, num_interior_aggregates, boundary_offset + num_interior_aggregates, num_boundary_aggregates);
/*
* EXAMPLE
For root partition 0, say we have the following interior/boundary counts (note that partition 1 has 0 boundary, as it is only connected to partitions it is merging with)
part 0 - interior: 10 boundary 3
part 1 - interior: 18
part 2 - interior: 10 boundary 16
part 3 - interior: 12 boundary 9
interior_offset for partitions 0,1,2,3: 0 10 28 38 (total_interior_rows_in_merged 50)
boundary_offset for partitions 0,1,2,3: 0 3 3 19 (total_boundary_rows_in_merged 28)
*/
// ----------------------
// Step 7
// Each partition renumbers its aggregates and dest_coarse_B2L_maps using offsets computed in Step 6 and permutation in Step 4
// ----------------------
// Kernel to renumber the aggregates
int block_size = 128;
int grid_size = std::min( 4096, ( A.manager->halo_offsets[0] + block_size - 1 ) / block_size);
renumberAggregatesKernel <<< grid_size, block_size >>>(renumbering.raw(), interior_offset, boundary_offset, this->m_aggregates.raw(), A.manager->halo_offsets[0], num_interior_aggregates, renumbering.size());
cudaCheckError();
for (int i = 0; i < num_coarse_neighbors; i++)
{
thrust::transform(dest_coarse_B2L_maps[i].begin(),
dest_coarse_B2L_maps[i].end(),
thrust::constant_iterator<IndexType>(boundary_offset),
dest_coarse_B2L_maps[i].begin(),
thrust::plus<IndexType>());
}
cudaCheckError();
/*
* EXAMPLE
Partition 3 had a renumbering vector:
[0 1 2 3 4 5 17 6 7 8 9 10 11 12 13 14 15 16 18 19 20]
which is now adjusted to account for the consolidated coarse matrices' indices:
[38 39 40 41 42 43 74 44 45 46 47 48 49 69 70 71 72 73 75 76 77]
And the dest_coarse_B2L_maps, which looked like:
dest_coarse_B2L_maps[0(=4)] = [13 14 15 16 17]
dest_coarse_B2L_maps[1(=8)] = [6 17 18 19 20]
is now:
dest_coarse_B2L_maps[0(=4)] = [69 70 71 72 73]
dest_coarse_B2L_maps[1(=8)] = [74 73 75 76 77]
*/
// -------------------------------------------------
// Step 8
// Send dest_coarse_B2L_maps to root partitions
// ------------------------------------------------
// Each fine partition sends to its root the number of coarse neighbors it has, their ids, and the number of boundary nodes for each coarse neighbor
IVector_h num_bdy_per_coarse_neigh(num_coarse_neighbors);
for (int i = 0; i < num_coarse_neighbors; i++)
{
num_bdy_per_coarse_neigh[i] = dest_coarse_B2L_maps[i].size();
}
IVector_h consolidated_coarse_neigh_to_fine_part; //consolidated list of coarse neighbors for the root partition, using fine partition indices
int num_consolidated_neighbors = 0;
// std::vector<IVector> consolidated_B2L_maps; //concatenates dest_coarse_B2L_maps received from partitions that are merging into the same root and pointing to the same destination coarse neighbor
std::vector<IVector> &consolidated_B2L_maps = this->m_consolidated_B2L_maps;
A.manager->consolidateB2LmapsOnRoot(num_consolidated_neighbors, consolidated_B2L_maps, consolidated_coarse_neigh_to_fine_part, dest_coarse_B2L_maps, coarse_neigh_to_fine_part, num_bdy_per_coarse_neigh, fine_parts_to_consolidate, num_fine_parts_to_consolidate, my_id, my_destination_part, is_root_partition, num_coarse_neighbors, A.manager->getComms());
//
// Step 9 - figuring out halo aggregate IDs
//
//Now we need to update halo aggregate IDs - this is just a halo exchange on this->m_aggregates between partitions
//that are being merged together, but we need to send other halos to the root to come up with the halo renumbering
//TODO: separate transactions, send "real halo" to the root nodes (coarse neighbors) immediately
//Step 9.1: takes care of synchronizing the aggregate IDs between partitions we are merging together and got consistent halo aggregate IDs for neighbor we are not merging with (which are going to be sent to the root in 9.2)
A.manager->exchange_halo(this->m_aggregates, 6666);
/*
* EXAMPLE 2
This example is independent from the previous ones.
Say partition 0 and 1 are merging (into 0) partition 0 is neighbors with 1,2,3 and partition 1 is neighbors with 0,3,4
Partitions 3 and 4 are merging (into partition 3) and partition 2 is not merging with anyone.
This example details the renumbering of halo indices on partition 0 and partition 1.
After the exchange halo, we have:
this->m_aggregates on partition 0:
[(fine interior nodes) (fine boundary nodes) (fine halo from part 1) (fine halo from part 2) (fine halo from part 3)]
[(fine interior nodes) (fine boundary nodes) (13 13 15) (12 15 17) (14 16 18)]
aggregates on partition 1:
[(fine interior nodes) (fine boundary nodes) (fine halo from part 0) (fine halo from part 3) (fine halo from part 4)]
[(fine interior nodes) (fine boundary nodes) (14 16 17) (18 19 19) (15 15 17)]
indices in (fine halo from part 0) and (fine halo from part 1) actually contain interior aggregate indices (if they are not connected to partitions 2,3 or 4), because the boundary is disappearing there.
Indices in halo regions contain remote-local indices.
This example is used throughout consolidateAndRenumberHalos
*/
//Step 9.2 - 9.5
// IVector_h halo_offsets(num_consolidated_neighbors + 1, 0);
IVector_h &halo_offsets = this->m_consolidated_halo_offsets;
halo_offsets = IVector_h(num_consolidated_neighbors + 1, 0);
A.manager->consolidateAndRenumberHalos(this->m_aggregates, A.manager->halo_offsets, halo_offsets, A.manager->neighbors, num_fine_neighbors, consolidated_coarse_neigh_to_fine_part, num_consolidated_neighbors, destination_part, my_destination_part, is_root_partition, fine_parts_to_consolidate, num_fine_parts_to_consolidate, num_parts, my_id, total_rows_in_merged, this->m_num_all_aggregates, A.manager->getComms());
if (is_root_partition)
{
for (int i = 0; i < consolidated_B2L_maps.size(); i++)
{
thrust::sort(consolidated_B2L_maps[i].begin(), consolidated_B2L_maps[i].end());
}
this->m_consolidated_neighbors.resize(num_consolidated_neighbors);
for (int i = 0; i < num_consolidated_neighbors; i++)
{
this->m_consolidated_neighbors[i] = fine_part_to_coarse_part[consolidated_coarse_neigh_to_fine_part[i]];
}
cudaCheckError();
}
}
// Consolidates the coarse-grid matrix Ac onto root partitions.
// Non-root partitions asynchronously send their local Ac CSR arrays
// (row offsets / column indices / values, tags 1111/1112/1113) to their
// destination root. Each root merges the pieces received from every fine
// partition it consolidates: interior rows first, then boundary rows,
// then halo rows (merged nonzero-by-nonzero on the device), and finally
// performs Step 12 bookkeeping on Ac's DistributedManager.
// Relies on state prepared by the earlier bookkeeping pass:
// m_vertex_counts, m_fine_parts_to_consolidate, consolidated halo
// offsets and B2L maps.
template <class T_Config>
void Aggregation_AMG_Level_Base<T_Config>::consolidateCoarseGridMatrix()
{
Matrix<TConfig> &A = this->getA();
Matrix<TConfig> &Ac = this->getNextLevel( MemorySpace( ) )->getA();
// NOTE(review): num_parts, num_fine_neighbors and destination_part are
// fetched but not read in this function body -- presumably kept for
// symmetry with the bookkeeping pass; confirm before removing.
int num_parts, num_fine_neighbors, my_id;
num_parts = A.manager->getComms()->get_num_partitions();
num_fine_neighbors = A.manager->neighbors.size();
my_id = A.manager->global_id();
IVector_h &destination_part = A.manager->getDestinationPartitions();
int my_destination_part = A.manager->getMyDestinationPartition();
// bookkeeping stored in AMG_Level_Base
std::vector<IVector_h> &vertex_counts = this->m_vertex_counts;
IVector_h &fine_parts_to_consolidate = this->m_fine_parts_to_consolidate;
// bookkeeping stored in either AMG_Level_Base or Acs' DistributedManager
IVector_h &halo_offsets = this->isReuseLevel() ? Ac.manager->getHaloOffsets() : this->m_consolidated_halo_offsets;
std::vector<IVector> &consolidated_B2L_maps = this->isReuseLevel() ? Ac.manager->getB2Lmaps() : this->m_consolidated_B2L_maps;
int num_consolidated_neighbors = this->isRootPartition() ? this->m_consolidated_neighbors.size() : 0;
if (!this->isRootPartition())
{
// Non-root: ship the local coarse matrix to the root; the matching
// waits happen in the "else" branch of Step 12 below.
A.manager->getComms()->send_vector_async(Ac.row_offsets, my_destination_part, 1111);
A.manager->getComms()->send_vector_async(Ac.col_indices, my_destination_part, 1112);
A.manager->getComms()->send_vector_async(Ac.values, my_destination_part, 1113);
}
else
{
int num_fine_parts_to_consolidate = fine_parts_to_consolidate.size();
int total_num_rows = this->m_num_all_aggregates;
// new_row_offsets first holds per-row lengths, then (after the
// exclusive scan below) the consolidated row offsets.
IVector new_row_offsets(total_num_rows + 1, 0);
//if diags are inside then we won't be counting those twice when computing halo row length
if (!Ac.hasProps(DIAG))
{
thrust::fill(new_row_offsets.begin() + halo_offsets[0], new_row_offsets.begin() + halo_offsets[num_consolidated_neighbors], 1);
cudaCheckError();
}
std::vector<IVector> recv_row_offsets(num_fine_parts_to_consolidate);
std::vector<VecInt_t> num_nz(num_fine_parts_to_consolidate);
IVector *work_row_offsets;
std::vector<VecInt_t> index_offset_array(2 * num_fine_parts_to_consolidate + 1);
int interior_offset = 0;
// Boundary rows are placed after ALL interior rows of every merged
// partition, so the boundary offset starts at the total interior count.
int boundary_offset = 0;
for (int i = 0; i < num_fine_parts_to_consolidate; i++)
{
boundary_offset += vertex_counts[i][0];
}
int max_num_nz = 0;
for (int i = 0; i < num_fine_parts_to_consolidate; i++)
{
int current_part = fine_parts_to_consolidate[i];
//receive row offsets
if (current_part != my_id)
{
recv_row_offsets[i].resize(total_num_rows + 1);
A.manager->getComms()->recv_vector(recv_row_offsets[i], current_part, 1111);
work_row_offsets = &(recv_row_offsets[i]);
// Last entry of the CSR row-offset array is the nnz count.
num_nz[i] = (*work_row_offsets)[work_row_offsets->size() - 1];
max_num_nz = max_num_nz > num_nz[i] ? max_num_nz : num_nz[i];
}
else
{
// Root's own contribution: read directly from the local Ac.
work_row_offsets = &(Ac.row_offsets);
num_nz[i] = Ac.get_num_nz();
}
//Get interior row length
thrust::transform(work_row_offsets->begin() + interior_offset + 1,
work_row_offsets->begin() + interior_offset + vertex_counts[i][0] + 1,
work_row_offsets->begin() + interior_offset,
new_row_offsets.begin() + interior_offset,
thrust::minus<IndexType>());
cudaCheckError();
//Get boundary row length
thrust::transform(work_row_offsets->begin() + boundary_offset + 1,
work_row_offsets->begin() + boundary_offset + vertex_counts[i][1] + 1,
work_row_offsets->begin() + boundary_offset,
new_row_offsets.begin() + boundary_offset,
thrust::minus<IndexType>());
cudaCheckError();
//Increment halo row length by one for every nonzero that is an edge from the halo into this partition
int size = halo_offsets[num_consolidated_neighbors] - halo_offsets[0];
const int block_size = 128;
const int num_blocks = min( AMGX_GRID_MAX_SIZE, (size - 1) / block_size + 1);
set_halo_rowlen <<< num_blocks, block_size>>>(work_row_offsets->raw() + halo_offsets[0], new_row_offsets.raw() + halo_offsets[0], size, Ac.hasProps(DIAG));
cudaCheckError();
// Record [start,end) index ranges per merged partition: first half of
// index_offset_array holds interior ranges, second half boundary ranges.
index_offset_array[i] = interior_offset;
index_offset_array[num_fine_parts_to_consolidate + i] = boundary_offset;
interior_offset += vertex_counts[i][0];
boundary_offset += vertex_counts[i][1];
index_offset_array[i + 1] = interior_offset;
index_offset_array[num_fine_parts_to_consolidate + i + 1] = boundary_offset;
}
A.manager->setConsolidationArrayOffsets(index_offset_array);
//Exclusive scan row length array to get row offsets
thrust::exclusive_scan(new_row_offsets.begin(), new_row_offsets.end(), new_row_offsets.begin());
cudaCheckError();
//Prepare to receive column indices and values
int num_nz_consolidated = new_row_offsets[new_row_offsets.size() - 1];
IVector recv_col_indices(max_num_nz);
IVector new_col_indices(num_nz_consolidated);
// With external diagonals (DIAG), values carry an extra block per row
// appended after the off-diagonal nonzeros -- hence the sizing below.
MVector recv_values((max_num_nz + 1 + Ac.hasProps(DIAG) * (halo_offsets[num_consolidated_neighbors] - 1))*Ac.get_block_size());
MVector new_values((num_nz_consolidated + 1 + Ac.hasProps(DIAG) * (halo_offsets[num_consolidated_neighbors] - 1))*Ac.get_block_size());
thrust::fill(new_col_indices.begin() + new_row_offsets[halo_offsets[0]], new_col_indices.end(), -1); //Set all the halo col indices to -1
if (!Ac.hasProps(DIAG)) { thrust::fill(new_values.begin() + num_nz_consolidated * Ac.get_block_size(), new_values.end(), types::util<ValueTypeA>::get_zero()); }
cudaCheckError();
IVector *work_col_indices;
MVector *work_values;
interior_offset = 0;
boundary_offset = 0;
// Re-derive the starting boundary offset (sum of interior counts), as
// the offsets were consumed by the previous loop.
for (int i = 0; i < num_fine_parts_to_consolidate; i++)
{
// NOTE(review): current_part is unused in this loop -- confirm it is
// safe to drop.
int current_part = fine_parts_to_consolidate[i];
boundary_offset += vertex_counts[i][0];
}
for (int i = 0; i < num_fine_parts_to_consolidate; i++)
{
int current_part = fine_parts_to_consolidate[i];
if (current_part != my_id)
{
A.manager->getComms()->recv_vector(recv_col_indices, current_part, 1112, 0, num_nz[i]);
A.manager->getComms()->recv_vector(recv_values, current_part, 1113, 0, (num_nz[i] + 1 + Ac.hasProps(DIAG) * (halo_offsets[num_consolidated_neighbors] - 1))*Ac.get_block_size());
work_col_indices = &(recv_col_indices);
work_row_offsets = &(recv_row_offsets[i]);
work_values = &(recv_values);
}
else
{
work_row_offsets = &(Ac.row_offsets);
work_col_indices = &(Ac.col_indices);
work_values = &(Ac.values);
}
//Put interior rows in place
thrust::copy(work_col_indices->begin() + (*work_row_offsets)[interior_offset],
work_col_indices->begin() + (*work_row_offsets)[interior_offset + vertex_counts[i][0]],
new_col_indices.begin() + new_row_offsets[interior_offset]);
cudaCheckError();
thrust::copy(work_values->begin() + (*work_row_offsets)[interior_offset]*Ac.get_block_size(),
work_values->begin() + ((*work_row_offsets)[interior_offset + vertex_counts[i][0]])*Ac.get_block_size(),
new_values.begin() + new_row_offsets[interior_offset]*Ac.get_block_size());
cudaCheckError();
//Put boundary rows in place
thrust::copy(work_col_indices->begin() + (*work_row_offsets)[boundary_offset],
work_col_indices->begin() + (*work_row_offsets)[boundary_offset + vertex_counts[i][1]],
new_col_indices.begin() + new_row_offsets[boundary_offset]);
cudaCheckError();
thrust::copy(work_values->begin() + (*work_row_offsets)[boundary_offset]*Ac.get_block_size(),
work_values->begin() + ((*work_row_offsets)[boundary_offset + vertex_counts[i][1]])*Ac.get_block_size(),
new_values.begin() + new_row_offsets[boundary_offset]*Ac.get_block_size());
cudaCheckError();
//Process halo rows (merge)
int size = halo_offsets[num_consolidated_neighbors] - halo_offsets[0];
const int block_size = 128;
const int num_blocks = min( AMGX_GRID_MAX_SIZE, (size - 1) / block_size + 1);
//TODO: vectorise this kernel, will be inefficient for larger block sizes
append_halo_nz <<< num_blocks, block_size>>>(work_row_offsets->raw() + halo_offsets[0],
new_row_offsets.raw() + halo_offsets[0],
work_col_indices->raw(),
new_col_indices.raw(),
work_values->raw(),
new_values.raw(),
size, Ac.hasProps(DIAG), halo_offsets[0], Ac.get_block_size());
cudaCheckError();
// Diagonals
if (Ac.hasProps(DIAG))
{
// Diagonal corresponding to interior rows
thrust::copy(work_values->begin() + (num_nz[i] + interior_offset)*Ac.get_block_size(),
work_values->begin() + (num_nz[i] + interior_offset + vertex_counts[i][0])*Ac.get_block_size(),
new_values.begin() + (new_row_offsets[halo_offsets[halo_offsets.size() - 1]] + interior_offset)*Ac.get_block_size());
// Diagonal corresponding to boundary rows
thrust::copy(work_values->begin() + (num_nz[i] + boundary_offset)*Ac.get_block_size(),
work_values->begin() + (num_nz[i] + boundary_offset + vertex_counts[i][1])*Ac.get_block_size(),
new_values.begin() + (new_row_offsets[halo_offsets[halo_offsets.size() - 1]] + boundary_offset)*Ac.get_block_size());
cudaCheckError();
}
interior_offset += vertex_counts[i][0];
boundary_offset += vertex_counts[i][1];
}
// Publish the consolidated CSR arrays into Ac.
Ac.set_initialized(0);
Ac.row_offsets = new_row_offsets;
Ac.col_indices = new_col_indices;
Ac.values = new_values;
}
// A new distributed communicator for coarse levels that only contains active partitions
// has already been created in consolidatedBookKeeping!
//
// Step 12 - finalizing, bookkeping
//
if (this->isRootPartition())
{
// int my_consolidated_id = fine_part_to_coarse_part[my_id];
int my_consolidated_id = Ac.manager->getComms()->get_global_id();
if (!this->isReuseLevel())
{
Ac.manager->initializeAfterConsolidation(
my_consolidated_id,
Ac,
this->m_consolidated_neighbors,
this->m_total_interior_rows_in_merged,
this->m_total_boundary_rows_in_merged,
this->m_num_all_aggregates,
this->m_consolidated_halo_offsets,
this->m_consolidated_B2L_maps,
1,
true);
// this is now stored in Acs DistributedManager
this->m_consolidated_neighbors.resize(0);
this->m_consolidated_halo_offsets.resize(0);
this->m_consolidated_B2L_maps.resize(0);
// Single-ring B2L bookkeeping: ring i spans [0, map size).
Ac.manager->B2L_rings.resize(num_consolidated_neighbors + 1);
for (int i = 0; i < num_consolidated_neighbors; i++)
{
Ac.manager->B2L_rings[i].resize(2);
Ac.manager->B2L_rings[i][0] = 0;
Ac.manager->B2L_rings[i][1] = consolidated_B2L_maps[i].size();
}
}
Ac.manager->set_initialized(Ac.row_offsets);
Ac.manager->getComms()->set_neighbors(num_consolidated_neighbors);
int new_nnz = Ac.row_offsets[Ac.row_offsets.size() - 1];
Ac.set_num_nz(new_nnz);
Ac.set_num_cols(Ac.manager->halo_offsets[Ac.manager->halo_offsets.size() - 1]);
Ac.set_num_rows(Ac.get_num_cols());
if (A.hasProps(DIAG)) { Ac.addProps(DIAG); }
Ac.computeDiagonal();
Ac.set_initialized(1);
}
else
{
// Non-root: block until the async sends above complete, then shrink
// the local Ac -- its contents now live on the root.
this->getA().manager->getComms()->send_vector_wait_all(Ac.row_offsets);
this->getA().manager->getComms()->send_vector_wait_all(Ac.col_indices);
this->getA().manager->getComms()->send_vector_wait_all(Ac.values);
Ac.set_initialized(0);
// set size of Ac to be zero
Ac.resize(0, 0, 0, 1);
Ac.set_initialized(1);
}
}
// -------------------------------------------------------------
// Explicit instantiations
// -------------------------------------------------------------
#define AMGX_CASE_LINE(CASE) template class Aggregation_AMG_Level<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
}
}
|
248a81d72314007f1129c10c730f48a342e9dfa0.hip | // !!! This is a file automatically generated by hipify!!!
#include <torch/extension.h>
#include <ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h>
#include <ATen/hip/HIPContext.h>
#include <list>
#include <cstdio>
#include <cassert>
#include <hip/hip_runtime_api.h>
#include "nccl.h"
// Minimal HIP error-check wrapper: evaluates `cmd`, and on any
// non-success status prints the host name, source location and error
// string. Note it only LOGS the failure -- execution continues.
// NOTE(review): uses gethostname(); presumably declared via a
// transitively included header -- confirm <unistd.h> is available.
#define CUDACHECK(cmd) do { \
hipError_t err = cmd; \
if( err != hipSuccess ) { \
char hostname[1024]; \
gethostname(hostname, 1024); \
printf("%s: CUDA failure %s:%d '%s'\n", \
hostname, \
__FILE__,__LINE__,hipGetErrorString(err)); \
} \
} while(0)
namespace {
constexpr int THREADS_PER_CTA = 128;
/* Basic deleter function for from_blob function.
void deleter(void* ptr)
{
printf("deleter(ptr=%p)\n",ptr);
hipFree(ptr);
}
*/
// Wrap a raw device pointer in a tensor view without taking ownership.
// Strides are either channels-last (NHWC over a 4-d shape) or standard
// row-major contiguous. No deleter is installed because all peer memory
// allocations are static in this implementation.
// TODO: Implement dynamic reuse of pooled peer memory.
template<class T>
at::Tensor blob_view(T* raw_ptr, std::vector<int64_t> shape, const at::TensorOptions& options, bool channels_last)
{
    std::vector<int64_t> strides(shape.size());
    if (channels_last) {
        // Channels-last layout: shape is given as NCHW sizes.
        assert(shape.size() == 4);
        strides[0] = shape[1] * shape[2] * shape[3];
        strides[1] = 1;
        strides[2] = shape[1] * shape[3];
        strides[3] = shape[1];
    } else {
        // Row-major contiguous: stride of dim d is the product of all
        // trailing dimension sizes.
        int64_t running = 1;
        for (int d = (int)shape.size() - 1; d >= 0; --d) {
            strides[d] = running;
            running *= shape[d];
        }
    }
    return torch::from_blob((void*)raw_ptr, shape, strides, 0L, options);
}
// Decompose a 3-d (implicit batch of 1) or 4-d tensor's sizes into
// N, C, H, W. `explicit_nhwc` selects NHWC dimension order; otherwise
// NCHW order is assumed. Any other rank is reported and asserted on.
void tensor_shape(at::Tensor t, bool explicit_nhwc, int& N, int& C, int& H, int& W)
{
    const int rank = t.dim();
    if (rank == 3) {
        N = 1;
        if (explicit_nhwc) {
            H = t.size(0);
            W = t.size(1);
            C = t.size(2);
        } else {
            C = t.size(0);
            H = t.size(1);
            W = t.size(2);
        }
    } else if (rank == 4) {
        N = t.size(0);
        if (explicit_nhwc) {
            H = t.size(1);
            W = t.size(2);
            C = t.size(3);
        } else {
            C = t.size(1);
            H = t.size(2);
            W = t.size(3);
        }
    } else {
        printf("%s;%d - t.dim() must be either 3 or 4 (was %d)\n",__FILE__,__LINE__,int(t.dim()));
        assert(t.dim() == 3 || t.dim() == 4);
    }
}
// Decompose a 3-d or 4-d tensor's element strides into per-dimension
// strides, mirroring tensor_shape(). For 3-d input the batch stride is
// synthesized as the full tensor extent (size(0)*size(1)*size(2)).
void tensor_strides(at::Tensor t, bool explicit_nhwc, int& stride_N, int& stride_C, int& stride_H, int& stride_W)
{
    const int rank = t.dim();
    if (rank == 3) {
        if (explicit_nhwc) {
            H:
            stride_H = t.stride(0);
            stride_W = t.stride(1);
            stride_C = t.stride(2);
        } else {
            stride_C = t.stride(0);
            stride_H = t.stride(1);
            stride_W = t.stride(2);
        }
        stride_N = t.size(0)*t.size(1)*t.size(2);
    } else if (rank == 4) {
        stride_N = t.stride(0);
        if (explicit_nhwc) {
            stride_H = t.stride(1);
            stride_W = t.stride(2);
            stride_C = t.stride(3);
        } else {
            stride_C = t.stride(1);
            stride_H = t.stride(2);
            stride_W = t.stride(3);
        }
    } else {
        printf("%s;%d - t.dim() must be either 3 or 4 (was %d)\n",__FILE__,__LINE__,t.dim());
        assert(t.dim() == 3 || t.dim() == 4);
    }
}
// Store a zero of type T through dst; the int2 overload below zeroes the
// 8-byte vector payload in one assignment.
template<class T>
inline __device__ void __zero(T* dst)
{
    *dst = T(0);
}
inline __device__ void __zero(int2* dst)
{
    *dst = make_int2(0, 0);
}
// Zero a dim0 x dim1 x dim2 tensor using a grid-stride loop over the
// flattened index space. When `contiguous` the flat index is used as the
// memory offset directly; otherwise it is decomposed into (j0, j1, j2)
// and mapped through the explicit strides. Caller supplies the logical
// block id and block count for this tensor (the launching kernel splits
// the grid between halo sides).
template<class T, bool contiguous>
inline __device__ void zero_tensor(
    const int dim0,
    const int dim1,
    const int dim2,
    T* __restrict__ data,
    const int data_stride0,
    const int data_stride1,
    const int data_stride2,
    const int thread_id,
    const int block_id,
    const int num_blocks
    )
{
    const int total = dim0 * dim1 * dim2;
    const int step = num_blocks * THREADS_PER_CTA;
    for (int idx = block_id * THREADS_PER_CTA + thread_id; idx < total; idx += step) {
        int offset;
        if (contiguous) {
            offset = idx;
        } else {
            const int j2 = idx % dim2;
            const int rest = idx / dim2;
            offset = (rest / dim1) * data_stride0
                   + (rest % dim1) * data_stride1
                   + j2 * data_stride2;
        }
        __zero(data + offset);
    }
}
// Exchange a dim0 x dim1 x dim2 tensor with a peer GPU through peer
// memory transfer buffers. Each thread moves one element per trip,
// packed into a 16B "flit" whose last 4 bytes are a semaphore word:
// writes go straight into the remote GPU's buffer (remote_peer), and the
// matching element is received by spinning on the local buffer
// (local_peer) until its communication bit is set. Two buffers per
// thread (selected via the status bit) allow back-to-back iterations to
// proceed without a separate handshake.
template<class T, bool contiguous>
inline __device__ void push_pull_tensor(
const int dim0,
const int dim1,
const int dim2,
const T* __restrict__ data_in,
const int data_in_stride0,
const int data_in_stride1,
const int data_in_stride2,
T* __restrict__ data_out,
const int data_out_stride0,
const int data_out_stride1,
const int data_out_stride2,
int4* local_peer,
int4* remote_peer,
const int thread_id,
const int block_id,
const int num_blocks
)
{
// 128b=16B NVLink flit
// Note: Use last 4B as a semaphore
static_assert(sizeof(T) <= 12);
union Flit {
T payload;
uint uints[4];
};
// Communication bit indicates whether flit has been received from
// a remote GPU
constexpr uint communication_mask = 1 << 0;
// Status bit is used to choose the active peer buffer in an
// alternating double buffer scheme. We use buffer 1 if the bits
// match, use buffer 2 if the bits differ, and invert the bit
// after finishing with a buffer.
constexpr uint status_mask = 1 << 1;
// Split peer memory into two sets of buffers
// Note: Each block owns a THREADS_PER_CTA*2*16B chunk of peer
// memory
const int peer_offset1 = block_id * THREADS_PER_CTA * 2 + thread_id;
const int peer_offset2 = peer_offset1 + THREADS_PER_CTA;
volatile int* local_peer1 = reinterpret_cast<volatile int*>(local_peer + peer_offset1);
volatile int* local_peer2 = reinterpret_cast<volatile int*>(local_peer + peer_offset2);
volatile int* remote_peer1 = reinterpret_cast<volatile int*>(remote_peer + peer_offset1);
volatile int* remote_peer2 = reinterpret_cast<volatile int*>(remote_peer + peer_offset2);
// Iterate through tensor entries
const int num_threads = num_blocks * THREADS_PER_CTA;
const int count = dim0 * dim1 * dim2;
for (int i0 = block_id * THREADS_PER_CTA; i0 < count; i0 += num_threads) {
const int i = i0 + thread_id;
// Threads past the end still run the protocol (so the warp stays in
// step) but neither read data_in nor write data_out.
const bool has_data = i < count;
// Calculate buffer positions
int data_in_offset, data_out_offset;
if (contiguous) {
data_in_offset = i;
data_out_offset = i;
} else {
const int j2 = i % dim2;
const int k = i / dim2;
const int j1 = k % dim1;
const int j0 = k / dim1;
data_in_offset = j0 * data_in_stride0 + j1 * data_in_stride1 + j2 * data_in_stride2;
data_out_offset = j0 * data_out_stride0 + j1 * data_out_stride1 + j2 * data_out_stride2;
}
// Determine which peer memory buffer to use
// Note: The status bit is not affected by asynchronous
// communication from the remote GPU.
Flit local_message1, local_message2;
asm volatile("ld.volatile.global.v4.u32 {%0,%1,%2,%3}, [%4];" :
"=r"(local_message1.uints[0]),
"=r"(local_message1.uints[1]),
"=r"(local_message1.uints[2]),
"=r"(local_message1.uints[3])
: "l"(local_peer1) : "memory");
asm volatile("ld.volatile.global.v4.u32 {%0,%1,%2,%3}, [%4];" :
"=r"(local_message2.uints[0]),
"=r"(local_message2.uints[1]),
"=r"(local_message2.uints[2]),
"=r"(local_message2.uints[3])
: "l"(local_peer2) : "memory");
const uint status1 = local_message1.uints[3] & status_mask;
const uint status2 = local_message2.uints[3] & status_mask;
const bool peer1_is_active = (status1 ^ status2) == 0;
// ox = outgoing (remote) buffer, ix = incoming (local) buffer.
volatile int* ox = peer1_is_active ? remote_peer1 : remote_peer2;
volatile int* ix = peer1_is_active ? local_peer1 : local_peer2;
const uint status = peer1_is_active ? status1 : status2;
Flit recv_message = peer1_is_active ? local_message1 : local_message2;
// Send flit to remote GPU
// Note: Set communication bit and keep status bit
Flit send_message;
if (has_data) {
send_message.payload = data_in[data_in_offset];
}
send_message.uints[3] = communication_mask | status;
asm volatile("st.volatile.global.v4.u32 [%0], {%1,%2,%3,%4};" ::
"l"(ox),
"r"(send_message.uints[0]),
"r"(send_message.uints[1]),
"r"(send_message.uints[2]),
"r"(send_message.uints[3])
: "memory");
// Recieve flit from peer
while ((recv_message.uints[3] & communication_mask) == 0) {
asm volatile("ld.volatile.global.v4.u32 {%0,%1,%2,%3}, [%4];" :
"=r"(recv_message.uints[0]),
"=r"(recv_message.uints[1]),
"=r"(recv_message.uints[2]),
"=r"(recv_message.uints[3])
: "l"(ix) : "memory");
}
if (has_data) {
data_out[data_out_offset] = recv_message.payload;
}
// Reset semaphore
// Note: Clear communication bit and invert status bit
uint flag = ~status & status_mask;
asm volatile("st.volatile.global.v4.u32 [%0], {%1,%2,%3,%4};" ::
"l"(ix),
"n"(0),
"n"(0),
"n"(0),
"r"(flag)
: "memory");
// Make the semaphore reset visible system-wide before the next trip
// reuses the buffers; skipped after the final iteration.
if (i0 + num_threads < count) {
__threadfence_system();
}
}
}
// Exchange (or zero) the top and bottom 1-d halos in one launch. The
// grid is split in half; one half services the top halo and the other
// the bottom, with `top_first` selecting which half handles the top.
// A side whose *_zero flag is set zeroes its output halo locally instead
// of communicating through the peer transfer buffers.
template<class T, bool contiguous, bool top_zero, bool btm_zero>
#if __CUDA_ARCH__ >= 700
__launch_bounds__(THREADS_PER_CTA)
#endif
__global__ void push_pull_halos_1d_kernel(
    // top halo,
    T* toh, int toh_stride0, int toh_stride1, int toh_stride2,        // top output halo (local)
    const T* tih, int tih_stride0, int tih_stride1, int tih_stride2,  // top input halo (local)
    int4* tox,  // top output transfer buffer (remote peer)
    int4* tix,  // top input transfer buffer (local peer)
    // btm halo
    T* boh, int boh_stride0, int boh_stride1, int boh_stride2,        // btm output halo (local)
    const T* bih, int bih_stride0, int bih_stride1, int bih_stride2,  // btm input halo (local)
    int4* box,  // btm output transfer buffer (remote peer)
    int4* bix,  // btm input transfer buffer (local peer)
    // dimensions
    int dim0, int dim1, int dim2,
    bool top_first  // whether to communicate top halo first
    )
{
    const int blocks_per_side = gridDim.x / 2;
    const bool first_half = blockIdx.x < blocks_per_side;
    const int side_block = first_half ? blockIdx.x : blockIdx.x - blocks_per_side;
    if (top_first == first_half) {
        // This half of the grid handles the top halo.
        if (top_zero) {
            zero_tensor<T,contiguous>(
                dim0, dim1, dim2,
                toh, toh_stride0, toh_stride1, toh_stride2,
                threadIdx.x, side_block, blocks_per_side);
        } else {
            push_pull_tensor<T,contiguous>(
                dim0, dim1, dim2,
                tih, tih_stride0, tih_stride1, tih_stride2,
                toh, toh_stride0, toh_stride1, toh_stride2,
                tix, tox,
                threadIdx.x, side_block, blocks_per_side);
        }
    } else {
        // This half of the grid handles the bottom halo.
        if (btm_zero) {
            zero_tensor<T,contiguous>(
                dim0, dim1, dim2,
                boh, boh_stride0, boh_stride1, boh_stride2,
                threadIdx.x, side_block, blocks_per_side);
        } else {
            push_pull_tensor<T,contiguous>(
                dim0, dim1, dim2,
                bih, bih_stride0, bih_stride1, bih_stride2,
                boh, boh_stride0, boh_stride1, boh_stride2,
                bix, box,
                threadIdx.x, side_block, blocks_per_side);
        }
    }
}
// Busy-wait for roughly delay_nanoseconds on a single thread. The spin
// count is written to *counter so the compiler cannot prove the loop
// side-effect-free and optimize it away.
__global__ void delay_kernel(int delay_nanoseconds, int* counter)
{
    if (blockIdx.x == 0 && threadIdx.x == 0) {
        int spins = 0;
        const clock_t start = clock();
        double elapsed_ns = 0;
        do {
            const clock_t now = clock();
            elapsed_ns = (double)(now - start) * 1e9 / CLOCKS_PER_SEC;
            ++spins;
        } while (elapsed_ns < (double)delay_nanoseconds);
        *counter = spins;
    }
}
}
namespace apex { namespace contrib { namespace peer_memory {
int64_t allocate_raw(int64_t size)
{
float* ptr = 0L;
hipMalloc(&ptr, size);
hipMemset(ptr, 0, size);
return (int64_t)ptr;
}
void free_raw(int64_t raw)
{
hipFree((void*)raw);
}
void zero(int64_t raw, int64_t size)
{
hipMemset((void*)raw, 0, size);
}
// Serialize the IPC handle of a device allocation into a CPU uint8
// tensor so it can be shipped to peer processes (which reopen it via
// get_raw_peers()).
at::Tensor get_raw_ipc_address(int64_t raw)
{
    hipIpcMemHandle_t mem_handle;
    CUDACHECK( hipIpcGetMemHandle(&mem_handle, (void*)raw) );
    constexpr int handle_bytes = sizeof(hipIpcMemHandle_t);
    auto handle_tensor = torch::empty({handle_bytes}, torch::dtype(torch::kUInt8));
    memcpy(handle_tensor.data_ptr<uint8_t>(), &mem_handle, handle_bytes);
    return handle_tensor;
}
std::vector<int64_t> get_raw_peers(at::Tensor ipc_addresses, int peer_rank, int64_t raw)
{
    // Open every peer's serialized IPC handle and return the resulting device
    // pointers as int64 handles; the slot for our own rank is the local raw
    // pointer itself.
    const int group_size = ipc_addresses.size(0);
    std::vector<int64_t> pointers(group_size);
    for (int r = 0; r < group_size; ++r) {
        if (r == peer_rank) {
            pointers[r] = raw;
            continue;
        }
        hipIpcMemHandle_t handle;
        memcpy(&handle, ipc_addresses.index({r}).data_ptr<uint8_t>(), sizeof(hipIpcMemHandle_t));
        void* remote = 0L;
        CUDACHECK( hipIpcOpenMemHandle((void**)&remote, handle, hipIpcMemLazyEnablePeerAccess) );
        pointers[r] = (int64_t)remote;
    }
    return pointers;
}
// Wrap a raw device pointer as a non-owning fp16 CUDA tensor view (no copy).
at::Tensor blob_view_half(int64_t raw, std::vector<int64_t> shape, bool channels_last)
{
    return blob_view<at::Half>((at::Half*)raw, shape, torch::dtype(torch::kFloat16).device(torch::kCUDA), channels_last);
}
// Wrap a raw device pointer as a non-owning fp32 CUDA tensor view (no copy).
at::Tensor blob_view_float(int64_t raw, std::vector<int64_t> shape, bool channels_last)
{
    return blob_view<float>((float*)raw, shape, torch::dtype(torch::kFloat32).device(torch::kCUDA), channels_last);
}
// Wrap a raw device pointer as a non-owning int32 CUDA tensor view (no copy).
at::Tensor blob_view_int(int64_t raw, std::vector<int64_t> shape, bool channels_last)
{
    return blob_view<int>((int*)raw, shape, torch::dtype(torch::kInt32).device(torch::kCUDA), channels_last);
}
// Exchange 1-d top/bottom halos with neighboring spatial-parallel ranks via
// peer-memory transfer buffers (or zero a halo at the edge of the group).
// Validates shapes/strides, picks one of three kernel variants (contiguous
// int2, strided int2, plain scalar), and launches a single cooperative kernel
// in which half of the blocks handle the top halo and half the bottom halo.
void push_pull_halos_1d(
    bool diagnostics,
    bool explicit_nhwc,
    int numSM, // number of SMs to use (zero corresponds to all SMs)
    int rank, // rank in spatial parallel group
    bool top_zero, // if top halo should be zeroed
    at::Tensor top_in_halo, // top input halo buffer (in local device memory, sent to top neighbor)
    at::Tensor top_in_transfer, // top input transfer buffer (in local peer memory)
    at::Tensor top_out_transfer, // top output transfer buffer (in top neighbor peer memory)
    at::Tensor top_out_halo, // top output halo buffer (in local device memory, received from top neighbor)
    bool btm_zero, // if btm halo should be zeroed
    at::Tensor btm_in_halo, // btm input halo buffer (in local device memory, sent to btm neighbor)
    at::Tensor btm_in_transfer, // btm input transfer buffer (in local peer memory)
    at::Tensor btm_out_transfer, // btm output transfer buffer (in btm neighbor peer memory)
    at::Tensor btm_out_halo // btm output halo buffer (in local device memory, received from btm neighbor)
    )
{
    // basic checks of inputs
    // At most one side may be zeroed; all buffers must live on the GPU.
    TORCH_CHECK(!(top_zero && btm_zero));
    TORCH_CHECK(top_in_halo.is_cuda());
    TORCH_CHECK(top_out_transfer.is_cuda());
    TORCH_CHECK(top_in_transfer.is_cuda());
    TORCH_CHECK(top_out_halo.is_cuda());
    TORCH_CHECK(btm_in_halo.is_cuda());
    TORCH_CHECK(btm_out_transfer.is_cuda());
    TORCH_CHECK(btm_in_transfer.is_cuda());
    TORCH_CHECK(btm_out_halo.is_cuda());
    // tensor shapes
    int tih_N, tih_C, tih_H, tih_W;
    tensor_shape(top_in_halo, explicit_nhwc, tih_N, tih_C, tih_H, tih_W);
    int toh_N, toh_C, toh_H, toh_W;
    tensor_shape(top_out_halo, explicit_nhwc, toh_N, toh_C, toh_H, toh_W);
    int bih_N, bih_C, bih_H, bih_W;
    tensor_shape(btm_in_halo, explicit_nhwc, bih_N, bih_C, bih_H, bih_W);
    int boh_N, boh_C, boh_H, boh_W;
    tensor_shape(btm_out_halo, explicit_nhwc, boh_N, boh_C, boh_H, boh_W);
    // All four halo tensors must have identical N,C,H,W.
    TORCH_CHECK(toh_N == tih_N && tih_N == boh_N && boh_N == bih_N &&
                toh_C == tih_C && tih_C == boh_C && boh_C == bih_C &&
                toh_H == tih_H && tih_H == boh_H && boh_H == bih_H &&
                toh_W == tih_W && tih_W == boh_W && boh_W == bih_W);
    int NN=toh_N, NC=toh_C, NH=toh_H, NW=toh_W;
    if (diagnostics) {
        printf("rank %d: NN=%d, NC=%d, NH=%d, NW=%d\n", rank, NN, NC, NH, NW);
    }
    TORCH_CHECK(NN == 1);
    // tensor strides
    int tih_stride_N, tih_stride_C, tih_stride_H, tih_stride_W;
    tensor_strides(top_in_halo, explicit_nhwc, tih_stride_N, tih_stride_C, tih_stride_H, tih_stride_W);
    int toh_stride_N, toh_stride_C, toh_stride_H, toh_stride_W;
    tensor_strides(top_out_halo, explicit_nhwc, toh_stride_N, toh_stride_C, toh_stride_H, toh_stride_W);
    int bih_stride_N, bih_stride_C, bih_stride_H, bih_stride_W;
    tensor_strides(btm_in_halo, explicit_nhwc, bih_stride_N, bih_stride_C, bih_stride_H, bih_stride_W);
    int boh_stride_N, boh_stride_C, boh_stride_H, boh_stride_W;
    tensor_strides(btm_out_halo, explicit_nhwc, boh_stride_N, boh_stride_C, boh_stride_H, boh_stride_W);
    if (diagnostics) {
        printf("rank %d: tih_stride :: N=%d, C=%d, H=%d, W=%d\n",
               rank, tih_stride_N, tih_stride_C, tih_stride_H, tih_stride_W);
        printf("rank %d: toh_stride :: N=%d, C=%d, H=%d, W=%d\n",
               rank, toh_stride_N, toh_stride_C, toh_stride_H, toh_stride_W);
        printf("rank %d: bih_stride :: N=%d, C=%d, H=%d, W=%d\n",
               rank, bih_stride_N, bih_stride_C, bih_stride_H, bih_stride_W);
        printf("rank %d: boh_stride :: N=%d, C=%d, H=%d, W=%d\n",
               rank, boh_stride_N, boh_stride_C, boh_stride_H, boh_stride_W);
    }
    // determine if nhwc
    bool is_nhwc = (toh_stride_C == 1);
    if (diagnostics) {
        printf("rank %d: is_nhwc = %s\n", rank, is_nhwc ? "true" : "false");
    }
    // determine if contiguous
    // Contiguous here means: the output halos are dense (last element offset
    // equals element count - 1) and each non-zeroed input halo shares its
    // output halo's strides, so flat indexing can be used.
    bool contiguous = true;
    if ((NN-1)*toh_stride_N + (NC-1)*toh_stride_C +
        (NH-1)*toh_stride_H + (NW-1)*toh_stride_W
        != NN*NC*NH*NW - 1) {
        contiguous = false;
    }
    if ((NN-1)*boh_stride_N + (NC-1)*boh_stride_C +
        (NH-1)*boh_stride_H + (NW-1)*boh_stride_W
        != NN*NC*NH*NW - 1) {
        contiguous = false;
    }
    if (!top_zero) {
        if (toh_stride_N != tih_stride_N || toh_stride_C != tih_stride_C ||
            toh_stride_H != tih_stride_H || toh_stride_W != tih_stride_W) {
            contiguous = false;
        }
    }
    if (!btm_zero) {
        if (boh_stride_N != bih_stride_N || boh_stride_C != bih_stride_C ||
            boh_stride_H != bih_stride_H || boh_stride_W != bih_stride_W) {
            contiguous = false;
        }
    }
    if (diagnostics) {
        printf("rank %d: contiguous = %s\n", rank, contiguous ? "true" : "false");
    }
    // determine whether to communicate top halo first
    // Odd ranks start with the top halo so neighboring ranks pair up on the
    // same halo exchange at the same time.
    bool top_first = rank % 2 != 0;
    if (diagnostics) {
        printf("rank %d: top_first = %s\n", rank, top_first ? "true" : "false");
    }
    // peer memory buffers
    int tox_size = top_out_transfer.numel() * top_out_transfer.element_size();
    int tix_size = top_in_transfer.numel() * top_in_transfer.element_size();
    int box_size = btm_out_transfer.numel() * btm_out_transfer.element_size();
    int bix_size = btm_in_transfer.numel() * btm_in_transfer.element_size();
    if (!top_zero) {
        TORCH_CHECK(top_out_transfer.is_contiguous());
        TORCH_CHECK(top_in_transfer.is_contiguous());
        TORCH_CHECK(tox_size == tix_size);
    }
    if (!btm_zero) {
        TORCH_CHECK(btm_out_transfer.is_contiguous());
        TORCH_CHECK(btm_in_transfer.is_contiguous());
        TORCH_CHECK(box_size == bix_size);
    }
    // figure out launch parameters
    int device;
    hipGetDevice(&device);
    hipDeviceProp_t prop;
    hipGetDeviceProperties(&prop, device);
    if (numSM <= 0 || numSM > prop.multiProcessorCount) {
        numSM = prop.multiProcessorCount;
    }
    auto current_stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
    dim3 block(THREADS_PER_CTA, 1, 1);
    // helper macros to launch templated kernel
    // The BASE macro sizes the grid from occupancy, clamps it to an even block
    // count (top/bottom split) and to the available peer-memory, then performs
    // a cooperative launch.
#define LAUNCH_PUSH_PULL_HALO_KERNEL_BASE(T, CONTIGUOUS, TOP_ZERO, BTM_ZERO, KERNEL_ARGS, NUM_ELEMENTS) \
    do { \
      /* kernel configuration */ \
      int numBlocksPerSm; \
      hipOccupancyMaxActiveBlocksPerMultiprocessor( \
        &numBlocksPerSm, \
        push_pull_halos_1d_kernel<T,CONTIGUOUS,TOP_ZERO,BTM_ZERO>, \
        THREADS_PER_CTA, \
        0); \
      dim3 grid(numSM*numBlocksPerSm,1,1); \
      if (grid.x % 2 != 0) { \
        /* require even number of blocks (half for top, half for bottom) */ \
        grid.x -= 1; \
      } \
      if ((grid.x / 2) * THREADS_PER_CTA > NUM_ELEMENTS) { \
        /* only need enough blocks to cover top and bottom halo elements */ \
        grid.x = 2 * ((NUM_ELEMENTS + THREADS_PER_CTA - 1) / THREADS_PER_CTA); \
      } \
      if (!TOP_ZERO) { \
        /* require 2*128b=32B peer memory per thread */ \
        if ((grid.x / 2) * THREADS_PER_CTA * 32 > tox_size) { \
          grid.x = 2 * (tox_size / (THREADS_PER_CTA * 32)); \
        } \
      } \
      if (!BTM_ZERO) { \
        /* require 2*128b=32B peer memory per thread */ \
        if ((grid.x / 2) * THREADS_PER_CTA * 32 > box_size) { \
          grid.x = 2 * (box_size / (THREADS_PER_CTA * 32)); \
        } \
      } \
      TORCH_CHECK(grid.x >= 2); \
      \
      /* launch kernel */ \
      hipLaunchCooperativeKernel( \
        (void*)push_pull_halos_1d_kernel<T,CONTIGUOUS,TOP_ZERO,BTM_ZERO>, \
        grid, \
        block, \
        KERNEL_ARGS, \
        0, \
        current_stream); \
    } while (false)
    // Dispatch on the runtime top_zero/btm_zero flags to the matching template
    // instantiation (top_zero && btm_zero was excluded above).
#define LAUNCH_PUSH_PULL_HALO_KERNEL(T, CONTIGUOUS, KERNEL_ARGS, NUM_ELEMENTS) \
    do { \
      if (top_zero) { \
        LAUNCH_PUSH_PULL_HALO_KERNEL_BASE(T, CONTIGUOUS, true, false, KERNEL_ARGS, NUM_ELEMENTS); \
      } else if (btm_zero) { \
        LAUNCH_PUSH_PULL_HALO_KERNEL_BASE(T, CONTIGUOUS, false, true, KERNEL_ARGS, NUM_ELEMENTS); \
      } else { \
        LAUNCH_PUSH_PULL_HALO_KERNEL_BASE(T, CONTIGUOUS, false, false, KERNEL_ARGS, NUM_ELEMENTS); \
      } \
    } while (false)
    AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, top_out_halo.scalar_type(), "push_pull_halos_1d_kernel", [&]{
        if (diagnostics) {
            printf("rank %d: size(scalar_t) = %ld\n", rank, sizeof(scalar_t));
        }
        scalar_t* toh_p = top_out_halo.data_ptr<scalar_t>();
        scalar_t* tih_p = top_in_halo.data_ptr<scalar_t>();
        int4* tox_p = reinterpret_cast<int4*>(top_out_transfer.data_ptr<scalar_t>());
        int4* tix_p = reinterpret_cast<int4*>(top_in_transfer.data_ptr<scalar_t>());
        scalar_t* boh_p = btm_out_halo.data_ptr<scalar_t>();
        scalar_t* bih_p = btm_in_halo.data_ptr<scalar_t>();
        int4* box_p = reinterpret_cast<int4*>(btm_out_transfer.data_ptr<scalar_t>());
        int4* bix_p = reinterpret_cast<int4*>(btm_in_transfer.data_ptr<scalar_t>());
        if (diagnostics) printf("rank %d: choosing halo exchange kernel\n", rank);
        // do int2 vector loads if channel count permits
        if (contiguous &&
            (NN*NH*NW*NC * sizeof(scalar_t)) % sizeof(int2) == 0) {
            // can do contiguous int2 transfers
            if (diagnostics) {
                // (no diagnostic output for this case)
            }
            // Collapse all dimensions into a flat int2-element count; strides
            // become 1 because the data is dense.
            toh_stride_N = toh_stride_H = toh_stride_W = toh_stride_C = 1;
            tih_stride_N = tih_stride_H = tih_stride_W = tih_stride_C = 1;
            boh_stride_N = boh_stride_H = boh_stride_W = boh_stride_C = 1;
            bih_stride_N = bih_stride_H = bih_stride_W = bih_stride_C = 1;
            NC = (NN*NH*NW*NC * sizeof(scalar_t)) / sizeof(int2);
            NN = NH = NW = 1;
            if (diagnostics) {
                printf("rank %d: launching contiguous int2 halo exchange kernel\n",
                       rank);
                printf("rank %d: NC=%d, NH=%d, NW=%d\n", rank, NC, NH, NW);
            }
            void *kernel_args[] = {
                (int2**)&toh_p, &toh_stride_H, &toh_stride_W, &toh_stride_C,
                (int2**)&tih_p, &tih_stride_H, &tih_stride_W, &tih_stride_C,
                &tox_p, &tix_p,
                (int2**)&boh_p, &boh_stride_H, &boh_stride_W, &boh_stride_C,
                (int2**)&bih_p, &bih_stride_H, &bih_stride_W, &bih_stride_C,
                &box_p, &bix_p,
                &NH, &NW, &NC,
                &top_first
            };
            int num_elem = NN*NH*NW*NC;
            LAUNCH_PUSH_PULL_HALO_KERNEL(int2, true, kernel_args, num_elem);
        } else if (is_nhwc && (NC * sizeof(scalar_t)) % sizeof(int2) == 0) {
            // can do strided int2 transfers
            // Rescale strides and channel count from scalar_t units to int2 units.
            int divisor = sizeof(int2) / sizeof(scalar_t);
            if (diagnostics) {
                printf("rank %d: launching strided int2 halo exchange kernel\n",
                       rank);
            }
            toh_stride_N /= divisor; toh_stride_H /= divisor; toh_stride_W /= divisor;
            tih_stride_N /= divisor; tih_stride_H /= divisor; tih_stride_W /= divisor;
            boh_stride_N /= divisor; boh_stride_H /= divisor; boh_stride_W /= divisor;
            bih_stride_N /= divisor; bih_stride_H /= divisor; bih_stride_W /= divisor;
            NC /= divisor;
            if (diagnostics) {
                printf("rank %d: divisor=%d\n", rank, divisor);
                printf("rank %d: tih_stride :: N=%d, C=%d, H=%d, W=%d\n",
                       rank, tih_stride_N, tih_stride_C, tih_stride_H, tih_stride_W);
                printf("rank %d: toh_stride :: N=%d, C=%d, H=%d, W=%d\n",
                       rank, toh_stride_N, toh_stride_C, toh_stride_H, toh_stride_W);
                printf("rank %d: bih_stride :: N=%d, C=%d, H=%d, W=%d\n",
                       rank, bih_stride_N, bih_stride_C, bih_stride_H, bih_stride_W);
                printf("rank %d: boh_stride :: N=%d, C=%d, H=%d, W=%d\n",
                       rank, boh_stride_N, boh_stride_C, boh_stride_H, boh_stride_W);
                printf("rank %d: NC=%d, NH=%d, NW=%d\n", rank, NC, NH, NW);
            }
            void *kernel_args[] = {
                (int2**)&toh_p, &toh_stride_H, &toh_stride_W, &toh_stride_C,
                (int2**)&tih_p, &tih_stride_H, &tih_stride_W, &tih_stride_C,
                &tox_p, &tix_p,
                (int2**)&boh_p, &boh_stride_H, &boh_stride_W, &boh_stride_C,
                (int2**)&bih_p, &bih_stride_H, &bih_stride_W, &bih_stride_C,
                &box_p, &bix_p,
                &NH, &NW, &NC,
                &top_first
            };
            int num_elem = NH*NW*NC;
            LAUNCH_PUSH_PULL_HALO_KERNEL(int2, false, kernel_args, num_elem);
        } else {
            // cannot do int2 transfers
            if (diagnostics) {
                printf("rank %d: launching non-int2 halo exchange kernel\n",
                       rank);
            }
            int num_elem = NC*NH*NW;
            // Argument order maps (dim0,dim1,dim2) to (H,W,C) for NHWC and
            // (C,H,W) for NCHW layouts.
            if (is_nhwc) {
                void *kernel_args[] = {
                    &toh_p, &toh_stride_H, &toh_stride_W, &toh_stride_C,
                    &tih_p, &tih_stride_H, &tih_stride_W, &tih_stride_C,
                    &tox_p, &tix_p,
                    &boh_p, &boh_stride_H, &boh_stride_W, &boh_stride_C,
                    &bih_p, &bih_stride_H, &bih_stride_W, &bih_stride_C,
                    &box_p, &bix_p,
                    &NH, &NW, &NC,
                    &top_first
                };
                LAUNCH_PUSH_PULL_HALO_KERNEL(scalar_t, false, kernel_args, num_elem);
            } else {
                void *kernel_args[] = {
                    &toh_p, &toh_stride_C, &toh_stride_H, &toh_stride_W,
                    &tih_p, &tih_stride_C, &tih_stride_H, &tih_stride_W,
                    &tox_p, &tix_p,
                    &boh_p, &boh_stride_C, &boh_stride_H, &boh_stride_W,
                    &bih_p, &bih_stride_C, &bih_stride_H, &bih_stride_W,
                    &box_p, &bix_p,
                    &NC, &NH, &NW,
                    &top_first
                };
                LAUNCH_PUSH_PULL_HALO_KERNEL(scalar_t, false, kernel_args, num_elem);
            }
        }
    } );
#undef LAUNCH_PUSH_PULL_HALO_KERNEL_BASE
#undef LAUNCH_PUSH_PULL_HALO_KERNEL
}
} } }
| 248a81d72314007f1129c10c730f48a342e9dfa0.cu | #include <torch/extension.h>
#include <c10/cuda/CUDACachingAllocator.h>
#include <ATen/cuda/CUDAContext.h>
#include <list>
#include <cstdio>
#include <cassert>
#include <cuda_runtime_api.h>
#include "nccl.h"
// Report any CUDA runtime error with the host name and source location
// (useful in multi-process runs). Note: prints but does not abort.
#define CUDACHECK(cmd) do {                         \
    cudaError_t err = cmd;                          \
    if( err != cudaSuccess ) {                      \
        char hostname[1024];                        \
        gethostname(hostname, 1024);                \
        printf("%s: CUDA failure %s:%d '%s'\n",     \
             hostname,                              \
             __FILE__,__LINE__,cudaGetErrorString(err)); \
    }                                               \
} while(0)
namespace {
constexpr int THREADS_PER_CTA = 128;
/* Basic deleter function for from_blob function.
void deleter(void* ptr)
{
printf("deleter(ptr=%p)\n",ptr);
cudaFree(ptr);
}
*/
template<class T>
at::Tensor blob_view(T* raw_ptr, std::vector<int64_t> shape, const at::TensorOptions& options, bool channels_last)
{
    // Create a non-owning tensor view over a raw device pointer.
    // channels_last expects a 4-d NCHW-ordered shape vector and produces
    // NHWC (channels-last) strides; otherwise standard row-major strides.
    std::vector<int64_t> strides(shape.size());
    if (channels_last) {
        assert(shape.size() == 4);
        strides[0] = shape[1]*shape[2]*shape[3];
        strides[1] = 1;
        strides[2] = shape[1]*shape[3];
        strides[3] = shape[1];
    } else {
        // Row-major: accumulate sizes from the innermost dimension outward.
        // (size is only needed for this branch; the previous trailing
        // `size *= sizeof(T)` was dead code and has been removed.)
        size_t size = 1;
        int idx = strides.size();
        for (auto it = shape.rbegin(); it != shape.rend(); ++it)
        {
            strides[--idx] = size;
            size *= *it;
        }
    }
    // TODO: Implement dynamic reuse of pooled peer memory.
    // We provide no deleter function because all peer memory allocations are static in this implementation.
    return torch::from_blob((void*)raw_ptr, shape, strides, 0L, options);
}
void tensor_shape(at::Tensor t, bool explicit_nhwc, int& N, int& C, int& H, int& W)
{
    // Extract N, C, H, W from a 3-d (implicit N=1) or 4-d tensor, honoring
    // either explicit-NHWC or NCHW dimension ordering. Asserts on other ranks.
    if (t.dim() == 3) {
        N = 1;
        C = explicit_nhwc ? t.size(2) : t.size(0);
        H = explicit_nhwc ? t.size(0) : t.size(1);
        W = explicit_nhwc ? t.size(1) : t.size(2);
    } else if (t.dim() == 4) {
        N = t.size(0);
        C = explicit_nhwc ? t.size(3) : t.size(1);
        H = explicit_nhwc ? t.size(1) : t.size(2);
        W = explicit_nhwc ? t.size(2) : t.size(3);
    } else {
        printf("%s;%d - t.dim() must be either 3 or 4 (was %d)\n",__FILE__,__LINE__,int(t.dim()));
        assert(t.dim() == 3 || t.dim() == 4);
    }
}
void tensor_strides(at::Tensor t, bool explicit_nhwc, int& stride_N, int& stride_C, int& stride_H, int& stride_W)
{
    // Extract per-dimension strides from a 3-d or 4-d tensor, honoring either
    // explicit-NHWC or NCHW dimension ordering. For 3-d tensors a synthetic
    // batch stride (total element count) is reported. Asserts on other ranks.
    if (t.dim() == 3) {
        if (explicit_nhwc) {
            stride_C = t.stride(2);
            stride_H = t.stride(0);
            stride_W = t.stride(1);
        } else {
            stride_C = t.stride(0);
            stride_H = t.stride(1);
            stride_W = t.stride(2);
        }
        // No batch dimension: use the full element count as the N stride.
        stride_N = t.size(0)*t.size(1)*t.size(2);
    } else if (t.dim() == 4) {
        if (explicit_nhwc) {
            stride_N = t.stride(0);
            stride_C = t.stride(3);
            stride_H = t.stride(1);
            stride_W = t.stride(2);
        } else {
            stride_N = t.stride(0);
            stride_C = t.stride(1);
            stride_H = t.stride(2);
            stride_W = t.stride(3);
        }
    } else {
        // Cast to int for %d: t.dim() returns int64_t, and passing it to %d is
        // undefined behavior (tensor_shape already casts the same way).
        printf("%s;%d - t.dim() must be either 3 or 4 (was %d)\n",__FILE__,__LINE__,int(t.dim()));
        assert(t.dim() == 3 || t.dim() == 4);
    }
}
// Write a zero of type T through dst (generic scalar case).
template<class T>
inline __device__ void __zero(T* dst)
{
    *dst = T(0);
}
// Overload for int2 (vector type has no T(0) conversion); zero both lanes.
inline __device__ void __zero(int2* dst)
{
    *dst = {0, 0};
}
template<class T, bool contiguous>
inline __device__ void zero_tensor(
    const int dim0,
    const int dim1,
    const int dim2,
    T* __restrict__ data,
    const int data_stride0,
    const int data_stride1,
    const int data_stride2,
    const int thread_id,
    const int block_id,
    const int num_blocks
    )
{
    // Zero all dim0 x dim1 x dim2 elements of a (possibly strided) tensor.
    // Work is distributed over num_blocks * THREADS_PER_CTA threads, each
    // striding through the flattened index space.
    const int total = dim0 * dim1 * dim2;
    const int step = num_blocks * THREADS_PER_CTA;
    for (int idx = block_id * THREADS_PER_CTA + thread_id; idx < total; idx += step) {
        if (contiguous) {
            // Dense layout: flat index is the offset.
            __zero(data + idx);
        } else {
            // Decompose the flat index into (i0, i1, i2) and apply strides.
            const int i2 = idx % dim2;
            const int rest = idx / dim2;
            const int i1 = rest % dim1;
            const int i0 = rest / dim1;
            __zero(data + i0 * data_stride0 + i1 * data_stride1 + i2 * data_stride2);
        }
    }
}
// Exchange one tensor's elements with a remote GPU through a pair of peer
// memory buffers: each thread writes a 16B flit (payload + semaphore word) to
// the remote peer buffer and spins until the matching flit arrives in the
// local peer buffer, then stores the received payload into data_out. Uses a
// double-buffer scheme (status bit) so successive iterations do not race.
template<class T, bool contiguous>
inline __device__ void push_pull_tensor(
    const int dim0,
    const int dim1,
    const int dim2,
    const T* __restrict__ data_in,
    const int data_in_stride0,
    const int data_in_stride1,
    const int data_in_stride2,
    T* __restrict__ data_out,
    const int data_out_stride0,
    const int data_out_stride1,
    const int data_out_stride2,
    int4* local_peer,
    int4* remote_peer,
    const int thread_id,
    const int block_id,
    const int num_blocks
    )
{
    // 128b=16B NVLink flit
    // Note: Use last 4B as a semaphore
    // Payload must leave the last 4B of the flit free for the semaphore word.
    static_assert(sizeof(T) <= 12);
    union Flit {
        T payload;
        uint uints[4];
    };
    // Communication bit indicates whether flit has been received from
    // a remote GPU
    constexpr uint communication_mask = 1 << 0;
    // Status bit is used to choose the active peer buffer in an
    // alternating double buffer scheme. We use buffer 1 if the bits
    // match, use buffer 2 if the bits differ, and invert the bit
    // after finishing with a buffer.
    constexpr uint status_mask = 1 << 1;
    // Split peer memory into two sets of buffers
    // Note: Each block owns a THREADS_PER_CTA*2*16B chunk of peer
    // memory
    const int peer_offset1 = block_id * THREADS_PER_CTA * 2 + thread_id;
    const int peer_offset2 = peer_offset1 + THREADS_PER_CTA;
    volatile int* local_peer1 = reinterpret_cast<volatile int*>(local_peer + peer_offset1);
    volatile int* local_peer2 = reinterpret_cast<volatile int*>(local_peer + peer_offset2);
    volatile int* remote_peer1 = reinterpret_cast<volatile int*>(remote_peer + peer_offset1);
    volatile int* remote_peer2 = reinterpret_cast<volatile int*>(remote_peer + peer_offset2);
    // Iterate through tensor entries
    const int num_threads = num_blocks * THREADS_PER_CTA;
    const int count = dim0 * dim1 * dim2;
    for (int i0 = block_id * THREADS_PER_CTA; i0 < count; i0 += num_threads) {
        const int i = i0 + thread_id;
        // Threads past the end still participate in the semaphore protocol,
        // they just skip the payload load/store.
        const bool has_data = i < count;
        // Calculate buffer positions
        int data_in_offset, data_out_offset;
        if (contiguous) {
            data_in_offset = i;
            data_out_offset = i;
        } else {
            const int j2 = i % dim2;
            const int k = i / dim2;
            const int j1 = k % dim1;
            const int j0 = k / dim1;
            data_in_offset = j0 * data_in_stride0 + j1 * data_in_stride1 + j2 * data_in_stride2;
            data_out_offset = j0 * data_out_stride0 + j1 * data_out_stride1 + j2 * data_out_stride2;
        }
        // Determine which peer memory buffer to use
        // Note: The status bit is not affected by asynchronous
        // communication from the remote GPU.
        Flit local_message1, local_message2;
        asm volatile("ld.volatile.global.v4.u32 {%0,%1,%2,%3}, [%4];" :
                     "=r"(local_message1.uints[0]),
                     "=r"(local_message1.uints[1]),
                     "=r"(local_message1.uints[2]),
                     "=r"(local_message1.uints[3])
                     : "l"(local_peer1) : "memory");
        asm volatile("ld.volatile.global.v4.u32 {%0,%1,%2,%3}, [%4];" :
                     "=r"(local_message2.uints[0]),
                     "=r"(local_message2.uints[1]),
                     "=r"(local_message2.uints[2]),
                     "=r"(local_message2.uints[3])
                     : "l"(local_peer2) : "memory");
        const uint status1 = local_message1.uints[3] & status_mask;
        const uint status2 = local_message2.uints[3] & status_mask;
        const bool peer1_is_active = (status1 ^ status2) == 0;
        volatile int* ox = peer1_is_active ? remote_peer1 : remote_peer2;
        volatile int* ix = peer1_is_active ? local_peer1 : local_peer2;
        const uint status = peer1_is_active ? status1 : status2;
        Flit recv_message = peer1_is_active ? local_message1 : local_message2;
        // Send flit to remote GPU
        // Note: Set communication bit and keep status bit
        Flit send_message;
        if (has_data) {
            send_message.payload = data_in[data_in_offset];
        }
        send_message.uints[3] = communication_mask | status;
        asm volatile("st.volatile.global.v4.u32 [%0], {%1,%2,%3,%4};" ::
                     "l"(ox),
                     "r"(send_message.uints[0]),
                     "r"(send_message.uints[1]),
                     "r"(send_message.uints[2]),
                     "r"(send_message.uints[3])
                     : "memory");
        // Recieve flit from peer
        // Spin until the remote GPU sets the communication bit in our local
        // peer buffer.
        while ((recv_message.uints[3] & communication_mask) == 0) {
            asm volatile("ld.volatile.global.v4.u32 {%0,%1,%2,%3}, [%4];" :
                         "=r"(recv_message.uints[0]),
                         "=r"(recv_message.uints[1]),
                         "=r"(recv_message.uints[2]),
                         "=r"(recv_message.uints[3])
                         : "l"(ix) : "memory");
        }
        if (has_data) {
            data_out[data_out_offset] = recv_message.payload;
        }
        // Reset semaphore
        // Note: Clear communication bit and invert status bit
        uint flag = ~status & status_mask;
        asm volatile("st.volatile.global.v4.u32 [%0], {%1,%2,%3,%4};" ::
                     "l"(ix),
                     "n"(0),
                     "n"(0),
                     "n"(0),
                     "r"(flag)
                     : "memory");
        // Make the semaphore reset visible system-wide before reusing the
        // buffer in the next iteration (skipped on the last iteration).
        if (i0 + num_threads < count) {
            __threadfence_system();
        }
    }
}
template<class T, bool contiguous, bool top_zero, bool btm_zero>
#if __CUDA_ARCH__ >= 700
__launch_bounds__(THREADS_PER_CTA)
#endif
__global__ void push_pull_halos_1d_kernel(
    // top halo,
    T* toh, int toh_stride0, int toh_stride1, int toh_stride2, // top output halo (local)
    const T* tih, int tih_stride0, int tih_stride1, int tih_stride2, // top input halo (local)
    int4* tox, // top output transfer buffer (remote peer)
    int4* tix, // top input transfer buffer (local peer)
    // btm halo
    T* boh, int boh_stride0, int boh_stride1, int boh_stride2, // btm output halo (local)
    const T* bih, int bih_stride0, int bih_stride1, int bih_stride2, // btm input halo (local)
    int4* box, // btm output transfer buffer (remote peer)
    int4* bix, // btm input transfer buffer (local peer)
    // dimensions
    int dim0, int dim1, int dim2,
    bool top_first // whether to launch communicate top halo first
    )
{
    // The first half of the grid handles one halo and the second half the
    // other; top_first selects which half starts with the top halo so paired
    // ranks work on matching halos at the same time.
    const int half_grid = gridDim.x / 2;
    const bool in_first_half = blockIdx.x < half_grid;
    const int side_block = in_first_half ? blockIdx.x : blockIdx.x - half_grid;
    if (top_first == in_first_half) {
        // This block works on the top halo: either zero it or exchange it.
        if (top_zero) {
            zero_tensor<T,contiguous>(
                dim0, dim1, dim2,
                toh, toh_stride0, toh_stride1, toh_stride2,
                threadIdx.x, side_block, half_grid);
        } else {
            push_pull_tensor<T,contiguous>(
                dim0, dim1, dim2,
                tih, tih_stride0, tih_stride1, tih_stride2,
                toh, toh_stride0, toh_stride1, toh_stride2,
                tix, tox,
                threadIdx.x, side_block, half_grid);
        }
    } else {
        // This block works on the bottom halo.
        if (btm_zero) {
            zero_tensor<T,contiguous>(
                dim0, dim1, dim2,
                boh, boh_stride0, boh_stride1, boh_stride2,
                threadIdx.x, side_block, half_grid);
        } else {
            push_pull_tensor<T,contiguous>(
                dim0, dim1, dim2,
                bih, bih_stride0, bih_stride1, bih_stride2,
                boh, boh_stride0, boh_stride1, boh_stride2,
                bix, box,
                threadIdx.x, side_block, half_grid);
        }
    }
}
__global__ void delay_kernel(int delay_nanoseconds, int* counter)
{
    // Busy-wait for roughly `delay_nanoseconds` on a single thread, writing the
    // number of polling iterations to *counter so the compiler cannot prove the
    // loop is side-effect free and optimize it away.
    if (blockIdx.x != 0 || threadIdx.x != 0) return;
    int iterations = 0;
    const clock_t start = clock();
    for (;;) {
        const clock_t now = clock();
        const double elapsed_ns = (double)(now - start) * 1e9 / CLOCKS_PER_SEC;
        ++iterations;
        if (elapsed_ns >= (double)delay_nanoseconds) break;
    }
    *counter = iterations;
}
}
namespace apex { namespace contrib { namespace peer_memory {
int64_t allocate_raw(int64_t size)
{
    // Allocate a zero-initialized device buffer of `size` bytes and return its
    // address as an opaque int64 handle (released later via free_raw).
    float* ptr = 0L;
    // Check the runtime calls with the file's CUDACHECK macro; previously
    // allocation/memset failures were silently ignored.
    CUDACHECK( cudaMalloc(&ptr, size) );
    CUDACHECK( cudaMemset(ptr, 0, size) );
    return (int64_t)ptr;
}
void free_raw(int64_t raw)
{
    // Release a device buffer previously obtained from allocate_raw.
    // Report (rather than silently drop) any runtime error.
    CUDACHECK( cudaFree((void*)raw) );
}
void zero(int64_t raw, int64_t size)
{
    // Zero-fill `size` bytes of the device buffer behind the raw handle.
    // Report (rather than silently drop) any runtime error.
    CUDACHECK( cudaMemset((void*)raw, 0, size) );
}
at::Tensor get_raw_ipc_address(int64_t raw)
{
    // Serialize the IPC handle of a device allocation into a CPU uint8 tensor
    // so it can be exchanged with peer processes (see get_raw_peers).
    cudaIpcMemHandle_t handle;
    CUDACHECK( cudaIpcGetMemHandle(&handle, (void*)raw) );
    constexpr int handle_bytes = sizeof(cudaIpcMemHandle_t);
    auto result = torch::empty({handle_bytes}, torch::dtype(torch::kUInt8));
    memcpy(result.data_ptr<uint8_t>(), (uint8_t*)&handle, handle_bytes);
    return result;
}
std::vector<int64_t> get_raw_peers(at::Tensor ipc_addresses, int peer_rank, int64_t raw)
{
    // Open every peer's serialized IPC handle and return the resulting device
    // pointers as int64 handles; the slot for our own rank is the local raw
    // pointer itself.
    const int group_size = ipc_addresses.size(0);
    std::vector<int64_t> pointers(group_size);
    for (int r = 0; r < group_size; ++r) {
        if (r == peer_rank) {
            pointers[r] = raw;
            continue;
        }
        cudaIpcMemHandle_t handle;
        memcpy(&handle, ipc_addresses.index({r}).data_ptr<uint8_t>(), sizeof(cudaIpcMemHandle_t));
        void* remote = 0L;
        CUDACHECK( cudaIpcOpenMemHandle((void**)&remote, handle, cudaIpcMemLazyEnablePeerAccess) );
        pointers[r] = (int64_t)remote;
    }
    return pointers;
}
// Wrap a raw device pointer as a non-owning fp16 CUDA tensor view (no copy).
at::Tensor blob_view_half(int64_t raw, std::vector<int64_t> shape, bool channels_last)
{
    return blob_view<at::Half>((at::Half*)raw, shape, torch::dtype(torch::kFloat16).device(torch::kCUDA), channels_last);
}
// Wrap a raw device pointer as a non-owning fp32 CUDA tensor view (no copy).
at::Tensor blob_view_float(int64_t raw, std::vector<int64_t> shape, bool channels_last)
{
    return blob_view<float>((float*)raw, shape, torch::dtype(torch::kFloat32).device(torch::kCUDA), channels_last);
}
// Wrap a raw device pointer as a non-owning int32 CUDA tensor view (no copy).
at::Tensor blob_view_int(int64_t raw, std::vector<int64_t> shape, bool channels_last)
{
    return blob_view<int>((int*)raw, shape, torch::dtype(torch::kInt32).device(torch::kCUDA), channels_last);
}
void push_pull_halos_1d(
bool diagnostics,
bool explicit_nhwc,
int numSM, // number of SMs to use (zero corresponds to all SMs)
int rank, // rank in spatial parallel group
bool top_zero, // if top halo should be zeroed
at::Tensor top_in_halo, // top input halo buffer (in local device memory, sent to top neighbor)
at::Tensor top_in_transfer, // top input transfer buffer (in local peer memory)
at::Tensor top_out_transfer, // top output transfer buffer (in top neighbor peer memory)
at::Tensor top_out_halo, // top output halo buffer (in local device memory, received from top neighbor)
bool btm_zero, // if btm halo should be zeroed
at::Tensor btm_in_halo, // btm input halo buffer (in local device memory, sent to btm neighbor)
at::Tensor btm_in_transfer, // btm input transfer buffer (in local peer memory)
at::Tensor btm_out_transfer, // btm output transfer buffer (in btm neighbor peer memory)
at::Tensor btm_out_halo // btm output halo buffer (in local device memory, received from btm neighbor)
)
{
// basic checks of inputs
TORCH_CHECK(!(top_zero && btm_zero));
TORCH_CHECK(top_in_halo.is_cuda());
TORCH_CHECK(top_out_transfer.is_cuda());
TORCH_CHECK(top_in_transfer.is_cuda());
TORCH_CHECK(top_out_halo.is_cuda());
TORCH_CHECK(btm_in_halo.is_cuda());
TORCH_CHECK(btm_out_transfer.is_cuda());
TORCH_CHECK(btm_in_transfer.is_cuda());
TORCH_CHECK(btm_out_halo.is_cuda());
// tensor shapes
int tih_N, tih_C, tih_H, tih_W;
tensor_shape(top_in_halo, explicit_nhwc, tih_N, tih_C, tih_H, tih_W);
int toh_N, toh_C, toh_H, toh_W;
tensor_shape(top_out_halo, explicit_nhwc, toh_N, toh_C, toh_H, toh_W);
int bih_N, bih_C, bih_H, bih_W;
tensor_shape(btm_in_halo, explicit_nhwc, bih_N, bih_C, bih_H, bih_W);
int boh_N, boh_C, boh_H, boh_W;
tensor_shape(btm_out_halo, explicit_nhwc, boh_N, boh_C, boh_H, boh_W);
TORCH_CHECK(toh_N == tih_N && tih_N == boh_N && boh_N == bih_N &&
toh_C == tih_C && tih_C == boh_C && boh_C == bih_C &&
toh_H == tih_H && tih_H == boh_H && boh_H == bih_H &&
toh_W == tih_W && tih_W == boh_W && boh_W == bih_W);
int NN=toh_N, NC=toh_C, NH=toh_H, NW=toh_W;
if (diagnostics) {
printf("rank %d: NN=%d, NC=%d, NH=%d, NW=%d\n", rank, NN, NC, NH, NW);
}
TORCH_CHECK(NN == 1);
// tensor strides
int tih_stride_N, tih_stride_C, tih_stride_H, tih_stride_W;
tensor_strides(top_in_halo, explicit_nhwc, tih_stride_N, tih_stride_C, tih_stride_H, tih_stride_W);
int toh_stride_N, toh_stride_C, toh_stride_H, toh_stride_W;
tensor_strides(top_out_halo, explicit_nhwc, toh_stride_N, toh_stride_C, toh_stride_H, toh_stride_W);
int bih_stride_N, bih_stride_C, bih_stride_H, bih_stride_W;
tensor_strides(btm_in_halo, explicit_nhwc, bih_stride_N, bih_stride_C, bih_stride_H, bih_stride_W);
int boh_stride_N, boh_stride_C, boh_stride_H, boh_stride_W;
tensor_strides(btm_out_halo, explicit_nhwc, boh_stride_N, boh_stride_C, boh_stride_H, boh_stride_W);
if (diagnostics) {
printf("rank %d: tih_stride :: N=%d, C=%d, H=%d, W=%d\n",
rank, tih_stride_N, tih_stride_C, tih_stride_H, tih_stride_W);
printf("rank %d: toh_stride :: N=%d, C=%d, H=%d, W=%d\n",
rank, toh_stride_N, toh_stride_C, toh_stride_H, toh_stride_W);
printf("rank %d: bih_stride :: N=%d, C=%d, H=%d, W=%d\n",
rank, bih_stride_N, bih_stride_C, bih_stride_H, bih_stride_W);
printf("rank %d: boh_stride :: N=%d, C=%d, H=%d, W=%d\n",
rank, boh_stride_N, boh_stride_C, boh_stride_H, boh_stride_W);
}
// determine if nhwc
bool is_nhwc = (toh_stride_C == 1);
if (diagnostics) {
printf("rank %d: is_nhwc = %s\n", rank, is_nhwc ? "true" : "false");
}
// determine if contiguous
bool contiguous = true;
if ((NN-1)*toh_stride_N + (NC-1)*toh_stride_C +
(NH-1)*toh_stride_H + (NW-1)*toh_stride_W
!= NN*NC*NH*NW - 1) {
contiguous = false;
}
if ((NN-1)*boh_stride_N + (NC-1)*boh_stride_C +
(NH-1)*boh_stride_H + (NW-1)*boh_stride_W
!= NN*NC*NH*NW - 1) {
contiguous = false;
}
if (!top_zero) {
if (toh_stride_N != tih_stride_N || toh_stride_C != tih_stride_C ||
toh_stride_H != tih_stride_H || toh_stride_W != tih_stride_W) {
contiguous = false;
}
}
if (!btm_zero) {
if (boh_stride_N != bih_stride_N || boh_stride_C != bih_stride_C ||
boh_stride_H != bih_stride_H || boh_stride_W != bih_stride_W) {
contiguous = false;
}
}
if (diagnostics) {
printf("rank %d: contiguous = %s\n", rank, contiguous ? "true" : "false");
}
// determine whether to communicate top halo first
bool top_first = rank % 2 != 0;
if (diagnostics) {
printf("rank %d: top_first = %s\n", rank, top_first ? "true" : "false");
}
// peer memory buffers
int tox_size = top_out_transfer.numel() * top_out_transfer.element_size();
int tix_size = top_in_transfer.numel() * top_in_transfer.element_size();
int box_size = btm_out_transfer.numel() * btm_out_transfer.element_size();
int bix_size = btm_in_transfer.numel() * btm_in_transfer.element_size();
if (!top_zero) {
TORCH_CHECK(top_out_transfer.is_contiguous());
TORCH_CHECK(top_in_transfer.is_contiguous());
TORCH_CHECK(tox_size == tix_size);
}
if (!btm_zero) {
TORCH_CHECK(btm_out_transfer.is_contiguous());
TORCH_CHECK(btm_in_transfer.is_contiguous());
TORCH_CHECK(box_size == bix_size);
}
// figure out launch parameters
int device;
cudaGetDevice(&device);
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, device);
if (numSM <= 0 || numSM > prop.multiProcessorCount) {
numSM = prop.multiProcessorCount;
}
auto current_stream = at::cuda::getCurrentCUDAStream();
dim3 block(THREADS_PER_CTA, 1, 1);
// helper macros to launch templated kernel
#define LAUNCH_PUSH_PULL_HALO_KERNEL_BASE(T, CONTIGUOUS, TOP_ZERO, BTM_ZERO, KERNEL_ARGS, NUM_ELEMENTS) \
do { \
/* kernel configuration */ \
int numBlocksPerSm; \
cudaOccupancyMaxActiveBlocksPerMultiprocessor( \
&numBlocksPerSm, \
push_pull_halos_1d_kernel<T,CONTIGUOUS,TOP_ZERO,BTM_ZERO>, \
THREADS_PER_CTA, \
0); \
dim3 grid(numSM*numBlocksPerSm,1,1); \
if (grid.x % 2 != 0) { \
/* require even number of blocks (half for top, half for bottom) */ \
grid.x -= 1; \
} \
if ((grid.x / 2) * THREADS_PER_CTA > NUM_ELEMENTS) { \
/* only need enough blocks to cover top and bottom halo elements */ \
grid.x = 2 * ((NUM_ELEMENTS + THREADS_PER_CTA - 1) / THREADS_PER_CTA); \
} \
if (!TOP_ZERO) { \
/* require 2*128b=32B peer memory per thread */ \
if ((grid.x / 2) * THREADS_PER_CTA * 32 > tox_size) { \
grid.x = 2 * (tox_size / (THREADS_PER_CTA * 32)); \
} \
} \
if (!BTM_ZERO) { \
/* require 2*128b=32B peer memory per thread */ \
if ((grid.x / 2) * THREADS_PER_CTA * 32 > box_size) { \
grid.x = 2 * (box_size / (THREADS_PER_CTA * 32)); \
} \
} \
TORCH_CHECK(grid.x >= 2); \
\
/* launch kernel */ \
cudaLaunchCooperativeKernel( \
(void*)push_pull_halos_1d_kernel<T,CONTIGUOUS,TOP_ZERO,BTM_ZERO>, \
grid, \
block, \
KERNEL_ARGS, \
0, \
current_stream); \
} while (false)
#define LAUNCH_PUSH_PULL_HALO_KERNEL(T, CONTIGUOUS, KERNEL_ARGS, NUM_ELEMENTS) \
do { \
if (top_zero) { \
LAUNCH_PUSH_PULL_HALO_KERNEL_BASE(T, CONTIGUOUS, true, false, KERNEL_ARGS, NUM_ELEMENTS); \
} else if (btm_zero) { \
LAUNCH_PUSH_PULL_HALO_KERNEL_BASE(T, CONTIGUOUS, false, true, KERNEL_ARGS, NUM_ELEMENTS); \
} else { \
LAUNCH_PUSH_PULL_HALO_KERNEL_BASE(T, CONTIGUOUS, false, false, KERNEL_ARGS, NUM_ELEMENTS); \
} \
} while (false)
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, top_out_halo.scalar_type(), "push_pull_halos_1d_kernel", [&]{
if (diagnostics) {
printf("rank %d: size(scalar_t) = %ld\n", rank, sizeof(scalar_t));
}
scalar_t* toh_p = top_out_halo.data_ptr<scalar_t>();
scalar_t* tih_p = top_in_halo.data_ptr<scalar_t>();
int4* tox_p = reinterpret_cast<int4*>(top_out_transfer.data_ptr<scalar_t>());
int4* tix_p = reinterpret_cast<int4*>(top_in_transfer.data_ptr<scalar_t>());
scalar_t* boh_p = btm_out_halo.data_ptr<scalar_t>();
scalar_t* bih_p = btm_in_halo.data_ptr<scalar_t>();
int4* box_p = reinterpret_cast<int4*>(btm_out_transfer.data_ptr<scalar_t>());
int4* bix_p = reinterpret_cast<int4*>(btm_in_transfer.data_ptr<scalar_t>());
if (diagnostics) printf("rank %d: choosing halo exchange kernel\n", rank);
// do int2 vector loads if channel count permits
if (contiguous &&
(NN*NH*NW*NC * sizeof(scalar_t)) % sizeof(int2) == 0) {
// can do contiguous int2 transfers
if (diagnostics) {
}
toh_stride_N = toh_stride_H = toh_stride_W = toh_stride_C = 1;
tih_stride_N = tih_stride_H = tih_stride_W = tih_stride_C = 1;
boh_stride_N = boh_stride_H = boh_stride_W = boh_stride_C = 1;
bih_stride_N = bih_stride_H = bih_stride_W = bih_stride_C = 1;
NC = (NN*NH*NW*NC * sizeof(scalar_t)) / sizeof(int2);
NN = NH = NW = 1;
if (diagnostics) {
printf("rank %d: launching contiguous int2 halo exchange kernel\n",
rank);
printf("rank %d: NC=%d, NH=%d, NW=%d\n", rank, NC, NH, NW);
}
void *kernel_args[] = {
(int2**)&toh_p, &toh_stride_H, &toh_stride_W, &toh_stride_C,
(int2**)&tih_p, &tih_stride_H, &tih_stride_W, &tih_stride_C,
&tox_p, &tix_p,
(int2**)&boh_p, &boh_stride_H, &boh_stride_W, &boh_stride_C,
(int2**)&bih_p, &bih_stride_H, &bih_stride_W, &bih_stride_C,
&box_p, &bix_p,
&NH, &NW, &NC,
&top_first
};
int num_elem = NN*NH*NW*NC;
LAUNCH_PUSH_PULL_HALO_KERNEL(int2, true, kernel_args, num_elem);
} else if (is_nhwc && (NC * sizeof(scalar_t)) % sizeof(int2) == 0) {
// can do strided int2 transfers
int divisor = sizeof(int2) / sizeof(scalar_t);
if (diagnostics) {
printf("rank %d: launching strided int2 halo exchange kernel\n",
rank);
}
toh_stride_N /= divisor; toh_stride_H /= divisor; toh_stride_W /= divisor;
tih_stride_N /= divisor; tih_stride_H /= divisor; tih_stride_W /= divisor;
boh_stride_N /= divisor; boh_stride_H /= divisor; boh_stride_W /= divisor;
bih_stride_N /= divisor; bih_stride_H /= divisor; bih_stride_W /= divisor;
NC /= divisor;
if (diagnostics) {
printf("rank %d: divisor=%d\n", rank, divisor);
printf("rank %d: tih_stride :: N=%d, C=%d, H=%d, W=%d\n",
rank, tih_stride_N, tih_stride_C, tih_stride_H, tih_stride_W);
printf("rank %d: toh_stride :: N=%d, C=%d, H=%d, W=%d\n",
rank, toh_stride_N, toh_stride_C, toh_stride_H, toh_stride_W);
printf("rank %d: bih_stride :: N=%d, C=%d, H=%d, W=%d\n",
rank, bih_stride_N, bih_stride_C, bih_stride_H, bih_stride_W);
printf("rank %d: boh_stride :: N=%d, C=%d, H=%d, W=%d\n",
rank, boh_stride_N, boh_stride_C, boh_stride_H, boh_stride_W);
printf("rank %d: NC=%d, NH=%d, NW=%d\n", rank, NC, NH, NW);
}
void *kernel_args[] = {
(int2**)&toh_p, &toh_stride_H, &toh_stride_W, &toh_stride_C,
(int2**)&tih_p, &tih_stride_H, &tih_stride_W, &tih_stride_C,
&tox_p, &tix_p,
(int2**)&boh_p, &boh_stride_H, &boh_stride_W, &boh_stride_C,
(int2**)&bih_p, &bih_stride_H, &bih_stride_W, &bih_stride_C,
&box_p, &bix_p,
&NH, &NW, &NC,
&top_first
};
int num_elem = NH*NW*NC;
LAUNCH_PUSH_PULL_HALO_KERNEL(int2, false, kernel_args, num_elem);
} else {
// cannot do int2 transfers
if (diagnostics) {
printf("rank %d: launching non-int2 halo exchange kernel\n",
rank);
}
int num_elem = NC*NH*NW;
if (is_nhwc) {
void *kernel_args[] = {
&toh_p, &toh_stride_H, &toh_stride_W, &toh_stride_C,
&tih_p, &tih_stride_H, &tih_stride_W, &tih_stride_C,
&tox_p, &tix_p,
&boh_p, &boh_stride_H, &boh_stride_W, &boh_stride_C,
&bih_p, &bih_stride_H, &bih_stride_W, &bih_stride_C,
&box_p, &bix_p,
&NH, &NW, &NC,
&top_first
};
LAUNCH_PUSH_PULL_HALO_KERNEL(scalar_t, false, kernel_args, num_elem);
} else {
void *kernel_args[] = {
&toh_p, &toh_stride_C, &toh_stride_H, &toh_stride_W,
&tih_p, &tih_stride_C, &tih_stride_H, &tih_stride_W,
&tox_p, &tix_p,
&boh_p, &boh_stride_C, &boh_stride_H, &boh_stride_W,
&bih_p, &bih_stride_C, &bih_stride_H, &bih_stride_W,
&box_p, &bix_p,
&NC, &NH, &NW,
&top_first
};
LAUNCH_PUSH_PULL_HALO_KERNEL(scalar_t, false, kernel_args, num_elem);
}
}
} );
#undef LAUNCH_PUSH_PULL_HALO_KERNEL_BASE
#undef LAUNCH_PUSH_PULL_HALO_KERNEL
}
} } }
|
6f2fa20275a813e7154ace09981c793a02d70fe5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/lists/lists_column_view.hpp>
#include <cudf/structs/structs_column_view.hpp>
#include <cudf/table/table_device_view.cuh>
#include <cudf/types.hpp>
#include <thrust/optional.h>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
namespace cudf {
namespace detail {
namespace {
/**
* @brief Struct which contains per-column information necessary to
* traverse a column hierarchy on the gpu.
*
* When `row_bit_count` is called, the input column hierarchy is flattened into a
* vector of column_device_views. For each one of them, we store a column_info
* struct. The `depth` field represents the depth of the column in the original
* hierarchy.
*
* As we traverse the hierarchy for each input row, we maintain a span representing
* the start and end rows for the current nesting depth. At depth 0, this span is
* always just 1 row. As we cross list boundaries int the hierarchy, this span
* grows. So for each column we visit we always know how many rows of it are relevant
* and can compute it's contribution to the overall size.
*
* An example using a list<list<int>> column, computing the size of row 1.
*
* { {{1, 2}, {3, 4}, {5, 6}}, {{7}, {8, 9, 10}, {11, 12, 13, 14}} }
*
* L0 = List<List<int32_t>>:
* Length : 2
* Offsets : 0, 3, 6
* L1 = List<int32_t>:
* Length : 6
* Offsets : 0, 2, 4, 6, 7, 10, 14
* I = 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
*
*
* span0 = [1, 2] row 1 is represented by the span [1, 2]
* span1 = [L0.offsets[span0[0]], L0.offsets[span0[1]]] expand by the offsets of L0
* span1 = [3, 6] span applied to children of L0
* span2 = [L1.offsets[span1[0]], L1.offsets[span1[1]]] expand by the offsets of L1
* span2 = [6, 14] span applied to children of L1
*
* The total size of our row is computed as:
* (span0[1] - span0[0]) * sizeof(int) the cost of the offsets for L0
* +
* (span1[1] - span1[0]) * sizeof(int) the cost of the offsets for L1
* +
* (span2[1] - span2[0]) * sizeof(int) the cost of the integers in I
*
* `depth` represents our depth in the source column hierarchy.
*
* "branches" within the spans can occur when we have lists inside of structs.
* consider a case where we are entering a struct<list, float> with a span of [4, 8].
* The internal list column will change that span to something else, say [5, 9].
* But when we finish processing the list column, the final float column wants to
* go back and use the original span [4, 8].
*
* [4, 8] [5, 9] [4, 8]
* struct< list<> float>
*
* To accomplish this we maintain a stack of spans. Pushing the current span
* whenever we enter a branch, and popping a span whenever we leave a branch.
*
* `branch_depth_start` represents the branch depth as we reach a new column.
* if `branch_depth_start` is < the last branch depth we saw, we are returning
* from a branch and should pop off the stack.
*
* `branch_depth_end` represents the new branch depth caused by this column.
* if branch_depth_end > branch_depth_start, we are branching and need to
* push the current span on the stack.
*
*/
struct column_info {
  size_type depth;               // absolute depth of this column within the input hierarchy
  size_type branch_depth_start;  // branch (span-stack) depth when traversal reaches this column
  size_type branch_depth_end;    // branch (span-stack) depth after this column is processed
};
/**
* @brief Struct which contains hierarchy information precomputed on the host.
*
* If the input data contains only fixed-width types, this preprocess step
* produces the value `simple_per_row_size` which is a constant for every
* row in the output. We can use this value and skip the more complicated
* processing for lists, structs and strings entirely if `complex_type_count`
* is 0.
*
*/
struct hierarchy_info {
  hierarchy_info() : simple_per_row_size(0), complex_type_count(0), max_branch_depth(0) {}
  // These two fields act as an optimization. If we find that the entire table
  // is just fixed-width types, we do not need to do the more expensive kernel call that
  // traverses the individual columns. So if complex_type_count is 0, we can just
  // return a column where every row contains the value simple_per_row_size
  size_type simple_per_row_size; // in bits; accumulated from fixed-width element sizes and validity bits
  size_type complex_type_count;  // number of string/list columns seen while flattening
  // max depth of span branches present in the hierarchy.
  // determines the per-thread stack size needed by the traversal kernel.
  size_type max_branch_depth;
};
/**
* @brief Function which flattens the incoming column hierarchy into a vector
* of column_views and produces accompanying column_info and hierarchy_info
* metadata.
*
* @param begin: Beginning of a range of column views
* @param end: End of a range of column views
* @param out: (output) Flattened vector of output column_views
* @param info: (output) Additional per-output column_view metadata needed by the gpu
* @param h_info: (output) Information about the hierarchy
* @param cur_depth: Current absolute depth in the hierarchy
* @param cur_branch_depth: Current branch depth
* @param parent_index: Index into `out` representing our owning parent column
*/
// Forward declaration; defined below. Recursively visits [begin, end) and appends a
// flattened column_view plus matching column_info entry for every column encountered,
// updating h_info as it goes.
template <typename ColIter>
void flatten_hierarchy(ColIter begin,
                       ColIter end,
                       std::vector<cudf::column_view>& out,
                       std::vector<column_info>& info,
                       hierarchy_info& h_info,
                       rmm::cuda_stream_view stream,
                       size_type cur_depth = 0,
                       size_type cur_branch_depth = 0,
                       thrust::optional<int> parent_index = {});
/**
* @brief Type-dispatched functor called by flatten_hierarchy.
*
*/
struct flatten_functor {
  rmm::cuda_stream_view stream;
  // fixed width: a leaf column. Record it and fold its constant per-row cost
  // (element bits + 1 validity bit when nullable) into simple_per_row_size.
  template <typename T, std::enable_if_t<cudf::is_fixed_width<T>()>* = nullptr>
  void operator()(column_view const& col,
                  std::vector<cudf::column_view>& out,
                  std::vector<column_info>& info,
                  hierarchy_info& h_info,
                  rmm::cuda_stream_view,
                  size_type cur_depth,
                  size_type cur_branch_depth,
                  thrust::optional<int>)
  {
    out.push_back(col);
    info.push_back({cur_depth, cur_branch_depth, cur_branch_depth});
    h_info.simple_per_row_size +=
      (sizeof(device_storage_type_t<T>) * CHAR_BIT) + (col.nullable() ? 1 : 0);
  }
  // strings: per-row size depends on character offsets, so this is a "complex"
  // type that must be measured by the traversal kernel.
  template <typename T, std::enable_if_t<std::is_same_v<T, string_view>>* = nullptr>
  void operator()(column_view const& col,
                  std::vector<cudf::column_view>& out,
                  std::vector<column_info>& info,
                  hierarchy_info& h_info,
                  rmm::cuda_stream_view,
                  size_type cur_depth,
                  size_type cur_branch_depth,
                  thrust::optional<int>)
  {
    out.push_back(col);
    info.push_back({cur_depth, cur_branch_depth, cur_branch_depth});
    h_info.complex_type_count++;
  }
  // lists: may open a "branch" (see the span-stack discussion above) when the
  // list sits directly inside a struct, then recurses into the single child.
  template <typename T, std::enable_if_t<std::is_same_v<T, list_view>>* = nullptr>
  void operator()(column_view const& col,
                  std::vector<cudf::column_view>& out,
                  std::vector<column_info>& info,
                  hierarchy_info& h_info,
                  rmm::cuda_stream_view stream,
                  size_type cur_depth,
                  size_type cur_branch_depth,
                  thrust::optional<int> parent_index)
  {
    // track branch depth as we reach this list and after we pass it
    size_type const branch_depth_start = cur_branch_depth;
    auto const is_list_inside_struct =
      parent_index && out[parent_index.value()].type().id() == type_id::STRUCT;
    if (is_list_inside_struct) {
      // entering a branch: the kernel will push the current span on its stack here
      cur_branch_depth++;
      // NOTE(review): unqualified max — resolves in nvcc/hipcc compilation; std::max
      // would be the more portable spelling. Confirm against project convention.
      h_info.max_branch_depth = max(h_info.max_branch_depth, cur_branch_depth);
    }
    size_type const branch_depth_end = cur_branch_depth;
    out.push_back(col);
    info.push_back({cur_depth, branch_depth_start, branch_depth_end});
    lists_column_view lcv(col);
    // single-element iterator over the sliced child so we can reuse flatten_hierarchy
    auto iter = cudf::detail::make_counting_transform_iterator(
      0, [col = lcv.get_sliced_child(stream)](auto) { return col; });
    h_info.complex_type_count++;
    // out.size() - 1 is the index of the list column just pushed (the child's parent)
    flatten_hierarchy(
      iter, iter + 1, out, info, h_info, stream, cur_depth + 1, cur_branch_depth, out.size() - 1);
  }
  // structs: contribute only an optional validity bit themselves, then recurse
  // into every child column.
  template <typename T, std::enable_if_t<std::is_same_v<T, struct_view>>* = nullptr>
  void operator()(column_view const& col,
                  std::vector<cudf::column_view>& out,
                  std::vector<column_info>& info,
                  hierarchy_info& h_info,
                  rmm::cuda_stream_view stream,
                  size_type cur_depth,
                  size_type cur_branch_depth,
                  thrust::optional<int>)
  {
    out.push_back(col);
    info.push_back({cur_depth, cur_branch_depth, cur_branch_depth});
    h_info.simple_per_row_size += col.nullable() ? 1 : 0;
    structs_column_view scv(col);
    auto iter = cudf::detail::make_counting_transform_iterator(
      0, [&scv](auto i) { return scv.get_sliced_child(i); });
    flatten_hierarchy(iter,
                      iter + scv.num_children(),
                      out,
                      info,
                      h_info,
                      stream,
                      cur_depth + 1,
                      cur_branch_depth,
                      out.size() - 1);
  }
  // everything else (e.g. dictionaries): unsupported, fail loudly.
  template <typename T, typename... Args>
  std::enable_if_t<!cudf::is_fixed_width<T>() && !std::is_same_v<T, string_view> &&
                     !std::is_same_v<T, list_view> && !std::is_same_v<T, struct_view>,
                   void>
  operator()(Args&&...)
  {
    CUDF_FAIL("Unsupported column type in row_bit_count");
  }
};
template <typename ColIter>
void flatten_hierarchy(ColIter begin,
                       ColIter end,
                       std::vector<cudf::column_view>& out,
                       std::vector<column_info>& info,
                       hierarchy_info& h_info,
                       rmm::cuda_stream_view stream,
                       size_type cur_depth,
                       size_type cur_branch_depth,
                       thrust::optional<int> parent_index)
{
  // Visit every column in [begin, end), dispatching on its runtime type. Nested
  // children are handled by flatten_functor, which recurses back into this function.
  for (auto it = begin; it != end; ++it) {
    column_view const& col = *it;
    cudf::type_dispatcher(col.type(),
                          flatten_functor{stream},
                          col,
                          out,
                          info,
                          h_info,
                          stream,
                          cur_depth,
                          cur_branch_depth,
                          parent_index);
  }
}
/**
* @brief Struct representing a span of rows.
*
*/
struct row_span {
  size_type row_start, row_end;  // half-open interval [row_start, row_end)
};
/**
* @brief Functor for computing the size, in bits, of a `row_span` of rows for a given
* `column_device_view`
*
*/
struct row_size_functor {
  /**
   * @brief Computes size in bits of a span of rows in a fixed-width column.
   *
   * Computed as : ((# of rows) * sizeof(data type) * 8)
   *               +
   *               1 bit per row for validity if applicable.
   */
  template <typename T>
  __device__ size_type operator()(column_device_view const& col, row_span const& span)
  {
    auto const num_rows{span.row_end - span.row_start};
    // bits per element of the underlying storage representation
    auto const element_size  = sizeof(device_storage_type_t<T>) * CHAR_BIT;
    auto const validity_size = col.nullable() ? 1 : 0;
    return (element_size + validity_size) * num_rows;
  }
};
/**
* @brief Computes size in bits of a span of rows in a strings column.
*
* Computed as : ((# of rows) * sizeof(offset) * 8) + (total # of characters * 8))
* +
* 1 bit per row for validity if applicable.
*/
template <>
__device__ size_type row_size_functor::operator()<string_view>(column_device_view const& col,
                                                               row_span const& span)
{
  auto const num_rows{span.row_end - span.row_start};
  if (num_rows == 0) {
    // For empty columns, the `span` cannot have a row size.
    return 0;
  }
  auto const& offsets = col.child(strings_column_view::offsets_column_index);
  // shift the span by col.offset() so sliced columns index into the parent offsets
  auto const row_start{span.row_start + col.offset()};
  auto const row_end{span.row_end + col.offset()};
  if (row_start == row_end) {
    // Empty row contributes 0 bits to row_bit_count().
    // Note: Validity bit doesn't count either. There are no rows in the child column
    // corresponding to this span.
    return 0;
  }
  auto const offsets_size  = sizeof(offset_type) * CHAR_BIT;  // bits per offset entry
  auto const validity_size = col.nullable() ? 1 : 0;
  // total character bytes covered by the span, read from the offsets column
  auto const chars_size =
    (offsets.data<offset_type>()[row_end] - offsets.data<offset_type>()[row_start]) * CHAR_BIT;
  return ((offsets_size + validity_size) * num_rows) + chars_size;
}
/**
* @brief Computes size in bits of a span of rows in a list column.
*
* Computed as : ((# of rows) * sizeof(offset) * 8)
* +
* 1 bit per row for validity if applicable.
*/
template <>
__device__ size_type row_size_functor::operator()<list_view>(column_device_view const& col,
                                                             row_span const& span)
{
  // Each list row costs one offset entry (in bits) plus a validity bit when the
  // column is nullable. Child element sizes are accounted for separately as the
  // traversal descends into the child column.
  size_type const row_count    = span.row_end - span.row_start;
  size_type const bits_per_row = static_cast<size_type>(sizeof(offset_type) * CHAR_BIT) +
                                 (col.nullable() ? 1 : 0);
  return bits_per_row * row_count;
}
/**
* @brief Computes size in bits of a span of rows in a struct column.
*
* Computed as : 1 bit per row for validity if applicable.
*/
template <>
__device__ size_type row_size_functor::operator()<struct_view>(column_device_view const& col,
                                                               row_span const& span)
{
  // A struct row carries no payload of its own; only the validity bit (when the
  // column is nullable) contributes. Children are measured separately.
  if (!col.nullable()) { return 0; }
  return span.row_end - span.row_start;
}
/**
* @brief Kernel for computing per-row sizes in bits.
*
* @param cols An span of column_device_views representing a column hierarchy
* @param info An span of column_info structs corresponding the elements in `cols`
* @param output Output span of size (# rows) where per-row bit sizes are stored
* @param max_branch_depth Maximum depth of the span stack needed per-thread
*/
// One thread per input table row. Each thread walks the flattened column list,
// accumulating that row's bit size while maintaining a per-thread stack of row
// spans in dynamic shared memory. The launch must supply
// blockDim.x * max_branch_depth * sizeof(row_span) bytes of dynamic shared memory.
__global__ void compute_row_sizes(device_span<column_device_view const> cols,
                                  device_span<column_info const> info,
                                  device_span<size_type> output,
                                  size_type max_branch_depth)
{
  extern __shared__ row_span thread_branch_stacks[];
  int const tid = threadIdx.x + blockIdx.x * blockDim.x;
  auto const num_rows = output.size();
  if (tid >= num_rows) { return; }
  // branch stack. points to the last list prior to branching.
  row_span* my_branch_stack = thread_branch_stacks + (threadIdx.x * max_branch_depth);
  size_type branch_depth{0};
  // current row span - always starts at 1 row.
  row_span cur_span{tid, tid + 1};
  // output size
  size_type& size = output[tid];
  size = 0;
  size_type last_branch_depth{0};
  for (size_type idx = 0; idx < cols.size(); idx++) {
    column_device_view const& col = cols[idx];
    // if we've returned from a branch
    if (info[idx].branch_depth_start < last_branch_depth) {
      cur_span = my_branch_stack[--branch_depth];
    }
    // if we're entering a new branch.
    // NOTE: this case can happen (a pop and a push by the same column)
    // when we have a struct<list, list>
    if (info[idx].branch_depth_end > info[idx].branch_depth_start) {
      my_branch_stack[branch_depth++] = cur_span;
    }
    // if we're back at depth 0, this is a new top-level column, so reset
    // span info
    if (info[idx].depth == 0) {
      branch_depth = 0;
      last_branch_depth = 0;
      cur_span = row_span{tid, tid + 1};
    }
    // add the contributing size of this row
    size += cudf::type_dispatcher(col.type(), row_size_functor{}, col, cur_span);
    // if this is a list column, update the working span from our offsets
    // (the offsets are rebased by base_offset so sliced columns work correctly)
    if (col.type().id() == type_id::LIST && col.size() > 0) {
      column_device_view const& offsets = col.child(lists_column_view::offsets_column_index);
      auto const base_offset = offsets.data<offset_type>()[col.offset()];
      cur_span.row_start =
        offsets.data<offset_type>()[cur_span.row_start + col.offset()] - base_offset;
      cur_span.row_end = offsets.data<offset_type>()[cur_span.row_end + col.offset()] - base_offset;
    }
    last_branch_depth = info[idx].branch_depth_end;
  }
}
} // anonymous namespace
/**
* @copydoc cudf::detail::row_bit_count
*
*/
std::unique_ptr<column> row_bit_count(table_view const& t,
                                      rmm::cuda_stream_view stream,
                                      rmm::mr::device_memory_resource* mr)
{
  // no rows
  if (t.num_rows() <= 0) { return cudf::make_empty_column(data_type{type_id::INT32}); }
  // flatten the hierarchy and determine some information about it.
  std::vector<cudf::column_view> cols;
  std::vector<column_info> info;
  hierarchy_info h_info;
  flatten_hierarchy(t.begin(), t.end(), cols, info, h_info, stream);
  CUDF_EXPECTS(info.size() == cols.size(), "Size/info mismatch");
  // create output buffer and view
  auto output = cudf::make_fixed_width_column(
    data_type{type_id::INT32}, t.num_rows(), mask_state::UNALLOCATED, stream, mr);
  mutable_column_view mcv = output->mutable_view();
  // simple case. if we have no complex types (lists, strings, etc), the per-row size is already
  // trivially computed
  if (h_info.complex_type_count <= 0) {
    thrust::fill(rmm::exec_policy(stream),
                 mcv.begin<size_type>(),
                 mcv.end<size_type>(),
                 h_info.simple_per_row_size);
    return output;
  }
  // create a contiguous block of column_device_views
  auto d_cols = contiguous_copy_column_device_views<column_device_view>(cols, stream);
  // move stack info to the gpu
  // NOTE: the async copy reads host memory owned by `info`, which stays alive until
  // the later synchronous work on this stream completes.
  rmm::device_uvector<column_info> d_info(info.size(), stream);
  CUDA_TRY(hipMemcpyAsync(d_info.data(),
                          info.data(),
                          sizeof(column_info) * info.size(),
                          hipMemcpyHostToDevice,
                          stream.value()));
  // each thread needs to maintain a stack of row spans of size max_branch_depth. we will use
  // shared memory to do this rather than allocating a potentially gigantic temporary buffer
  // of memory of size (# input rows * sizeof(row_span) * max_branch_depth).
  auto const shmem_per_thread = sizeof(row_span) * h_info.max_branch_depth;
  int device_id;
  CUDA_TRY(hipGetDevice(&device_id));
  int shmem_limit_per_block;
  CUDA_TRY(
    hipDeviceGetAttribute(&shmem_limit_per_block, hipDeviceAttributeMaxSharedMemoryPerBlock, device_id));
  constexpr int max_block_size = 256;
  // shrink the block until the per-block shared-memory stack fits the device limit
  auto const block_size =
    shmem_per_thread != 0
      ? ::min(max_block_size, shmem_limit_per_block / static_cast<int>(shmem_per_thread))
      : max_block_size;
  auto const shared_mem_size = shmem_per_thread * block_size;
  // should we be aborting if we reach some extremely small block size, or just if we hit 0?
  CUDF_EXPECTS(block_size > 0, "Encountered a column hierarchy too complex for row_bit_count");
  cudf::detail::grid_1d grid{t.num_rows(), block_size, 1};
  hipLaunchKernelGGL(( compute_row_sizes), dim3(grid.num_blocks), dim3(block_size), shared_mem_size, stream.value(),
    {std::get<1>(d_cols), cols.size()},
    {d_info.data(), info.size()},
    {mcv.data<size_type>(), static_cast<std::size_t>(t.num_rows())},
    h_info.max_branch_depth);
  return output;
}
} // namespace detail
/**
* @copydoc cudf::row_bit_count
*
*/
std::unique_ptr<column> row_bit_count(table_view const& t, rmm::mr::device_memory_resource* mr)
{
  // Public entry point: forward to the detail implementation on the default stream.
  auto const stream = rmm::cuda_stream_default;
  return detail::row_bit_count(t, stream, mr);
}
} // namespace cudf
| 6f2fa20275a813e7154ace09981c793a02d70fe5.cu | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/lists/lists_column_view.hpp>
#include <cudf/structs/structs_column_view.hpp>
#include <cudf/table/table_device_view.cuh>
#include <cudf/types.hpp>
#include <thrust/optional.h>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
namespace cudf {
namespace detail {
namespace {
/**
* @brief Struct which contains per-column information necessary to
* traverse a column hierarchy on the gpu.
*
* When `row_bit_count` is called, the input column hierarchy is flattened into a
* vector of column_device_views. For each one of them, we store a column_info
* struct. The `depth` field represents the depth of the column in the original
* hierarchy.
*
* As we traverse the hierarchy for each input row, we maintain a span representing
* the start and end rows for the current nesting depth. At depth 0, this span is
* always just 1 row. As we cross list boundaries int the hierarchy, this span
* grows. So for each column we visit we always know how many rows of it are relevant
* and can compute it's contribution to the overall size.
*
* An example using a list<list<int>> column, computing the size of row 1.
*
* { {{1, 2}, {3, 4}, {5, 6}}, {{7}, {8, 9, 10}, {11, 12, 13, 14}} }
*
* L0 = List<List<int32_t>>:
* Length : 2
* Offsets : 0, 3, 6
* L1 = List<int32_t>:
* Length : 6
* Offsets : 0, 2, 4, 6, 7, 10, 14
* I = 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
*
*
* span0 = [1, 2] row 1 is represented by the span [1, 2]
* span1 = [L0.offsets[span0[0]], L0.offsets[span0[1]]] expand by the offsets of L0
* span1 = [3, 6] span applied to children of L0
* span2 = [L1.offsets[span1[0]], L1.offsets[span1[1]]] expand by the offsets of L1
* span2 = [6, 14] span applied to children of L1
*
* The total size of our row is computed as:
* (span0[1] - span0[0]) * sizeof(int) the cost of the offsets for L0
* +
* (span1[1] - span1[0]) * sizeof(int) the cost of the offsets for L1
* +
* (span2[1] - span2[0]) * sizeof(int) the cost of the integers in I
*
* `depth` represents our depth in the source column hierarchy.
*
* "branches" within the spans can occur when we have lists inside of structs.
* consider a case where we are entering a struct<list, float> with a span of [4, 8].
* The internal list column will change that span to something else, say [5, 9].
* But when we finish processing the list column, the final float column wants to
* go back and use the original span [4, 8].
*
* [4, 8] [5, 9] [4, 8]
* struct< list<> float>
*
* To accomplish this we maintain a stack of spans. Pushing the current span
* whenever we enter a branch, and popping a span whenever we leave a branch.
*
* `branch_depth_start` represents the branch depth as we reach a new column.
* if `branch_depth_start` is < the last branch depth we saw, we are returning
* from a branch and should pop off the stack.
*
* `branch_depth_end` represents the new branch depth caused by this column.
* if branch_depth_end > branch_depth_start, we are branching and need to
* push the current span on the stack.
*
*/
struct column_info {
  size_type depth;               // absolute depth of this column within the input hierarchy
  size_type branch_depth_start;  // branch (span-stack) depth when traversal reaches this column
  size_type branch_depth_end;    // branch (span-stack) depth after this column is processed
};
/**
* @brief Struct which contains hierarchy information precomputed on the host.
*
* If the input data contains only fixed-width types, this preprocess step
* produces the value `simple_per_row_size` which is a constant for every
* row in the output. We can use this value and skip the more complicated
* processing for lists, structs and strings entirely if `complex_type_count`
* is 0.
*
*/
struct hierarchy_info {
  hierarchy_info() : simple_per_row_size(0), complex_type_count(0), max_branch_depth(0) {}
  // These two fields act as an optimization. If we find that the entire table
  // is just fixed-width types, we do not need to do the more expensive kernel call that
  // traverses the individual columns. So if complex_type_count is 0, we can just
  // return a column where every row contains the value simple_per_row_size
  size_type simple_per_row_size; // in bits; accumulated from fixed-width element sizes and validity bits
  size_type complex_type_count;  // number of string/list columns seen while flattening
  // max depth of span branches present in the hierarchy.
  // determines the per-thread stack size needed by the traversal kernel.
  size_type max_branch_depth;
};
/**
* @brief Function which flattens the incoming column hierarchy into a vector
* of column_views and produces accompanying column_info and hierarchy_info
* metadata.
*
* @param begin: Beginning of a range of column views
* @param end: End of a range of column views
* @param out: (output) Flattened vector of output column_views
* @param info: (output) Additional per-output column_view metadata needed by the gpu
* @param h_info: (output) Information about the hierarchy
* @param cur_depth: Current absolute depth in the hierarchy
* @param cur_branch_depth: Current branch depth
* @param parent_index: Index into `out` representing our owning parent column
*/
// Forward declaration; defined below. Recursively visits [begin, end) and appends a
// flattened column_view plus matching column_info entry for every column encountered,
// updating h_info as it goes.
template <typename ColIter>
void flatten_hierarchy(ColIter begin,
                       ColIter end,
                       std::vector<cudf::column_view>& out,
                       std::vector<column_info>& info,
                       hierarchy_info& h_info,
                       rmm::cuda_stream_view stream,
                       size_type cur_depth = 0,
                       size_type cur_branch_depth = 0,
                       thrust::optional<int> parent_index = {});
/**
* @brief Type-dispatched functor called by flatten_hierarchy.
*
*/
struct flatten_functor {
  rmm::cuda_stream_view stream;
  // fixed width: a leaf column. Record it and fold its constant per-row cost
  // (element bits + 1 validity bit when nullable) into simple_per_row_size.
  template <typename T, std::enable_if_t<cudf::is_fixed_width<T>()>* = nullptr>
  void operator()(column_view const& col,
                  std::vector<cudf::column_view>& out,
                  std::vector<column_info>& info,
                  hierarchy_info& h_info,
                  rmm::cuda_stream_view,
                  size_type cur_depth,
                  size_type cur_branch_depth,
                  thrust::optional<int>)
  {
    out.push_back(col);
    info.push_back({cur_depth, cur_branch_depth, cur_branch_depth});
    h_info.simple_per_row_size +=
      (sizeof(device_storage_type_t<T>) * CHAR_BIT) + (col.nullable() ? 1 : 0);
  }
  // strings: per-row size depends on character offsets, so this is a "complex"
  // type that must be measured by the traversal kernel.
  template <typename T, std::enable_if_t<std::is_same_v<T, string_view>>* = nullptr>
  void operator()(column_view const& col,
                  std::vector<cudf::column_view>& out,
                  std::vector<column_info>& info,
                  hierarchy_info& h_info,
                  rmm::cuda_stream_view,
                  size_type cur_depth,
                  size_type cur_branch_depth,
                  thrust::optional<int>)
  {
    out.push_back(col);
    info.push_back({cur_depth, cur_branch_depth, cur_branch_depth});
    h_info.complex_type_count++;
  }
  // lists: may open a "branch" (see the span-stack discussion above) when the
  // list sits directly inside a struct, then recurses into the single child.
  template <typename T, std::enable_if_t<std::is_same_v<T, list_view>>* = nullptr>
  void operator()(column_view const& col,
                  std::vector<cudf::column_view>& out,
                  std::vector<column_info>& info,
                  hierarchy_info& h_info,
                  rmm::cuda_stream_view stream,
                  size_type cur_depth,
                  size_type cur_branch_depth,
                  thrust::optional<int> parent_index)
  {
    // track branch depth as we reach this list and after we pass it
    size_type const branch_depth_start = cur_branch_depth;
    auto const is_list_inside_struct =
      parent_index && out[parent_index.value()].type().id() == type_id::STRUCT;
    if (is_list_inside_struct) {
      // entering a branch: the kernel will push the current span on its stack here
      cur_branch_depth++;
      // NOTE(review): unqualified max — resolves in nvcc compilation; std::max
      // would be the more portable spelling. Confirm against project convention.
      h_info.max_branch_depth = max(h_info.max_branch_depth, cur_branch_depth);
    }
    size_type const branch_depth_end = cur_branch_depth;
    out.push_back(col);
    info.push_back({cur_depth, branch_depth_start, branch_depth_end});
    lists_column_view lcv(col);
    // single-element iterator over the sliced child so we can reuse flatten_hierarchy
    auto iter = cudf::detail::make_counting_transform_iterator(
      0, [col = lcv.get_sliced_child(stream)](auto) { return col; });
    h_info.complex_type_count++;
    // out.size() - 1 is the index of the list column just pushed (the child's parent)
    flatten_hierarchy(
      iter, iter + 1, out, info, h_info, stream, cur_depth + 1, cur_branch_depth, out.size() - 1);
  }
  // structs: contribute only an optional validity bit themselves, then recurse
  // into every child column.
  template <typename T, std::enable_if_t<std::is_same_v<T, struct_view>>* = nullptr>
  void operator()(column_view const& col,
                  std::vector<cudf::column_view>& out,
                  std::vector<column_info>& info,
                  hierarchy_info& h_info,
                  rmm::cuda_stream_view stream,
                  size_type cur_depth,
                  size_type cur_branch_depth,
                  thrust::optional<int>)
  {
    out.push_back(col);
    info.push_back({cur_depth, cur_branch_depth, cur_branch_depth});
    h_info.simple_per_row_size += col.nullable() ? 1 : 0;
    structs_column_view scv(col);
    auto iter = cudf::detail::make_counting_transform_iterator(
      0, [&scv](auto i) { return scv.get_sliced_child(i); });
    flatten_hierarchy(iter,
                      iter + scv.num_children(),
                      out,
                      info,
                      h_info,
                      stream,
                      cur_depth + 1,
                      cur_branch_depth,
                      out.size() - 1);
  }
  // everything else (e.g. dictionaries): unsupported, fail loudly.
  template <typename T, typename... Args>
  std::enable_if_t<!cudf::is_fixed_width<T>() && !std::is_same_v<T, string_view> &&
                     !std::is_same_v<T, list_view> && !std::is_same_v<T, struct_view>,
                   void>
  operator()(Args&&...)
  {
    CUDF_FAIL("Unsupported column type in row_bit_count");
  }
};
template <typename ColIter>
void flatten_hierarchy(ColIter begin,
                       ColIter end,
                       std::vector<cudf::column_view>& out,
                       std::vector<column_info>& info,
                       hierarchy_info& h_info,
                       rmm::cuda_stream_view stream,
                       size_type cur_depth,
                       size_type cur_branch_depth,
                       thrust::optional<int> parent_index)
{
  // Visit every column in [begin, end), dispatching on its runtime type. Nested
  // children are handled by flatten_functor, which recurses back into this function.
  for (auto it = begin; it != end; ++it) {
    column_view const& col = *it;
    cudf::type_dispatcher(col.type(),
                          flatten_functor{stream},
                          col,
                          out,
                          info,
                          h_info,
                          stream,
                          cur_depth,
                          cur_branch_depth,
                          parent_index);
  }
}
/**
* @brief Struct representing a span of rows.
*
*/
struct row_span {
  // half-open interval [row_start, row_end) -- num rows = row_end - row_start
  size_type row_start, row_end;
};
/**
* @brief Functor for computing the size, in bits, of a `row_span` of rows for a given
* `column_device_view`
*
*/
struct row_size_functor {
  /**
   * @brief Computes size in bits of a span of rows in a fixed-width column.
   *
   * Computed as : ((# of rows) * sizeof(data type) * 8)
   *               +
   *               1 bit per row for validity if applicable.
   */
  template <typename T>
  __device__ size_type operator()(column_device_view const& col, row_span const& span)
  {
    auto const num_rows{span.row_end - span.row_start};
    // size of the underlying physical storage type, in bits
    auto const element_size = sizeof(device_storage_type_t<T>) * CHAR_BIT;
    auto const validity_size = col.nullable() ? 1 : 0;
    return (element_size + validity_size) * num_rows;
  }
};
/**
* @brief Computes size in bits of a span of rows in a strings column.
*
* Computed as : ((# of rows) * sizeof(offset) * 8) + (total # of characters * 8))
* +
* 1 bit per row for validity if applicable.
*/
template <>
__device__ size_type row_size_functor::operator()<string_view>(column_device_view const& col,
                                                               row_span const& span)
{
  auto const num_rows{span.row_end - span.row_start};
  if (num_rows == 0) {
    // For empty columns, the `span` cannot have a row size.
    return 0;
  }
  auto const& offsets = col.child(strings_column_view::offsets_column_index);
  // shift into unsliced offset space: offsets are indexed relative to the
  // unsliced parent column
  auto const row_start{span.row_start + col.offset()};
  auto const row_end{span.row_end + col.offset()};
  if (row_start == row_end) {
    // Empty row contributes 0 bits to row_bit_count().
    // Note: Validity bit doesn't count either. There are no rows in the child column
    // corresponding to this span.
    return 0;
  }
  auto const offsets_size  = sizeof(offset_type) * CHAR_BIT;
  auto const validity_size = col.nullable() ? 1 : 0;
  // total character bits over the span, from the offsets delta
  auto const chars_size =
    (offsets.data<offset_type>()[row_end] - offsets.data<offset_type>()[row_start]) * CHAR_BIT;
  return ((offsets_size + validity_size) * num_rows) + chars_size;
}
/**
* @brief Computes size in bits of a span of rows in a list column.
*
* Computed as : ((# of rows) * sizeof(offset) * 8)
* +
* 1 bit per row for validity if applicable.
*/
template <>
__device__ size_type row_size_functor::operator()<list_view>(column_device_view const& col,
                                                             row_span const& span)
{
  // the list rows themselves cost one offset (and an optional validity bit)
  // each; child element sizes are added separately as the hierarchy is walked
  auto const num_rows{span.row_end - span.row_start};
  auto const offsets_size  = sizeof(offset_type) * CHAR_BIT;
  auto const validity_size = col.nullable() ? 1 : 0;
  return (offsets_size + validity_size) * num_rows;
}
/**
* @brief Computes size in bits of a span of rows in a struct column.
*
* Computed as : 1 bit per row for validity if applicable.
*/
template <>
__device__ size_type row_size_functor::operator()<struct_view>(column_device_view const& col,
                                                               row_span const& span)
{
  auto const num_rows{span.row_end - span.row_start};
  return (col.nullable() ? 1 : 0) * num_rows;  // cost of validity
}
/**
* @brief Kernel for computing per-row sizes in bits.
*
* @param cols An span of column_device_views representing a column hierarchy
* @param info An span of column_info structs corresponding the elements in `cols`
* @param output Output span of size (# rows) where per-row bit sizes are stored
* @param max_branch_depth Maximum depth of the span stack needed per-thread
*/
// One thread per top-level row. Each thread walks the flattened column list
// in order, keeping a per-thread stack of row spans in dynamic shared memory
// so that returning from a nested branch (e.g. the second list child of a
// struct) restores the parent's span. Dynamic shared memory must be sized as
// blockDim.x * max_branch_depth * sizeof(row_span).
__global__ void compute_row_sizes(device_span<column_device_view const> cols,
                                  device_span<column_info const> info,
                                  device_span<size_type> output,
                                  size_type max_branch_depth)
{
  extern __shared__ row_span thread_branch_stacks[];
  int const tid = threadIdx.x + blockIdx.x * blockDim.x;
  auto const num_rows = output.size();
  if (tid >= num_rows) { return; }
  // branch stack. points to the last list prior to branching.
  row_span* my_branch_stack = thread_branch_stacks + (threadIdx.x * max_branch_depth);
  size_type branch_depth{0};
  // current row span - always starts at 1 row.
  row_span cur_span{tid, tid + 1};
  // output size
  size_type& size = output[tid];
  size = 0;
  size_type last_branch_depth{0};
  for (size_type idx = 0; idx < cols.size(); idx++) {
    column_device_view const& col = cols[idx];
    // if we've returned from a branch
    if (info[idx].branch_depth_start < last_branch_depth) {
      cur_span = my_branch_stack[--branch_depth];
    }
    // if we're entering a new branch.
    // NOTE: this case can happen (a pop and a push by the same column)
    // when we have a struct<list, list>
    if (info[idx].branch_depth_end > info[idx].branch_depth_start) {
      my_branch_stack[branch_depth++] = cur_span;
    }
    // if we're back at depth 0, this is a new top-level column, so reset
    // span info
    if (info[idx].depth == 0) {
      branch_depth = 0;
      last_branch_depth = 0;
      cur_span = row_span{tid, tid + 1};
    }
    // add the contributing size of this row
    size += cudf::type_dispatcher(col.type(), row_size_functor{}, col, cur_span);
    // if this is a list column, update the working span from our offsets
    if (col.type().id() == type_id::LIST && col.size() > 0) {
      column_device_view const& offsets = col.child(lists_column_view::offsets_column_index);
      // base_offset rebases sliced columns so child indices start at 0
      auto const base_offset = offsets.data<offset_type>()[col.offset()];
      cur_span.row_start =
        offsets.data<offset_type>()[cur_span.row_start + col.offset()] - base_offset;
      cur_span.row_end = offsets.data<offset_type>()[cur_span.row_end + col.offset()] - base_offset;
    }
    last_branch_depth = info[idx].branch_depth_end;
  }
}
} // anonymous namespace
/**
* @copydoc cudf::detail::row_bit_count
*
*/
// Computes the per-row size in bits of `t`, returning an INT32 column of
// t.num_rows() entries. Flat tables short-circuit to a constant fill; nested
// tables run the compute_row_sizes kernel over the flattened hierarchy.
std::unique_ptr<column> row_bit_count(table_view const& t,
                                      rmm::cuda_stream_view stream,
                                      rmm::mr::device_memory_resource* mr)
{
  // no rows
  if (t.num_rows() <= 0) { return cudf::make_empty_column(data_type{type_id::INT32}); }
  // flatten the hierarchy and determine some information about it.
  std::vector<cudf::column_view> cols;
  std::vector<column_info> info;
  hierarchy_info h_info;
  flatten_hierarchy(t.begin(), t.end(), cols, info, h_info, stream);
  CUDF_EXPECTS(info.size() == cols.size(), "Size/info mismatch");
  // create output buffer and view
  auto output = cudf::make_fixed_width_column(
    data_type{type_id::INT32}, t.num_rows(), mask_state::UNALLOCATED, stream, mr);
  mutable_column_view mcv = output->mutable_view();
  // simple case. if we have no complex types (lists, strings, etc), the per-row size is already
  // trivially computed
  if (h_info.complex_type_count <= 0) {
    thrust::fill(rmm::exec_policy(stream),
                 mcv.begin<size_type>(),
                 mcv.end<size_type>(),
                 h_info.simple_per_row_size);
    return output;
  }
  // create a contiguous block of column_device_views
  auto d_cols = contiguous_copy_column_device_views<column_device_view>(cols, stream);
  // move stack info to the gpu
  rmm::device_uvector<column_info> d_info(info.size(), stream);
  CUDA_TRY(cudaMemcpyAsync(d_info.data(),
                           info.data(),
                           sizeof(column_info) * info.size(),
                           cudaMemcpyHostToDevice,
                           stream.value()));
  // each thread needs to maintain a stack of row spans of size max_branch_depth. we will use
  // shared memory to do this rather than allocating a potentially gigantic temporary buffer
  // of memory of size (# input rows * sizeof(row_span) * max_branch_depth).
  auto const shmem_per_thread = sizeof(row_span) * h_info.max_branch_depth;
  int device_id;
  CUDA_TRY(cudaGetDevice(&device_id));
  int shmem_limit_per_block;
  CUDA_TRY(
    cudaDeviceGetAttribute(&shmem_limit_per_block, cudaDevAttrMaxSharedMemoryPerBlock, device_id));
  constexpr int max_block_size = 256;
  // shrink the block so the per-thread stacks fit within the shared-mem limit
  auto const block_size =
    shmem_per_thread != 0
      ? std::min(max_block_size, shmem_limit_per_block / static_cast<int>(shmem_per_thread))
      : max_block_size;
  auto const shared_mem_size = shmem_per_thread * block_size;
  // should we be aborting if we reach some extremely small block size, or just if we hit 0?
  CUDF_EXPECTS(block_size > 0, "Encountered a column hierarchy too complex for row_bit_count");
  cudf::detail::grid_1d grid{t.num_rows(), block_size, 1};
  compute_row_sizes<<<grid.num_blocks, block_size, shared_mem_size, stream.value()>>>(
    {std::get<1>(d_cols), cols.size()},
    {d_info.data(), info.size()},
    {mcv.data<size_type>(), static_cast<std::size_t>(t.num_rows())},
    h_info.max_branch_depth);
  return output;
}
} // namespace detail
/**
* @copydoc cudf::row_bit_count
*
*/
std::unique_ptr<column> row_bit_count(table_view const& t, rmm::mr::device_memory_resource* mr)
{
  // public entry point: delegate to the detail implementation on the default stream
  return detail::row_bit_count(t, rmm::cuda_stream_default, mr);
}
} // namespace cudf
|
82ffe09646e97621be2284a6d0a6afd7317f57c6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Example of using threads in a CUDA program
*/
#include <stdio.h>
#include <time.h>
#define N (1024)
// Element-wise vector add: c[i] = a[i] + b[i], one thread per element.
// NOTE(review): there is no bounds guard -- this kernel assumes it is
// launched with exactly N threads in a single block (as main() does below).
__global__ void add(int *a, int *b, int *c){
    c[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x];
}
/* Fill the first n elements of a with pseudo-random values from rand(). */
void random_ints(int *a, int n)
{
    for (int i = 0; i < n; ++i)
        a[i] = rand();
}
/*
 * Host driver: builds two random input vectors, adds them on the GPU with
 * one thread per element, and reports the kernel execution time.
 */
int main(void) {
    int *a, *b, *c;        /* host buffers */
    int *d_a, *d_b, *d_c;  /* device buffers */
    int size = N * sizeof(int);

    hipMalloc((void **)&d_a, size);
    hipMalloc((void **)&d_b, size);
    hipMalloc((void **)&d_c, size);

    a = (int *)malloc(size);
    b = (int *)malloc(size);
    c = (int *)malloc(size);
    random_ints(b, N);
    random_ints(a, N);

    /* BUG FIX: copy from the buffers themselves. The original passed &a/&b
     * (the address of the pointer variable), reading `size` bytes of
     * unrelated stack memory instead of the input data. */
    hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
    hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);

    clock_t t = clock();
    hipLaunchKernelGGL(( add), dim3(1),dim3(N), 0, 0, d_a, d_b, d_c);
    /* BUG FIX: kernel launches are asynchronous -- synchronize so the timer
     * measures execution rather than just the launch call. */
    hipDeviceSynchronize();
    t = clock() - t;
    double time_taken = ((double)t)/CLOCKS_PER_SEC;
    printf("Time taken by function is %f seconds\n",time_taken);

    hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);

    /* BUG FIX: free each device buffer exactly once (d_b was leaked and
     * d_c was freed twice). */
    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);
    free(a);
    free(b);
    free(c);
    return 0;
}
| 82ffe09646e97621be2284a6d0a6afd7317f57c6.cu | /*
Example of using threads in a CUDA program
*/
#include <stdio.h>
#include <time.h>
#define N (1024)
// Element-wise vector add: c[i] = a[i] + b[i], one thread per element.
// NOTE(review): there is no bounds guard -- this kernel assumes it is
// launched with exactly N threads in a single block (as main() does below).
__global__ void add(int *a, int *b, int *c){
    c[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x];
}
/* Fill the first n elements of a with pseudo-random values from rand(). */
void random_ints(int *a, int n)
{
    for (int i = 0; i < n; ++i)
        a[i] = rand();
}
/*
 * Host driver: builds two random input vectors, adds them on the GPU with
 * one thread per element, and reports the kernel execution time.
 */
int main(void) {
    int *a, *b, *c;        /* host buffers */
    int *d_a, *d_b, *d_c;  /* device buffers */
    int size = N * sizeof(int);

    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);

    a = (int *)malloc(size);
    b = (int *)malloc(size);
    c = (int *)malloc(size);
    random_ints(b, N);
    random_ints(a, N);

    /* BUG FIX: copy from the buffers themselves. The original passed &a/&b
     * (the address of the pointer variable), reading `size` bytes of
     * unrelated stack memory instead of the input data. */
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);

    clock_t t = clock();
    add<<<1,N>>>(d_a, d_b, d_c);
    /* BUG FIX: kernel launches are asynchronous -- synchronize so the timer
     * measures execution rather than just the launch call. */
    cudaDeviceSynchronize();
    t = clock() - t;
    double time_taken = ((double)t)/CLOCKS_PER_SEC;
    printf("Time taken by function is %f seconds\n",time_taken);

    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);

    /* BUG FIX: free each device buffer exactly once (d_b was leaked and
     * d_c was freed twice). */
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(a);
    free(b);
    free(c);
    return 0;
}
|
5edc627c45eb675dbf15bdb586c847bcd0ba8dbe.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright 2013--2018 James E. McClure, Virginia Polytechnic & State University
This file is part of the Open Porous Media project (OPM).
OPM is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OPM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/
// Basic cuda functions callable from C/C++ code
#include <hip/hip_runtime.h>
#include <stdio.h>
// Bind the calling MPI rank to a GPU, round-robin over the devices visible
// to this process. Returns the selected device ordinal.
extern "C" int ScaLBL_SetDevice(int rank){
    int n_devices;
    //int local_rank = atoi(getenv("OMPI_COMM_WORLD_LOCAL_RANK"));
    hipGetDeviceCount(&n_devices);
    //int device = local_rank % n_devices;
    int device = rank % n_devices;
    hipSetDevice(device);
    printf("MPI rank=%i will use GPU ID %i / %i \n",rank,device,n_devices);
    return device;
}
// Allocate `size` bytes of device memory into *address.
// Failures are printed to stdout but not propagated to the caller.
extern "C" void ScaLBL_AllocateDeviceMemory(void** address, size_t size){
    hipMalloc(address,size);
    hipError_t err = hipGetLastError();
    if (hipSuccess != err){
       printf("Error in hipMalloc: %s \n",hipGetErrorString(err));
    }
}
// Release device memory previously obtained via a ScaLBL_Allocate* helper.
extern "C" void ScaLBL_FreeDeviceMemory(void* pointer){
    hipFree(pointer);
}
// Blocking host -> device copy of `size` bytes.
// Failures are printed to stdout but not propagated to the caller.
extern "C" void ScaLBL_CopyToDevice(void* dest, const void* source, size_t size){
    hipMemcpy(dest,source,size,hipMemcpyHostToDevice);
    hipError_t err = hipGetLastError();
    if (hipSuccess != err){
       printf("Error in hipMemcpy (host->device): %s \n",hipGetErrorString(err));
    }
}
// Allocate a buffer intended for "zero copy" use. NOTE: despite the name,
// this currently allocates ordinary device memory; the pinned-host variant
// (hipHostMalloc) is commented out below.
// Failures are printed to stdout but not propagated to the caller.
extern "C" void ScaLBL_AllocateZeroCopy(void** address, size_t size){
    //hipHostMalloc(address,size);
    hipMalloc(address,size);
    hipError_t err = hipGetLastError();
    if (hipSuccess != err){
       // FIX: report the call actually made (hipMalloc); the old message
       // blamed hipHostMalloc, which is not executed on this path.
       printf("Error in hipMalloc: %s \n",hipGetErrorString(err));
    }
}
// Copy host data into a "zero copy" buffer (currently plain device memory --
// see ScaLBL_AllocateZeroCopy).
// FIX: the error status was fetched into `err` but never examined; report
// failures the same way the sibling copy helpers do.
extern "C" void ScaLBL_CopyToZeroCopy(void* dest, const void* source, size_t size){
    hipMemcpy(dest,source,size,hipMemcpyHostToDevice);
    hipError_t err = hipGetLastError();
    if (hipSuccess != err){
       printf("Error in hipMemcpy (host->device): %s \n",hipGetErrorString(err));
    }
    //memcpy(dest, source, size);
}
// Blocking device -> host copy of `size` bytes.
// Failures are printed to stdout but not propagated to the caller.
extern "C" void ScaLBL_CopyToHost(void* dest, const void* source, size_t size){
    hipMemcpy(dest,source,size,hipMemcpyDeviceToHost);
    hipError_t err = hipGetLastError();
    if (hipSuccess != err){
       printf("Error in hipMemcpy (device->host): %s \n",hipGetErrorString(err));
    }
}
// Block the host until all previously issued device work has completed.
extern "C" void ScaLBL_DeviceBarrier(){
    hipDeviceSynchronize();
}
| 5edc627c45eb675dbf15bdb586c847bcd0ba8dbe.cu | /*
Copyright 2013--2018 James E. McClure, Virginia Polytechnic & State University
This file is part of the Open Porous Media project (OPM).
OPM is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OPM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/
// Basic cuda functions callable from C/C++ code
#include <cuda.h>
#include <stdio.h>
// Bind the calling MPI rank to a GPU, round-robin over the devices visible
// to this process. Returns the selected device ordinal.
extern "C" int ScaLBL_SetDevice(int rank){
    int n_devices;
    //int local_rank = atoi(getenv("OMPI_COMM_WORLD_LOCAL_RANK"));
    cudaGetDeviceCount(&n_devices);
    //int device = local_rank % n_devices;
    int device = rank % n_devices;
    cudaSetDevice(device);
    printf("MPI rank=%i will use GPU ID %i / %i \n",rank,device,n_devices);
    return device;
}
// Allocate `size` bytes of device memory into *address.
// Failures are printed to stdout but not propagated to the caller.
extern "C" void ScaLBL_AllocateDeviceMemory(void** address, size_t size){
    cudaMalloc(address,size);
    cudaError_t err = cudaGetLastError();
    if (cudaSuccess != err){
       printf("Error in cudaMalloc: %s \n",cudaGetErrorString(err));
    }
}
// Release device memory previously obtained via a ScaLBL_Allocate* helper.
extern "C" void ScaLBL_FreeDeviceMemory(void* pointer){
    cudaFree(pointer);
}
// Blocking host -> device copy of `size` bytes.
// Failures are printed to stdout but not propagated to the caller.
extern "C" void ScaLBL_CopyToDevice(void* dest, const void* source, size_t size){
    cudaMemcpy(dest,source,size,cudaMemcpyHostToDevice);
    cudaError_t err = cudaGetLastError();
    if (cudaSuccess != err){
       printf("Error in cudaMemcpy (host->device): %s \n",cudaGetErrorString(err));
    }
}
// Allocate a buffer intended for "zero copy" use. NOTE: despite the name,
// this currently allocates ordinary device memory; the pinned-host variant
// (cudaMallocHost) is commented out below.
// Failures are printed to stdout but not propagated to the caller.
extern "C" void ScaLBL_AllocateZeroCopy(void** address, size_t size){
    //cudaMallocHost(address,size);
    cudaMalloc(address,size);
    cudaError_t err = cudaGetLastError();
    if (cudaSuccess != err){
       // FIX: report the call actually made (cudaMalloc); the old message
       // blamed cudaMallocHost, which is not executed on this path.
       printf("Error in cudaMalloc: %s \n",cudaGetErrorString(err));
    }
}
// Copy host data into a "zero copy" buffer (currently plain device memory --
// see ScaLBL_AllocateZeroCopy).
// FIX: the error status was fetched into `err` but never examined; report
// failures the same way the sibling copy helpers do.
extern "C" void ScaLBL_CopyToZeroCopy(void* dest, const void* source, size_t size){
    cudaMemcpy(dest,source,size,cudaMemcpyHostToDevice);
    cudaError_t err = cudaGetLastError();
    if (cudaSuccess != err){
       printf("Error in cudaMemcpy (host->device): %s \n",cudaGetErrorString(err));
    }
    //memcpy(dest, source, size);
}
// Blocking device -> host copy of `size` bytes.
// Failures are printed to stdout but not propagated to the caller.
extern "C" void ScaLBL_CopyToHost(void* dest, const void* source, size_t size){
    cudaMemcpy(dest,source,size,cudaMemcpyDeviceToHost);
    cudaError_t err = cudaGetLastError();
    if (cudaSuccess != err){
       printf("Error in cudaMemcpy (device->host): %s \n",cudaGetErrorString(err));
    }
}
// Block the host until all previously issued device work has completed.
extern "C" void ScaLBL_DeviceBarrier(){
    cudaDeviceSynchronize();
}
|
41969cd51a4e36dff3700bf2b16fa38fce1a3e38.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#if GOOGLE_CUDA
#define EIGEN_USE_GPU
#include "ew_op_gpu.h"
//#include <stdio.h>
// mean = mean(x, axis=0)
// Partial column-mean reduction for CN layout (K rows x N cols, processed as
// float4). Each block covers UNROLL*(THREADS/16) rows of a 64-float column
// tile: 16-lane groups stride down the K axis accumulating per-column sums,
// partials are combined through shared memory, scaled by rcpK, and atomically
// added into Mean. Mean must be zeroed by the caller before launch (see
// LayerNormForward_CN).
template <typename T, int UNROLL, int THREADS>
__global__ void __launch_bounds__(THREADS) layer_norm_mean_CN(
    float* Mean,
    const T* __restrict__ X,
    int K, int N, float rcpK)
{
    __shared__ float4 Share4[THREADS];
    float* Share = (float*)Share4;
    int tid = threadIdx.x;
    int idx_K = blockIdx.x;
    int idx_N = blockIdx.y;
    int tid16 = tid >> 4;  // row group within the block
    int tid15 = tid & 15;  // float4 column within the tile
    int k = idx_K*UNROLL*(THREADS/16) + tid16;
    int n = idx_N*16 + tid15;
    int N4 = N >> 2;       // N in float4 units
    bool bn = n < N4;
    int xi = k*N4 + n;
    int inc = N4 * (THREADS/16);
    float4 mean4;
    ew_zero(mean4);
    #pragma unroll 4
    for (int j = 0; j < UNROLL; j++)
    {
        // predicated load; out-of-range elements contribute zero
        float4 x = load(X, xi, bn && k < K);
        mean4 = ew_add(mean4, x);
        k += (THREADS/16);
        xi += inc;
    }
    Share4[(tid16 << 4) + tid15] = mean4;
    __syncthreads();
    int tid32 = tid >> 5;
    int tid31 = tid & 31;
    if (tid32 == 0)
    {
        // warp 0 folds the first 32 floats of the 64-float tile
        n = idx_N*64 + tid31;
        Mean += n;
        float mean = 0.0f;
        #pragma unroll
        for (int i = 0; i < (THREADS/16); i++)
            mean += Share[tid31 + i*64];
        mean *= rcpK;
        if (n < N)
            //*Mean = mean;
            atomicAdd(Mean, mean);
    }
    else if (tid32 == 3)
    {
        // warp 3 folds the second 32 floats
        n = idx_N*64 + tid31+32;
        Mean += n;
        float mean = 0.0f;
        #pragma unroll
        for (int i = 0; i < (THREADS/16); i++)
            mean += Share[tid31 + i*64 + 32];
        mean *= rcpK;
        if (n < N)
            //*Mean = mean;
            atomicAdd(Mean, mean);
    }
}
// var = var(x, axis=0)
// Partial column-variance reduction for CN layout; same tiling scheme as
// layer_norm_mean_CN. Accumulates (x - mean)^2 per column, scales by rcpK,
// and atomicAdds into Var. Must run after the mean reduction has completed
// on the same stream, with Var zeroed by the caller.
template <typename T, int UNROLL, int THREADS>
__global__ void __launch_bounds__(THREADS) layer_norm_var_CN(
    float* Var,
    const T* __restrict__ X,
    const float4* __restrict__ Mean,
    int K, int N, float rcpK)
{
    __shared__ float4 Share4[THREADS];
    float* Share = (float*)Share4;
    int tid = threadIdx.x;
    int idx_K = blockIdx.x;
    int idx_N = blockIdx.y;
    int tid16 = tid >> 4;
    int tid15 = tid & 15;
    int k = idx_K*UNROLL*(THREADS/16) + tid16;
    int n = idx_N*16 + tid15;
    int N4 = N >> 2;
    bool bn = n < N4;
    int xi = k*N4 + n;
    int inc = N4 * (THREADS/16);
    float4 mean = load(Mean, n, bn);
    float4 var4;
    ew_zero(var4);
    #pragma unroll 4
    for (int j = 0; j < UNROLL; j++)
    {
        float4 x = load(X, xi, bn && k < K);
        // var4 += (x - mean)**2
        // guard on k < K: a zeroed OOB load would still yield mean^2
        if (k < K)
            var4 = ew_add(var4, ew_sqr(ew_sub(x, mean)));
        k += (THREADS/16);
        xi += inc;
    }
    Share4[(tid16 << 4) + tid15] = var4;
    __syncthreads();
    int tid32 = tid >> 5;
    int tid31 = tid & 31;
    if (tid32 == 0)
    {
        n = idx_N*64 + tid31;
        Var += n;
        float var = 0.0f;
        #pragma unroll
        for (int i = 0; i < (THREADS/16); i++)
            var += Share[tid31 + i*64];
        var *= rcpK;
        if (n < N)
            //*Var = var;
            atomicAdd(Var, var);
    }
    else if (tid32 == 3)
    {
        n = idx_N*64 + tid31+32;
        Var += n;
        float var = 0.0f;
        #pragma unroll
        for (int i = 0; i < (THREADS/16); i++)
            var += Share[tid31 + i*64 + 32];
        var *= rcpK;
        if (n < N)
            //*Var = var;
            atomicAdd(Var, var);
    }
}
// xstdr = rcp(sqrt(xvar + epsilon))
// xhat = xmean * xstdr
// y = xhat*g + b
// Normalization kernel: y = g * (x - mean) * rsqrt(var + epsilon) + b,
// optionally followed by relu. One 32-thread block handles a tile of
// UNROLL*2 rows x 16 float4 columns.
// NOTE(review): the shared Gain/Bias written below are read without a
// barrier; with blockDim == 32 this relies on implicit warp synchrony --
// a __syncwarp() would make it robust under independent thread scheduling.
// TODO confirm on target architectures.
template <typename T, int UNROLL>
__global__ void __launch_bounds__(32) layer_norm_CN(
    T* Y,
    const T* __restrict__ X,
    const float4* __restrict__ Mean,
    const float4* __restrict__ Var,
    const float* __restrict__ G,
    const float* __restrict__ B,
    int K, int N, float epsilon, int relu)
{
    __shared__ float Gain[UNROLL*2];
    __shared__ float Bias[UNROLL*2];
    int tid = threadIdx.x;
    int idx_K = blockIdx.x * UNROLL*2;
    int idx_N = blockIdx.y * 16;
    // load gain/bias for this K-block
    int ki = idx_K + tid;
    if (tid < UNROLL*2 && ki < K)
    {
        Gain[tid] = G[ki];
        Bias[tid] = B[ki];
    }
    int tid16 = tid >> 4;
    int tid15 = tid & 15;
    int k = idx_K + tid16;
    int n = idx_N + tid15;
    int N4 = N >> 2;
    bool bn = n < N4;
    int xi = k*N4 + n;
    int inc = N4 * 2;
    float4 var = load(Var, n, bn);
    float4 mean = load(Mean, n, bn);
    // rstd = 1 / sqrt(var + epsilon)
    // asm("and.b32 %0, %0, 0xffffc000;" : "+f"(var.x) : );
    // asm("and.b32 %0, %0, 0xffffc000;" : "+f"(var.y) : );
    // asm("and.b32 %0, %0, 0xffffc000;" : "+f"(var.z) : );
    // asm("and.b32 %0, %0, 0xffffc000;" : "+f"(var.w) : );
    float4 rstd = ew_rsqrt(ew_add(var, epsilon));
    #pragma unroll 4
    for (int j = 0; j < UNROLL; j++)
    {
        bool bnk = bn && k < K;
        float4 x = load(X, xi, bnk);
        float g = Gain[tid16];
        float b = Bias[tid16];
        // xhat = (x - mean) / sqrt(var + epsilon)
        // y = g * xhat + b
        float4 xhat = ew_mul(ew_sub(x, mean), rstd);
        float4 y = ew_add(ew_mul(xhat, g), b);
        if (relu)
            y = ew_relu(y);
        store_f(Y, y, xi, bnk);
        k += 2;
        tid16 += 2;
        xi += inc;
    }
}
// Layer-norm forward over the K (feature) axis for CN-layout tensors.
//
// Two reduction kernels accumulate per-column mean and variance into the
// `mean`/`var` buffers (N floats each) via atomicAdd, then layer_norm_CN
// applies y = g * (x - mean) * rsqrt(var + epsilon) + b, optionally relu'd.
// The accumulators must therefore start zeroed.
template <typename T, typename V>
bool LayerNormForward_CN(hipStream_t stream, int SMs,
    T* y, float* mean, float* var,
    const T* x, const float* g, const float* b,
    float epsilon, int K, int N, float rcpK, int relu)
{
    const V* X = (const V*)x;
    const float4* Mean = (const float4*)mean;
    const float4* Var = (const float4*)var;
    // BUG FIX: hipMemsetAsync counts bytes, but mean/var each hold N floats.
    // The previous count of `N` cleared only the first N/4 values (apparently
    // a leftover of a 32-bit-word memset), leaving stale data in the rest of
    // the atomicAdd accumulators.
    hipMemsetAsync((hipDeviceptr_t)mean, 0, N * sizeof(float), stream);
    hipMemsetAsync((hipDeviceptr_t)var,  0, N * sizeof(float), stream);
    int gridN = (N >> 6) + ((N & 63) != 0);  // 64 floats per N tile
    int gridK = (K >> 3) + ((K & 7) != 0);
    if ((K >> 8) < (SMs >> 1))
    {
        // shallow K: smaller (128-thread) reduction blocks keep more SMs busy
        dim3 grid((K >> 7) + ((K & 127) != 0), gridN);
        hipLaunchKernelGGL(( layer_norm_mean_CN<V,16,128>), dim3(grid),dim3(128),0,stream, mean, X, K, N, rcpK);
        hipLaunchKernelGGL(( layer_norm_var_CN <V,16,128>), dim3(grid),dim3(128),0,stream, var, X, Mean, K, N, rcpK);
    }
    else
    {
        dim3 grid((K >> 8) + ((K & 255) != 0), gridN);
        hipLaunchKernelGGL(( layer_norm_mean_CN<V,16,256>), dim3(grid),dim3(256),0,stream, mean, X, K, N, rcpK);
        hipLaunchKernelGGL(( layer_norm_var_CN <V,16,256>), dim3(grid),dim3(256),0,stream, var, X, Mean, K, N, rcpK);
    }
    dim3 grid(gridK, gridN);
    hipLaunchKernelGGL(( layer_norm_CN<V,4>), dim3(grid),dim3(32), 0,stream, (V*)y, X, Mean, Var, g, b, K, N, epsilon, relu);
    return true; // TODO
}
template bool LayerNormForward_CN<float,float4>(hipStream_t stream, int SMs, float* y, float* mean, float* rstd, const float* x, const float* g, const float* b, float epsilon, int K, int N, float rcpK, int relu);
template bool LayerNormForward_CN<ehalf,ehalf4>(hipStream_t stream, int SMs, ehalf* y, float* mean, float* rstd, const ehalf* x, const float* g, const float* b, float epsilon, int K, int N, float rcpK, int relu);
template bool LayerNormForward_CN<bhalf,bhalf4>(hipStream_t stream, int SMs, bhalf* y, float* mean, float* rstd, const bhalf* x, const float* g, const float* b, float epsilon, int K, int N, float rcpK, int relu);
// dg = sum(dy * xhat(x), axis=1)
// db = sum(dy, axis=1)
// Backward pass for gain/bias: dg[k] = sum_n(dy * xhat), db[k] = sum_n(dy).
// Each 128-thread block covers 8 rows; 16 lanes per row stride across N,
// then a half-warp shuffle reduction folds the 16 partials, and lane 0 of
// each half warp writes the result (no atomics needed -- one block per
// 8-row slice owns its outputs).
template <typename B, typename F>
__global__ void __launch_bounds__(128) layer_norm_dg_db_CN(
    float* DG,
    float* DB,
    const B* __restrict__ DY,
    const F* __restrict__ X,
    const float* __restrict__ Gain,
    const float* __restrict__ Bias,
    const float4* __restrict__ Mean,
    const float4* __restrict__ Var,
    float epsilon, int K, int N, int relu)
{
    __shared__ float gain[8];
    __shared__ float bias[8];
    int tid = threadIdx.x;
    int idx_K = blockIdx.x * 8;
    // load gain/bias for this K-block (only needed for the relu gradient)
    int ki = idx_K + tid;
    if (relu && tid < 8 && ki < K)
    {
        gain[tid] = Gain[ki];
        bias[tid] = Bias[ki];
    }
    int tid16 = tid >> 4;
    int tid15 = tid & 15;
    int k = idx_K + tid16;
    __syncthreads();
    if (k < K)
    {
        int N4 = N >> 2;
        int xi = k*N4;
        X += xi;
        DY += xi;
        float4 dg4, db4;
        ew_zero(dg4);
        ew_zero(db4);
        for (int n = tid15; n < N4; n += 16)
        {
            float4 x = load(X, n);
            float4 dy = load(DY, n);
            float4 var = load(Var, n);
            float4 mean = load(Mean, n);
            // rstd = 1 / sqrt(var + epsilon)
            // xhat = (x - mean) * rstd
            // asm("and.b32 %0, %0, 0xffffc000;" : "+f"(var.x) : );
            // asm("and.b32 %0, %0, 0xffffc000;" : "+f"(var.y) : );
            // asm("and.b32 %0, %0, 0xffffc000;" : "+f"(var.z) : );
            // asm("and.b32 %0, %0, 0xffffc000;" : "+f"(var.w) : );
            float4 rstd = ew_rsqrt(ew_add(var, epsilon));
            float4 xhat = ew_mul(ew_sub(x, mean), rstd);
            if (relu)
            {
                // gate the gradient through the forward relu output
                float g = gain[tid16];
                float b = bias[tid16];
                dy = ew_relu_grad(dy, ew_add(ew_mul(xhat, g), b));
            }
            dg4 = ew_add(ew_mul(dy, xhat), dg4);
            db4 = ew_add(dy, db4);
        }
        float dg = ew_sum(dg4);
        float db = ew_sum(db4);
        // reduce each half warp
        // NOTE(review): mask-less __shfl_xor is the HIP form; a CUDA Volta+
        // build would need __shfl_xor_sync with an explicit mask.
        for (int i = 8; i > 0; i >>= 1)
        {
            dg += __shfl_xor(dg, i);
            db += __shfl_xor(db, i);
        }
        if (tid15 == 0)
        {
            DG[k] = dg;
            DB[k] = db;
        }
    }
}
// dy = dy * g
// sum1 = sum(xhat * dy, axis=0)
// sum2 = sum(dy, axis=0)
// Backward pass column reductions: with dy' = dy * g (relu-gated when relu),
//   sum1[n] = sum_k(dy' * xhat),  sum2[n] = sum_k(dy').
// Same tiling as the forward mean/var reductions; per-block partials are
// combined in shared memory and atomicAdd'ed into Sum1/Sum2, which the
// caller must zero beforehand. Warps 0/1 fold Sum1, warps 2/3 fold Sum2.
template <typename B, typename F, int UNROLL, int THREADS>
__global__ void __launch_bounds__(THREADS) layer_norm_dx_sum_CN(
    float* Sum1,
    float* Sum2,
    const B* __restrict__ DY,
    const F* __restrict__ X,
    const float* __restrict__ Gain,
    const float* __restrict__ Bias,
    const float4* __restrict__ Mean,
    const float4* __restrict__ Var,
    float epsilon, int K, int N, int relu)
{
    __shared__ float4 Sum1f4[THREADS];
    __shared__ float4 Sum2f4[THREADS];
    __shared__ float gain[UNROLL*(THREADS/16)];
    __shared__ float bias[UNROLL*(THREADS/16)];
    float* Sum1f1 = (float*)Sum1f4;
    float* Sum2f1 = (float*)Sum2f4;
    int tid = threadIdx.x;
    int idx_K = blockIdx.x * UNROLL*(THREADS/16);
    int idx_N = blockIdx.y * 16;
    // load gain/bias for this K-block
    int ki = idx_K + tid;
    if (tid < UNROLL*(THREADS/16) && ki < K)
    {
        gain[tid] = Gain[ki];
        bias[tid] = Bias[ki];
    }
    __syncthreads();
    int tid16 = tid >> 4;
    int tid15 = tid & 15;
    int gbi = tid16;
    int k = idx_K + tid16;
    int n = idx_N + tid15;
    int N4 = N >> 2;
    bool bn = n < N4;
    int xi = k*N4 + n;
    int inc = N4 * (THREADS/16);
    float4 var = load(Var, n, bn);
    float4 mean = load(Mean, n, bn);
    // rstd = 1 / sqrt(var + epsilon)
    // asm("and.b32 %0, %0, 0xffffc000;" : "+f"(var.x) : );
    // asm("and.b32 %0, %0, 0xffffc000;" : "+f"(var.y) : );
    // asm("and.b32 %0, %0, 0xffffc000;" : "+f"(var.z) : );
    // asm("and.b32 %0, %0, 0xffffc000;" : "+f"(var.w) : );
    float4 rstd = ew_rsqrt(ew_add(var, epsilon));
    float4 sum1, sum2;
    ew_zero(sum1);
    ew_zero(sum2);
    #pragma unroll 2
    for (int j = 0; j < UNROLL; j++)
    {
        // precedence note: `&` binds looser than `<`, so this is
        // bn & (k < K) -- bitwise AND of two bools, equivalent to &&
        bool bnk = bn & k < K;
        float4 x = load( X, xi, bnk);
        float4 dy = load(DY, xi, bnk);
        float g = gain[gbi];
        float b = bias[gbi];
        float4 xhat = ew_mul(ew_sub(x, mean), rstd);
        if (relu)
            dy = ew_relu_grad(dy, ew_add(ew_mul(xhat, g), b));
        dy = ew_mul(dy, g);
        if (bnk)
        {
            sum1 = ew_add(sum1, ew_mul(dy, xhat));
            sum2 = ew_add(sum2, dy);
        }
        k += (THREADS/16);
        gbi += (THREADS/16);
        xi += inc;
    }
    int si = (tid16 << 4) + tid15;
    Sum1f4[si] = sum1;
    Sum2f4[si] = sum2;
    __syncthreads();
    int tid32 = tid >> 5;
    int tid31 = tid & 31;
    n = idx_N*4 + tid31;
    if (tid32 == 0)
    {
        Sum1 += n;
        float sum1 = 0.0f;
        #pragma unroll
        for (int i = 0; i < (THREADS/16); i++)
            sum1 += Sum1f1[tid31 + i*64];
        if (n < N)
            atomicAdd(Sum1, sum1);
    }
    else if (tid32 == 1)
    {
        n += 32;
        Sum1 += n;
        float sum1 = 0.0f;
        #pragma unroll
        for (int i = 0; i < (THREADS/16); i++)
            sum1 += Sum1f1[tid31 + i*64 + 32];
        if (n < N)
            atomicAdd(Sum1, sum1);
    }
    else if (tid32 == 2)
    {
        Sum2 += n;
        float sum2 = 0.0f;
        #pragma unroll
        for (int i = 0; i < (THREADS/16); i++)
            sum2 += Sum2f1[tid31 + i*64];
        if (n < N)
            atomicAdd(Sum2, sum2);
    }
    else if (tid32 == 3)
    {
        n += 32;
        Sum2 += n;
        float sum2 = 0.0f;
        #pragma unroll
        for (int i = 0; i < (THREADS/16); i++)
            sum2 += Sum2f1[tid31 + i*64 + 32];
        if (n < N)
            atomicAdd(Sum2, sum2);
    }
}
// dy = dy * g
// dx = (dy - ((xhat * sum1 + sum2) * rcpK)) * xstdr
// Final backward step: with dy' = dy * g (relu-gated when relu),
//   dx = (dy' - (xhat * sum1 + sum2) * rcpK) * rstd.
// Same 32-thread tiling as the forward layer_norm_CN kernel.
// NOTE(review): like layer_norm_CN, shared gain/bias are read without a
// barrier, relying on single-warp implicit synchrony -- a __syncwarp()
// would be more robust under independent thread scheduling.
template <typename B, typename F, int UNROLL>
__global__ void __launch_bounds__(32) layer_norm_dx_CN(
    B* DX,
    const B* __restrict__ DY,
    const F* __restrict__ X,
    const float* __restrict__ Gain,
    const float* __restrict__ Bias,
    const float4* __restrict__ Mean,
    const float4* __restrict__ Var,
    const float4* __restrict__ Sum1,
    const float4* __restrict__ Sum2,
    float epsilon, int K, int N, float rcpK, int relu)
{
    __shared__ float gain[UNROLL*2];
    __shared__ float bias[UNROLL*2];
    int tid = threadIdx.x;
    int idx_K = blockIdx.x * UNROLL*2;
    int idx_N = blockIdx.y * 16;
    // load gain/bias for this K-block
    int ki = idx_K + tid;
    if (tid < UNROLL*2 && ki < K)
    {
        gain[tid] = Gain[ki];
        bias[tid] = Bias[ki];
    }
    int tid16 = tid >> 4;
    int tid15 = tid & 15;
    int k = idx_K + tid16;
    int n = idx_N + tid15;
    int N4 = N >> 2;
    bool bn = n < N4;
    int xi = k*N4 + n;
    int inc = N4 * 2;
    float4 var = load(Var, n, bn);
    float4 mean = load(Mean, n, bn);
    float4 sum1 = load(Sum1, n, bn);
    float4 sum2 = load(Sum2, n, bn);
    // rstd = 1 / sqrt(var + epsilon)
    // asm("and.b32 %0, %0, 0xffffc000;" : "+f"(var.x) : );
    // asm("and.b32 %0, %0, 0xffffc000;" : "+f"(var.y) : );
    // asm("and.b32 %0, %0, 0xffffc000;" : "+f"(var.z) : );
    // asm("and.b32 %0, %0, 0xffffc000;" : "+f"(var.w) : );
    float4 rstd = ew_rsqrt(ew_add(var, epsilon));
    #pragma unroll 4
    for (int j = 0; j < UNROLL; j++)
    {
        bool bnk = bn && k < K;
        float4 x = load( X, xi, bnk);
        float4 dy = load(DY, xi, bnk);
        float g = gain[tid16];
        float b = bias[tid16];
        float4 xhat = ew_mul(ew_sub(x, mean), rstd);
        if (relu)
            dy = ew_relu_grad(dy, ew_add(ew_mul(xhat, g), b));
        dy = ew_mul(dy, g);
        // dx = (dy - ((xhat * sum1 + sum2) * rcpK)) * rstd;
        float4 dx = ew_mul(ew_sub(dy, ew_mul(ew_add(ew_mul(xhat, sum1), sum2), rcpK)), rstd);
        store_g(DX, dx, xi, bnk);
        k += 2;
        tid16 += 2;
        xi += inc;
    }
}
// Layer-norm backward pass for CN layout:
//   1. layer_norm_dg_db_CN  -> per-feature gradients dg, db
//   2. layer_norm_dx_sum_CN -> per-column sums sum1 = sum(dy*g*xhat),
//      sum2 = sum(dy*g), accumulated via atomicAdd (must start zeroed)
//   3. layer_norm_dx_CN     -> dx = (dy*g - (xhat*sum1 + sum2)*rcpK) * rstd
template <typename B, typename F, typename VB, typename VF>
bool LayerNormBackward_CN(hipStream_t stream, int SMs,
    B* dx, float* dg, float* db, float* sum1, float* sum2,
    const B* dy, const F* x, const float* g, const float* b,
    const float* mean, const float* var,
    float epsilon, int K, int N, float rcpK, int relu)
{
    int gridK8 = (K >> 3) + ((K & 7) != 0);
    int gridK256 = (K >> 8) + ((K & 255) != 0);
    int gridN64 = (N >> 6) + ((N & 63) != 0);
    dim3 grid8( gridK8, gridN64, 1);
    dim3 grid256(gridK256, gridN64, 1);
    VB* DX = ( VB*)dx;
    const VB* DY = (const VB*)dy;
    const VF* X = (const VF*)x;
    const float4* Mean = (const float4*)mean;
    const float4* Var = (const float4*)var;
    const float4* Sum1 = (const float4*)sum1;
    const float4* Sum2 = (const float4*)sum2;
    // BUG FIX: hipMemsetAsync counts bytes; sum1/sum2 each hold N floats.
    // Zeroing only N bytes left three quarters of the atomicAdd accumulators
    // with stale contents.
    hipMemsetAsync((hipDeviceptr_t)sum1, 0, N * sizeof(float), stream);
    hipMemsetAsync((hipDeviceptr_t)sum2, 0, N * sizeof(float), stream);
    hipLaunchKernelGGL(( layer_norm_dg_db_CN <VB,VF >), dim3(gridK8) ,dim3(128),0,stream, dg, db, DY, X, g, b, Mean, Var, epsilon, K, N, relu);
    hipLaunchKernelGGL(( layer_norm_dx_sum_CN<VB,VF,16,256>), dim3(grid256),dim3(256),0,stream, sum1, sum2, DY, X, g, b, Mean, Var, epsilon, K, N, relu);
    hipLaunchKernelGGL(( layer_norm_dx_CN    <VB,VF, 4    >), dim3(grid8), dim3(32),0,stream, DX, DY, X, g, b, Mean, Var, Sum1, Sum2, epsilon, K, N, rcpK, relu);
    return true; // TODO
}
template bool LayerNormBackward_CN<float,float,float4,float4>(hipStream_t stream, int SMs, float* dx, float* dg, float* db, float* sum1, float* sum2, const float* dy, const float* x, const float* g, const float* b, const float* mean, const float* rstd, float epsilon, int K, int N, float rcpK, int relu);
template bool LayerNormBackward_CN<ehalf,ehalf,ehalf4,ehalf4>(hipStream_t stream, int SMs, ehalf* dx, float* dg, float* db, float* sum1, float* sum2, const ehalf* dy, const ehalf* x, const float* g, const float* b, const float* mean, const float* rstd, float epsilon, int K, int N, float rcpK, int relu);
template bool LayerNormBackward_CN<bhalf,bhalf,bhalf4,bhalf4>(hipStream_t stream, int SMs, bhalf* dx, float* dg, float* db, float* sum1, float* sum2, const bhalf* dy, const bhalf* x, const float* g, const float* b, const float* mean, const float* rstd, float epsilon, int K, int N, float rcpK, int relu);
template bool LayerNormBackward_CN<float,ehalf,float4,ehalf4>(hipStream_t stream, int SMs, float* dx, float* dg, float* db, float* sum1, float* sum2, const float* dy, const ehalf* x, const float* g, const float* b, const float* mean, const float* rstd, float epsilon, int K, int N, float rcpK, int relu);
template bool LayerNormBackward_CN<float,bhalf,float4,bhalf4>(hipStream_t stream, int SMs, float* dx, float* dg, float* db, float* sum1, float* sum2, const float* dy, const bhalf* x, const float* g, const float* b, const float* mean, const float* rstd, float epsilon, int K, int N, float rcpK, int relu);
// Sparse Projection Code
// Gather rows of X into Z through a lookup table: Z[zk, :] = X[Lut[zk], :].
// A negative Lut entry yields zeros (the load is predicated on xk >= 0).
// Each 128-thread block covers 2^(7-SHFT) rows x 2^SHFT columns.
template <typename T, typename V, int SHFT>
__global__ void __launch_bounds__(128) gather_scatter(
    T* Z,
    const T* __restrict__ X,
    const int* __restrict__ Lut,
    int K, int N)
{
    int tid = threadIdx.x;
    int idx_K = blockIdx.x;
    int idx_N = blockIdx.y;
    int tidK = tid >> SHFT;
    int tidN = tid & ((1<<SHFT)-1);
    int zk = (idx_K << (7-SHFT)) + tidK;
    int n = (idx_N << SHFT) + tidN;
    if (zk < K && n < N)
    {
        int xk = load(Lut, zk);
        int zi = zk*N + n;
        int xi = xk*N + n;
        V x = load(X, xi, xk >= 0);
        store(Z, x, zi);
    }
}
// Z[Lut[yk]] = X[Lut[yk]] + Y[yk] : add each row of the small tensor into
// the mapped row of the large tensor.  Lut entries are assumed valid
// (non-negative) here — there is no predicate on the X load.
template <typename T, typename V, int SHFT>
__global__ void __launch_bounds__(128) scatter_add(
    T* Z, // large tensor
    const T* __restrict__ X, // large tensor
    const T* __restrict__ Y, // small tensor
    const int* __restrict__ Lut,
    int K, int N)
{
    int tid = threadIdx.x;
    int idx_K = blockIdx.x;
    int idx_N = blockIdx.y;
    int tidK = tid >> SHFT;
    int tidN = tid & ((1<<SHFT)-1);
    int yk = (idx_K << (7-SHFT)) + tidK;  // small-tensor row
    int n = (idx_N << SHFT) + tidN;
    if (yk < K && n < N)
    {
        int xk = load(Lut, yk);           // mapped large-tensor row
        int yi = yk*N + n;
        int xi = xk*N + n;
        V y = load(Y, yi);
        V x = load(X, xi);
        store(Z, ew_add(x, y), xi);       // result lands in the large tensor
    }
}
// Z[xk] = X[xk] * Y[Lut[xk]] for mapped rows; unmapped rows (Lut < 0) pass
// X through unchanged.  Indexed from the large-tensor side, unlike
// scatter_add which walks the small tensor.
template <typename T, typename V, int SHFT>
__global__ void __launch_bounds__(128) scatter_mul(
    T* Z, // large tensor
    const T* __restrict__ X, // large tensor
    const T* __restrict__ Y, // small tensor
    const int* __restrict__ Lut,
    int K, int N)
{
    int tid = threadIdx.x;
    int idx_K = blockIdx.x;
    int idx_N = blockIdx.y;
    int tidK = tid >> SHFT;
    int tidN = tid & ((1<<SHFT)-1);
    int xk = (idx_K << (7-SHFT)) + tidK;  // large-tensor row
    int n = (idx_N << SHFT) + tidN;
    if (xk < K && n < N)
    {
        int yk = load(Lut, xk);           // small-tensor row; < 0 means unmapped
        int xi = xk*N + n;
        int yi = yk*N + n;
        V x = load(X, xi);
        V y = load(Y, yi, yk >= 0);       // predicated load for unmapped rows
        V z = yk >= 0 ? ew_mul(x, y) : x; // pass through if unmapped
        store(Z, z, xi);
    }
}
// Gradient of scatter_mul (product rule): DX[xk] = DZ[xk] * Y[yk] and
// DY[yk] = DZ[xk] * X[xk].  DZ may alias DX, so dz is loaded before DX is
// written.  Only mapped rows (present in the Lut) are touched.
template <typename T, typename V, int SHFT>
__global__ void __launch_bounds__(128) sparse_mul_grad(
    T* DX, // large tensor
    T* DY, // small tensor
    const T* __restrict__ DZ, // large tensor (same pointer as DX)
    const T* __restrict__ X, // large tensor
    const T* __restrict__ Y, // small tensor
    const int* __restrict__ Lut,
    int K, int N)
{
    int tid = threadIdx.x;
    int idx_K = blockIdx.x;
    int idx_N = blockIdx.y;
    int tidK = tid >> SHFT;
    int tidN = tid & ((1<<SHFT)-1);
    int yk = (idx_K << (7-SHFT)) + tidK;  // small-tensor row
    int n = (idx_N << SHFT) + tidN;
    if (yk < K && n < N)
    {
        int xk = load(Lut, yk);           // mapped large-tensor row
        int yi = yk*N + n;
        int xi = xk*N + n;
        V y = load(Y, yi);
        V x = load(X, xi);
        V dz = load(DZ, xi);              // read before overwriting DX (aliases DZ)
        store(DX, ew_mul(dz, y), xi);
        store(DY, ew_mul(dz, x), yi);
    }
}
// Op selector values passed to SparseOp (must match the host-side enum).
#define OP_GAT 0
#define OP_SCT 1
#define OP_ADD 2
#define OP_MUL 3
// Dispatch one sparse projection op.  Selects the widest vector width the
// dtype and N allow (8-wide for 16-bit types when N%8==0, otherwise 4-wide
// when N%4==0) and launches with N rescaled to vector units.
// NOTE(review): when N is not a multiple of 4, no kernel is launched but
// true is still returned — confirm callers guarantee N % 4 == 0.
template <typename T, typename V4, typename V8>
bool SparseOp(hipStream_t stream,
    T* z,
    const T* x,
    const T* y,
    const int* lut,
    int op, int K, int N)
{
    int gridN = (N >> 6) + ((N & 63) != 0);  // ceil(N / 64) column blocks
    if (sizeof(T) == 2 && (N & 7) == 0)
    {
        V8* Z = ( V8*)z;
        const V8* X = (const V8*)x;
        const V8* Y = (const V8*)y;
        // blockK = 128 / 8 = 16
        int gridK = (K >> 4) + ((K & 15) != 0);
        dim3 grid(gridK, gridN, 1);
        switch(op)
        {
            // GAT and SCT share the same kernel; the Lut encodes direction.
            case OP_GAT:hipLaunchKernelGGL(( gather_scatter<V8,float8,3>), dim3(grid),dim3(128),0,stream, Z, X, lut, K, N>>3); break;
            case OP_SCT:hipLaunchKernelGGL(( gather_scatter<V8,float8,3>), dim3(grid),dim3(128),0,stream, Z, X, lut, K, N>>3); break;
            case OP_ADD: hipLaunchKernelGGL(( scatter_add<V8,float8,3>), dim3(grid),dim3(128),0,stream, Z, X, Y, lut, K, N>>3); break;
            case OP_MUL: hipLaunchKernelGGL(( scatter_mul<V8,float8,3>), dim3(grid),dim3(128),0,stream, Z, X, Y, lut, K, N>>3); break;
        }
    }
    else if ((N & 3) == 0)
    {
        V4* Z = ( V4*)z;
        const V4* X = (const V4*)x;
        const V4* Y = (const V4*)y;
        // blockK = 128 / 16 = 8
        int gridK = (K >> 3) + ((K & 7) != 0);
        dim3 grid(gridK, gridN, 1);
        switch(op)
        {
            case OP_GAT:hipLaunchKernelGGL(( gather_scatter<V4,float4,4>), dim3(grid),dim3(128),0,stream, Z, X, lut, K, N>>2); break;
            case OP_SCT:hipLaunchKernelGGL(( gather_scatter<V4,float4,4>), dim3(grid),dim3(128),0,stream, Z, X, lut, K, N>>2); break;
            case OP_ADD: hipLaunchKernelGGL(( scatter_add<V4,float4,4>), dim3(grid),dim3(128),0,stream, Z, X, Y, lut, K, N>>2); break;
            case OP_MUL: hipLaunchKernelGGL(( scatter_mul<V4,float4,4>), dim3(grid),dim3(128),0,stream, Z, X, Y, lut, K, N>>2); break;
        }
    }
    return true; // TODO
}
// Launch the scatter_mul gradient kernel with the widest vector width the
// dtype and N allow (see SparseOp for the dispatch rules).  dz may alias dx.
// NOTE(review): N % 4 != 0 silently launches nothing — confirm callers
// guarantee alignment.
template <typename T, typename V4, typename V8>
bool SparseMulGrad(hipStream_t stream,
    T* dx,
    T* dy,
    const T* dz,
    const T* x,
    const T* y,
    const int* lut,
    int K, int N)
{
    int gridN = (N >> 6) + ((N & 63) != 0);  // ceil(N / 64) column blocks
    if (sizeof(T) == 2 && (N & 7) == 0)
    {
        V8* DX = ( V8*)dx;
        V8* DY = ( V8*)dy;
        const V8* DZ = (const V8*)dz;
        const V8* X = (const V8*)x;
        const V8* Y = (const V8*)y;
        // blockK = 128 / 8 = 16
        int gridK = (K >> 4) + ((K & 15) != 0);
        dim3 grid(gridK, gridN, 1);
        hipLaunchKernelGGL(( sparse_mul_grad<V8,float8,3>), dim3(grid),dim3(128),0,stream, DX, DY, DZ, X, Y, lut, K, N>>3);
    }
    else if ((N & 3) == 0)
    {
        V4* DX = ( V4*)dx;
        V4* DY = ( V4*)dy;
        const V4* DZ = (const V4*)dz;
        const V4* X = (const V4*)x;
        const V4* Y = (const V4*)y;
        // blockK = 128 / 16 = 8
        int gridK = (K >> 3) + ((K & 7) != 0);
        dim3 grid(gridK, gridN, 1);
        hipLaunchKernelGGL(( sparse_mul_grad<V4,float4,4>), dim3(grid),dim3(128),0,stream, DX, DY, DZ, X, Y, lut, K, N>>2);
    }
    return true; // TODO
}
// Explicit instantiations for the supported dtypes (fp32, fp16, bf16).
template bool SparseOp<float,float4,float8>(hipStream_t stream, float* z, const float* x, const float* y, const int* lut, int op, int K, int N);
template bool SparseOp<ehalf,ehalf4,ehalf8>(hipStream_t stream, ehalf* z, const ehalf* x, const ehalf* y, const int* lut, int op, int K, int N);
template bool SparseOp<bhalf,bhalf4,bhalf8>(hipStream_t stream, bhalf* z, const bhalf* x, const bhalf* y, const int* lut, int op, int K, int N);
template bool SparseMulGrad<float,float4,float8>(hipStream_t stream, float* dx, float* dy, const float* dz, const float* x, const float* y, const int* lut, int K, int N);
template bool SparseMulGrad<ehalf,ehalf4,ehalf8>(hipStream_t stream, ehalf* dx, ehalf* dy, const ehalf* dz, const ehalf* x, const ehalf* y, const int* lut, int K, int N);
template bool SparseMulGrad<bhalf,bhalf4,bhalf8>(hipStream_t stream, bhalf* dx, bhalf* dy, const bhalf* dz, const bhalf* x, const bhalf* y, const int* lut, int K, int N);
#endif // GOOGLE_CUDA
// cuobjdump -xelf blocksparse_ops.5.sm_60.cubin blocksparse_ops.so
// cuobjdump -xelf blocksparse_ops.6.sm_61.cubin blocksparse_ops.so
// nvdisasm -c -raw blocksparse_ops.5.sm_60.cubin > blocksparse_ops.5.sm_60.sass
// nvdisasm -c -raw blocksparse_ops.6.sm_61.cubin > blocksparse_ops.6.sm_61.sass
| 41969cd51a4e36dff3700bf2b16fa38fce1a3e38.cu |
#if GOOGLE_CUDA
#define EIGEN_USE_GPU
#include "ew_op_gpu.h"
//#include <stdio.h>
// mean = mean(x, axis=0)
// Partial column means of a KxN tensor (float4 vector loads along N).
// Grid: (ceil(K / (UNROLL*THREADS/16)), ceil(N/64)).  Each block sums its
// K-slab into shared memory, then two warps atomically accumulate 64 column
// partials into Mean (Mean must be zeroed before the first block runs).
template <typename T, int UNROLL, int THREADS>
__global__ void __launch_bounds__(THREADS) layer_norm_mean_CN(
    float* Mean,
    const T* __restrict__ X,
    int K, int N, float rcpK)
{
    __shared__ float4 Share4[THREADS];
    float* Share = (float*)Share4;
    int tid = threadIdx.x;
    int idx_K = blockIdx.x;
    int idx_N = blockIdx.y;
    // 16 lanes span N (16 float4 = 64 floats); THREADS/16 lanes span K.
    int tid16 = tid >> 4;
    int tid15 = tid & 15;
    int k = idx_K*UNROLL*(THREADS/16) + tid16;
    int n = idx_N*16 + tid15;
    int N4 = N >> 2;               // N in float4 units
    bool bn = n < N4;
    int xi = k*N4 + n;
    int inc = N4 * (THREADS/16);
    float4 mean4;
    ew_zero(mean4);
    #pragma unroll 4
    for (int j = 0; j < UNROLL; j++)
    {
        // Predicated load returns zero for out-of-range rows/cols.
        float4 x = load(X, xi, bn && k < K);
        mean4 = ew_add(mean4, x);
        k += (THREADS/16);
        xi += inc;
    }
    Share4[(tid16 << 4) + tid15] = mean4;
    __syncthreads();
    // Cross-row reduction: warp 0 handles columns [0,32), warp 3 [32,64).
    int tid32 = tid >> 5;
    int tid31 = tid & 31;
    if (tid32 == 0)
    {
        n = idx_N*64 + tid31;
        Mean += n;
        float mean = 0.0f;
        #pragma unroll
        for (int i = 0; i < (THREADS/16); i++)
            mean += Share[tid31 + i*64];
        mean *= rcpK;
        if (n < N)
            //*Mean = mean;
            atomicAdd(Mean, mean);  // accumulate across K-slab blocks
    }
    else if (tid32 == 3)
    {
        n = idx_N*64 + tid31+32;
        Mean += n;
        float mean = 0.0f;
        #pragma unroll
        for (int i = 0; i < (THREADS/16); i++)
            mean += Share[tid31 + i*64 + 32];
        mean *= rcpK;
        if (n < N)
            //*Mean = mean;
            atomicAdd(Mean, mean);
    }
}
// var = var(x, axis=0)
// Partial column variances: sums (x - mean)^2 over the block's K-slab and
// atomically accumulates rcpK-scaled partials into Var (zeroed beforehand).
// Same tiling as layer_norm_mean_CN; Mean holds the already-reduced means.
template <typename T, int UNROLL, int THREADS>
__global__ void __launch_bounds__(THREADS) layer_norm_var_CN(
    float* Var,
    const T* __restrict__ X,
    const float4* __restrict__ Mean,
    int K, int N, float rcpK)
{
    __shared__ float4 Share4[THREADS];
    float* Share = (float*)Share4;
    int tid = threadIdx.x;
    int idx_K = blockIdx.x;
    int idx_N = blockIdx.y;
    int tid16 = tid >> 4;
    int tid15 = tid & 15;
    int k = idx_K*UNROLL*(THREADS/16) + tid16;
    int n = idx_N*16 + tid15;
    int N4 = N >> 2;               // N in float4 units
    bool bn = n < N4;
    int xi = k*N4 + n;
    int inc = N4 * (THREADS/16);
    float4 mean = load(Mean, n, bn);
    float4 var4;
    ew_zero(var4);
    #pragma unroll 4
    for (int j = 0; j < UNROLL; j++)
    {
        float4 x = load(X, xi, bn && k < K);
        // var4 += (x - mean)**2
        // Guard needed: an out-of-range row loads zero, but (0 - mean)^2
        // would still contribute without the k < K check.
        if (k < K)
            var4 = ew_add(var4, ew_sqr(ew_sub(x, mean)));
        k += (THREADS/16);
        xi += inc;
    }
    Share4[(tid16 << 4) + tid15] = var4;
    __syncthreads();
    // Cross-row reduction: warp 0 covers columns [0,32), warp 3 [32,64).
    int tid32 = tid >> 5;
    int tid31 = tid & 31;
    if (tid32 == 0)
    {
        n = idx_N*64 + tid31;
        Var += n;
        float var = 0.0f;
        #pragma unroll
        for (int i = 0; i < (THREADS/16); i++)
            var += Share[tid31 + i*64];
        var *= rcpK;
        if (n < N)
            //*Var = var;
            atomicAdd(Var, var);
    }
    else if (tid32 == 3)
    {
        n = idx_N*64 + tid31+32;
        Var += n;
        float var = 0.0f;
        #pragma unroll
        for (int i = 0; i < (THREADS/16); i++)
            var += Share[tid31 + i*64 + 32];
        var *= rcpK;
        if (n < N)
            //*Var = var;
            atomicAdd(Var, var);
    }
}
// xstdr = rcp(sqrt(xvar + epsilon))
// xhat = xmean * xstdr
// y = xhat*g + b
// y = g * (x - mean) * rsqrt(var + epsilon) + b, with optional fused relu.
// One 32-thread block normalizes a (UNROLL*2 rows x 16 float4 cols) tile;
// gain/bias for the tile's rows are staged in shared memory.
template <typename T, int UNROLL>
__global__ void __launch_bounds__(32) layer_norm_CN(
    T* Y,
    const T* __restrict__ X,
    const float4* __restrict__ Mean,
    const float4* __restrict__ Var,
    const float* __restrict__ G,
    const float* __restrict__ B,
    int K, int N, float epsilon, int relu)
{
    __shared__ float Gain[UNROLL*2];
    __shared__ float Bias[UNROLL*2];
    int tid = threadIdx.x;
    int idx_K = blockIdx.x * UNROLL*2;
    int idx_N = blockIdx.y * 16;
    // load gain/bias for this K-block
    int ki = idx_K + tid;
    if (tid < UNROLL*2 && ki < K)
    {
        Gain[tid] = G[ki];
        Bias[tid] = B[ki];
    }
    // Fix: barrier before Gain/Bias are read below.  The block is a single
    // warp, but Volta+ independent thread scheduling no longer guarantees
    // implicit warp synchrony, so the shared-memory handoff needs an explicit
    // barrier (the 128-thread sibling kernels already have one).
    __syncthreads();
    int tid16 = tid >> 4;       // row lane within the tile (0 or 1)
    int tid15 = tid & 15;       // column lane
    int k = idx_K + tid16;
    int n = idx_N + tid15;
    int N4 = N >> 2;            // N in float4 units
    bool bn = n < N4;
    int xi = k*N4 + n;
    int inc = N4 * 2;
    float4 var = load(Var, n, bn);
    float4 mean = load(Mean, n, bn);
    // rstd = 1 / sqrt(var + epsilon)
    float4 rstd = ew_rsqrt(ew_add(var, epsilon));
    #pragma unroll 4
    for (int j = 0; j < UNROLL; j++)
    {
        bool bnk = bn && k < K;
        float4 x = load(X, xi, bnk);
        float g = Gain[tid16];
        float b = Bias[tid16];
        // xhat = (x - mean) / sqrt(var + epsilon)
        // y = g * xhat + b
        float4 xhat = ew_mul(ew_sub(x, mean), rstd);
        float4 y = ew_add(ew_mul(xhat, g), b);
        if (relu)
            y = ew_relu(y);
        store_f(Y, y, xi, bnk);
        k += 2;
        tid16 += 2;             // step to the next staged gain/bias pair
        xi += inc;
    }
}
// Layer-norm forward over the K (feature) axis of a KxN tensor.
// mean/var are built by atomic accumulation, so both buffers are zeroed
// first; the reduction block size adapts to occupancy (SMs).
// Returns true unconditionally (TODO: propagate launch errors).
template <typename T, typename V>
bool LayerNormForward_CN(CUstream stream, int SMs,
    T* y,
    float* mean,
    float* var,
    const T* x,
    const float* g,
    const float* b,
    float epsilon, int K, int N, float rcpK, int relu)
{
    const V* X = (const V*)x;
    const float4* Mean = (const float4*)mean;
    const float4* Var = (const float4*)var;
    // Zero the accumulators the reduction kernels atomicAdd into.
    cuMemsetD32Async((CUdeviceptr)mean, 0, N, stream);
    cuMemsetD32Async((CUdeviceptr)var, 0, N, stream);
    int gridN = (N >> 6) + ((N & 63) != 0);   // ceil(N / 64)
    int gridK = (K >> 3) + ((K & 7) != 0);    // ceil(K / 8)
    // Small-K case: 128-thread blocks give more blocks to fill the GPU.
    if ((K >> 8) < (SMs >> 1))
    {
        dim3 grid((K >> 7) + ((K & 127) != 0), gridN);
        layer_norm_mean_CN<V,16,128><<<grid,128,0,stream>>>(mean, X, K, N, rcpK);
        layer_norm_var_CN <V,16,128><<<grid,128,0,stream>>>(var, X, Mean, K, N, rcpK);
    }
    else
    {
        dim3 grid((K >> 8) + ((K & 255) != 0), gridN);
        layer_norm_mean_CN<V,16,256><<<grid,256,0,stream>>>(mean, X, K, N, rcpK);
        layer_norm_var_CN <V,16,256><<<grid,256,0,stream>>>(var, X, Mean, K, N, rcpK);
    }
    dim3 grid(gridK, gridN);
    layer_norm_CN<V,4><<<grid,32, 0,stream>>>((V*)y, X, Mean, Var, g, b, K, N, epsilon, relu);
    return true; // TODO
}
// Explicit instantiations for the supported dtypes (fp32, fp16, bf16).
template bool LayerNormForward_CN<float,float4>(CUstream stream, int SMs, float* y, float* mean, float* rstd, const float* x, const float* g, const float* b, float epsilon, int K, int N, float rcpK, int relu);
template bool LayerNormForward_CN<ehalf,ehalf4>(CUstream stream, int SMs, ehalf* y, float* mean, float* rstd, const ehalf* x, const float* g, const float* b, float epsilon, int K, int N, float rcpK, int relu);
template bool LayerNormForward_CN<bhalf,bhalf4>(CUstream stream, int SMs, bhalf* y, float* mean, float* rstd, const bhalf* x, const float* g, const float* b, float epsilon, int K, int N, float rcpK, int relu);
// dg = sum(dy * xhat(x), axis=1)
// db = sum(dy, axis=1)
// Gain/bias gradients, one 128-thread block per 8 rows of K:
//   dg[k] = sum_n(dy * xhat),  db[k] = sum_n(dy)
// Each half-warp (16 lanes) strides one row's N dimension, then reduces
// its partials with shuffles; lane 0 of each half-warp writes the result.
template <typename B, typename F>
__global__ void __launch_bounds__(128) layer_norm_dg_db_CN(
    float* DG,
    float* DB,
    const B* __restrict__ DY,
    const F* __restrict__ X,
    const float* __restrict__ Gain,
    const float* __restrict__ Bias,
    const float4* __restrict__ Mean,
    const float4* __restrict__ Var,
    float epsilon, int K, int N, int relu)
{
    __shared__ float gain[8];
    __shared__ float bias[8];
    int tid = threadIdx.x;
    int idx_K = blockIdx.x * 8;
    // load gain/bias for this K-block (only needed for the relu gradient)
    int ki = idx_K + tid;
    if (relu && tid < 8 && ki < K)
    {
        gain[tid] = Gain[ki];
        bias[tid] = Bias[ki];
    }
    int tid16 = tid >> 4;    // row within the 8-row block
    int tid15 = tid & 15;    // lane within the row's half-warp
    int k = idx_K + tid16;
    __syncthreads();
    if (k < K)
    {
        int N4 = N >> 2;     // N in float4 units
        int xi = k*N4;
        X += xi;
        DY += xi;
        float4 dg4, db4;
        ew_zero(dg4);
        ew_zero(db4);
        for (int n = tid15; n < N4; n += 16)
        {
            float4 x = load(X, n);
            float4 dy = load(DY, n);
            float4 var = load(Var, n);
            float4 mean = load(Mean, n);
            // rstd = 1 / sqrt(var + epsilon)
            // xhat = (x - mean) * rstd
            float4 rstd = ew_rsqrt(ew_add(var, epsilon));
            float4 xhat = ew_mul(ew_sub(x, mean), rstd);
            if (relu)
            {
                // Recompute the forward output to mask dy where relu clipped.
                float g = gain[tid16];
                float b = bias[tid16];
                dy = ew_relu_grad(dy, ew_add(ew_mul(xhat, g), b));
            }
            dg4 = ew_add(ew_mul(dy, xhat), dg4);
            db4 = ew_add(dy, db4);
        }
        float dg = ew_sum(dg4);
        float db = ew_sum(db4);
        // reduce each half warp
        // NOTE(review): mask-less __shfl_xor is the pre-CUDA-9 intrinsic;
        // sm70+ toolchains require __shfl_xor_sync — confirm target toolkit.
        for (int i = 8; i > 0; i >>= 1)
        {
            dg += __shfl_xor(dg, i);
            db += __shfl_xor(db, i);
        }
        if (tid15 == 0)
        {
            DG[k] = dg;
            DB[k] = db;
        }
    }
}
// dy = dy * g
// sum1 = sum(xhat * dy, axis=0)
// sum2 = sum(dy, axis=0)
// Column partials for the dx pass:
//   dy' = dy * g (after optional relu-grad masking)
//   sum1[n] += sum_k(xhat * dy'),  sum2[n] += sum_k(dy')
// Same K-slab tiling as the mean/var kernels; Sum1/Sum2 must be zeroed
// before launch since partials are accumulated with atomicAdd.
template <typename B, typename F, int UNROLL, int THREADS>
__global__ void __launch_bounds__(THREADS) layer_norm_dx_sum_CN(
    float* Sum1,
    float* Sum2,
    const B* __restrict__ DY,
    const F* __restrict__ X,
    const float* __restrict__ Gain,
    const float* __restrict__ Bias,
    const float4* __restrict__ Mean,
    const float4* __restrict__ Var,
    float epsilon, int K, int N, int relu)
{
    __shared__ float4 Sum1f4[THREADS];
    __shared__ float4 Sum2f4[THREADS];
    __shared__ float gain[UNROLL*(THREADS/16)];
    __shared__ float bias[UNROLL*(THREADS/16)];
    float* Sum1f1 = (float*)Sum1f4;
    float* Sum2f1 = (float*)Sum2f4;
    int tid = threadIdx.x;
    int idx_K = blockIdx.x * UNROLL*(THREADS/16);
    int idx_N = blockIdx.y * 16;
    // load gain/bias for this K-block
    int ki = idx_K + tid;
    if (tid < UNROLL*(THREADS/16) && ki < K)
    {
        gain[tid] = Gain[ki];
        bias[tid] = Bias[ki];
    }
    __syncthreads();
    int tid16 = tid >> 4;
    int tid15 = tid & 15;
    int gbi = tid16;               // index into the staged gain/bias
    int k = idx_K + tid16;
    int n = idx_N + tid15;
    int N4 = N >> 2;               // N in float4 units
    bool bn = n < N4;
    int xi = k*N4 + n;
    int inc = N4 * (THREADS/16);
    float4 var = load(Var, n, bn);
    float4 mean = load(Mean, n, bn);
    // rstd = 1 / sqrt(var + epsilon)
    float4 rstd = ew_rsqrt(ew_add(var, epsilon));
    float4 sum1, sum2;
    ew_zero(sum1);
    ew_zero(sum2);
    #pragma unroll 2
    for (int j = 0; j < UNROLL; j++)
    {
        // Fix: use logical && like the sibling kernels (was bitwise &;
        // same value by precedence, but inconsistent and non-idiomatic).
        bool bnk = bn && k < K;
        float4 x = load( X, xi, bnk);
        float4 dy = load(DY, xi, bnk);
        float g = gain[gbi];
        float b = bias[gbi];
        float4 xhat = ew_mul(ew_sub(x, mean), rstd);
        if (relu)
            dy = ew_relu_grad(dy, ew_add(ew_mul(xhat, g), b));
        dy = ew_mul(dy, g);
        // Guard: out-of-range lanes must not pollute the partial sums.
        if (bnk)
        {
            sum1 = ew_add(sum1, ew_mul(dy, xhat));
            sum2 = ew_add(sum2, dy);
        }
        k += (THREADS/16);
        gbi += (THREADS/16);
        xi += inc;
    }
    int si = (tid16 << 4) + tid15;
    Sum1f4[si] = sum1;
    Sum2f4[si] = sum2;
    __syncthreads();
    // Final cross-row reduction: warps 0/1 reduce Sum1, warps 2/3 Sum2.
    int tid32 = tid >> 5;
    int tid31 = tid & 31;
    n = idx_N*4 + tid31;
    if (tid32 == 0)
    {
        Sum1 += n;
        float sum1 = 0.0f;
        #pragma unroll
        for (int i = 0; i < (THREADS/16); i++)
            sum1 += Sum1f1[tid31 + i*64];
        if (n < N)
            atomicAdd(Sum1, sum1);
    }
    else if (tid32 == 1)
    {
        n += 32;
        Sum1 += n;
        float sum1 = 0.0f;
        #pragma unroll
        for (int i = 0; i < (THREADS/16); i++)
            sum1 += Sum1f1[tid31 + i*64 + 32];
        if (n < N)
            atomicAdd(Sum1, sum1);
    }
    else if (tid32 == 2)
    {
        Sum2 += n;
        float sum2 = 0.0f;
        #pragma unroll
        for (int i = 0; i < (THREADS/16); i++)
            sum2 += Sum2f1[tid31 + i*64];
        if (n < N)
            atomicAdd(Sum2, sum2);
    }
    else if (tid32 == 3)
    {
        n += 32;
        Sum2 += n;
        float sum2 = 0.0f;
        #pragma unroll
        for (int i = 0; i < (THREADS/16); i++)
            sum2 += Sum2f1[tid31 + i*64 + 32];
        if (n < N)
            atomicAdd(Sum2, sum2);
    }
}
// dy = dy * g
// dx = (dy - ((xhat * sum1 + sum2) * rcpK)) * xstdr
// Final dx pass:
//   dy' = dy * g (after optional relu-grad masking)
//   dx  = (dy' - (xhat * sum1 + sum2) * rcpK) * rstd
// One 32-thread block covers a (UNROLL*2 rows x 16 float4 cols) tile.
template <typename B, typename F, int UNROLL>
__global__ void __launch_bounds__(32) layer_norm_dx_CN(
    B* DX,
    const B* __restrict__ DY,
    const F* __restrict__ X,
    const float* __restrict__ Gain,
    const float* __restrict__ Bias,
    const float4* __restrict__ Mean,
    const float4* __restrict__ Var,
    const float4* __restrict__ Sum1,
    const float4* __restrict__ Sum2,
    float epsilon, int K, int N, float rcpK, int relu)
{
    __shared__ float gain[UNROLL*2];
    __shared__ float bias[UNROLL*2];
    int tid = threadIdx.x;
    int idx_K = blockIdx.x * UNROLL*2;
    int idx_N = blockIdx.y * 16;
    // load gain/bias for this K-block
    int ki = idx_K + tid;
    if (tid < UNROLL*2 && ki < K)
    {
        gain[tid] = Gain[ki];
        bias[tid] = Bias[ki];
    }
    // Fix: barrier before gain/bias are read below.  The single-warp block
    // has no implicit synchrony on Volta+ (independent thread scheduling),
    // so the shared-memory handoff needs an explicit barrier, matching the
    // 128/256-thread kernels in this file.
    __syncthreads();
    int tid16 = tid >> 4;       // row lane (0 or 1)
    int tid15 = tid & 15;       // column lane
    int k = idx_K + tid16;
    int n = idx_N + tid15;
    int N4 = N >> 2;            // N in float4 units
    bool bn = n < N4;
    int xi = k*N4 + n;
    int inc = N4 * 2;
    float4 var = load(Var, n, bn);
    float4 mean = load(Mean, n, bn);
    float4 sum1 = load(Sum1, n, bn);
    float4 sum2 = load(Sum2, n, bn);
    // rstd = 1 / sqrt(var + epsilon)
    float4 rstd = ew_rsqrt(ew_add(var, epsilon));
    #pragma unroll 4
    for (int j = 0; j < UNROLL; j++)
    {
        bool bnk = bn && k < K;
        float4 x = load( X, xi, bnk);
        float4 dy = load(DY, xi, bnk);
        float g = gain[tid16];
        float b = bias[tid16];
        float4 xhat = ew_mul(ew_sub(x, mean), rstd);
        if (relu)
            dy = ew_relu_grad(dy, ew_add(ew_mul(xhat, g), b));
        dy = ew_mul(dy, g);
        // dx = (dy - ((xhat * sum1 + sum2) * rcpK)) * rstd;
        float4 dx = ew_mul(ew_sub(dy, ew_mul(ew_add(ew_mul(xhat, sum1), sum2), rcpK)), rstd);
        store_g(DX, dx, xi, bnk);
        k += 2;
        tid16 += 2;             // next staged gain/bias pair
        xi += inc;
    }
}
// Layer-norm backward over the K axis: computes dg/db, the per-column
// partial sums, then dx.  sum1/sum2 are zeroed first because the sum kernel
// accumulates with atomicAdd.  Returns true unconditionally (TODO: errors).
template <typename B, typename F, typename VB, typename VF>
bool LayerNormBackward_CN(CUstream stream, int SMs,
    B* dx,
    float* dg,
    float* db,
    float* sum1,
    float* sum2,
    const B* dy,
    const F* x,
    const float* g,
    const float* b,
    const float* mean,
    const float* var,
    float epsilon, int K, int N, float rcpK, int relu)
{
    int gridK8 = (K >> 3) + ((K & 7) != 0);      // ceil(K / 8)
    int gridK256 = (K >> 8) + ((K & 255) != 0);  // ceil(K / 256)
    int gridN64 = (N >> 6) + ((N & 63) != 0);    // ceil(N / 64)
    dim3 grid8( gridK8, gridN64, 1);
    dim3 grid256(gridK256, gridN64, 1);
    VB* DX = ( VB*)dx;
    const VB* DY = (const VB*)dy;
    const VF* X = (const VF*)x;
    const float4* Mean = (const float4*)mean;
    const float4* Var = (const float4*)var;
    const float4* Sum1 = (const float4*)sum1;
    const float4* Sum2 = (const float4*)sum2;
    cuMemsetD32Async((CUdeviceptr)sum1, 0, N, stream);
    cuMemsetD32Async((CUdeviceptr)sum2, 0, N, stream);
    layer_norm_dg_db_CN <VB,VF ><<<gridK8 ,128,0,stream>>>(dg, db, DY, X, g, b, Mean, Var, epsilon, K, N, relu);
    layer_norm_dx_sum_CN<VB,VF,16,256><<<grid256,256,0,stream>>>(sum1, sum2, DY, X, g, b, Mean, Var, epsilon, K, N, relu);
    layer_norm_dx_CN <VB,VF, 4 ><<<grid8, 32,0,stream>>>(DX, DY, X, g, b, Mean, Var, Sum1, Sum2, epsilon, K, N, rcpK, relu);
    return true; // TODO
}
// Explicit instantiations for the supported (grad, input) dtype pairs.
template bool LayerNormBackward_CN<float,float,float4,float4>(CUstream stream, int SMs, float* dx, float* dg, float* db, float* sum1, float* sum2, const float* dy, const float* x, const float* g, const float* b, const float* mean, const float* rstd, float epsilon, int K, int N, float rcpK, int relu);
template bool LayerNormBackward_CN<ehalf,ehalf,ehalf4,ehalf4>(CUstream stream, int SMs, ehalf* dx, float* dg, float* db, float* sum1, float* sum2, const ehalf* dy, const ehalf* x, const float* g, const float* b, const float* mean, const float* rstd, float epsilon, int K, int N, float rcpK, int relu);
template bool LayerNormBackward_CN<bhalf,bhalf,bhalf4,bhalf4>(CUstream stream, int SMs, bhalf* dx, float* dg, float* db, float* sum1, float* sum2, const bhalf* dy, const bhalf* x, const float* g, const float* b, const float* mean, const float* rstd, float epsilon, int K, int N, float rcpK, int relu);
template bool LayerNormBackward_CN<float,ehalf,float4,ehalf4>(CUstream stream, int SMs, float* dx, float* dg, float* db, float* sum1, float* sum2, const float* dy, const ehalf* x, const float* g, const float* b, const float* mean, const float* rstd, float epsilon, int K, int N, float rcpK, int relu);
template bool LayerNormBackward_CN<float,bhalf,float4,bhalf4>(CUstream stream, int SMs, float* dx, float* dg, float* db, float* sum1, float* sum2, const float* dy, const bhalf* x, const float* g, const float* b, const float* mean, const float* rstd, float epsilon, int K, int N, float rcpK, int relu);
// Sparse Projection Code
// Copies rows of X into Z through an index table: Z[zk] = X[Lut[zk]].
// A negative Lut entry yields a zero row (the load predicate is false).
// Serves both gather and scatter — the direction is encoded in the Lut.
// Block = 128 threads covering (128 >> SHFT) rows by (1 << SHFT) columns.
template <typename T, typename V, int SHFT>
__global__ void __launch_bounds__(128) gather_scatter(
    T* Z,
    const T* __restrict__ X,
    const int* __restrict__ Lut,
    int K, int N)
{
    const int cols = 1 << SHFT;
    int tid = threadIdx.x;
    int zk  = blockIdx.x * (128 >> SHFT) + (tid >> SHFT);
    int n   = blockIdx.y * cols + (tid & (cols - 1));
    if (zk >= K || n >= N)
        return;
    int xk  = load(Lut, zk);
    V value = load(X, xk * N + n, xk >= 0);
    store(Z, value, zk * N + n);
}
// Adds each row of the small tensor Y into its mapped row of the large
// tensor: Z[Lut[yk]] = X[Lut[yk]] + Y[yk].  Lut entries are assumed valid
// (non-negative); the X load carries no predicate.
template <typename T, typename V, int SHFT>
__global__ void __launch_bounds__(128) scatter_add(
    T* Z, // large tensor
    const T* __restrict__ X, // large tensor
    const T* __restrict__ Y, // small tensor
    const int* __restrict__ Lut,
    int K, int N)
{
    const int cols = 1 << SHFT;
    int tid  = threadIdx.x;
    int rowY = blockIdx.x * (128 >> SHFT) + (tid >> SHFT);
    int col  = blockIdx.y * cols + (tid & (cols - 1));
    if (rowY >= K || col >= N)
        return;
    // Row of the large tensor this small-tensor row accumulates into.
    int rowX = load(Lut, rowY);
    int xi   = rowX * N + col;
    V y = load(Y, rowY * N + col);
    V x = load(X, xi);
    store(Z, ew_add(x, y), xi);
}
// Z[xk] = X[xk] * Y[Lut[xk]] for mapped rows; rows with a negative Lut
// entry pass X through unchanged.  Indexed from the large-tensor side.
template <typename T, typename V, int SHFT>
__global__ void __launch_bounds__(128) scatter_mul(
    T* Z, // large tensor
    const T* __restrict__ X, // large tensor
    const T* __restrict__ Y, // small tensor
    const int* __restrict__ Lut,
    int K, int N)
{
    const int cols = 1 << SHFT;
    int tid  = threadIdx.x;
    int rowX = blockIdx.x * (128 >> SHFT) + (tid >> SHFT);
    int col  = blockIdx.y * cols + (tid & (cols - 1));
    if (rowX >= K || col >= N)
        return;
    int rowY    = load(Lut, rowX);
    int xi      = rowX * N + col;
    bool mapped = rowY >= 0;
    V x = load(X, xi);
    V y = load(Y, rowY * N + col, mapped);  // predicated: zero when unmapped
    // pass through if unmapped
    store(Z, mapped ? ew_mul(x, y) : x, xi);
}
// Gradient of scatter_mul (product rule):
//   DX[xk] = DZ[xk] * Y[yk],  DY[yk] = DZ[xk] * X[xk]
// DZ may alias DX, so dz is loaded before DX is written.
template <typename T, typename V, int SHFT>
__global__ void __launch_bounds__(128) sparse_mul_grad(
    T* DX, // large tensor
    T* DY, // small tensor
    const T* __restrict__ DZ, // large tensor (same pointer as DX)
    const T* __restrict__ X, // large tensor
    const T* __restrict__ Y, // small tensor
    const int* __restrict__ Lut,
    int K, int N)
{
    const int cols = 1 << SHFT;
    int tid  = threadIdx.x;
    int rowY = blockIdx.x * (128 >> SHFT) + (tid >> SHFT);
    int col  = blockIdx.y * cols + (tid & (cols - 1));
    if (rowY >= K || col >= N)
        return;
    int rowX = load(Lut, rowY);
    int yi   = rowY * N + col;
    int xi   = rowX * N + col;
    V y  = load(Y, yi);
    V x  = load(X, xi);
    V dz = load(DZ, xi);   // read before overwriting DX (aliases DZ)
    store(DX, ew_mul(dz, y), xi);
    store(DY, ew_mul(dz, x), yi);
}
// Op selector values passed to SparseOp (must match the host-side enum).
#define OP_GAT 0
#define OP_SCT 1
#define OP_ADD 2
#define OP_MUL 3
// Dispatch one sparse projection op.  Selects the widest vector width the
// dtype and N allow (8-wide for 16-bit types when N%8==0, otherwise 4-wide
// when N%4==0) and launches with N rescaled to vector units.
// NOTE(review): when N is not a multiple of 4, no kernel is launched but
// true is still returned — confirm callers guarantee N % 4 == 0.
template <typename T, typename V4, typename V8>
bool SparseOp(CUstream stream,
    T* z,
    const T* x,
    const T* y,
    const int* lut,
    int op, int K, int N)
{
    int gridN = (N >> 6) + ((N & 63) != 0);  // ceil(N / 64) column blocks
    if (sizeof(T) == 2 && (N & 7) == 0)
    {
        V8* Z = ( V8*)z;
        const V8* X = (const V8*)x;
        const V8* Y = (const V8*)y;
        // blockK = 128 / 8 = 16
        int gridK = (K >> 4) + ((K & 15) != 0);
        dim3 grid(gridK, gridN, 1);
        switch(op)
        {
            // GAT and SCT share the same kernel; the Lut encodes direction.
            case OP_GAT: gather_scatter<V8,float8,3><<<grid,128,0,stream>>>(Z, X, lut, K, N>>3); break;
            case OP_SCT: gather_scatter<V8,float8,3><<<grid,128,0,stream>>>(Z, X, lut, K, N>>3); break;
            case OP_ADD: scatter_add<V8,float8,3><<<grid,128,0,stream>>>(Z, X, Y, lut, K, N>>3); break;
            case OP_MUL: scatter_mul<V8,float8,3><<<grid,128,0,stream>>>(Z, X, Y, lut, K, N>>3); break;
        }
    }
    else if ((N & 3) == 0)
    {
        V4* Z = ( V4*)z;
        const V4* X = (const V4*)x;
        const V4* Y = (const V4*)y;
        // blockK = 128 / 16 = 8
        int gridK = (K >> 3) + ((K & 7) != 0);
        dim3 grid(gridK, gridN, 1);
        switch(op)
        {
            case OP_GAT: gather_scatter<V4,float4,4><<<grid,128,0,stream>>>(Z, X, lut, K, N>>2); break;
            case OP_SCT: gather_scatter<V4,float4,4><<<grid,128,0,stream>>>(Z, X, lut, K, N>>2); break;
            case OP_ADD: scatter_add<V4,float4,4><<<grid,128,0,stream>>>(Z, X, Y, lut, K, N>>2); break;
            case OP_MUL: scatter_mul<V4,float4,4><<<grid,128,0,stream>>>(Z, X, Y, lut, K, N>>2); break;
        }
    }
    return true; // TODO
}
// Launch the scatter_mul gradient kernel with the widest vector width the
// dtype and N allow (see SparseOp for the dispatch rules).  dz may alias dx.
// NOTE(review): N % 4 != 0 silently launches nothing — confirm callers
// guarantee alignment.
template <typename T, typename V4, typename V8>
bool SparseMulGrad(CUstream stream,
    T* dx,
    T* dy,
    const T* dz,
    const T* x,
    const T* y,
    const int* lut,
    int K, int N)
{
    int gridN = (N >> 6) + ((N & 63) != 0);  // ceil(N / 64) column blocks
    if (sizeof(T) == 2 && (N & 7) == 0)
    {
        V8* DX = ( V8*)dx;
        V8* DY = ( V8*)dy;
        const V8* DZ = (const V8*)dz;
        const V8* X = (const V8*)x;
        const V8* Y = (const V8*)y;
        // blockK = 128 / 8 = 16
        int gridK = (K >> 4) + ((K & 15) != 0);
        dim3 grid(gridK, gridN, 1);
        sparse_mul_grad<V8,float8,3><<<grid,128,0,stream>>>(DX, DY, DZ, X, Y, lut, K, N>>3);
    }
    else if ((N & 3) == 0)
    {
        V4* DX = ( V4*)dx;
        V4* DY = ( V4*)dy;
        const V4* DZ = (const V4*)dz;
        const V4* X = (const V4*)x;
        const V4* Y = (const V4*)y;
        // blockK = 128 / 16 = 8
        int gridK = (K >> 3) + ((K & 7) != 0);
        dim3 grid(gridK, gridN, 1);
        sparse_mul_grad<V4,float4,4><<<grid,128,0,stream>>>(DX, DY, DZ, X, Y, lut, K, N>>2);
    }
    return true; // TODO
}
// Explicit instantiations for the supported dtypes (fp32, fp16, bf16).
template bool SparseOp<float,float4,float8>(CUstream stream, float* z, const float* x, const float* y, const int* lut, int op, int K, int N);
template bool SparseOp<ehalf,ehalf4,ehalf8>(CUstream stream, ehalf* z, const ehalf* x, const ehalf* y, const int* lut, int op, int K, int N);
template bool SparseOp<bhalf,bhalf4,bhalf8>(CUstream stream, bhalf* z, const bhalf* x, const bhalf* y, const int* lut, int op, int K, int N);
template bool SparseMulGrad<float,float4,float8>(CUstream stream, float* dx, float* dy, const float* dz, const float* x, const float* y, const int* lut, int K, int N);
template bool SparseMulGrad<ehalf,ehalf4,ehalf8>(CUstream stream, ehalf* dx, ehalf* dy, const ehalf* dz, const ehalf* x, const ehalf* y, const int* lut, int K, int N);
template bool SparseMulGrad<bhalf,bhalf4,bhalf8>(CUstream stream, bhalf* dx, bhalf* dy, const bhalf* dz, const bhalf* x, const bhalf* y, const int* lut, int K, int N);
#endif // GOOGLE_CUDA
// cuobjdump -xelf blocksparse_ops.5.sm_60.cubin blocksparse_ops.so
// cuobjdump -xelf blocksparse_ops.6.sm_61.cubin blocksparse_ops.so
// nvdisasm -c -raw blocksparse_ops.5.sm_60.cubin > blocksparse_ops.5.sm_60.sass
// nvdisasm -c -raw blocksparse_ops.6.sm_61.cubin > blocksparse_ops.6.sm_61.sass
|
4e27c3da50c0e412454bc60df3797e684538ee1f.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file quantized_pooling.cu
*/
#include <mxnet/operator_util.h>
#include <vector>
#include "../nn/pooling-inl.h"
#include "../mshadow_op.h"
namespace mxnet {
namespace op {
#if MXNET_USE_CUDNN == 1 && TORCH_HIP_VERSION >= 8000
STATIC_ASSERT_CUDNN_VERSION_GE(6000);
// cuDNN-backed 2D pooling over quantized NCHW tensors.  Descriptors are
// created once per instance and reconfigured by Init() for each shape.
template <typename DType>
class QuantizedCuDNNPoolingOp {
 public:
  QuantizedCuDNNPoolingOp() {
    CUDNN_CALL(cudnnCreatePoolingDescriptor(&pool_desc_));
    CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc_));
    CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc_));
  }
  // Configure pooling and tensor descriptors for the given input (dshape)
  // and output (oshape) shapes.  Only 2D max/avg pooling is supported;
  // global pooling uses the full spatial extent as the window with stride 1.
  void Init(const PoolingParam& param, const mxnet::TShape& dshape, const mxnet::TShape& oshape) {
    const int N = 0, H = 2, W = 3, C = 1;  // NCHW axis indices
    const cudnnDataType_t dtype = mshadow::DataType<DType>::kCudnnFlag;
    CHECK(param.kernel.ndim() == 2) << "Only support 2D pooling";
    if (param.pool_type == pool_enum::kMaxPooling) {
      mode_ = CUDNN_POOLING_MAX;
    } else if (param.pool_type == pool_enum::kAvgPooling) {
      mode_ = CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING;
    } else {
      LOG(FATAL) << "QuantizedCuDNNPoolingOp only supports pool_type=max/avg";
    }
    CUDNN_CALL(cudnnSetTensor4dDescriptor(
        in_desc_, CUDNN_TENSOR_NCHW, dtype, dshape[N], dshape[C], dshape[H], dshape[W]));
    CUDNN_CALL(cudnnSetTensor4dDescriptor(
        out_desc_, CUDNN_TENSOR_NCHW, dtype, oshape[N], oshape[C], oshape[H], oshape[W]));
    CUDNN_CALL(cudnnSetPooling2dDescriptor(pool_desc_,
                                           mode_,
                                           CUDNN_NOT_PROPAGATE_NAN,
                                           param.global_pool ? dshape[2] : param.kernel[0],
                                           param.global_pool ? dshape[3] : param.kernel[1],
                                           param.pad[0],
                                           param.pad[1],
                                           param.global_pool ? 1 : param.stride[0],
                                           param.global_pool ? 1 : param.stride[1]));
  }
  ~QuantizedCuDNNPoolingOp() {
    CUDNN_CALL(cudnnDestroyTensorDescriptor(in_desc_));
    CUDNN_CALL(cudnnDestroyTensorDescriptor(out_desc_));
    CUDNN_CALL(cudnnDestroyPoolingDescriptor(pool_desc_));
  }
  // inputs  = {data, min_range, max_range}; outputs = {out, min, max}.
  // The quantization range is copied through unchanged — presumably because
  // max/avg pooling never produces values outside the input range
  // (NOTE(review): confirm for avg pooling with include-padding).
  void Forward(mshadow::Stream<gpu>* s,
               const std::vector<TBlob>& inputs,
               const std::vector<OpReqType>& req,
               const std::vector<TBlob>& outputs) {
    CHECK_EQ(inputs.size(), 3U);
    CHECK_EQ(outputs.size(), 3U);
    using namespace mshadow;
    using namespace mshadow::expr;
    CHECK_EQ(s->dnn_handle_ownership_, mshadow::Stream<gpu>::OwnHandle);
    float alpha = 1.0f;
    float beta = 0.0f;
    CUDNN_CALL(cudnnPoolingForward(s->dnn_handle_,
                                   pool_desc_,
                                   &alpha,
                                   in_desc_,
                                   inputs[0].dptr_,
                                   &beta,
                                   out_desc_,
                                   outputs[0].dptr_));
    Tensor<gpu, 1, float> omin_range = outputs[1].FlatTo1D<gpu, float>(s);
    Tensor<gpu, 1, float> omax_range = outputs[2].FlatTo1D<gpu, float>(s);
    ASSIGN_DISPATCH(omin_range, req[1], F<mshadow_op::identity>(inputs[1].FlatTo1D<gpu, float>(s)));
    ASSIGN_DISPATCH(omax_range, req[2], F<mshadow_op::identity>(inputs[2].FlatTo1D<gpu, float>(s)));
  }

 private:
  cudnnPoolingMode_t mode_;            // max or avg (include-padding)
  cudnnTensorDescriptor_t in_desc_;    // input NCHW descriptor
  cudnnTensorDescriptor_t out_desc_;   // output NCHW descriptor
  cudnnPoolingDescriptor_t pool_desc_; // window/stride/padding descriptor
};  // class QuantizedCuDNNPoolingOp
#endif // MXNET_USE_CUDNN == 1 && TORCH_HIP_VERSION >= 8000
// GPU FCompute for _contrib_quantized_pooling.  Re-initializes a
// thread-local int8 cuDNN pooling op per call (descriptor setup only) and
// runs the forward pass; fatal when built without cuDNN >= 6 / CUDA >= 8.
void QuantizedPoolingForwardGPU(const nnvm::NodeAttrs& attrs,
                                const OpContext& ctx,
                                const std::vector<TBlob>& inputs,
                                const std::vector<OpReqType>& req,
                                const std::vector<TBlob>& outputs) {
  const PoolingParam& param = nnvm::get<PoolingParam>(attrs.parsed);
  CHECK_EQ(param.kernel.ndim(), 2U)
      << "QuantizedPoolingForward<gpu> only supports 2D convolution for now";
#if MXNET_USE_CUDNN == 1 && TORCH_HIP_VERSION >= 8000
#if DMLC_CXX11_THREAD_LOCAL
  static thread_local QuantizedCuDNNPoolingOp<int8_t> op;
#else
  static MX_THREAD_LOCAL QuantizedCuDNNPoolingOp<int8_t> op;
#endif  // DMLC_CXX11_THREAD_LOCAL
  op.Init(param, {inputs[0].shape_}, {outputs[0].shape_});
  op.Forward(ctx.get_stream<gpu>(), inputs, req, outputs);
#else
  LOG(FATAL) << "QuantizedPoolingForward<gpu> only supports cudnnPoolingForward "
                "with CUDNN >= 6.0 and CUDA >= 8.0";
#endif  // MXNET_USE_CUDNN == 1 && TORCH_HIP_VERSION >= 8000
}
NNVM_REGISTER_OP(_contrib_quantized_pooling)
.set_attr<FCompute>("FCompute<gpu>", QuantizedPoolingForwardGPU);
} // namespace op
} // namespace mxnet
| 4e27c3da50c0e412454bc60df3797e684538ee1f.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file quantized_pooling.cu
*/
#include <mxnet/operator_util.h>
#include <vector>
#include "../nn/pooling-inl.h"
#include "../mshadow_op.h"
namespace mxnet {
namespace op {
#if MXNET_USE_CUDNN == 1 && CUDA_VERSION >= 8000
STATIC_ASSERT_CUDNN_VERSION_GE(6000);
template <typename DType>
class QuantizedCuDNNPoolingOp {
public:
QuantizedCuDNNPoolingOp() {
CUDNN_CALL(cudnnCreatePoolingDescriptor(&pool_desc_));
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc_));
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc_));
}
void Init(const PoolingParam& param, const mxnet::TShape& dshape, const mxnet::TShape& oshape) {
const int N = 0, H = 2, W = 3, C = 1;
const cudnnDataType_t dtype = mshadow::DataType<DType>::kCudnnFlag;
CHECK(param.kernel.ndim() == 2) << "Only support 2D pooling";
if (param.pool_type == pool_enum::kMaxPooling) {
mode_ = CUDNN_POOLING_MAX;
} else if (param.pool_type == pool_enum::kAvgPooling) {
mode_ = CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING;
} else {
LOG(FATAL) << "QuantizedCuDNNPoolingOp only supports pool_type=max/avg";
}
CUDNN_CALL(cudnnSetTensor4dDescriptor(
in_desc_, CUDNN_TENSOR_NCHW, dtype, dshape[N], dshape[C], dshape[H], dshape[W]));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc_, CUDNN_TENSOR_NCHW, dtype, oshape[N], oshape[C], oshape[H], oshape[W]));
CUDNN_CALL(cudnnSetPooling2dDescriptor(pool_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
param.global_pool ? dshape[2] : param.kernel[0],
param.global_pool ? dshape[3] : param.kernel[1],
param.pad[0],
param.pad[1],
param.global_pool ? 1 : param.stride[0],
param.global_pool ? 1 : param.stride[1]));
}
~QuantizedCuDNNPoolingOp() {
CUDNN_CALL(cudnnDestroyTensorDescriptor(in_desc_));
CUDNN_CALL(cudnnDestroyTensorDescriptor(out_desc_));
CUDNN_CALL(cudnnDestroyPoolingDescriptor(pool_desc_));
}
void Forward(mshadow::Stream<gpu>* s,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
CHECK_EQ(inputs.size(), 3U);
CHECK_EQ(outputs.size(), 3U);
using namespace mshadow;
using namespace mshadow::expr;
CHECK_EQ(s->dnn_handle_ownership_, mshadow::Stream<gpu>::OwnHandle);
float alpha = 1.0f;
float beta = 0.0f;
CUDNN_CALL(cudnnPoolingForward(s->dnn_handle_,
pool_desc_,
&alpha,
in_desc_,
inputs[0].dptr_,
&beta,
out_desc_,
outputs[0].dptr_));
Tensor<gpu, 1, float> omin_range = outputs[1].FlatTo1D<gpu, float>(s);
Tensor<gpu, 1, float> omax_range = outputs[2].FlatTo1D<gpu, float>(s);
ASSIGN_DISPATCH(omin_range, req[1], F<mshadow_op::identity>(inputs[1].FlatTo1D<gpu, float>(s)));
ASSIGN_DISPATCH(omax_range, req[2], F<mshadow_op::identity>(inputs[2].FlatTo1D<gpu, float>(s)));
}
private:
cudnnPoolingMode_t mode_;
cudnnTensorDescriptor_t in_desc_;
cudnnTensorDescriptor_t out_desc_;
cudnnPoolingDescriptor_t pool_desc_;
}; // class QuantizedCuDNNPoolingOp
#endif // MXNET_USE_CUDNN == 1 && CUDA_VERSION >= 8000
void QuantizedPoolingForwardGPU(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
const PoolingParam& param = nnvm::get<PoolingParam>(attrs.parsed);
CHECK_EQ(param.kernel.ndim(), 2U)
<< "QuantizedPoolingForward<gpu> only supports 2D convolution for now";
#if MXNET_USE_CUDNN == 1 && CUDA_VERSION >= 8000
#if DMLC_CXX11_THREAD_LOCAL
static thread_local QuantizedCuDNNPoolingOp<int8_t> op;
#else
static MX_THREAD_LOCAL QuantizedCuDNNPoolingOp<int8_t> op;
#endif // DMLC_CXX11_THREAD_LOCAL
op.Init(param, {inputs[0].shape_}, {outputs[0].shape_});
op.Forward(ctx.get_stream<gpu>(), inputs, req, outputs);
#else
LOG(FATAL) << "QuantizedPoolingForward<gpu> only supports cudnnPoolingForward "
"with CUDNN >= 6.0 and CUDA >= 8.0";
#endif // MXNET_USE_CUDNN == 1 && CUDA_VERSION >= 8000
}
NNVM_REGISTER_OP(_contrib_quantized_pooling)
.set_attr<FCompute>("FCompute<gpu>", QuantizedPoolingForwardGPU);
} // namespace op
} // namespace mxnet
|
6952e5c792116f00c03b9889be8d75f30da5072a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
#include "../include/ContAcq-IntClk.h"
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define ITERATIONS 40
// Variables
unsigned* h_A;
unsigned* h_B;
unsigned* h_C;
unsigned* d_A;
unsigned* d_B;
unsigned* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(unsigned*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
__global__ void PowerKernal2(const unsigned* A, const unsigned* B, unsigned* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
unsigned Value1=0;
unsigned Value2=0;
unsigned Value3=0;
unsigned Value=0;
unsigned I1=A[i];
unsigned I2=B[i];
// Excessive Addition access
if( ((i%32)<=23) ){
for(unsigned k=0; k<ITERATIONS;k++) {
Value2= I1+I2;
Value3=I1-I2;
Value1-=Value2;
Value3+=Value1;
Value2-=Value3;
Value1+=Value3;
// Value2= I1+I2;
// Value3=I1-I2;
// Value1=I1-Value2;
// Value3+=Value1;
// Value2-=Value3;
// Value1+=Value3;
}
}
__syncthreads();
Value=Value1;
C[i]=Value;
__syncthreads();
}
int main()
{
printf("Power Microbenchmarks\n");
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(unsigned);
// Allocate input vectors h_A and h_B in host memory
h_A = (unsigned*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (unsigned*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (unsigned*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
checkCudaErrors( hipMalloc((void**)&d_A, size) );
checkCudaErrors( hipMalloc((void**)&d_B, size) );
checkCudaErrors( hipMalloc((void**)&d_C, size) );
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
hipLaunchKernelGGL((
PowerKernal2), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B, d_C, N);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( hipDeviceSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( hipDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
hipFree(d_A);
if (d_B)
hipFree(d_B);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random float entries.
void RandomInit(unsigned* data, int n)
{
for (int i = 0; i < n; ++i){
srand((unsigned)time(0));
data[i] = rand() / RAND_MAX;
}
}
| 6952e5c792116f00c03b9889be8d75f30da5072a.cu | #include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
#include "../include/ContAcq-IntClk.h"
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define ITERATIONS 40
// Variables
unsigned* h_A;
unsigned* h_B;
unsigned* h_C;
unsigned* d_A;
unsigned* d_B;
unsigned* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(unsigned*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
__global__ void PowerKernal2(const unsigned* A, const unsigned* B, unsigned* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
unsigned Value1=0;
unsigned Value2=0;
unsigned Value3=0;
unsigned Value=0;
unsigned I1=A[i];
unsigned I2=B[i];
// Excessive Addition access
if( ((i%32)<=23) ){
for(unsigned k=0; k<ITERATIONS;k++) {
Value2= I1+I2;
Value3=I1-I2;
Value1-=Value2;
Value3+=Value1;
Value2-=Value3;
Value1+=Value3;
// Value2= I1+I2;
// Value3=I1-I2;
// Value1=I1-Value2;
// Value3+=Value1;
// Value2-=Value3;
// Value1+=Value3;
}
}
__syncthreads();
Value=Value1;
C[i]=Value;
__syncthreads();
}
int main()
{
printf("Power Microbenchmarks\n");
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(unsigned);
// Allocate input vectors h_A and h_B in host memory
h_A = (unsigned*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (unsigned*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (unsigned*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
checkCudaErrors( cudaMalloc((void**)&d_A, size) );
checkCudaErrors( cudaMalloc((void**)&d_B, size) );
checkCudaErrors( cudaMalloc((void**)&d_C, size) );
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( cudaThreadSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( cudaDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
cudaFree(d_A);
if (d_B)
cudaFree(d_B);
if (d_C)
cudaFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random float entries.
void RandomInit(unsigned* data, int n)
{
for (int i = 0; i < n; ++i){
srand((unsigned)time(0));
data[i] = rand() / RAND_MAX;
}
}
|
d73df50994aeb5bcb705d2dac2b26b0281484997.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2018 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <thrust/pair.h>
#include <thrust/tuple.h>
#include <algorithm>
#include <limits>
#include <sstream>
#include <tuple>
#include <type_traits>
#include "open3d/core/CUDAState.cuh"
#include "open3d/core/CUDAUtils.h"
#include "open3d/core/Device.h"
#include "open3d/core/Dispatch.h"
#include "open3d/core/FuncionTraits.h"
#include "open3d/core/Indexer.h"
#include "open3d/core/MemoryManager.h"
#include "open3d/core/SizeVector.h"
#include "open3d/core/Tensor.h"
#include "open3d/core/kernel/CUDALauncher.cuh"
#include "open3d/core/kernel/Reduction.h"
#include "open3d/utility/Console.h"
// CUDA reduction is based on PyTorch's CUDA reduction implementation.
// See: aten/src/ATen/native/cuda/Reduce.cuh
#if __CUDA_ARCH__ >= 750
constexpr uint32_t CUDA_MAX_THREADS_PER_SM = 1024;
#else
constexpr uint32_t CUDA_MAX_THREADS_PER_SM = 2048;
#endif
constexpr uint32_t CUDA_MAX_THREADS_PER_BLOCK = 1024;
constexpr uint32_t CUDA_THREADS_PER_BLOCK_FALLBACK = 256;
#define OPEN3D_MAX_THREADS_PER_BLOCK(val) \
(((val) <= CUDA_MAX_THREADS_PER_BLOCK) ? (val) \
: CUDA_THREADS_PER_BLOCK_FALLBACK)
#define OPEN3D_MIN_BLOCKS_PER_SM(threads_per_block, blocks_per_sm) \
((((threads_per_block) * (blocks_per_sm) <= CUDA_MAX_THREADS_PER_SM) \
? (blocks_per_sm) \
: ((CUDA_MAX_THREADS_PER_SM + (threads_per_block)-1) / \
(threads_per_block))))
#define OPEN3D_LAUNCH_BOUNDS_2(max_threads_per_block, min_blocks_per_sm) \
__launch_bounds__((OPEN3D_MAX_THREADS_PER_BLOCK((max_threads_per_block))), \
(OPEN3D_MIN_BLOCKS_PER_SM((max_threads_per_block), \
(min_blocks_per_sm))))
template <typename T>
OPEN3D_DEVICE __forceinline__ T WARP_SHFL_DOWN(T value,
unsigned int delta,
int width = warpSize,
unsigned int mask = 0xffffffff) {
#if TORCH_HIP_VERSION >= 9000
return __shfl_down_sync(mask, value, delta, width);
#else
return __shfl_down(value, delta, width);
#endif
}
namespace open3d {
namespace core {
namespace kernel {
static inline int64_t DivUp(int64_t a, int64_t b) { return (a + b - 1) / b; }
// Returns reduced fraction numerator & denominator
OPEN3D_HOST_DEVICE static void ReduceFraction(int64_t& numerator,
int64_t& denominator) {
// Get GCD of num and denom using Euclid's algorithm.
// Can replace this with std::gcd if we ever support c++17.
int64_t a = denominator;
int64_t b = numerator;
while (b != 0) {
a %= b;
int64_t tmp = a;
a = b;
b = tmp;
}
// a is now the GCD
numerator /= a;
denominator /= a;
}
class ReduceConfig {
public:
static constexpr int BLOCK_X = 0;
static constexpr int BLOCK_Y = 1;
static constexpr int CTA = 2;
static constexpr int MAX_NUM_THREADS = 512;
int num_inputs_per_output_;
int num_outputs_;
int step_input_ = 1;
int step_output_ = 1;
int ctas_per_output_ = 1;
private:
int element_size_bytes_;
int input_mult_[3] = {0, 0, 0};
int output_mult_[2] = {0, 0};
int block_width_;
int block_height_;
int num_threads_;
public:
ReduceConfig(int element_size_bytes, const Indexer& indexer)
: element_size_bytes_(element_size_bytes) {
num_outputs_ = indexer.NumOutputElements();
num_inputs_per_output_ = indexer.NumWorkloads() / num_outputs_;
// Adjust block size to map block width to fastest changing dimension of
// input tensor. This grants the best possible memory accessing pattern,
// given that for non-contiguous tensor with space in between, we cannot
// have perfect memory coalescing.
bool reduction_on_fastest_striding_dimension =
(indexer.NumReductionDims() == indexer.NumDims()) ||
(indexer.GetInput(0).byte_strides_[0] <
indexer.GetInput(0).byte_strides_[indexer.NumReductionDims()]);
// Notice that dim0 & dim1 does NOT guarantee any launch configuration
// here! dim0 & dim1 are more like the upper bound of the block
// dimension. The actual launch config and reduction scheme is
// determined by setting values to `input_mult_` and
// `output_mult_`. We try to max out dim1 so that we have enough
// threads per CTA to deliver performance for larger problem size.
int64_t dim0;
int64_t dim1;
if (reduction_on_fastest_striding_dimension) {
// Map block.x to the fastest reducing dimension. It implies:
// 1. BlockXReduce is required.
// 2. block.y now max out to num_outputs.
dim0 = indexer.GetMasterShape()[0];
dim1 = num_outputs_;
} else {
// Map block.x to the fastest non reducing dimension. It implies:
// 1. BlockXReduce is turned off.
// 2. block.y now max out to num_inputs_per_output_.
dim0 = indexer.GetMasterShape()[indexer.NumReductionDims()];
dim1 = num_inputs_per_output_;
}
// Adjust block_width and block_height
SetBlockDimension(dim0, dim1);
int block_width = block_width_;
int block_height = block_height_;
if (indexer.NumDims() == 0 || reduction_on_fastest_striding_dimension) {
// Split the input across lanes if the input is contiguous in the
// reduced dimension. This will require reduction between threads
// using warp shuffle instructions and shared memory (if
// block_width > warpSize).
input_mult_[0] = SplitInput(block_width);
} else {
// Otherwise split the output across lanes in a warp.
output_mult_[0] = SplitOutput(block_width);
}
if (ValuesPerThread() >= block_height * 16 ||
ValuesPerThread() >= 256) {
// Divide the input across warps in a thread-block, if that leaves
// at least 16 elements to be summed by each thread. This will
// require inter-warp reduction using shared memory.
input_mult_[1] = SplitInput(block_height);
} else {
// Otherwise, each warp handles a separate output.
output_mult_[1] = SplitOutput(block_height);
}
if (input_mult_[1] != 0 && ValuesPerThread() >= 256 &&
num_outputs_ <= 4096) {
// Divide the input across thread-blocks if the amount of work
// per-thread is large enough and the size of the output is small
// enough. This will require a reduction using global memory.
ctas_per_output_ = DivUp(ValuesPerThread(), 16);
if (ctas_per_output_ > 65535) {
ctas_per_output_ = 65535;
}
input_mult_[2] = SplitInput(ctas_per_output_);
}
}
/// Returns floor(log2(n))
static inline int LastPow2(int n) {
// Dtype.h asserts sizeof(int) == 4.
n |= (n >> 1);
n |= (n >> 2);
n |= (n >> 4);
n |= (n >> 8);
n |= (n >> 16);
return ::max(1, n - (n >> 1));
}
void SetBlockDimension(int64_t dim0, int64_t dim1) {
int dim0_pow2 = dim0 < MAX_NUM_THREADS
? static_cast<int>(LastPow2(dim0))
: MAX_NUM_THREADS;
int dim1_pow2 = dim1 < MAX_NUM_THREADS
? static_cast<int>(LastPow2(dim1))
: MAX_NUM_THREADS;
block_width_ =
::min(dim0_pow2, CUDAState::GetInstance()->GetWarpSize());
block_height_ =
::min(dim1_pow2, int(MAX_NUM_THREADS / block_width_));
block_width_ =
::min(dim0_pow2, int(MAX_NUM_THREADS / block_height_));
num_threads_ = block_width_ * block_height_;
}
int SplitInput(int parallelism) {
int step = step_input_;
step_input_ *= parallelism;
return step;
}
int SplitOutput(int parallelism) {
int step = step_output_;
step_output_ *= parallelism;
return step;
}
dim3 BlockDim() const { return dim3(block_width_, block_height_); }
dim3 GridDim() const {
return dim3(DivUp(num_outputs_, step_output_), ctas_per_output_);
}
OPEN3D_HOST_DEVICE bool ShouldBlockXReduce() const {
return input_mult_[BLOCK_X] != 0;
}
OPEN3D_HOST_DEVICE bool ShouldBlockYReduce() const {
return input_mult_[BLOCK_Y] != 0;
}
OPEN3D_HOST_DEVICE bool ShouldGlobalReduce() const {
return input_mult_[CTA] != 0;
}
OPEN3D_DEVICE bool ShouldStore(int output_idx) const {
return output_idx < num_outputs_ &&
(!ShouldBlockXReduce() || threadIdx.x == 0) &&
(!ShouldBlockYReduce() || threadIdx.y == 0);
}
OPEN3D_HOST_DEVICE int InputIdx() const {
int lane = threadIdx.x;
int warp = threadIdx.y;
int cta2 = blockIdx.y;
return (lane * input_mult_[BLOCK_X] + warp * input_mult_[BLOCK_Y] +
cta2 * input_mult_[CTA]);
}
OPEN3D_HOST_DEVICE int OutputIdx() const {
int lane = threadIdx.x;
int warp = threadIdx.y;
int cta1 = blockIdx.x;
return (lane * output_mult_[BLOCK_X] + warp * output_mult_[BLOCK_Y] +
cta1 * step_output_);
}
OPEN3D_DEVICE int SharedMemoryOffset(int offset) const {
return threadIdx.x + (threadIdx.y + offset) * blockDim.x;
}
OPEN3D_DEVICE int StagingMemoryOffset(int cta2) const {
int offset = cta2 + blockIdx.x * gridDim.y;
if (!ShouldBlockXReduce()) {
offset = threadIdx.x + offset * blockDim.x;
}
return offset;
}
int SharedMemorySize() const {
if (!ShouldBlockYReduce() &&
(!ShouldBlockXReduce() ||
block_width_ <= CUDAState::GetInstance()->GetWarpSize())) {
return 0;
}
return element_size_bytes_ * num_threads_;
}
int64_t GlobalMemorySize() const {
if (!ShouldGlobalReduce()) {
return 0;
}
auto size =
(int64_t)element_size_bytes_ * num_outputs_ * ctas_per_output_;
if (!ShouldBlockXReduce()) {
size *= BlockDim().x;
}
return size;
}
int SemaphoreSize() const {
if (!ShouldGlobalReduce()) {
return 0;
}
return sizeof(int) * GridDim().x;
}
int ValuesPerThread() const {
return DivUp(num_inputs_per_output_, step_input_);
}
std::string ToString() const {
std::string input_mult_str = fmt::format(
"[{},{},{}]", input_mult_[0], input_mult_[1], input_mult_[2]);
std::string output_mult_str =
fmt::format("[{},{}]", output_mult_[0], output_mult_[1]);
std::string block_str = fmt::format("[{},{},{}]", BlockDim().x,
BlockDim().y, BlockDim().z);
std::string grid_str = fmt::format("[{},{},{}]", GridDim().x,
GridDim().y, GridDim().z);
std::string str = fmt::format(
"REDUCEConfig(element_size_bytes_={}, "
"num_inputs_per_output_={}, num_outputs_={}, "
"step_input_={}, step_output_={}, ctas_per_output_={}, "
"input_mult_={}, output_mult_={}, values_per_thread={}, "
"block={}, grid={}, global_memory_size={})",
element_size_bytes_, num_inputs_per_output_, num_outputs_,
step_input_, step_output_, ctas_per_output_, input_mult_str,
output_mult_str, ValuesPerThread(), block_str, grid_str,
GlobalMemorySize());
return str;
}
};
template <int nt, typename R>
OPEN3D_LAUNCH_BOUNDS_2(nt, 4)
__global__ void ReduceKernel(R reduction) {
reduction.Run();
}
template <typename index_t>
static OffsetCalculator<2, index_t> MakeOutputCalculator(
const Indexer& indexer) {
int num_reduction_dims = indexer.NumReductionDims();
int num_output_dims = indexer.NumDims() - num_reduction_dims;
std::array<const int64_t*, 2> strides = {
indexer.GetOutput().byte_strides_ + num_reduction_dims,
indexer.GetInput(0).byte_strides_ + num_reduction_dims,
};
const int64_t* shape = indexer.GetMasterShape() + num_reduction_dims;
return OffsetCalculator<2, index_t>(num_output_dims, shape, strides.data());
}
template <typename index_t>
static OffsetCalculator<1, index_t> MakeInputCalculator(
const Indexer& indexer) {
int num_reduction_dims = indexer.NumReductionDims();
std::array<const int64_t*, 1> strides = {
indexer.GetInput(0).byte_strides_,
};
return OffsetCalculator<1, index_t>(
num_reduction_dims, indexer.GetMasterShape(), strides.data());
}
template <int vt, typename index_t, typename func_t>
OPEN3D_DEVICE void StridedIterate(func_t f,
index_t begin,
index_t end,
index_t stride) {
if (begin + (vt - 1) * stride < end) {
#pragma unroll
for (index_t i = 0; i < vt; i++) {
f(i, begin + i * stride);
}
} else {
#pragma unroll
for (index_t i = 0; i < vt; i++) {
index_t idx = begin + i * stride;
if (idx < end) {
f(i, idx);
}
}
}
}
/// Combime() and Reduce() are the same for regular reduction ops.
template <typename out_scalar_t, typename func_t>
class RegularReduceOps {
using arg_t = typename BinaryFunctionTraits<func_t>::arg0_t;
using scalar_t = typename BinaryFunctionTraits<func_t>::arg1_t;
public:
RegularReduceOps(const func_t& op) : reduce_func_(op) {}
static inline OPEN3D_DEVICE out_scalar_t Project(arg_t arg) {
return (out_scalar_t)arg;
}
static inline OPEN3D_DEVICE arg_t WarpShflDown(arg_t arg, int offset) {
return WARP_SHFL_DOWN(arg, offset);
}
OPEN3D_DEVICE inline arg_t Combine(arg_t acc, scalar_t val) const {
return reduce_func_(acc, val);
}
/// Idx is ignored for RegularReduceOps.
OPEN3D_DEVICE inline arg_t Reduce(arg_t acc,
scalar_t val,
int64_t idx) const {
return reduce_func_(acc, val);
}
private:
func_t reduce_func_ = nullptr;
};
template <typename scalar_t, typename func_t>
RegularReduceOps<scalar_t, func_t> WrapRegularReduceOps(const func_t& op) {
return RegularReduceOps<scalar_t, func_t>{op};
}
template <typename func_t>
class ArgReduceOps {
using scalar_t = typename BinaryFunctionTraits<func_t>::arg1_t;
using index_t = int64_t;
using arg_t = thrust::pair<scalar_t, index_t>;
public:
ArgReduceOps(const func_t comp_func) : comp_func_(comp_func) {}
static OPEN3D_DEVICE index_t Project(arg_t arg) { return arg.second; }
static OPEN3D_DEVICE arg_t WarpShflDown(arg_t arg, int offset) {
return arg_t(WARP_SHFL_DOWN(arg.first, offset),
WARP_SHFL_DOWN(arg.second, offset));
}
/// Combine(pair<val_t, idx_t>, pair<val_t, idx_t>) -> pair<val_t, idx_t>.
/// Called at subsequent rounds of reduction, when values are already
/// associated with indices.
OPEN3D_DEVICE inline arg_t Combine(arg_t a, arg_t b) const {
return comp_func_(a.first, b.first) ? a : b;
}
/// Reduce(pair<val_t, idx_t>, val_t, idx_t) -> pair<val_t, idx_t>.
/// Called at the first round of reduction, when values are not yet
/// associated with indices.
OPEN3D_DEVICE inline arg_t Reduce(arg_t arg,
scalar_t val,
int64_t idx) const {
return comp_func_(arg.first, val) ? arg : arg_t(val, idx);
}
private:
func_t comp_func_ = nullptr;
};
template <typename func_t>
ArgReduceOps<func_t> WrapArgReduceOps(const func_t& comp_func) {
return ArgReduceOps<func_t>{comp_func};
}
template <typename scalar_t,
typename ops_t,
typename index_t,
typename out_scalar_t = scalar_t,
int vt0 = 4>
class ReduceOp {
using traits = FunctionTraits<decltype(&ops_t::Reduce)>;
using arg_t =
typename std::decay<typename traits::template arg<0>::type>::type;
using InputCalculator = OffsetCalculator<1, index_t>;
using OutputCalculator = OffsetCalculator<2, index_t>;
public:
ReduceOp(ops_t ops,
ReduceConfig config,
InputCalculator input_calc,
OutputCalculator output_calc,
const void* src,
char* dst,
void* acc_buf,
void* cta_buf,
int* semaphores,
arg_t identity,
bool accumulate,
bool final_output)
: ops_(ops),
config_(config),
input_calc_(input_calc),
output_calc_(output_calc),
src_(src),
dst_(dst),
acc_buf_(acc_buf),
cta_buf_(cta_buf),
semaphores_(semaphores),
identity_(identity),
accumulate_(accumulate),
final_output_(final_output) {}
OPEN3D_DEVICE void Run() const {
extern __shared__ char shared_memory[];
index_t output_idx = config_.OutputIdx();
index_t input_idx = config_.InputIdx();
auto base_offsets = output_calc_.get(output_idx);
arg_t value = identity_;
if (output_idx < config_.num_outputs_ &&
input_idx < config_.num_inputs_per_output_) {
auto input_slice = (const char*)src_ + base_offsets[1];
value = ThreadReduce((const scalar_t*)input_slice);
}
if (config_.ShouldBlockYReduce()) {
value = BlockYReduce(value, shared_memory);
}
if (config_.ShouldBlockXReduce()) {
value = BlockXReduce(value, shared_memory);
}
auto out = (out_scalar_t*)((char*)dst_ + base_offsets[0]);
arg_t* acc = nullptr;
if (acc_buf_ != nullptr) {
int64_t numerator = (int64_t)sizeof(arg_t);
int64_t denominator = (int64_t)sizeof(out_scalar_t);
ReduceFraction(numerator, denominator);
acc = (arg_t*)((char*)acc_buf_ +
(base_offsets[0] * numerator / denominator));
}
if (config_.ShouldGlobalReduce()) {
value = GlobalReduce(value, acc, shared_memory);
} else if (config_.ShouldStore(output_idx)) {
if (acc == nullptr) {
if (accumulate_) {
value = AccumulateInOutput<can_accumulate_in_output>(out,
value);
}
if (final_output_) {
SetResultsToOutput(value, base_offsets[0]);
} else {
*out = GetAccumulatedOutput<can_accumulate_in_output>(
out, value);
}
} else {
if (accumulate_) {
value = ops_.Combine(*acc, value);
}
if (final_output_) {
SetResultsToOutput(value, base_offsets[0]);
} else {
*acc = value;
}
}
}
}
// Per-thread serial reduction over this thread's strided slice of the inputs
// belonging to one output element. Uses vt0 independent accumulators so the
// unrolled iterations carry no loop dependency (better instruction-level
// parallelism).
OPEN3D_DEVICE arg_t ThreadReduce(const scalar_t* data) const {
    index_t idx = config_.InputIdx();
    // Multiple accumulators to remove dependency between unrolled loops.
    arg_t value_list[vt0];
#pragma unroll
    for (int i = 0; i < vt0; i++) {
        value_list[i] = identity_;
    }
    index_t end = config_.num_inputs_per_output_;
    index_t stride = config_.step_input_;
    // Element stride of the innermost input dim (byte stride / element size);
    // only used on the 1-D fast path below.
    index_t element_stride = input_calc_.strides_[0][0] / sizeof(scalar_t);
    // Reducing layers of function calls so compiler could do proper loop
    // unroll that exposes instruction level parallelism.
    while (idx < config_.num_inputs_per_output_) {
        // Load a tile of vt0 strided input values.
        SmallArray<scalar_t, vt0> values;
        if (input_calc_.dims_ == 1) {
            // Fast path: single reduced dim, direct strided addressing.
            StridedIterate<vt0>(
                    [&](index_t i, index_t idx) {
                        values[i] = data[idx * element_stride];
                    },
                    idx, end, stride);
        } else {
            // General path: full per-element offset calculation.
            StridedIterate<vt0>(
                    [&](index_t i, index_t idx) {
                        values[i] = data[input_calc_.get(idx)[0] /
                                         sizeof(scalar_t)];
                    },
                    idx, end, stride);
        }
        // Fold the loaded tile into the per-slot accumulators.
        StridedIterate<vt0, index_t>(
                [&](index_t i, index_t idx) {
                    value_list[i] =
                            ops_.Reduce(value_list[i], values[i], idx);
                },
                idx, config_.num_inputs_per_output_, config_.step_input_);
        // Advance to the next tile of vt0 strided elements.
        idx += config_.step_input_ * vt0;
    }
    // Combine the vt0 partial accumulators into a single result.
#pragma unroll
    for (int i = 1; i < vt0; i++) {
        value_list[0] = ops_.Combine(value_list[0], value_list[i]);
    }
    return value_list[0];
}
// Reduces `value` across threadIdx.x within the block. If blockDim.x exceeds
// the warp size, a shared-memory tree reduction first folds the partials down
// to one warp's worth; the final warp is reduced with shuffle intrinsics.
// The result is complete on threads with threadIdx.x == 0.
OPEN3D_DEVICE arg_t BlockXReduce(arg_t value, char* shared_memory) const {
    int dim_x = blockDim.x;
    arg_t* shared = (arg_t*)shared_memory;
    if (dim_x > warpSize) {
        int address_base = threadIdx.x + threadIdx.y * blockDim.x;
        shared[address_base] = value;
        // Tree reduction in shared memory until warpSize partials remain.
        for (int offset = dim_x / 2; offset >= warpSize; offset >>= 1) {
            __syncthreads();
            if (threadIdx.x < offset && threadIdx.x + offset < blockDim.x) {
                arg_t other = shared[address_base + offset];
                value = ops_.Combine(value, other);
                shared[address_base] = value;
            }
        }
        dim_x = warpSize;
    }
    __syncthreads();
    // Warp-level reduction via shuffles; lane 0 accumulates all lanes.
    for (int offset = 1; offset < dim_x; offset <<= 1) {
        arg_t other = ops_.WarpShflDown(value, offset);
        value = ops_.Combine(value, other);
    }
    return value;
}
// Reduces `value` across threadIdx.y (i.e. across warps) within the block via
// a shared-memory tree reduction. The result is complete on threads with
// threadIdx.y == 0.
OPEN3D_DEVICE arg_t BlockYReduce(arg_t value, char* shared_memory) const {
    arg_t* shared = (arg_t*)shared_memory;
    shared[config_.SharedMemoryOffset(0)] = value;
    for (int offset = blockDim.y / 2; offset > 0; offset >>= 1) {
        __syncthreads();
        if (threadIdx.y < offset && threadIdx.y + offset < blockDim.y) {
            arg_t other = shared[config_.SharedMemoryOffset(offset)];
            value = ops_.Combine(value, other);
            shared[config_.SharedMemoryOffset(0)] = value;
        }
    }
    return value;
}
// Atomically counts finished blocks for this output column (one semaphore per
// blockIdx.x). Returns true on every thread of the last block to finish,
// which then performs the final cross-block reduction. The leading
// __syncthreads() ensures all of this block's prior work is done before the
// counter is bumped; the trailing one broadcasts the shared flag to all
// threads before it is read.
OPEN3D_DEVICE bool MarkBlockFinished() const {
    __shared__ bool is_last_block_done_shared;
    __syncthreads();
    if (threadIdx.x == 0 && threadIdx.y == 0) {
        int prev_blocks_finished = atomicAdd(&semaphores_[blockIdx.x], 1);
        is_last_block_done_shared = (prev_blocks_finished == gridDim.y - 1);
    }
    __syncthreads();
    return is_last_block_done_shared;
}
// Combines `value` with the partial result already stored in the output
// buffer. Enabled only when arg_t and out_scalar_t are inter-convertible
// (can_acc == true).
template <bool can_acc>
OPEN3D_DEVICE arg_t AccumulateInOutput(
        out_scalar_t* out,
        arg_t value,
        typename std::enable_if<can_acc>::type* = nullptr) const {
    return ops_.Combine(*out, value);
}

// This function should never be called --
// it's the version of `AccumulateInOutput`
// when accumulation in the output is not possible.
template <bool can_acc>
OPEN3D_DEVICE arg_t AccumulateInOutput(
        out_scalar_t*,
        arg_t,
        typename std::enable_if<!can_acc>::type* = nullptr) const {
    assert(false);  // can't use AT_ASSERT in Cuda.
    return arg_t{};
}
// Converts the accumulator to the output scalar type so an intermediate
// (non-final) partial result can be stored in the output buffer. Enabled
// only when the conversion is possible (can_acc == true).
template <bool can_acc>
OPEN3D_DEVICE out_scalar_t GetAccumulatedOutput(
        out_scalar_t* out,
        arg_t value,
        typename std::enable_if<can_acc>::type* = nullptr) const {
    assert(!final_output_);
    return (out_scalar_t)value;
}

// This function should never be called --
// it's the version of `GetAccumulatedOutput`
// when accumulation in the output is not possible.
template <bool can_acc>
OPEN3D_DEVICE out_scalar_t GetAccumulatedOutput(
        out_scalar_t* out,
        arg_t value,
        typename std::enable_if<!can_acc>::type* = nullptr) const {
    assert(false);
    return *out;
}
// Writes a final value `x` into the output buffer at byte offset
// `base_offset` from dst_.
template <class T>
OPEN3D_DEVICE void SetResults(const T x, const index_t base_offset) const {
    auto res = (out_scalar_t*)((char*)dst_ + base_offset);
    *res = x;
}

// Projects the accumulator to the output type (e.g. (value, index) pair ->
// index for arg-reductions) and stores it. Only valid on the final pass.
OPEN3D_DEVICE void SetResultsToOutput(arg_t value,
                                      index_t base_offset) const {
    assert(final_output_);
    SetResults(ops_.Project(value), base_offset);
}
// Cross-block (grid-level) reduction for one output element. Every block
// publishes its partial to the global staging buffer (cta_buf_); the last
// block to finish (per MarkBlockFinished) reads all partials back, reduces
// them within the block, and stores or accumulates the result.
OPEN3D_DEVICE arg_t GlobalReduce(arg_t value,
                                 arg_t* acc,
                                 char* shared_memory) const {
    arg_t* reduce_buffer = (arg_t*)cta_buf_;
    index_t output_idx = config_.OutputIdx();
    auto base_offsets = output_calc_.get(output_idx);
    auto out = (out_scalar_t*)((char*)dst_ + base_offsets[0]);

    // Publish this block's partial result to global staging memory.
    bool should_store = config_.ShouldStore(config_.OutputIdx());
    if (should_store) {
        index_t offset = config_.StagingMemoryOffset(blockIdx.y);
        reduce_buffer[offset] = value;
    }

    __threadfence();  // make sure writes are globally visible
    __syncthreads();  // if multiple warps in this block wrote to staging,
                      // make sure they're all done
    bool is_last_block_done = MarkBlockFinished();

    if (is_last_block_done) {
        // This block aggregates all published partials for this output.
        value = identity_;
        if (config_.ShouldBlockXReduce()) {
            // All threads of the block cooperate over the staging entries.
            index_t input_offset = threadIdx.x + threadIdx.y * blockDim.x;
            index_t step = blockDim.x * blockDim.y;
            for (; input_offset < config_.ctas_per_output_;
                 input_offset += step) {
                index_t idx = config_.StagingMemoryOffset(input_offset);
                arg_t next = reduce_buffer[idx];
                value = ops_.Combine(value, next);
            }
        } else {
            // Each x-lane owns a distinct output; stride along y only.
            index_t input_offset = threadIdx.y;
            index_t step = blockDim.y;
            for (; input_offset < config_.ctas_per_output_;
                 input_offset += step) {
                index_t idx = config_.StagingMemoryOffset(input_offset);
                arg_t next = reduce_buffer[idx];
                value = ops_.Combine(value, next);
            }
        }
        value = BlockYReduce(value, shared_memory);
        if (config_.ShouldBlockXReduce()) {
            value = BlockXReduce(value, shared_memory);
        }
        if (should_store) {
            if (acc == nullptr) {
                // No external accumulation buffer: use the output directly.
                if (accumulate_) {
                    value = AccumulateInOutput<can_accumulate_in_output>(
                            out, value);
                }
                if (final_output_) {
                    SetResultsToOutput(value, base_offsets[0]);
                } else {
                    *out = GetAccumulatedOutput<can_accumulate_in_output>(
                            out, value);
                }
            } else {
                // Accumulate in the dedicated arg_t-typed buffer.
                if (accumulate_) {
                    value = ops_.Combine(*acc, value);
                }
                if (final_output_) {
                    SetResultsToOutput(value, base_offsets[0]);
                } else {
                    *acc = value;
                }
            }
        }
    }
    return value;
}
private:
    // True if arg_t and out_scalar_t are inter-convertible, i.e. partial
    // results may be accumulated directly in the output buffer.
    static constexpr bool can_accumulate_in_output =
            std::is_convertible<arg_t, out_scalar_t>::value &&
            std::is_convertible<out_scalar_t, arg_t>::value;

    // Size ratio between the accumulation type and the output scalar type.
    static constexpr float acc_buffer_multiplier =
            (float)sizeof(arg_t) / sizeof(out_scalar_t);

    ops_t ops_;                     // reduction operator (Reduce/Combine/...)
    ReduceConfig config_;           // launch/tiling configuration
    InputCalculator input_calc_;    // offsets over the reduced dims
    OutputCalculator output_calc_;  // offsets over the non-reduced dims
    const void* src_;               // input data base pointer
    const char* dst_;               // output data base pointer
    // acc_buf_ used for accumulation among sub Tensor Iterator when
    // accumulation on output is not permissible
    void* acc_buf_;
    // cta_buf_ used for accumulation between blocks during global reduction
    void* cta_buf_;
    int* semaphores_;  // per-output-column finished-block counters
    arg_t identity_;   // identity element of the reduction
    bool accumulate_;  // combine with pre-existing partial results
    bool final_output_;  // project and write final values when true
};
/// Buffer used to accumulate partial reduction results across the 32-bit
/// indexed sub-iterations when the accumulation type is wider than the
/// output scalar type and thus cannot be stored in the output buffer.
class AccumulationBuffer {
public:
    AccumulationBuffer() {}

    /// \param acc_t_size sizeof the accumulation type (arg_t).
    /// \param out_t_size sizeof the output scalar type.
    /// \param out_ptr    Base pointer of the output tensor.
    /// \param size       Bytes to allocate when a separate buffer is needed.
    AccumulationBuffer(int64_t acc_t_size,
                       int64_t out_t_size,
                       char* out_ptr,
                       int64_t size) {
        out_ptr_ = (char*)out_ptr;
        if (out_t_size >= acc_t_size) {
            // Accumulation values fit in the output buffer: reuse it.
            acc_ptr_ = (char*)out_ptr;
            numerator_ = 1;
            denominator_ = 1;
        } else {
            int device_id = CUDAState::GetInstance()->GetCurentDeviceID();
            device_ = Device(Device::DeviceType::CUDA, device_id);
            buffer_ = (char*)MemoryManager::Malloc(size, device_);
            acc_ptr_ = (char*)buffer_;
            numerator_ = acc_t_size;
            denominator_ = out_t_size;
            ReduceFraction(numerator_, denominator_);
        }
    }

    /// Releases the owned device allocation. Previously the buffer was never
    /// freed, leaking device memory on every 64-bit-indexed reduction.
    ~AccumulationBuffer() {
        if (buffer_ != nullptr) {
            MemoryManager::Free(buffer_, device_);
        }
    }

    // Owns a raw device allocation: copying would cause a double-free.
    AccumulationBuffer(const AccumulationBuffer&) = delete;
    AccumulationBuffer& operator=(const AccumulationBuffer&) = delete;

    /// Maps an output element pointer to its accumulation slot, scaling the
    /// byte distance from the output base by sizeof(arg_t)/sizeof(out_t).
    /// Returns nullptr for a default-constructed (no-op) buffer.
    char* GetAccSlice(char* out_ptr) {
        if (numerator_ == -1 || acc_ptr_ == nullptr) {
            return nullptr;
        }
        return acc_ptr_ + ((out_ptr - out_ptr_) * numerator_ / denominator_);
    }

private:
    char* acc_ptr_ = nullptr;   // where accumulations live (buffer_ or out)
    char* out_ptr_ = nullptr;   // output base pointer, for offset scaling
    int64_t numerator_ = -1;    // reduced fraction acc_t_size/out_t_size
    int64_t denominator_ = -1;
    char* buffer_ = nullptr;    // owned device allocation (may be null)
    Device device_;             // device of buffer_, needed to free it
};
/// Dispatches CUDA/HIP reductions: validates the reduction functor signature,
/// wraps it in the matching ops type (regular vs. arg-reduction) and runs the
/// templated reduction kernel over the indexer's workload.
class CUDAReductionEngine {
public:
    CUDAReductionEngine(const CUDAReductionEngine&) = delete;
    CUDAReductionEngine& operator=(const CUDAReductionEngine&) = delete;
    CUDAReductionEngine(const Indexer& indexer) : indexer_(indexer) {}

    /// Runs the reduction defined by \p reduce_func with identity element
    /// \p identity. The functor must be a binary OPEN3D_HOST_DEVICE callable
    /// over (scalar_t, scalar_t); a bool result selects arg-reduction.
    template <typename func_t, typename scalar_t>
    void Run(const func_t& reduce_func, scalar_t identity) {
        if (indexer_.NumWorkloads() == 0) {
            utility::LogError(
                    "0-sized input should be handled outside of the reduction "
                    "engine.");
        }
        if (indexer_.NumInputs() != 1) {
            utility::LogError("Reduction op must have exactly one input.");
        }

        OPEN3D_ASSERT_HOST_DEVICE_LAMBDA(func_t);
        using arg0_t = typename BinaryFunctionTraits<func_t>::arg0_t;
        using arg1_t = typename BinaryFunctionTraits<func_t>::arg1_t;
        if (!std::is_same<scalar_t, arg0_t>::value ||
            !std::is_same<scalar_t, arg1_t>::value) {
            utility::LogError(
                    "Function input type must match with the identity's type.");
        }

        using res_t = typename BinaryFunctionTraits<func_t>::res_t;
        if (std::is_same<res_t, bool>::value) {
            // func_t is a comparison function (for arg-reduction).
            // Signature: (scalar_t, scalar_t) -> bool.
            RunReduce<scalar_t, int64_t>(
                    indexer_, WrapArgReduceOps(reduce_func),
                    thrust::pair<scalar_t, int64_t>(identity, 0));
        } else {
            // func_t is a regular reduction function.
            // Signature: (scalar_t, scalar_t) -> scalar_t.
            RunReduce<scalar_t, scalar_t>(
                    indexer_, WrapRegularReduceOps<scalar_t>(reduce_func),
                    identity);
        }
    }

private:
    /// If the index cannot be represented in 32 bits, RunReduce calls itself
    /// recursively on 32-bit-indexable sub-tensors.
    template <typename scalar_t,
              typename out_scalar_t,
              int vt0 = 4,
              typename ops_t,
              typename ident_t>
    static void RunReduce(Indexer& indexer,
                          const ops_t& ops,
                          ident_t identity,
                          AccumulationBuffer* acc_buf_ptr = nullptr) {
        using traits = FunctionTraits<decltype(&ops_t::Reduce)>;
        using arg_t = typename traits::template arg<0>::type;
        static constexpr bool can_accumulate_in_output =
                std::is_convertible<arg_t, out_scalar_t>::value;

        bool can_use_32bit_indexing = indexer.CanUse32BitIndexing();
        std::unique_ptr<AccumulationBuffer> owned_buf_ptr;

        // The acc_buf_ptr is a shared pointer. It is created at the first
        // entrance and reused by all recursive function calls.
        if (acc_buf_ptr == nullptr) {
            // acc_buf_ptr holds buffer used for accumulation among multiple
            // sub_iter when accumulation in output is not possible.
            if (!can_accumulate_in_output && !can_use_32bit_indexing) {
                int64_t output_memory_size = 1;
                for (int dim = 0; dim < indexer.NumDims(); dim++) {
                    output_memory_size = std::max(
                            output_memory_size,
                            indexer.GetMasterShape()[dim] *
                                    indexer.GetOutput().byte_strides_[dim]);
                }
                owned_buf_ptr.reset(new AccumulationBuffer(
                        sizeof(arg_t), sizeof(out_scalar_t),
                        (char*)indexer.GetOutput().data_ptr_,
                        output_memory_size * sizeof(arg_t)));
            } else {
                owned_buf_ptr.reset(new AccumulationBuffer());
            }
            acc_buf_ptr = owned_buf_ptr.get();
        }

        if (!can_use_32bit_indexing) {
            // Recurse on sub-tensors small enough for 32-bit indexing.
            for (auto& sub_indexer : indexer.SplitTo32BitIndexing()) {
                RunReduce<scalar_t, out_scalar_t, vt0>(sub_indexer, ops,
                                                       identity, acc_buf_ptr);
            }
            return;
        }

        ReduceConfig config(sizeof(arg_t), indexer);

        void* buffer = nullptr;
        void* semaphores = nullptr;
        if (config.ShouldGlobalReduce()) {
            int device_id = CUDAState::GetInstance()->GetCurentDeviceID();
            Device device(Device::DeviceType::CUDA, device_id);
            buffer = MemoryManager::Malloc(config.GlobalMemorySize(), device);
            semaphores = MemoryManager::Malloc(config.SemaphoreSize(), device);
            // Semaphores count finished blocks per output column; start at 0.
            OPEN3D_CUDA_CHECK(
                    hipMemset(semaphores, 0, config.SemaphoreSize()));
        }

        assert(can_use_32bit_indexing);
        const char* in_data = (char*)indexer.GetInput(0).data_ptr_;
        char* out_data = (char*)indexer.GetOutput().data_ptr_;
        char* acc_data = acc_buf_ptr->GetAccSlice(out_data);
        auto output_calc = MakeOutputCalculator<uint32_t>(indexer);
        auto input_calc = MakeInputCalculator<uint32_t>(indexer);

        auto reduce_op = ReduceOp<scalar_t, ops_t, uint32_t, out_scalar_t, vt0>(
                ops, config, input_calc, output_calc, in_data, out_data,
                acc_data, buffer, (int*)semaphores, identity,
                indexer.ShouldAccumulate(), indexer.IsFinalOutput());

        // Launch reduce kernel
        int shared_memory = config.SharedMemorySize();
        hipLaunchKernelGGL((ReduceKernel<ReduceConfig::MAX_NUM_THREADS>),
                           dim3(config.GridDim()), dim3(config.BlockDim()),
                           shared_memory, 0, reduce_op);
        OPEN3D_CUDA_CHECK(hipDeviceSynchronize());
        OPEN3D_CUDA_CHECK(hipGetLastError());

        // Free the staging buffers now that the kernel has completed
        // (synchronized above). These allocations previously leaked on every
        // global reduction.
        if (buffer != nullptr || semaphores != nullptr) {
            int device_id = CUDAState::GetInstance()->GetCurentDeviceID();
            Device device(Device::DeviceType::CUDA, device_id);
            if (buffer != nullptr) {
                MemoryManager::Free(buffer, device);
            }
            if (semaphores != nullptr) {
                MemoryManager::Free(semaphores, device);
            }
        }
    }

private:
    Indexer indexer_;
};
/// CUDA backend entry point for tensor reductions.
///
/// \param src Input tensor.
/// \param dst Output tensor; pre-allocated by the caller with the reduced
///        shape.
/// \param dims Dimensions to reduce over.
/// \param keepdim Whether reduced dims are kept as size-1 in dst. Only
///        affects the caller-prepared dst shape; unused in this body.
/// \param op_code Which reduction to perform (Sum/Prod/Min/Max/ArgMin/
///        ArgMax/All/Any).
void ReductionCUDA(const Tensor& src,
                   Tensor& dst,
                   const SizeVector& dims,
                   bool keepdim,
                   ReductionOpCode op_code) {
    if (s_regular_reduce_ops.find(op_code) != s_regular_reduce_ops.end()) {
        // Regular reductions: output dtype matches input dtype.
        Indexer indexer({src}, dst, DtypePolicy::ALL_SAME, dims);
        CUDAReductionEngine re(indexer);
        Dtype dtype = src.GetDtype();

        CUDADeviceSwitcher switcher(src.GetDevice());
        DISPATCH_DTYPE_TO_TEMPLATE(dtype, [&]() {
            switch (op_code) {
                case ReductionOpCode::Sum:
                    if (indexer.NumWorkloads() == 0) {
                        // 0-sized input can be reduced to non-0-sized outputs,
                        // where identity elements should be filled.
                        // E.g. np.sum(np.ones((0, 5)), axis=0).shape == (5,).
                        dst.Fill(0);
                    } else {
                        re.Run([] OPEN3D_HOST_DEVICE(scalar_t a, scalar_t b)
                                       -> scalar_t { return a + b; },
                               static_cast<scalar_t>(0));
                    }
                    break;
                case ReductionOpCode::Prod:
                    if (indexer.NumWorkloads() == 0) {
                        dst.Fill(1);
                    } else {
                        re.Run([] OPEN3D_HOST_DEVICE(scalar_t a, scalar_t b)
                                       -> scalar_t { return a * b; },
                               static_cast<scalar_t>(1));
                    }
                    break;
                case ReductionOpCode::Min:
                    if (indexer.NumWorkloads() == 0) {
                        // Min has no identity element for an empty input.
                        utility::LogError(
                                "Zero-size Tensor does not support Min.");
                    } else {
                        re.Run([] OPEN3D_HOST_DEVICE(scalar_t a, scalar_t b)
                                       -> scalar_t { return a < b ? a : b; },
                               static_cast<scalar_t>(
                                       std::numeric_limits<scalar_t>::max()));
                    }
                    break;
                case ReductionOpCode::Max:
                    if (indexer.NumWorkloads() == 0) {
                        utility::LogError(
                                "Zero-size Tensor does not support Max.");
                    } else {
                        re.Run([] OPEN3D_HOST_DEVICE(scalar_t a, scalar_t b)
                                       -> scalar_t { return a > b ? a : b; },
                               static_cast<scalar_t>(std::numeric_limits<
                                                     scalar_t>::lowest()));
                    }
                    break;
                default:
                    utility::LogError("Unsupported op code.");
                    break;
            }
        });
    } else if (s_arg_reduce_ops.find(op_code) != s_arg_reduce_ops.end()) {
        // Arg-reductions: the comparison runs on the input dtype, the output
        // holds int64 indices.
        if (dst.GetDtype() != Dtype::Int64) {
            utility::LogError("Arg-reduction must have int64 output dtype.");
        }
        Indexer indexer({src}, dst, DtypePolicy::INPUT_SAME, dims);
        CUDAReductionEngine re(indexer);
        Dtype dtype = src.GetDtype();

        CUDADeviceSwitcher switcher(src.GetDevice());
        DISPATCH_DTYPE_TO_TEMPLATE(dtype, [&]() {
            switch (op_code) {
                case ReductionOpCode::ArgMin:
                    if (indexer.NumWorkloads() == 0) {
                        utility::LogError(
                                "Zero-size Tensor does not support ArgMin.");
                    } else {
                        re.Run([] OPEN3D_HOST_DEVICE(scalar_t a, scalar_t b)
                                       -> bool { return a < b; },
                               static_cast<scalar_t>(
                                       std::numeric_limits<scalar_t>::max()));
                    }
                    break;
                case ReductionOpCode::ArgMax:
                    if (indexer.NumWorkloads() == 0) {
                        utility::LogError(
                                "Zero-size Tensor does not support ArgMax.");
                    } else {
                        re.Run([] OPEN3D_HOST_DEVICE(scalar_t a, scalar_t b)
                                       -> bool { return a > b; },
                               static_cast<scalar_t>(std::numeric_limits<
                                                     scalar_t>::lowest()));
                    }
                    break;
                default:
                    utility::LogError("Unsupported op code.");
                    break;
            }
        });
    } else if (s_boolean_reduce_ops.find(op_code) !=
               s_boolean_reduce_ops.end()) {
        // Boolean reductions (All/Any): bool in, bool out.
        if (src.GetDtype() != Dtype::Bool) {
            utility::LogError(
                    "Boolean reduction only supports boolean input tensor.");
        }
        if (dst.GetDtype() != Dtype::Bool) {
            utility::LogError(
                    "Boolean reduction only supports boolean output tensor.");
        }
        Indexer indexer({src}, dst, DtypePolicy::ALL_SAME, dims);
        CUDAReductionEngine re(indexer);

        CUDADeviceSwitcher switcher(src.GetDevice());
        switch (op_code) {
            case ReductionOpCode::All:
                if (indexer.NumWorkloads() == 0) {
                    // all([]) is vacuously true.
                    dst.Fill(true);
                } else {
                    re.Run([] OPEN3D_HOST_DEVICE(uint8_t a, uint8_t b)
                                   -> uint8_t { return a && b; },
                           static_cast<uint8_t>(true));
                }
                break;
            case ReductionOpCode::Any:
                if (indexer.NumWorkloads() == 0) {
                    // any([]) is false.
                    dst.Fill(false);
                } else {
                    re.Run([] OPEN3D_HOST_DEVICE(uint8_t a, uint8_t b)
                                   -> uint8_t { return a || b; },
                           static_cast<uint8_t>(false));
                }
                break;
            default:
                utility::LogError("Unsupported op code.");
                break;
        }
    } else {
        utility::LogError("Unsupported op code.");
    }
}
} // namespace kernel
} // namespace core
} // namespace open3d
| d73df50994aeb5bcb705d2dac2b26b0281484997.cu | // ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2018 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
#include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/pair.h>
#include <thrust/tuple.h>
#include <algorithm>
#include <limits>
#include <sstream>
#include <tuple>
#include <type_traits>
#include "open3d/core/CUDAState.cuh"
#include "open3d/core/CUDAUtils.h"
#include "open3d/core/Device.h"
#include "open3d/core/Dispatch.h"
#include "open3d/core/FuncionTraits.h"
#include "open3d/core/Indexer.h"
#include "open3d/core/MemoryManager.h"
#include "open3d/core/SizeVector.h"
#include "open3d/core/Tensor.h"
#include "open3d/core/kernel/CUDALauncher.cuh"
#include "open3d/core/kernel/Reduction.h"
#include "open3d/utility/Console.h"
// CUDA reduction is based on PyTorch's CUDA reduction implementation.
// See: aten/src/ATen/native/cuda/Reduce.cuh
#if __CUDA_ARCH__ >= 750
constexpr uint32_t CUDA_MAX_THREADS_PER_SM = 1024;
#else
constexpr uint32_t CUDA_MAX_THREADS_PER_SM = 2048;
#endif
constexpr uint32_t CUDA_MAX_THREADS_PER_BLOCK = 1024;
constexpr uint32_t CUDA_THREADS_PER_BLOCK_FALLBACK = 256;
#define OPEN3D_MAX_THREADS_PER_BLOCK(val) \
(((val) <= CUDA_MAX_THREADS_PER_BLOCK) ? (val) \
: CUDA_THREADS_PER_BLOCK_FALLBACK)
#define OPEN3D_MIN_BLOCKS_PER_SM(threads_per_block, blocks_per_sm) \
((((threads_per_block) * (blocks_per_sm) <= CUDA_MAX_THREADS_PER_SM) \
? (blocks_per_sm) \
: ((CUDA_MAX_THREADS_PER_SM + (threads_per_block)-1) / \
(threads_per_block))))
#define OPEN3D_LAUNCH_BOUNDS_2(max_threads_per_block, min_blocks_per_sm) \
__launch_bounds__((OPEN3D_MAX_THREADS_PER_BLOCK((max_threads_per_block))), \
(OPEN3D_MIN_BLOCKS_PER_SM((max_threads_per_block), \
(min_blocks_per_sm))))
// Warp-level shuffle-down that works across CUDA versions: on CUDA >= 9.0 it
// uses the synchronized variant with an explicit participation mask (the
// legacy mask-less intrinsic is unsafe under Volta's independent thread
// scheduling); otherwise it falls back to the legacy intrinsic.
template <typename T>
OPEN3D_DEVICE __forceinline__ T WARP_SHFL_DOWN(T value,
                                               unsigned int delta,
                                               int width = warpSize,
                                               unsigned int mask = 0xffffffff) {
#if CUDA_VERSION >= 9000
    return __shfl_down_sync(mask, value, delta, width);
#else
    return __shfl_down(value, delta, width);
#endif
}
namespace open3d {
namespace core {
namespace kernel {
/// Ceiling integer division for non-negative a and positive b.
static inline int64_t DivUp(int64_t a, int64_t b) {
    const int64_t biased = a + b - 1;
    return biased / b;
}
// Reduces the fraction numerator/denominator in place by dividing both by
// their greatest common divisor.
OPEN3D_HOST_DEVICE static void ReduceFraction(int64_t& numerator,
                                              int64_t& denominator) {
    // Euclid's algorithm (device-safe; std::gcd would need C++17).
    int64_t x = denominator;
    int64_t y = numerator;
    while (y != 0) {
        const int64_t r = x % y;
        x = y;
        y = r;
    }
    // x now holds the GCD (gcd(n, 0) == n).
    numerator /= x;
    denominator /= x;
}
/// Chooses the launch configuration and reduction strategy — which hardware
/// level (x-lanes, warps, thread-blocks) reduces inputs versus maps to
/// distinct outputs — for a given reduction problem.
class ReduceConfig {
public:
    // Indices into input_mult_ / output_mult_ per level of parallelism.
    static constexpr int BLOCK_X = 0;
    static constexpr int BLOCK_Y = 1;
    static constexpr int CTA = 2;
    static constexpr int MAX_NUM_THREADS = 512;

    int num_inputs_per_output_;
    int num_outputs_;
    int step_input_ = 1;       // stride between inputs handled by one thread
    int step_output_ = 1;      // stride between outputs handled by one thread
    int ctas_per_output_ = 1;  // blocks cooperating on one output

private:
    int element_size_bytes_;
    // Non-zero entry => the corresponding level (x-lane / warp / CTA row)
    // participates in reducing inputs; the value is its index multiplier.
    int input_mult_[3] = {0, 0, 0};
    // Non-zero entry => the corresponding level maps to distinct outputs.
    int output_mult_[2] = {0, 0};
    int block_width_;
    int block_height_;
    int num_threads_;

public:
    ReduceConfig(int element_size_bytes, const Indexer& indexer)
        : element_size_bytes_(element_size_bytes) {
        num_outputs_ = indexer.NumOutputElements();
        num_inputs_per_output_ = indexer.NumWorkloads() / num_outputs_;

        // Adjust block size to map block width to fastest changing dimension of
        // input tensor. This grants the best possible memory accessing pattern,
        // given that for non-contiguous tensor with space in between, we cannot
        // have perfect memory coalescing.
        bool reduction_on_fastest_striding_dimension =
                (indexer.NumReductionDims() == indexer.NumDims()) ||
                (indexer.GetInput(0).byte_strides_[0] <
                 indexer.GetInput(0).byte_strides_[indexer.NumReductionDims()]);

        // Notice that dim0 & dim1 does NOT guarantee any launch configuration
        // here! dim0 & dim1 are more like the upper bound of the block
        // dimension. The actual launch config and reduction scheme is
        // determined by setting values to `input_mult_` and
        // `output_mult_`. We try to max out dim1 so that we have enough
        // threads per CTA to deliver performance for larger problem size.
        int64_t dim0;
        int64_t dim1;

        if (reduction_on_fastest_striding_dimension) {
            // Map block.x to the fastest reducing dimension. It implies:
            // 1. BlockXReduce is required.
            // 2. block.y now max out to num_outputs.
            dim0 = indexer.GetMasterShape()[0];
            dim1 = num_outputs_;
        } else {
            // Map block.x to the fastest non reducing dimension. It implies:
            // 1. BlockXReduce is turned off.
            // 2. block.y now max out to num_inputs_per_output_.
            dim0 = indexer.GetMasterShape()[indexer.NumReductionDims()];
            dim1 = num_inputs_per_output_;
        }

        // Adjust block_width and block_height
        SetBlockDimension(dim0, dim1);

        int block_width = block_width_;
        int block_height = block_height_;

        if (indexer.NumDims() == 0 || reduction_on_fastest_striding_dimension) {
            // Split the input across lanes if the input is contiguous in the
            // reduced dimension. This will require reduction between threads
            // using warp shuffle instructions and shared memory (if
            // block_width > warpSize).
            input_mult_[0] = SplitInput(block_width);
        } else {
            // Otherwise split the output across lanes in a warp.
            output_mult_[0] = SplitOutput(block_width);
        }

        if (ValuesPerThread() >= block_height * 16 ||
            ValuesPerThread() >= 256) {
            // Divide the input across warps in a thread-block, if that leaves
            // at least 16 elements to be summed by each thread. This will
            // require inter-warp reduction using shared memory.
            input_mult_[1] = SplitInput(block_height);
        } else {
            // Otherwise, each warp handles a separate output.
            output_mult_[1] = SplitOutput(block_height);
        }

        if (input_mult_[1] != 0 && ValuesPerThread() >= 256 &&
            num_outputs_ <= 4096) {
            // Divide the input across thread-blocks if the amount of work
            // per-thread is large enough and the size of the output is small
            // enough. This will require a reduction using global memory.
            ctas_per_output_ = DivUp(ValuesPerThread(), 16);
            if (ctas_per_output_ > 65535) {
                ctas_per_output_ = 65535;
            }
            input_mult_[2] = SplitInput(ctas_per_output_);
        }
    }

    /// Returns the largest power of two not exceeding n, i.e.
    /// 2^floor(log2(n)); clamped below at 1.
    static inline int LastPow2(int n) {
        // Dtype.h asserts sizeof(int) == 4.
        // Smear the highest set bit into all lower bit positions, then keep
        // only the highest bit.
        n |= (n >> 1);
        n |= (n >> 2);
        n |= (n >> 4);
        n |= (n >> 8);
        n |= (n >> 16);
        return std::max(1, n - (n >> 1));
    }

    /// Picks block_width_/block_height_ (powers of two whose product is at
    /// most MAX_NUM_THREADS) from the upper bounds dim0 (x) and dim1 (y).
    void SetBlockDimension(int64_t dim0, int64_t dim1) {
        int dim0_pow2 = dim0 < MAX_NUM_THREADS
                                ? static_cast<int>(LastPow2(dim0))
                                : MAX_NUM_THREADS;
        int dim1_pow2 = dim1 < MAX_NUM_THREADS
                                ? static_cast<int>(LastPow2(dim1))
                                : MAX_NUM_THREADS;
        block_width_ =
                std::min(dim0_pow2, CUDAState::GetInstance()->GetWarpSize());
        block_height_ =
                std::min(dim1_pow2, int(MAX_NUM_THREADS / block_width_));
        // Re-widen block_width_ if block_height_ left threads unused.
        block_width_ =
                std::min(dim0_pow2, int(MAX_NUM_THREADS / block_height_));
        num_threads_ = block_width_ * block_height_;
    }

    /// Assigns `parallelism` lanes of the current level to reducing inputs;
    /// returns the previous input step (the level's index multiplier).
    int SplitInput(int parallelism) {
        int step = step_input_;
        step_input_ *= parallelism;
        return step;
    }

    /// Assigns `parallelism` lanes of the current level to distinct outputs;
    /// returns the previous output step (the level's index multiplier).
    int SplitOutput(int parallelism) {
        int step = step_output_;
        step_output_ *= parallelism;
        return step;
    }

    dim3 BlockDim() const { return dim3(block_width_, block_height_); }

    dim3 GridDim() const {
        return dim3(DivUp(num_outputs_, step_output_), ctas_per_output_);
    }

    OPEN3D_HOST_DEVICE bool ShouldBlockXReduce() const {
        return input_mult_[BLOCK_X] != 0;
    }

    OPEN3D_HOST_DEVICE bool ShouldBlockYReduce() const {
        return input_mult_[BLOCK_Y] != 0;
    }

    OPEN3D_HOST_DEVICE bool ShouldGlobalReduce() const {
        return input_mult_[CTA] != 0;
    }

    // True if this thread is responsible for storing output_idx's result.
    OPEN3D_DEVICE bool ShouldStore(int output_idx) const {
        return output_idx < num_outputs_ &&
               (!ShouldBlockXReduce() || threadIdx.x == 0) &&
               (!ShouldBlockYReduce() || threadIdx.y == 0);
    }

    // First input element index handled by this thread.
    OPEN3D_HOST_DEVICE int InputIdx() const {
        int lane = threadIdx.x;
        int warp = threadIdx.y;
        int cta2 = blockIdx.y;
        return (lane * input_mult_[BLOCK_X] + warp * input_mult_[BLOCK_Y] +
                cta2 * input_mult_[CTA]);
    }

    // Output element index handled by this thread.
    OPEN3D_HOST_DEVICE int OutputIdx() const {
        int lane = threadIdx.x;
        int warp = threadIdx.y;
        int cta1 = blockIdx.x;
        return (lane * output_mult_[BLOCK_X] + warp * output_mult_[BLOCK_Y] +
                cta1 * step_output_);
    }

    // Shared-memory slot for this thread, shifted by `offset` warp rows.
    OPEN3D_DEVICE int SharedMemoryOffset(int offset) const {
        return threadIdx.x + (threadIdx.y + offset) * blockDim.x;
    }

    // Slot in the global staging buffer for block row `cta2` of this output.
    OPEN3D_DEVICE int StagingMemoryOffset(int cta2) const {
        int offset = cta2 + blockIdx.x * gridDim.y;
        if (!ShouldBlockXReduce()) {
            // Each x-lane owns its own output and thus its own staging slot.
            offset = threadIdx.x + offset * blockDim.x;
        }
        return offset;
    }

    // Bytes of dynamic shared memory the kernel needs (0 if neither
    // inter-warp reduction nor a wider-than-warp x-reduction is required).
    int SharedMemorySize() const {
        if (!ShouldBlockYReduce() &&
            (!ShouldBlockXReduce() ||
             block_width_ <= CUDAState::GetInstance()->GetWarpSize())) {
            return 0;
        }
        return element_size_bytes_ * num_threads_;
    }

    // Bytes of the global staging buffer used for cross-block reduction.
    int64_t GlobalMemorySize() const {
        if (!ShouldGlobalReduce()) {
            return 0;
        }
        auto size =
                (int64_t)element_size_bytes_ * num_outputs_ * ctas_per_output_;
        if (!ShouldBlockXReduce()) {
            size *= BlockDim().x;
        }
        return size;
    }

    // Bytes for the per-output-column finished-block counters.
    int SemaphoreSize() const {
        if (!ShouldGlobalReduce()) {
            return 0;
        }
        return sizeof(int) * GridDim().x;
    }

    int ValuesPerThread() const {
        return DivUp(num_inputs_per_output_, step_input_);
    }

    // Human-readable dump of the chosen configuration, for debugging.
    std::string ToString() const {
        std::string input_mult_str = fmt::format(
                "[{},{},{}]", input_mult_[0], input_mult_[1], input_mult_[2]);
        std::string output_mult_str =
                fmt::format("[{},{}]", output_mult_[0], output_mult_[1]);
        std::string block_str = fmt::format("[{},{},{}]", BlockDim().x,
                                            BlockDim().y, BlockDim().z);
        std::string grid_str = fmt::format("[{},{},{}]", GridDim().x,
                                           GridDim().y, GridDim().z);
        std::string str = fmt::format(
                "REDUCEConfig(element_size_bytes_={}, "
                "num_inputs_per_output_={}, num_outputs_={}, "
                "step_input_={}, step_output_={}, ctas_per_output_={}, "
                "input_mult_={}, output_mult_={}, values_per_thread={}, "
                "block={}, grid={}, global_memory_size={})",
                element_size_bytes_, num_inputs_per_output_, num_outputs_,
                step_input_, step_output_, ctas_per_output_, input_mult_str,
                output_mult_str, ValuesPerThread(), block_str, grid_str,
                GlobalMemorySize());
        return str;
    }
};
// Trampoline kernel: runs the fully-configured ReduceOp on the device.
// Launch bounds cap the block at `nt` threads with at least 4 blocks per SM
// (subject to the OPEN3D_MIN_BLOCKS_PER_SM clamp).
template <int nt, typename R>
OPEN3D_LAUNCH_BOUNDS_2(nt, 4)
__global__ void ReduceKernel(R reduction) {
    reduction.Run();
}
// Builds an offset calculator over the non-reduced (output) dimensions.
// For each output element it yields two byte offsets: [0] into the output
// tensor, [1] the start of the corresponding input slice. Reduction dims
// occupy the leading positions of the master shape, hence the pointer skips.
template <typename index_t>
static OffsetCalculator<2, index_t> MakeOutputCalculator(
        const Indexer& indexer) {
    int num_reduction_dims = indexer.NumReductionDims();
    int num_output_dims = indexer.NumDims() - num_reduction_dims;
    std::array<const int64_t*, 2> strides = {
            indexer.GetOutput().byte_strides_ + num_reduction_dims,
            indexer.GetInput(0).byte_strides_ + num_reduction_dims,
    };
    const int64_t* shape = indexer.GetMasterShape() + num_reduction_dims;
    return OffsetCalculator<2, index_t>(num_output_dims, shape, strides.data());
}
// Builds an offset calculator over the reduced (input-only) dimensions,
// yielding the input byte offset of each element within one input slice.
template <typename index_t>
static OffsetCalculator<1, index_t> MakeInputCalculator(
        const Indexer& indexer) {
    int num_reduction_dims = indexer.NumReductionDims();
    std::array<const int64_t*, 1> strides = {
            indexer.GetInput(0).byte_strides_,
    };
    return OffsetCalculator<1, index_t>(
            num_reduction_dims, indexer.GetMasterShape(), strides.data());
}
// Calls f(i, idx) for the vt strided indices idx = begin + i * stride,
// i in [0, vt). The first branch handles full tiles without per-element
// bounds checks so the compiler can unroll unconditionally.
template <int vt, typename index_t, typename func_t>
OPEN3D_DEVICE void StridedIterate(func_t f,
                                  index_t begin,
                                  index_t end,
                                  index_t stride) {
    if (begin + (vt - 1) * stride < end) {
        // Whole tile is in range: no per-element guard needed.
#pragma unroll
        for (index_t i = 0; i < vt; i++) {
            f(i, begin + i * stride);
        }
    } else {
        // Tail tile: guard each element against `end`.
#pragma unroll
        for (index_t i = 0; i < vt; i++) {
            index_t idx = begin + i * stride;
            if (idx < end) {
                f(i, idx);
            }
        }
    }
}
/// Combine() and Reduce() are the same for regular reduction ops.
/// Wraps a plain binary functor (acc, val) -> acc for use by ReduceOp.
template <typename out_scalar_t, typename func_t>
class RegularReduceOps {
    using arg_t = typename BinaryFunctionTraits<func_t>::arg0_t;
    using scalar_t = typename BinaryFunctionTraits<func_t>::arg1_t;

public:
    RegularReduceOps(const func_t& op) : reduce_func_(op) {}

    /// Converts the accumulator to the output scalar type.
    static inline OPEN3D_DEVICE out_scalar_t Project(arg_t arg) {
        return (out_scalar_t)arg;
    }

    /// Warp shuffle-down of the accumulator for warp-level reduction.
    static inline OPEN3D_DEVICE arg_t WarpShflDown(arg_t arg, int offset) {
        return WARP_SHFL_DOWN(arg, offset);
    }

    /// Merges two partial accumulators.
    OPEN3D_DEVICE inline arg_t Combine(arg_t acc, scalar_t val) const {
        return reduce_func_(acc, val);
    }

    /// Idx is ignored for RegularReduceOps.
    OPEN3D_DEVICE inline arg_t Reduce(arg_t acc,
                                      scalar_t val,
                                      int64_t idx) const {
        return reduce_func_(acc, val);
    }

private:
    func_t reduce_func_ = nullptr;
};
// Deduction helper: wraps a binary reduction functor as RegularReduceOps.
template <typename scalar_t, typename func_t>
RegularReduceOps<scalar_t, func_t> WrapRegularReduceOps(const func_t& op) {
    return RegularReduceOps<scalar_t, func_t>{op};
}
// Ops for arg-reductions (ArgMin/ArgMax): the accumulator carries a
// (value, index) pair, selected by a boolean comparison functor, and
// projects to the winning index.
template <typename func_t>
class ArgReduceOps {
    using scalar_t = typename BinaryFunctionTraits<func_t>::arg1_t;
    using index_t = int64_t;
    using arg_t = thrust::pair<scalar_t, index_t>;

public:
    ArgReduceOps(const func_t comp_func) : comp_func_(comp_func) {}

    /// The final output is the index of the selected element.
    static OPEN3D_DEVICE index_t Project(arg_t arg) { return arg.second; }

    /// Shuffles both halves of the (value, index) pair down the warp.
    static OPEN3D_DEVICE arg_t WarpShflDown(arg_t arg, int offset) {
        return arg_t(WARP_SHFL_DOWN(arg.first, offset),
                     WARP_SHFL_DOWN(arg.second, offset));
    }

    /// Combine(pair<val_t, idx_t>, pair<val_t, idx_t>) -> pair<val_t, idx_t>.
    /// Called at subsequent rounds of reduction, when values are already
    /// associated with indices.
    OPEN3D_DEVICE inline arg_t Combine(arg_t a, arg_t b) const {
        return comp_func_(a.first, b.first) ? a : b;
    }

    /// Reduce(pair<val_t, idx_t>, val_t, idx_t) -> pair<val_t, idx_t>.
    /// Called at the first round of reduction, when values are not yet
    /// associated with indices.
    OPEN3D_DEVICE inline arg_t Reduce(arg_t arg,
                                      scalar_t val,
                                      int64_t idx) const {
        return comp_func_(arg.first, val) ? arg : arg_t(val, idx);
    }

private:
    func_t comp_func_ = nullptr;
};
// Deduction helper: wraps a boolean comparison functor as ArgReduceOps.
template <typename func_t>
ArgReduceOps<func_t> WrapArgReduceOps(const func_t& comp_func) {
    return ArgReduceOps<func_t>{comp_func};
}
template <typename scalar_t,
typename ops_t,
typename index_t,
typename out_scalar_t = scalar_t,
int vt0 = 4>
class ReduceOp {
using traits = FunctionTraits<decltype(&ops_t::Reduce)>;
using arg_t =
typename std::decay<typename traits::template arg<0>::type>::type;
using InputCalculator = OffsetCalculator<1, index_t>;
using OutputCalculator = OffsetCalculator<2, index_t>;
public:
/// \param ops         Reduction operator (Reduce/Combine/Project/shuffle).
/// \param config      Precomputed launch/tiling configuration.
/// \param input_calc  Offset calculator over the reduced dims.
/// \param output_calc Offset calculator over the non-reduced dims.
/// \param src         Input data base pointer.
/// \param dst         Output data base pointer.
/// \param acc_buf     Optional accumulation buffer (may alias dst or be null).
/// \param cta_buf     Global staging buffer for cross-block reduction.
/// \param semaphores  Per-output-column finished-block counters.
/// \param identity    Identity element of the reduction.
/// \param accumulate  Combine with pre-existing partial results when true.
/// \param final_output Project and write final values when true.
ReduceOp(ops_t ops,
         ReduceConfig config,
         InputCalculator input_calc,
         OutputCalculator output_calc,
         const void* src,
         char* dst,
         void* acc_buf,
         void* cta_buf,
         int* semaphores,
         arg_t identity,
         bool accumulate,
         bool final_output)
    : ops_(ops),
      config_(config),
      input_calc_(input_calc),
      output_calc_(output_calc),
      src_(src),
      dst_(dst),
      acc_buf_(acc_buf),
      cta_buf_(cta_buf),
      semaphores_(semaphores),
      identity_(identity),
      accumulate_(accumulate),
      final_output_(final_output) {}
// Device-side entry point: per-thread serial reduction, then intra-block
// (warp and x-lane) reductions, then either a grid-level reduction or a
// direct store/accumulation of this block's result.
OPEN3D_DEVICE void Run() const {
    extern __shared__ char shared_memory[];
    index_t output_idx = config_.OutputIdx();
    index_t input_idx = config_.InputIdx();
    auto base_offsets = output_calc_.get(output_idx);

    // Serial reduction over this thread's slice of the input.
    arg_t value = identity_;
    if (output_idx < config_.num_outputs_ &&
        input_idx < config_.num_inputs_per_output_) {
        auto input_slice = (const char*)src_ + base_offsets[1];
        value = ThreadReduce((const scalar_t*)input_slice);
    }
    // Fold partials across warps (y), then across lanes (x).
    if (config_.ShouldBlockYReduce()) {
        value = BlockYReduce(value, shared_memory);
    }
    if (config_.ShouldBlockXReduce()) {
        value = BlockXReduce(value, shared_memory);
    }

    auto out = (out_scalar_t*)((char*)dst_ + base_offsets[0]);
    // Locate the accumulation slot for this output when an external
    // accumulation buffer is in use; the output byte offset is rescaled by
    // the reduced fraction sizeof(arg_t)/sizeof(out_scalar_t).
    arg_t* acc = nullptr;
    if (acc_buf_ != nullptr) {
        int64_t numerator = (int64_t)sizeof(arg_t);
        int64_t denominator = (int64_t)sizeof(out_scalar_t);
        ReduceFraction(numerator, denominator);
        acc = (arg_t*)((char*)acc_buf_ +
                       (base_offsets[0] * numerator / denominator));
    }

    if (config_.ShouldGlobalReduce()) {
        // Multiple blocks contribute to this output: cross-block pass.
        value = GlobalReduce(value, acc, shared_memory);
    } else if (config_.ShouldStore(output_idx)) {
        if (acc == nullptr) {
            // No external accumulation buffer: use the output directly.
            if (accumulate_) {
                value = AccumulateInOutput<can_accumulate_in_output>(out,
                                                                     value);
            }
            if (final_output_) {
                SetResultsToOutput(value, base_offsets[0]);
            } else {
                *out = GetAccumulatedOutput<can_accumulate_in_output>(
                        out, value);
            }
        } else {
            // Accumulate in the dedicated arg_t-typed buffer.
            if (accumulate_) {
                value = ops_.Combine(*acc, value);
            }
            if (final_output_) {
                SetResultsToOutput(value, base_offsets[0]);
            } else {
                *acc = value;
            }
        }
    }
}
OPEN3D_DEVICE arg_t ThreadReduce(const scalar_t* data) const {
index_t idx = config_.InputIdx();
// Multiple accumulators to remove dependency between unrolled loops.
arg_t value_list[vt0];
#pragma unroll
for (int i = 0; i < vt0; i++) {
value_list[i] = identity_;
}
index_t end = config_.num_inputs_per_output_;
index_t stride = config_.step_input_;
index_t element_stride = input_calc_.strides_[0][0] / sizeof(scalar_t);
// Reducing layers of function calls so compiler could do proper loop
// unroll that exposes instruction level parallelism.
while (idx < config_.num_inputs_per_output_) {
// load input
SmallArray<scalar_t, vt0> values;
if (input_calc_.dims_ == 1) {
StridedIterate<vt0>(
[&](index_t i, index_t idx) {
values[i] = data[idx * element_stride];
},
idx, end, stride);
} else {
StridedIterate<vt0>(
[&](index_t i, index_t idx) {
values[i] = data[input_calc_.get(idx)[0] /
sizeof(scalar_t)];
},
idx, end, stride);
}
// compute
StridedIterate<vt0, index_t>(
[&](index_t i, index_t idx) {
value_list[i] =
ops_.Reduce(value_list[i], values[i], idx);
},
idx, config_.num_inputs_per_output_, config_.step_input_);
// step offset
idx += config_.step_input_ * vt0;
}
#pragma unroll
for (int i = 1; i < vt0; i++) {
value_list[0] = ops_.Combine(value_list[0], value_list[i]);
}
return value_list[0];
}
OPEN3D_DEVICE arg_t BlockXReduce(arg_t value, char* shared_memory) const {
int dim_x = blockDim.x;
arg_t* shared = (arg_t*)shared_memory;
if (dim_x > warpSize) {
int address_base = threadIdx.x + threadIdx.y * blockDim.x;
shared[address_base] = value;
for (int offset = dim_x / 2; offset >= warpSize; offset >>= 1) {
__syncthreads();
if (threadIdx.x < offset && threadIdx.x + offset < blockDim.x) {
arg_t other = shared[address_base + offset];
value = ops_.Combine(value, other);
shared[address_base] = value;
}
}
dim_x = warpSize;
}
__syncthreads();
for (int offset = 1; offset < dim_x; offset <<= 1) {
arg_t other = ops_.WarpShflDown(value, offset);
value = ops_.Combine(value, other);
}
return value;
}
OPEN3D_DEVICE arg_t BlockYReduce(arg_t value, char* shared_memory) const {
arg_t* shared = (arg_t*)shared_memory;
shared[config_.SharedMemoryOffset(0)] = value;
for (int offset = blockDim.y / 2; offset > 0; offset >>= 1) {
__syncthreads();
if (threadIdx.y < offset && threadIdx.y + offset < blockDim.y) {
arg_t other = shared[config_.SharedMemoryOffset(offset)];
value = ops_.Combine(value, other);
shared[config_.SharedMemoryOffset(0)] = value;
}
}
return value;
}
OPEN3D_DEVICE bool MarkBlockFinished() const {
__shared__ bool is_last_block_done_shared;
__syncthreads();
if (threadIdx.x == 0 && threadIdx.y == 0) {
int prev_blocks_finished = atomicAdd(&semaphores_[blockIdx.x], 1);
is_last_block_done_shared = (prev_blocks_finished == gridDim.y - 1);
}
__syncthreads();
return is_last_block_done_shared;
}
template <bool can_acc>
OPEN3D_DEVICE arg_t AccumulateInOutput(
out_scalar_t* out,
arg_t value,
typename std::enable_if<can_acc>::type* = nullptr) const {
return ops_.Combine(*out, value);
}
// This function should never be called --
// it's the version of `AccumulateInOutput`
// when accumulation in the output is not possible.
template <bool can_acc>
OPEN3D_DEVICE arg_t AccumulateInOutput(
out_scalar_t*,
arg_t,
typename std::enable_if<!can_acc>::type* = nullptr) const {
assert(false); // can't use AT_ASSERT in Cuda.
return arg_t{};
}
template <bool can_acc>
OPEN3D_DEVICE out_scalar_t GetAccumulatedOutput(
out_scalar_t* out,
arg_t value,
typename std::enable_if<can_acc>::type* = nullptr) const {
assert(!final_output_);
return (out_scalar_t)value;
}
// This function should never be called --
// it's the version of `GetAccumulatedOutput`
// when accumulation in the output is not possible.
template <bool can_acc>
OPEN3D_DEVICE out_scalar_t GetAccumulatedOutput(
out_scalar_t* out,
arg_t value,
typename std::enable_if<!can_acc>::type* = nullptr) const {
assert(false);
return *out;
}
template <class T>
OPEN3D_DEVICE void SetResults(const T x, const index_t base_offset) const {
auto res = (out_scalar_t*)((char*)dst_ + base_offset);
*res = x;
}
OPEN3D_DEVICE void SetResultsToOutput(arg_t value,
index_t base_offset) const {
assert(final_output_);
SetResults(ops_.Project(value), base_offset);
}
OPEN3D_DEVICE arg_t GlobalReduce(arg_t value,
arg_t* acc,
char* shared_memory) const {
arg_t* reduce_buffer = (arg_t*)cta_buf_;
index_t output_idx = config_.OutputIdx();
auto base_offsets = output_calc_.get(output_idx);
auto out = (out_scalar_t*)((char*)dst_ + base_offsets[0]);
bool should_store = config_.ShouldStore(config_.OutputIdx());
if (should_store) {
index_t offset = config_.StagingMemoryOffset(blockIdx.y);
reduce_buffer[offset] = value;
}
__threadfence(); // make sure writes are globally visible
__syncthreads(); // if multiple warps in this block wrote to staging,
// make sure they're all done
bool is_last_block_done = MarkBlockFinished();
if (is_last_block_done) {
value = identity_;
if (config_.ShouldBlockXReduce()) {
index_t input_offset = threadIdx.x + threadIdx.y * blockDim.x;
index_t step = blockDim.x * blockDim.y;
for (; input_offset < config_.ctas_per_output_;
input_offset += step) {
index_t idx = config_.StagingMemoryOffset(input_offset);
arg_t next = reduce_buffer[idx];
value = ops_.Combine(value, next);
}
} else {
index_t input_offset = threadIdx.y;
index_t step = blockDim.y;
for (; input_offset < config_.ctas_per_output_;
input_offset += step) {
index_t idx = config_.StagingMemoryOffset(input_offset);
arg_t next = reduce_buffer[idx];
value = ops_.Combine(value, next);
}
}
value = BlockYReduce(value, shared_memory);
if (config_.ShouldBlockXReduce()) {
value = BlockXReduce(value, shared_memory);
}
if (should_store) {
if (acc == nullptr) {
if (accumulate_) {
value = AccumulateInOutput<can_accumulate_in_output>(
out, value);
}
if (final_output_) {
SetResultsToOutput(value, base_offsets[0]);
} else {
*out = GetAccumulatedOutput<can_accumulate_in_output>(
out, value);
}
} else {
if (accumulate_) {
value = ops_.Combine(*acc, value);
}
if (final_output_) {
SetResultsToOutput(value, base_offsets[0]);
} else {
*acc = value;
}
}
}
}
return value;
}
private:
static constexpr bool can_accumulate_in_output =
std::is_convertible<arg_t, out_scalar_t>::value &&
std::is_convertible<out_scalar_t, arg_t>::value;
static constexpr float acc_buffer_multiplier =
(float)sizeof(arg_t) / sizeof(out_scalar_t);
ops_t ops_;
ReduceConfig config_;
InputCalculator input_calc_;
OutputCalculator output_calc_;
const void* src_;
const char* dst_;
// acc_buf_ used for accumulation among sub Tensor Iterator when
// accumulation on output is not permissible
void* acc_buf_;
// cta_buf_ used for accumulation between blocks during global reduction
void* cta_buf_;
int* semaphores_;
arg_t identity_;
bool accumulate_;
bool final_output_;
};
/// Scratch buffer for accumulating partial results when intermediates cannot
/// live in the output tensor (accumulator type larger than output type).
/// If out_t_size >= acc_t_size the output buffer itself is reused.
class AccumulationBuffer {
public:
    AccumulationBuffer() {}

    /// \param acc_t_size  sizeof the accumulator type (bytes).
    /// \param out_t_size  sizeof the output scalar type (bytes).
    /// \param out_ptr     base pointer of the output buffer.
    /// \param size        bytes to allocate when a separate buffer is needed.
    AccumulationBuffer(int64_t acc_t_size,
                       int64_t out_t_size,
                       char* out_ptr,
                       int64_t size) {
        out_ptr_ = (char*)out_ptr;
        if (out_t_size >= acc_t_size) {
            // reusing output buffer for accumulation.
            acc_ptr_ = (char*)out_ptr;
            numerator_ = 1;
            denominator_ = 1;
        } else {
            int device_id = CUDAState::GetInstance()->GetCurentDeviceID();
            Device device(Device::DeviceType::CUDA, device_id);
            // NOTE(review): buffer_ is allocated here but no destructor frees
            // it — confirm MemoryManager reclaims it elsewhere, otherwise
            // this leaks device memory.
            buffer_ = (char*)MemoryManager::Malloc(size, device);
            acc_ptr_ = (char*)buffer_;
            // Keep acc/out size ratio in lowest terms for offset scaling.
            numerator_ = acc_t_size;
            denominator_ = out_t_size;
            ReduceFraction(numerator_, denominator_);
        }
    }

    /// Maps an output pointer to the matching accumulator slot (nullptr when
    /// no separate accumulation buffer is in use).
    char* GetAccSlice(char* out_ptr) {
        if (numerator_ == -1 || acc_ptr_ == nullptr) {
            return nullptr;
        }
        return acc_ptr_ + ((out_ptr - out_ptr_) * numerator_ / denominator_);
    }

private:
    char* acc_ptr_ = nullptr;      // where accumulation happens
    char* out_ptr_ = nullptr;      // base of the real output buffer
    float size_factor_ = -1;
    int64_t numerator_ = -1;       // acc/out size ratio, reduced
    int64_t denominator_ = -1;
    char* buffer_ = nullptr;       // owned scratch allocation (if any)
};
/// Dispatches a reduction described by an Indexer to the CUDA reduction
/// kernel. Non-copyable. A bool-returning reduce_func is treated as a
/// comparison for arg-reductions; otherwise as a regular reduction.
class CUDAReductionEngine {
public:
    CUDAReductionEngine(const CUDAReductionEngine&) = delete;
    CUDAReductionEngine& operator=(const CUDAReductionEngine&) = delete;
    CUDAReductionEngine(const Indexer& indexer) : indexer_(indexer) {}

    /// Runs the reduction with \p reduce_func starting from \p identity.
    /// \throws if the indexer has zero workloads or not exactly one input,
    /// or if reduce_func's argument types don't match scalar_t.
    template <typename func_t, typename scalar_t>
    void Run(const func_t& reduce_func, scalar_t identity) {
        if (indexer_.NumWorkloads() == 0) {
            utility::LogError(
                    "0-sized input should be handled outside of the reduction "
                    "engine.");
        }
        if (indexer_.NumInputs() != 1) {
            utility::LogError("Reduction op must have exactly one input.");
        }
        OPEN3D_ASSERT_HOST_DEVICE_LAMBDA(func_t);
        using arg0_t = typename BinaryFunctionTraits<func_t>::arg0_t;
        using arg1_t = typename BinaryFunctionTraits<func_t>::arg1_t;
        if (!std::is_same<scalar_t, arg0_t>::value ||
            !std::is_same<scalar_t, arg1_t>::value) {
            utility::LogError(
                    "Function input type must match with the identity's type.");
        }
        using res_t = typename BinaryFunctionTraits<func_t>::res_t;
        if (std::is_same<res_t, bool>::value) {
            // func_t is a comparison function (for arg-reduction).
            // Signature: (scalar_t, scalar_t) -> bool.
            RunReduce<scalar_t, int64_t>(
                    indexer_, WrapArgReduceOps(reduce_func),
                    thrust::pair<scalar_t, int64_t>(identity, 0));
        } else {
            // func_t is a regular reduction function.
            // Signature: (scalar_t, scalar_t) -> scalar_t.
            RunReduce<scalar_t, scalar_t>(
                    indexer_, WrapRegularReduceOps<scalar_t>(reduce_func),
                    identity);
        }
    }

private:
    /// If the index cannot be represented in 32 bits, RunReduce calls itself
    /// recursively on 32-bit-indexable sub-indexers.
    template <typename scalar_t,
              typename out_scalar_t,
              int vt0 = 4,
              typename ops_t,
              typename ident_t>
    static void RunReduce(Indexer& indexer,
                          const ops_t& ops,
                          ident_t identity,
                          AccumulationBuffer* acc_buf_ptr = nullptr) {
        using traits = FunctionTraits<decltype(&ops_t::Reduce)>;
        using arg_t = typename traits::template arg<0>::type;
        static constexpr bool can_accumulate_in_output =
                std::is_convertible<arg_t, out_scalar_t>::value;
        bool can_use_32bit_indexing = indexer.CanUse32BitIndexing();
        std::unique_ptr<AccumulationBuffer> owned_buf_ptr;
        // The acc_buf_ptr is a shared pointer. It is created at the first
        // entrance and reused by all recursive function calls.
        if (acc_buf_ptr == nullptr) {
            // acc_buf_ptr holds buffer used for accumulation among multiple
            // sub_iter when accumulation in output is not possible.
            if (!can_accumulate_in_output && !can_use_32bit_indexing) {
                int64_t output_memory_size = 1;
                for (int dim = 0; dim < indexer.NumDims(); dim++) {
                    output_memory_size = std::max(
                            output_memory_size,
                            indexer.GetMasterShape()[dim] *
                                    indexer.GetOutput().byte_strides_[dim]);
                }
                owned_buf_ptr.reset(new AccumulationBuffer(
                        sizeof(arg_t), sizeof(out_scalar_t),
                        (char*)indexer.GetOutput().data_ptr_,
                        output_memory_size * sizeof(arg_t)));
            } else {
                owned_buf_ptr.reset(new AccumulationBuffer());
            }
            acc_buf_ptr = owned_buf_ptr.get();
        }
        if (!can_use_32bit_indexing) {
            // Recurse on 32-bit-indexable pieces, sharing acc_buf_ptr.
            for (auto& sub_indexer : indexer.SplitTo32BitIndexing()) {
                RunReduce<scalar_t, out_scalar_t, vt0>(sub_indexer, ops,
                                                       identity, acc_buf_ptr);
            }
            return;
        }
        ReduceConfig config(sizeof(arg_t), indexer);
        void* buffer = nullptr;
        void* semaphores = nullptr;
        if (config.ShouldGlobalReduce()) {
            // Staging memory + semaphores for cross-block reduction.
            // NOTE(review): buffer/semaphores are not freed here — confirm
            // ownership is handled by MemoryManager elsewhere.
            int device_id = CUDAState::GetInstance()->GetCurentDeviceID();
            Device device(Device::DeviceType::CUDA, device_id);
            buffer = MemoryManager::Malloc(config.GlobalMemorySize(), device);
            semaphores = MemoryManager::Malloc(config.SemaphoreSize(), device);
            OPEN3D_CUDA_CHECK(
                    cudaMemset(semaphores, 0, config.SemaphoreSize()));
        }
        assert(can_use_32bit_indexing);
        const char* in_data = (char*)indexer.GetInput(0).data_ptr_;
        char* out_data = (char*)indexer.GetOutput().data_ptr_;
        char* acc_data = acc_buf_ptr->GetAccSlice(out_data);
        auto output_calc = MakeOutputCalculator<uint32_t>(indexer);
        auto input_calc = MakeInputCalculator<uint32_t>(indexer);
        auto reduce_op = ReduceOp<scalar_t, ops_t, uint32_t, out_scalar_t, vt0>(
                ops, config, input_calc, output_calc, in_data, out_data,
                acc_data, buffer, (int*)semaphores, identity,
                indexer.ShouldAccumulate(), indexer.IsFinalOutput());
        // Launch reduce kernel
        int shared_memory = config.SharedMemorySize();
        ReduceKernel<ReduceConfig::MAX_NUM_THREADS>
                <<<config.GridDim(), config.BlockDim(), shared_memory>>>(
                        reduce_op);
        OPEN3D_CUDA_CHECK(cudaDeviceSynchronize());
        OPEN3D_CUDA_CHECK(cudaGetLastError());
    }

private:
    Indexer indexer_;
};
/// CUDA entry point for tensor reductions.
/// \param src      Input tensor.
/// \param dst      Output tensor (shape determined by dims/keepdim).
/// \param dims     Dimensions to reduce over.
/// \param keepdim  Whether reduced dims are kept with size 1 (encoded in dst).
/// \param op_code  One of the regular (Sum/Prod/Min/Max), arg (ArgMin/ArgMax)
///                 or boolean (All/Any) reduction op codes.
void ReductionCUDA(const Tensor& src,
                   Tensor& dst,
                   const SizeVector& dims,
                   bool keepdim,
                   ReductionOpCode op_code) {
    if (s_regular_reduce_ops.find(op_code) != s_regular_reduce_ops.end()) {
        Indexer indexer({src}, dst, DtypePolicy::ALL_SAME, dims);
        CUDAReductionEngine re(indexer);
        Dtype dtype = src.GetDtype();
        CUDADeviceSwitcher switcher(src.GetDevice());
        DISPATCH_DTYPE_TO_TEMPLATE(dtype, [&]() {
            switch (op_code) {
                case ReductionOpCode::Sum:
                    if (indexer.NumWorkloads() == 0) {
                        // 0-sized input can be reduced to non-0-sized outputs,
                        // where identity elements should be filled.
                        // E.g. np.sum(np.ones((0, 5)), axis=0).shape == (5,).
                        dst.Fill(0);
                    } else {
                        re.Run([] OPEN3D_HOST_DEVICE(scalar_t a, scalar_t b)
                                       -> scalar_t { return a + b; },
                               static_cast<scalar_t>(0));
                    }
                    break;
                case ReductionOpCode::Prod:
                    if (indexer.NumWorkloads() == 0) {
                        dst.Fill(1);
                    } else {
                        re.Run([] OPEN3D_HOST_DEVICE(scalar_t a, scalar_t b)
                                       -> scalar_t { return a * b; },
                               static_cast<scalar_t>(1));
                    }
                    break;
                case ReductionOpCode::Min:
                    if (indexer.NumWorkloads() == 0) {
                        // Min of an empty set has no identity we can fill.
                        utility::LogError(
                                "Zero-size Tensor does not support Min.");
                    } else {
                        re.Run([] OPEN3D_HOST_DEVICE(scalar_t a, scalar_t b)
                                       -> scalar_t { return a < b ? a : b; },
                               static_cast<scalar_t>(
                                       std::numeric_limits<scalar_t>::max()));
                    }
                    break;
                case ReductionOpCode::Max:
                    if (indexer.NumWorkloads() == 0) {
                        utility::LogError(
                                "Zero-size Tensor does not support Max.");
                    } else {
                        re.Run([] OPEN3D_HOST_DEVICE(scalar_t a, scalar_t b)
                                       -> scalar_t { return a > b ? a : b; },
                               static_cast<scalar_t>(std::numeric_limits<
                                                     scalar_t>::lowest()));
                    }
                    break;
                default:
                    utility::LogError("Unsupported op code.");
                    break;
            }
        });
    } else if (s_arg_reduce_ops.find(op_code) != s_arg_reduce_ops.end()) {
        if (dst.GetDtype() != Dtype::Int64) {
            utility::LogError("Arg-reduction must have int64 output dtype.");
        }
        // Input dtype drives dispatch; output is always Int64 indices.
        Indexer indexer({src}, dst, DtypePolicy::INPUT_SAME, dims);
        CUDAReductionEngine re(indexer);
        Dtype dtype = src.GetDtype();
        CUDADeviceSwitcher switcher(src.GetDevice());
        DISPATCH_DTYPE_TO_TEMPLATE(dtype, [&]() {
            switch (op_code) {
                case ReductionOpCode::ArgMin:
                    if (indexer.NumWorkloads() == 0) {
                        utility::LogError(
                                "Zero-size Tensor does not support ArgMin.");
                    } else {
                        re.Run([] OPEN3D_HOST_DEVICE(scalar_t a, scalar_t b)
                                       -> bool { return a < b; },
                               static_cast<scalar_t>(
                                       std::numeric_limits<scalar_t>::max()));
                    }
                    break;
                case ReductionOpCode::ArgMax:
                    if (indexer.NumWorkloads() == 0) {
                        utility::LogError(
                                "Zero-size Tensor does not support ArgMax.");
                    } else {
                        re.Run([] OPEN3D_HOST_DEVICE(scalar_t a, scalar_t b)
                                       -> bool { return a > b; },
                               static_cast<scalar_t>(std::numeric_limits<
                                                     scalar_t>::lowest()));
                    }
                    break;
                default:
                    utility::LogError("Unsupported op code.");
                    break;
            }
        });
    } else if (s_boolean_reduce_ops.find(op_code) !=
               s_boolean_reduce_ops.end()) {
        if (src.GetDtype() != Dtype::Bool) {
            utility::LogError(
                    "Boolean reduction only supports boolean input tensor.");
        }
        if (dst.GetDtype() != Dtype::Bool) {
            utility::LogError(
                    "Boolean reduction only supports boolean output tensor.");
        }
        Indexer indexer({src}, dst, DtypePolicy::ALL_SAME, dims);
        CUDAReductionEngine re(indexer);
        CUDADeviceSwitcher switcher(src.GetDevice());
        switch (op_code) {
            case ReductionOpCode::All:
                // All([]) is vacuously true; Any([]) is false.
                if (indexer.NumWorkloads() == 0) {
                    dst.Fill(true);
                } else {
                    re.Run([] OPEN3D_HOST_DEVICE(uint8_t a, uint8_t b)
                                   -> uint8_t { return a && b; },
                           static_cast<uint8_t>(true));
                }
                break;
            case ReductionOpCode::Any:
                if (indexer.NumWorkloads() == 0) {
                    dst.Fill(false);
                } else {
                    re.Run([] OPEN3D_HOST_DEVICE(uint8_t a, uint8_t b)
                                   -> uint8_t { return a || b; },
                           static_cast<uint8_t>(false));
                }
                break;
            default:
                utility::LogError("Unsupported op code.");
                break;
        }
    } else {
        utility::LogError("Unsupported op code.");
    }
}
} // namespace kernel
} // namespace core
} // namespace open3d
|
3540e5eb2ae94d5ec197c11c2b1d6c0aa67b08c7.hip | // !!! This is a file automatically generated by hipify!!!
/**
* syrk.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <sgrauerg@gmail.com>
* Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include "../../common/polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.005
#define GPU_DEVICE 0
/* Problem size */
#define N 512 //was 1024 //Du
#define M 512 //was 1024//Du
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 32
#define DIM_THREAD_BLOCK_Y 8
/* Declared constant values for alpha and beta (same as values in PolyBench 2.0) */
#define alpha 12435
#define beta 4546
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
// Fill A (N x M) and C (N rows, row stride M) with the deterministic
// PolyBench input pattern.
void init_arrays(DATA_TYPE* A, DATA_TYPE* C)
{
	for (int i = 0; i < N; i++)
	{
		for (int j = 0; j < M; j++)
		{
			A[i*M + j] = ((DATA_TYPE) i*j) / N;
		}
		for (int j = 0; j < N; j++)
		{
			C[i*M + j] = ((DATA_TYPE) i*j + 2) / N;
		}
	}
}
/* CPU reference: C := alpha*A*A' + beta*C.
 * A is N x M (leading dimension M); C is N x N (leading dimension N).
 * The original mixed i*M and i*N when indexing C, which is only correct
 * because M == N; this version uses the N stride consistently. */
void syrk(DATA_TYPE* A, DATA_TYPE* C)
{
	int i, j, k;
	/* Scale C by beta. */
	for (i = 0; i < N; i++)
	{
		for (j = 0; j < N; j++)
		{
			C[i*N + j] *= beta;
		}
	}
	/* Accumulate alpha * A * A'. */
	for (i = 0; i < N; i++)
	{
		for (j = 0; j < N; j++)
		{
			for (k = 0; k < M; k++)
			{
				C[i*N + j] += alpha * A[i*M + k] * A[j*M + k];
			}
		}
	}
}
// Count entries where the CPU result (C) and the GPU result
// (C_outputFromGpu) differ by more than PERCENT_DIFF_ERROR_THRESHOLD.
// Note: the inner bound is M, which matches the N x N result only because
// M == N in this configuration.
void compareResults(DATA_TYPE* C, DATA_TYPE* C_outputFromGpu)
{
	int i,j,fail;
	fail = 0;
	// Compare C with D
	for (i=0; i<N; i++)
	{
		for (j=0; j<M; j++)
		{
			if (percentDiff(C[i*M + j], C_outputFromGpu[i*M + j]) > PERCENT_DIFF_ERROR_THRESHOLD)
			{
				fail++;
			}
		}
	}
	// print results
	printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
// Report the properties of GPU_DEVICE and make it the active device.
void GPU_argv_init()
{
	hipDeviceProp_t deviceProp;
	hipGetDeviceProperties(&deviceProp, GPU_DEVICE);
	printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
	hipSetDevice(GPU_DEVICE);
}
// One thread per C element: C := ALPHA*A*A' + BETA*C.
// Expects a 2D grid covering an N x N index space (x -> column j,
// y -> row i); out-of-range threads exit at the guard.
// Fixes: the ALPHA/BETA parameters were previously unused (the body read
// the file-level alpha/beta macros instead); the element is now
// accumulated in a register instead of read-modify-writing global memory
// every iteration. Call sites pass the same macro values, so results are
// unchanged.
__global__ void syrk_kernel(DATA_TYPE ALPHA, DATA_TYPE BETA, DATA_TYPE *a, DATA_TYPE *c)
{
	int j = blockIdx.x * blockDim.x + threadIdx.x;
	int i = blockIdx.y * blockDim.y + threadIdx.y;
	if ((i < N) && (j < N))
	{
		DATA_TYPE sum = c[i * N + j] * BETA;
		int k;
		for(k=0; k< M; k++)
		{
			sum += ALPHA * a[i * M + k] * a[j * M + k];
		}
		c[i * N + j] = sum;
	}
}
// Allocate device buffers, upload A and C, launch syrk_kernel once
// (timed with rtclock), and download the result into C_outputFromGpu.
// NOTE(review): hipMalloc/hipMemcpy and the kernel launch are not
// error-checked; a failure would only show up as mismatching results.
void syrkCuda(DATA_TYPE* A, DATA_TYPE* C, DATA_TYPE* C_outputFromGpu)
{
	double t_start, t_end;
	DATA_TYPE* A_gpu;
	DATA_TYPE* C_gpu;
	hipMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * N * M);
	hipMalloc((void **)&C_gpu, sizeof(DATA_TYPE) * N * N);
	hipMemcpy(A_gpu, A, sizeof(DATA_TYPE) * N * M, hipMemcpyHostToDevice);
	hipMemcpy(C_gpu, C, sizeof(DATA_TYPE) * N * N, hipMemcpyHostToDevice);
	dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
	// Ceil-division so the grid covers all N rows/columns even when N is
	// not a multiple of the block dimensions.
	dim3 grid((size_t)(ceil(((float)N) / ((float)DIM_THREAD_BLOCK_X))), (size_t)ceil(((float)N) / ((float)DIM_THREAD_BLOCK_Y)));
	printf("%d, %d, %d, %d\n", block.x, block.y, grid.x, grid.y);//Du
	hipDeviceSetCacheConfig(hipFuncCachePreferL1);//Du//48KB L1 cache
	//hipDeviceSetCacheConfig(hipFuncCachePreferShared);//Du//16KB L1 cache
	hipDeviceSynchronize();
	t_start = rtclock();
	hipLaunchKernelGGL(( syrk_kernel), dim3(grid),dim3(block), 0, 0, alpha, beta, A_gpu,C_gpu);
	hipDeviceSynchronize();
	t_end = rtclock();
	fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
	fprintf(stderr, "%0.6lf\n", t_end - t_start);
	hipMemcpy(C_outputFromGpu, C_gpu, sizeof(DATA_TYPE) * N * N, hipMemcpyDeviceToHost);
	hipFree(A_gpu);
	hipFree(C_gpu);
}
// Driver: initialize inputs, run SYRK on GPU then on CPU, and compare.
// Buffers are sized N*M, which equals the N*N result size here (M == N).
// NOTE(review): malloc return values are not checked.
int main()
{
	double t_start, t_end;
	DATA_TYPE* A;
	DATA_TYPE* C;
	DATA_TYPE* C_outputFromGpu;
	A = (DATA_TYPE*)malloc(N*M*sizeof(DATA_TYPE));
	C = (DATA_TYPE*)malloc(N*M*sizeof(DATA_TYPE));
	C_outputFromGpu = (DATA_TYPE*)malloc(N*M*sizeof(DATA_TYPE));
	init_arrays(A, C);
	GPU_argv_init();
	syrkCuda(A, C, C_outputFromGpu);
	// CPU reference (timed) runs after the GPU pass and overwrites C.
	t_start = rtclock();
	syrk(A, C);
	t_end = rtclock();
	fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
	compareResults(C, C_outputFromGpu);
	free(A);
	free(C);
	free(C_outputFromGpu);
	return 0;
}
| 3540e5eb2ae94d5ec197c11c2b1d6c0aa67b08c7.cu | /**
* syrk.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <sgrauerg@gmail.com>
* Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include <cuda.h>
#include "../../common/polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.005
#define GPU_DEVICE 0
/* Problem size */
#define N 512 //was 1024 //Du
#define M 512 //was 1024//Du
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 32
#define DIM_THREAD_BLOCK_Y 8
/* Declared constant values for alpha and beta (same as values in PolyBench 2.0) */
#define alpha 12435
#define beta 4546
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
// Fill A (N x M) and C (N rows, row stride M) with the deterministic
// PolyBench input pattern.
void init_arrays(DATA_TYPE* A, DATA_TYPE* C)
{
	int i, j;
	for (i = 0; i < N; i++)
	{
		for (j = 0; j < M; j++)
		{
			A[i*M + j] = ((DATA_TYPE) i*j) / N;
		}
		for (j = 0; j < N; j++)
		{
			C[i*M + j] = ((DATA_TYPE) i*j + 2) / N;
		}
	}
}
/* CPU reference: C := alpha*A*A' + beta*C.
 * A is N x M (leading dimension M); C is N x N (leading dimension N).
 * The original mixed i*M and i*N when indexing C, which is only correct
 * because M == N; this version uses the N stride consistently. */
void syrk(DATA_TYPE* A, DATA_TYPE* C)
{
	int i, j, k;
	/* Scale C by beta. */
	for (i = 0; i < N; i++)
	{
		for (j = 0; j < N; j++)
		{
			C[i*N + j] *= beta;
		}
	}
	/* Accumulate alpha * A * A'. */
	for (i = 0; i < N; i++)
	{
		for (j = 0; j < N; j++)
		{
			for (k = 0; k < M; k++)
			{
				C[i*N + j] += alpha * A[i*M + k] * A[j*M + k];
			}
		}
	}
}
// Count entries where the CPU result (C) and the GPU result
// (C_outputFromGpu) differ by more than PERCENT_DIFF_ERROR_THRESHOLD.
// Note: the inner bound is M, which matches the N x N result only because
// M == N in this configuration.
void compareResults(DATA_TYPE* C, DATA_TYPE* C_outputFromGpu)
{
	int i,j,fail;
	fail = 0;
	// Compare C with D
	for (i=0; i<N; i++)
	{
		for (j=0; j<M; j++)
		{
			if (percentDiff(C[i*M + j], C_outputFromGpu[i*M + j]) > PERCENT_DIFF_ERROR_THRESHOLD)
			{
				fail++;
			}
		}
	}
	// print results
	printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
// Report the properties of GPU_DEVICE and make it the active device.
void GPU_argv_init()
{
	cudaDeviceProp deviceProp;
	cudaGetDeviceProperties(&deviceProp, GPU_DEVICE);
	printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
	cudaSetDevice(GPU_DEVICE);
}
// One thread per C element: C := ALPHA*A*A' + BETA*C.
// Expects a 2D grid covering an N x N index space (x -> column j,
// y -> row i); out-of-range threads exit at the guard.
// Fixes: the ALPHA/BETA parameters were previously unused (the body read
// the file-level alpha/beta macros instead); the element is now
// accumulated in a register instead of read-modify-writing global memory
// every iteration. Call sites pass the same macro values, so results are
// unchanged.
__global__ void syrk_kernel(DATA_TYPE ALPHA, DATA_TYPE BETA, DATA_TYPE *a, DATA_TYPE *c)
{
	int j = blockIdx.x * blockDim.x + threadIdx.x;
	int i = blockIdx.y * blockDim.y + threadIdx.y;
	if ((i < N) && (j < N))
	{
		DATA_TYPE sum = c[i * N + j] * BETA;
		int k;
		for(k=0; k< M; k++)
		{
			sum += ALPHA * a[i * M + k] * a[j * M + k];
		}
		c[i * N + j] = sum;
	}
}
// Allocate device buffers, upload A and C, launch syrk_kernel once
// (timed with rtclock), and download the result into C_outputFromGpu.
// NOTE(review): cudaMalloc/cudaMemcpy and the kernel launch are not
// error-checked; a failure would only show up as mismatching results.
void syrkCuda(DATA_TYPE* A, DATA_TYPE* C, DATA_TYPE* C_outputFromGpu)
{
	double t_start, t_end;
	DATA_TYPE* A_gpu;
	DATA_TYPE* C_gpu;
	cudaMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * N * M);
	cudaMalloc((void **)&C_gpu, sizeof(DATA_TYPE) * N * N);
	cudaMemcpy(A_gpu, A, sizeof(DATA_TYPE) * N * M, cudaMemcpyHostToDevice);
	cudaMemcpy(C_gpu, C, sizeof(DATA_TYPE) * N * N, cudaMemcpyHostToDevice);
	dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
	// Ceil-division so the grid covers all N rows/columns even when N is
	// not a multiple of the block dimensions.
	dim3 grid((size_t)(ceil(((float)N) / ((float)DIM_THREAD_BLOCK_X))), (size_t)ceil(((float)N) / ((float)DIM_THREAD_BLOCK_Y)));
	printf("%d, %d, %d, %d\n", block.x, block.y, grid.x, grid.y);//Du
	cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);//Du//48KB L1 cache
	//cudaDeviceSetCacheConfig(cudaFuncCachePreferShared);//Du//16KB L1 cache
	cudaDeviceSynchronize();
	t_start = rtclock();
	syrk_kernel<<<grid,block>>>(alpha, beta, A_gpu,C_gpu);
	cudaDeviceSynchronize();
	t_end = rtclock();
	fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
	fprintf(stderr, "%0.6lf\n", t_end - t_start);
	cudaMemcpy(C_outputFromGpu, C_gpu, sizeof(DATA_TYPE) * N * N, cudaMemcpyDeviceToHost);
	cudaFree(A_gpu);
	cudaFree(C_gpu);
}
// Driver: initialize inputs, run SYRK on GPU then on CPU, and compare.
// Buffers are sized N*M, which equals the N*N result size here (M == N).
// NOTE(review): malloc return values are not checked.
int main()
{
	double t_start, t_end;
	DATA_TYPE* A;
	DATA_TYPE* C;
	DATA_TYPE* C_outputFromGpu;
	A = (DATA_TYPE*)malloc(N*M*sizeof(DATA_TYPE));
	C = (DATA_TYPE*)malloc(N*M*sizeof(DATA_TYPE));
	C_outputFromGpu = (DATA_TYPE*)malloc(N*M*sizeof(DATA_TYPE));
	init_arrays(A, C);
	GPU_argv_init();
	syrkCuda(A, C, C_outputFromGpu);
	// CPU reference (timed) runs after the GPU pass and overwrites C.
	t_start = rtclock();
	syrk(A, C);
	t_end = rtclock();
	fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
	compareResults(C, C_outputFromGpu);
	free(A);
	free(C);
	free(C_outputFromGpu);
	return 0;
}
|
0a4471794c78d252a9d30bad57a4026ec86fd355.hip | // !!! This is a file automatically generated by hipify!!!
#include "ATen/Context.h"
#include "ATen/hip/HIPContext.h"
#include "ATen/Dispatch.h"
#include "ATen/NativeFunctions.h"
#include "ATen/hip/PinnedMemoryAllocator.h"
#include "ATen/hip/HIPApplyUtils.cuh"
#include "ATen/native/LinearAlgebraUtils.h"
#include "ATen/native/hip/MiscUtils.h"
#include "THH.h" // for USE_MAGMA
#ifdef USE_MAGMA
#include <magma.h>
#include <magma_types.h>
#endif
namespace at {
namespace native {
#ifdef USE_MAGMA
// Type-dispatched wrappers around MAGMA's batched routines.  The generic
// templates are fallbacks that raise an error for unsupported scalar
// types; the float/double specializations forward to the corresponding
// MAGMA batched call on the given MAGMA queue.
template<class scalar_t>
void magmaGesvBatched(
    magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
    magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb,
    magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
  AT_ERROR("gesv only takes float or double Tensors");
}

template<class scalar_t>
void magmaGetrfBatched(
    magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
    magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  AT_ERROR("getrf only takes float or double Tensors");
}

template<class scalar_t>
void magmaGetriBatched(
    magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
    magma_int_t** ipiv_array, scalar_t** dinvA_array, magma_int_t lddia,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  AT_ERROR("getri only takes float or double Tensors");
}

template<class scalar_t>
void magmaPotrsBatched(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
    scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  AT_ERROR("potrs only takes float or double Tensors");
}

// Batched LU solve (A X = B) specializations.
template<>
void magmaGesvBatched<double>(
    magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
    magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb,
    magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
  magma_dgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
}

template<>
void magmaGesvBatched<float>(
    magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
    magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb,
    magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
  magma_sgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
}

// Batched LU factorization specializations.
template<>
void magmaGetrfBatched<double>(
    magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
    magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magma_dgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
}

template<>
void magmaGetrfBatched<float>(
    magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
    magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magma_sgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
}

// Batched out-of-place inverse-from-LU specializations.
template<>
void magmaGetriBatched<double>(
    magma_int_t n, double** dA_array, magma_int_t ldda,
    magma_int_t** ipiv_array, double** dinvA_array, magma_int_t lddia,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_dgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
}

template<>
void magmaGetriBatched<float>(
    magma_int_t n, float** dA_array, magma_int_t ldda,
    magma_int_t** ipiv_array, float** dinvA_array, magma_int_t lddia,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_sgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
}

// Batched Cholesky solve specializations; these return their status via
// the `info` out-parameter rather than a per-matrix info array.
template<>
void magmaPotrsBatched<double>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
    double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_dpotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
}

template<>
void magmaPotrsBatched<float>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
    float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_spotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
}
#endif
// Allocates `size` elements of `type` in pinned host memory (allocator
// chosen via dummy_tensor) and binds the raw pointer to `name`; the owning
// storage lives in a local named storage_<name>, so the buffer is valid for
// the rest of the enclosing scope.
#define ALLOCATE_ARRAY(name, type, size, dummy_tensor) \
auto storage_##name = pin_memory<type>(size, dummy_tensor); \
name = static_cast<type*>(storage_##name.data());
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ gesv ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Solves the batched linear systems A_i x_i = b_i in-place via MAGMA's
// batched gesv. `A` and `b` must be batched column-major; on return `b`
// holds the solutions and `A` the LU factors. Per-matrix MAGMA status
// codes are written into `infos`.
template <typename scalar_t>
static void apply_gesv(Tensor& b, Tensor& A, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("gesv: MAGMA library not found in "
    "compilation. Please rebuild with MAGMA.");
#else
  auto A_data = A.data<scalar_t>();
  auto b_data = b.data<scalar_t>();
  auto A_mat_stride = matrixStride(A);
  auto b_mat_stride = matrixStride(b);

  magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
  magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
  magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");

  // Pinned host arrays of per-batch pointers and pivot storage, as
  // required by the MAGMA batched interface.
  magma_int_t* info_array;
  magma_int_t* ipiv_data;
  magma_int_t** ipiv_array;
  scalar_t** A_array;
  scalar_t** b_array;

  ALLOCATE_ARRAY(info_array, magma_int_t, batch_size, b);
  ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n, b);
  ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size, b);
  ALLOCATE_ARRAY(A_array, scalar_t*, batch_size, b);
  ALLOCATE_ARRAY(b_array, scalar_t*, batch_size, b);

  // Set up the created arrays
  for (int64_t i = 0; i < batch_size; i++) {
    A_array[i] = &A_data[i * A_mat_stride];
    b_array[i] = &b_data[i * b_mat_stride];
    ipiv_array[i] = &ipiv_data[i * n];
  }

  MAGMAQueue magma_queue(b.get_device());
  magmaGesvBatched<scalar_t>(
      n, nrhs, A_array, n, ipiv_array, b_array, n,
      info_array, batch_size, magma_queue);

  // Copy MAGMA statuses out for host-side error checking.
  for (int64_t i = 0; i < batch_size; i++) {
    infos[i] = info_array[i];
  }
#endif
}
// CUDA backend for gesv: clones inputs to batched column-major layout,
// dispatches on floating dtype, and returns (solution, LU factors).
// Raises via batchCheckErrors if any matrix in the batch was singular.
std::tuple<Tensor, Tensor> _gesv_helper_cuda(const Tensor& self, const Tensor& A) {
  std::vector<int64_t> infos(batchCount(self), 0);
  auto self_working_copy = cloneBatchedColumnMajor(self);
  auto A_working_copy = cloneBatchedColumnMajor(A);
  AT_DISPATCH_FLOATING_TYPES(self.type(), "gesv", [&]{
    apply_gesv<scalar_t>(self_working_copy, A_working_copy, infos);
  });
  batchCheckErrors(infos, "gesv");
  return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Computes batched matrix inverses: LU-factorizes `self` in-place with
// batched getrf, then writes the inverses into `self_inv` with batched
// out-of-place getri. Status codes are written into `infos`.
// NOTE(review): info_array is passed to both getrf and getri, so the
// getrf statuses are overwritten before being copied into infos — confirm
// getri reports the relevant (singularity) failures.
template <typename scalar_t>
static void apply_inverse(Tensor &self, Tensor &self_inv, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
    "compilation. Please rebuild with MAGMA.");
#else
  auto self_data = self.data<scalar_t>();
  auto self_mat_stride = matrixStride(self);
  auto self_inv_data = self_inv.data<scalar_t>();
  auto self_inv_mat_stride = matrixStride(self_inv);

  magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
  magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");

  // Pinned host arrays of per-batch pointers and pivot storage, as
  // required by the MAGMA batched interface.
  magma_int_t* info_array;
  magma_int_t* ipiv_data;
  magma_int_t** ipiv_array;
  scalar_t** self_array;
  scalar_t** self_inv_array;

  ALLOCATE_ARRAY(info_array, magma_int_t, batch_size, self);
  ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n, self);
  ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size, self);
  ALLOCATE_ARRAY(self_array, scalar_t*, batch_size, self);
  ALLOCATE_ARRAY(self_inv_array, scalar_t*, batch_size, self_inv);

  // Set up the created arrays
  for (int64_t i = 0; i < batch_size; i++) {
    self_array[i] = &self_data[i * self_mat_stride];
    self_inv_array[i] = &self_inv_data[i * self_inv_mat_stride];
    ipiv_array[i] = &ipiv_data[i * n];
  }

  MAGMAQueue magma_queue(self.get_device());
  magmaGetrfBatched<scalar_t>(
    n, n, self_array, n, ipiv_array, info_array,
    batch_size, magma_queue);

  magmaGetriBatched<scalar_t>(
    n, self_array, n, ipiv_array, self_inv_array,
    n, info_array, batch_size, magma_queue);

  for (int64_t i = 0; i < batch_size; i++) {
    infos[i] = info_array[i];
  }
#endif
}
// Because this is out-of-place inverse, the predefined macros will
// not work
Tensor _inverse_helper_cuda(const Tensor& self) {
  // Batched matrix inverse on the GPU via MAGMA; returns a fresh tensor.
  // Two column-major clones are made: one is destroyed by the in-place LU
  // factorization, the other receives the out-of-place inverse.
  std::vector<int64_t> infos(batchCount(self), 0);
  auto lu_copy = cloneBatchedColumnMajor(self);
  auto result = cloneBatchedColumnMajor(self);
  AT_DISPATCH_FLOATING_TYPES(self.type(), "inverse", [&]{
    apply_inverse<scalar_t>(lu_copy, result, infos);
  });
  batchCheckErrors(infos, "inverse");
  return result;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ potrs ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Batched Cholesky solve via MAGMA potrs: each matrix in `A` holds a
// Cholesky factor (upper or lower, per `upper`) and the corresponding
// right-hand sides in `b` are overwritten with the solution in place.
// Both tensors must already be batched column-major; `info` receives the
// MAGMA status code (0 == success).
template <typename scalar_t>
static void apply_potrs(Tensor& b, Tensor& A, bool upper, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("potrs: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto A_data = A.data<scalar_t>();
auto b_data = b.data<scalar_t>();
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
magma_int_t info_tmp;
// Pinned-host arrays of per-batch matrix pointers for MAGMA.  Note that
// potrs consumes an existing Cholesky factorization, so no pivot arrays
// are needed (previously ipiv_data/ipiv_array were allocated and filled
// here but never passed to MAGMA — that dead work has been removed).
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size, b);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size, b);
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
magmaPotrsBatched<scalar_t>(
uplo, n, nrhs, A_array, n, b_array, n,
info_tmp, batch_size, magma_queue);
info = info_tmp;
#endif
}
Tensor _potrs_helper_cuda(const Tensor& self, const Tensor& A, bool upper) {
  // Batched Cholesky solve on the GPU: `A` holds the factors, `self` the
  // right-hand sides.  Column-major clones are solved in place and the
  // solved right-hand-side copy is returned.
  int64_t info = 0;
  auto rhs_copy = cloneBatchedColumnMajor(self);
  auto factor_copy = cloneBatchedColumnMajor(A);
  AT_DISPATCH_FLOATING_TYPES(self.type(), "potrs", [&]{
    apply_potrs<scalar_t>(rhs_copy, factor_copy, upper, info);
  });
  AT_CHECK(info == 0, "MAGMA potrs : invalid argument: ", -info);
  return rhs_copy;
}
}} // namespace at::native
#undef ALLOCATE_ARRAY
| 0a4471794c78d252a9d30bad57a4026ec86fd355.cu | #include "ATen/Context.h"
#include "ATen/cuda/CUDAContext.h"
#include "ATen/Dispatch.h"
#include "ATen/NativeFunctions.h"
#include "ATen/cuda/PinnedMemoryAllocator.h"
#include "ATen/cuda/CUDAApplyUtils.cuh"
#include "ATen/native/LinearAlgebraUtils.h"
#include "ATen/native/cuda/MiscUtils.h"
#include "THC.h" // for USE_MAGMA
#ifdef USE_MAGMA
#include <magma.h>
#include <magma_types.h>
#endif
namespace at {
namespace native {
#ifdef USE_MAGMA
// Dispatch shims over MAGMA's batched LAPACK routines.  The unspecialized
// templates reject unsupported scalar types with a runtime error; the
// float/double specializations forward to the matching magma_{s,d}*_batched
// routine on the supplied MAGMA queue.
template<class scalar_t>
void magmaGesvBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
AT_ERROR("gesv only takes float or double Tensors");
}
template<class scalar_t>
void magmaGetrfBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
AT_ERROR("getrf only takes float or double Tensors");
}
template<class scalar_t>
void magmaGetriBatched(
magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, scalar_t** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
AT_ERROR("getri only takes float or double Tensors");
}
template<class scalar_t>
void magmaPotrsBatched(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
AT_ERROR("potrs only takes float or double Tensors");
}
// gesv: batched LU factorize-and-solve.
template<>
void magmaGesvBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_dgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
}
template<>
void magmaGesvBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_sgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
}
// getrf: batched LU factorization (in place, with pivoting).
template<>
void magmaGetrfBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_dgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue())
;
}
template<>
void magmaGetrfBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_sgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
}
// getri: batched out-of-place inversion from an existing LU factorization.
template<>
void magmaGetriBatched<double>(
magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, double** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
}
template<>
void magmaGetriBatched<float>(
magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, float** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
}
// potrs: batched Cholesky solve.  Unlike the routines above, MAGMA reports
// status via the return value here, so it is forwarded through `info`.
template<>
void magmaPotrsBatched<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dpotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
}
template<>
void magmaPotrsBatched<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_spotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
}
#endif
// Allocates `size` elements of `type` in pinned (page-locked) host memory and
// stores the pointer in `name`.  The backing storage lives in a local
// `storage_<name>` variable, so it stays valid for the rest of the enclosing
// scope; `dummy_tensor` supplies the allocator context.
#define ALLOCATE_ARRAY(name, type, size, dummy_tensor) \
auto storage_##name = pin_memory<type>(size, dummy_tensor); \
name = static_cast<type*>(storage_##name.data());
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ gesv ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Batched linear solve via MAGMA gesv: LU-factorizes each matrix of `A` in
// place and overwrites the corresponding right-hand sides in `b` with the
// solution.  Both tensors must already be batched column-major; `infos`
// receives the per-matrix MAGMA status codes (0 == success).
template <typename scalar_t>
static void apply_gesv(Tensor& b, Tensor& A, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("gesv: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto A_data = A.data<scalar_t>();
auto b_data = b.data<scalar_t>();
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
// Pinned-host scratch: status codes, pivots, and per-batch pointer arrays.
magma_int_t* info_array;
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size, b);
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n, b);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size, b);
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size, b);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size, b);
// Point each batch entry at its matrix / pivot slab.
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
MAGMAQueue magma_queue(b.get_device());
magmaGesvBatched<scalar_t>(
n, nrhs, A_array, n, ipiv_array, b_array, n,
info_array, batch_size, magma_queue);
// NOTE(review): info_array is read on the host right after the batched
// call; this assumes the call completes before returning — verify whether
// an explicit magma queue sync is required here.
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
#endif
}
std::tuple<Tensor, Tensor> _gesv_helper_cuda(const Tensor& self, const Tensor& A) {
  // Batched linear solve A X = self on the GPU; returns (solution, LU of A).
  // MAGMA wants column-major data, so both inputs are cloned into that layout
  // and solved in place; per-matrix status codes are checked afterwards.
  std::vector<int64_t> infos(batchCount(self), 0);
  auto rhs_copy = cloneBatchedColumnMajor(self);
  auto lhs_copy = cloneBatchedColumnMajor(A);
  AT_DISPATCH_FLOATING_TYPES(self.type(), "gesv", [&]{
    apply_gesv<scalar_t>(rhs_copy, lhs_copy, infos);
  });
  batchCheckErrors(infos, "gesv");
  return std::make_tuple(rhs_copy, lhs_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Out-of-place batched matrix inverse via MAGMA: LU-factorizes each matrix of
// `self` in place (getrf), then writes its inverse into `self_inv` (getri).
// Both tensors must already be batched column-major; `infos` receives the
// per-matrix MAGMA status codes (0 == success).
template <typename scalar_t>
static void apply_inverse(Tensor &self, Tensor &self_inv, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data<scalar_t>();
auto self_mat_stride = matrixStride(self);
auto self_inv_data = self_inv.data<scalar_t>();
auto self_inv_mat_stride = matrixStride(self_inv);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
// Pinned-host scratch: status codes, pivots, and per-batch pointer arrays.
magma_int_t* info_array;
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** self_array;
scalar_t** self_inv_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size, self);
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n, self);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size, self);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size, self);
ALLOCATE_ARRAY(self_inv_array, scalar_t*, batch_size, self_inv);
// Point each batch entry at its matrix / pivot slab.
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
self_inv_array[i] = &self_inv_data[i * self_inv_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
MAGMAQueue magma_queue(self.get_device());
magmaGetrfBatched<scalar_t>(
n, n, self_array, n, ipiv_array, info_array,
batch_size, magma_queue);
// getri consumes the LU factors and pivots produced by getrf above.
magmaGetriBatched<scalar_t>(
n, self_array, n, ipiv_array, self_inv_array,
n, info_array, batch_size, magma_queue);
// NOTE(review): info_array is read on the host right after the batched
// calls; this assumes those calls complete before returning — verify
// whether an explicit magma queue sync is required here.
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
#endif
}
// Because this is out-of-place inverse, the predefined macros will
// not work
Tensor _inverse_helper_cuda(const Tensor& self) {
  // Batched matrix inverse on the GPU via MAGMA; returns a fresh tensor.
  // Two column-major clones are made: one is destroyed by the in-place LU
  // factorization, the other receives the out-of-place inverse.
  std::vector<int64_t> infos(batchCount(self), 0);
  auto lu_copy = cloneBatchedColumnMajor(self);
  auto result = cloneBatchedColumnMajor(self);
  AT_DISPATCH_FLOATING_TYPES(self.type(), "inverse", [&]{
    apply_inverse<scalar_t>(lu_copy, result, infos);
  });
  batchCheckErrors(infos, "inverse");
  return result;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ potrs ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Batched Cholesky solve via MAGMA potrs: each matrix in `A` holds a
// Cholesky factor (upper or lower, per `upper`) and the corresponding
// right-hand sides in `b` are overwritten with the solution in place.
// Both tensors must already be batched column-major; `info` receives the
// MAGMA status code (0 == success).
template <typename scalar_t>
static void apply_potrs(Tensor& b, Tensor& A, bool upper, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("potrs: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto A_data = A.data<scalar_t>();
auto b_data = b.data<scalar_t>();
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
magma_int_t info_tmp;
// Pinned-host arrays of per-batch matrix pointers for MAGMA.  Note that
// potrs consumes an existing Cholesky factorization, so no pivot arrays
// are needed (previously ipiv_data/ipiv_array were allocated and filled
// here but never passed to MAGMA — that dead work has been removed).
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size, b);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size, b);
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
magmaPotrsBatched<scalar_t>(
uplo, n, nrhs, A_array, n, b_array, n,
info_tmp, batch_size, magma_queue);
info = info_tmp;
#endif
}
Tensor _potrs_helper_cuda(const Tensor& self, const Tensor& A, bool upper) {
  // Batched Cholesky solve on the GPU: `A` holds the factors, `self` the
  // right-hand sides.  Column-major clones are solved in place and the
  // solved right-hand-side copy is returned.
  int64_t info = 0;
  auto rhs_copy = cloneBatchedColumnMajor(self);
  auto factor_copy = cloneBatchedColumnMajor(A);
  AT_DISPATCH_FLOATING_TYPES(self.type(), "potrs", [&]{
    apply_potrs<scalar_t>(rhs_copy, factor_copy, upper, info);
  });
  AT_CHECK(info == 0, "MAGMA potrs : invalid argument: ", -info);
  return rhs_copy;
}
}} // namespace at::native
#undef ALLOCATE_ARRAY
|
57032e303e8120add405a97a2aa91c805fad0082.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2014 BVLC and contributors.
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/layers/resample_layer.hpp"
#include "caffe/util/math_functions.hpp"
#include <opencv2/opencv.hpp>
//#include <opencv2/gpu/gpu.hpp>
namespace caffe {
static __device__ __forceinline__ float bicubicCoeff(float x_)
{
    // Cubic interpolation kernel evaluated at |x_|; zero outside [-2, 2].
    // Predicates and their order match the reference piecewise definition.
    float x = fabsf(x_);
    if (x <= 1.0f) {
        return x * x * (1.5f * x - 2.5f) + 1.0f;
    }
    if (x < 2.0f) {
        return x * (x * (-0.5f * x + 2.5f) - 4.0f) + 2.0f;
    }
    return 0.0f;
}
static __device__ __forceinline__ float boxCoeff(float x)
{
    // Box (nearest-neighbor) kernel: 1 on [-0.5, 0.5), 0 elsewhere.
    return (-0.5f <= x && x < 0.5f) ? 1.0f : 0.0f;
}
static __device__ __forceinline__ float triangleCoeff(float x)
{
    // Triangle (linear) kernel: 1 - |x| on [-1, 1], 0 elsewhere.
    if (-1.0f <= x && x < 0.0f) {
        return x + 1.0f;
    }
    if (0.0f <= x && x <= 1.0f) {
        return 1.0f - x;
    }
    return 0.0f;
}
#define FILTER_BICUBIC 0
#define FILTER_BOX 1
#define FILTER_TRIANGLE 2
// Filtered image resampling kernel: one thread per output element (flattened
// over channel, y, x).  Gathers source pixels in a window around the mapped
// input position, weights them with the selected filter (bicubic / box /
// triangle), and normalizes by the weight sum.  fx/fy are the
// input-size / output-size scale factors; `antialias` widens the filter
// footprint when downsampling.
template <typename Dtype>
__global__ void InterpolationKernel(
const int nthreads,
const int in_channelsize,
const int out_channelsize,
const Dtype* in_ptr,
const int in_width,
const int in_height,
const float fx,
const float fy,
Dtype* out_ptr,
const int out_width,
const int out_height,
int filter_type,
int kernel_width,
const bool antialias)
{
CUDA_KERNEL_LOOP(index, nthreads)
{
// Decompose the flat index into (channel, y_out, x_out).
int c = index / out_channelsize;
int x_out = (index % out_channelsize) % out_width;
int y_out = (index % out_channelsize) / out_width;
// Map output coords to input coords (pixel-center alignment).
// NOTE(review): the half-pixel offset uses the *other* axis's scale
// (fy in x_in, fx in y_in).  This looks like a typo — harmless when
// fx == fy, but skews results for anisotropic scaling; confirm intent.
float x_in = x_out * fx + fy / 2.0f - 0.5f;
float y_in = y_out * fy + fx / 2.0f - 0.5f;
int x_in_round = round(x_in);
int y_in_round = round(y_in);
Dtype sum=0;
Dtype wsum=0;
// When antialiasing a downsample, stretch the filter by the scale factor.
float ax = 1.0f / (antialias ? fx : 1.0f);
float ay = 1.0f / (antialias ? fy : 1.0f);
// Support radius of the (possibly stretched) filter window.
int rx = (fx < 1.0f) ? 2 : ceil(float(kernel_width)/ax);
int ry = (fy < 1.0f) ? 2 : ceil(float(kernel_width)/ay);
for(int y=y_in_round-ry; y<=y_in_round+ry; y++)
for(int x=x_in_round-rx; x<=x_in_round+rx; x++)
{
// Skip taps that fall outside the source image.
if(y<0 || x<0) continue;
if(y>=in_height || x>=in_width) continue;
float dx = x_in - x;
float dy = y_in - y;
// Separable 2-D weight from the chosen 1-D filter.
float w;
if(filter_type == FILTER_BICUBIC) w = ax*bicubicCoeff(ax*dx) * ay*bicubicCoeff(ay*dy);
else if(filter_type == FILTER_BOX) w = ax*boxCoeff(ax*dx) * ay*boxCoeff(ay*dy);
else w = ax*triangleCoeff(ax*dx) * ay*triangleCoeff(ay*dy);
sum += w * in_ptr[c*in_channelsize + y*in_width+x];
wsum += w;
}
// Normalize; emit 0 if no tap contributed any weight.
out_ptr[index] = (!wsum) ? 0 : (sum / wsum);
}
}
// Nearest-neighbor resampling kernel: one thread per output element.  Maps
// each output coordinate to the nearest input pixel and copies it.
// out_height is unused (kept for signature symmetry with
// InterpolationKernel).
template <typename Dtype>
__global__ void NearestNeighborKernel(
const int nthreads,
const int in_channelsize,
const int out_channelsize,
const Dtype* in_ptr,
const int in_width,
const int in_height,
const float fx,
const float fy,
Dtype* out_ptr,
const int out_width,
const int out_height)
{
CUDA_KERNEL_LOOP(index, nthreads)
{
// Decompose the flat index into (channel, y_out, x_out).
int c = index / out_channelsize;
int x_out = (index % out_channelsize) % out_width;
int y_out = (index % out_channelsize) / out_width;
// NOTE(review): as in InterpolationKernel, the half-pixel offset mixes
// fx/fy across axes — confirm intent.  Also, the rounded source index
// is not clamped to [0, in_width/in_height), so non-integral scale
// factors could read out of bounds at the image border; verify.
float x_in = x_out * fx + fy / 2.0f - 0.5f;
float y_in = y_out * fy + fx / 2.0f - 0.5f;
int x_in_round = round(x_in);
int y_in_round = round(y_in);
out_ptr[index] = in_ptr[c*in_channelsize + y_in_round*in_width+x_in_round];
}
}
// GPU forward pass: resamples bottom[0] (N x C x H x W) to the top blob's
// spatial size using the layer's configured resample type (NEAREST, CUBIC,
// or LINEAR).  Launches one of the two kernels above over all output
// elements on the default stream.
template <typename Dtype>
void ResampleLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
Dtype* top_data = top[0]->mutable_gpu_data(); // dest
int topwidth = top[0]->width();
int topheight = top[0]->height();
int topchannels = top[0]->channels();
int topcount = top[0]->count();
Dtype* bottom_data = bottom[0]->mutable_gpu_data(); // source
int bottomnum = (bottom)[0]->num();
int bottomchannels = (bottom)[0]->channels();
int bottomwidth = (bottom)[0]->width();
int bottomheight = (bottom)[0]->height();
int bottomcount = (bottom)[0]->count();
CHECK_EQ(topchannels, bottomchannels) << "ResampleLayer top channel count must match bottom channel count";
// Scale factors: input extent per unit of output extent (> 1 == downsample).
float fx = float(bottomwidth)/float(topwidth);
float fy = float(bottomheight)/float(topheight);
//int botsize = bottomwidth*bottomheight*bottomchannels*bottomnum;
// Total output elements across the whole batch; the kernels treat the
// (num, channel) dimensions as one flattened channel axis.
int topsize = topwidth*topheight*topchannels*bottomnum;
int topchannelsize = topwidth*topheight;
int botchannelsize = bottomwidth*bottomheight;
if(this->layer_param().resample_param().type() == ResampleParameter_ResampleType_NEAREST)
{
hipLaunchKernelGGL(( NearestNeighborKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(topsize)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
topsize,
botchannelsize,
topchannelsize,
(Dtype*)bottom_data,
bottomwidth,
bottomheight,
fx,
fy,
(Dtype*)top_data,
topwidth,
topheight
);
CUDA_POST_KERNEL_CHECK;
}
else if(this->layer_param().resample_param().type() == ResampleParameter_ResampleType_CUBIC || this->layer_param().resample_param().type() == ResampleParameter_ResampleType_LINEAR)
{
// Map the layer enum to the filter selector used inside the kernel.
// (FILTER_BOX is defined but not reachable from any layer type.)
int filter_type;
if(this->layer_param().resample_param().type() == ResampleParameter_ResampleType_CUBIC)
filter_type = FILTER_BICUBIC;
else if(this->layer_param().resample_param().type() == ResampleParameter_ResampleType_LINEAR)
filter_type = FILTER_TRIANGLE;
// Antialias only makes sense when shrinking, and only if enabled.
bool isDownsample = (fx > 1) || (fy > 1);
bool antialias = isDownsample && this->layer_param_.resample_param().antialias();
// Half-width (in taps) of the chosen 1-D filter.
int kernel_width;
if(filter_type == FILTER_BICUBIC) kernel_width = 4;
else if(filter_type == FILTER_BOX) kernel_width = 1;
else kernel_width = 2;
hipLaunchKernelGGL(( InterpolationKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(topsize)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
topsize,
botchannelsize,
topchannelsize,
(Dtype*)bottom_data,
bottomwidth,
bottomheight,
fx,
fy,
(Dtype*)top_data,
topwidth,
topheight,
filter_type,
kernel_width,
antialias);
CUDA_POST_KERNEL_CHECK;
}
else
LOG(FATAL) << "unsupported downsampling type";
}
// Resampling is forward-only in this implementation: aborts the process if a
// backward pass is ever requested.
template <typename Dtype>
void ResampleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
LOG(FATAL) << "ResampleLayer cannot do backward.";
}
INSTANTIATE_LAYER_GPU_FUNCS(ResampleLayer);
} // namespace caffe
// cv::gpu::GpuMat input(bottomheight, bottomwidth, CV_32FC3);
// float* input_ptr=(float*)input.data;
// int input_stride=input.step/4;
// BlobToOpenCV<Dtype><<<CAFFE_GET_BLOCKS(bottomwidth*bottomheight), CAFFE_CUDA_NUM_THREADS>>>(
// bottomwidth*bottomheight,
// (Dtype*)bottom_data,
// bottomwidth,
// bottomheight,
// input_stride,
// (Dtype*)input_ptr);
// cv::gpu::GpuMat output;
// cv::Size output_size;
// output_size.width = topwidth;
// output_size.height = topheight;
// cv::gpu::resize(input,output,output_size,0,0,interpolation,cv::gpu::Stream::Null(),false);
// float* output_ptr=(float*)output.data;
// int output_stride=output.step/4;
// OpenCVToBlob<Dtype><<<CAFFE_GET_BLOCKS(topwidth*topheight), CAFFE_CUDA_NUM_THREADS>>>(
// topwidth*topheight,
// (Dtype*)output_ptr,
// topwidth,
// topheight,
// output_stride,
// (Dtype*)top_data);
// top_data += topsize;
// bottom_data += botsize;
//template <typename Dtype>
//__global__ void BlobToOpenCV(
// const int nthreads,
// const Dtype* blob_ptr,
// const int width,
// const int height,
// const int stride,
// Dtype* mat_ptr)
//{
// CUDA_KERNEL_LOOP(index, nthreads)
// {
// int x=index % width;
// int y=index / width;
// for(int c=0; c<3; c++)
// mat_ptr[y*stride+x*3+c]=blob_ptr[((c*height)+y)*width+x];
// }
//}
//template <typename Dtype>
//__global__ void OpenCVToBlob(
// const int nthreads,
// const Dtype* mat_ptr,
// const int width,
// const int height,
// const int stride,
// Dtype* blob_ptr)
//{
// CUDA_KERNEL_LOOP(index, nthreads)
// {
// int x=index % width;
// int y=index / width;
// for(int c=0; c<3; c++)
// blob_ptr[((c*height)+y)*width+x]=mat_ptr[y*stride+x*3+c];
// }
//}
| 57032e303e8120add405a97a2aa91c805fad0082.cu | // Copyright 2014 BVLC and contributors.
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/layers/resample_layer.hpp"
#include "caffe/util/math_functions.hpp"
#include <opencv2/opencv.hpp>
//#include <opencv2/gpu/gpu.hpp>
namespace caffe {
static __device__ __forceinline__ float bicubicCoeff(float x_)
{
    // Cubic interpolation kernel evaluated at |x_|; zero outside [-2, 2].
    // Predicates and their order match the reference piecewise definition.
    float x = fabsf(x_);
    if (x <= 1.0f) {
        return x * x * (1.5f * x - 2.5f) + 1.0f;
    }
    if (x < 2.0f) {
        return x * (x * (-0.5f * x + 2.5f) - 4.0f) + 2.0f;
    }
    return 0.0f;
}
static __device__ __forceinline__ float boxCoeff(float x)
{
    // Box (nearest-neighbor) kernel: 1 on [-0.5, 0.5), 0 elsewhere.
    return (-0.5f <= x && x < 0.5f) ? 1.0f : 0.0f;
}
static __device__ __forceinline__ float triangleCoeff(float x)
{
    // Triangle (linear) kernel: 1 - |x| on [-1, 1], 0 elsewhere.
    if (-1.0f <= x && x < 0.0f) {
        return x + 1.0f;
    }
    if (0.0f <= x && x <= 1.0f) {
        return 1.0f - x;
    }
    return 0.0f;
}
#define FILTER_BICUBIC 0
#define FILTER_BOX 1
#define FILTER_TRIANGLE 2
// Filtered image resampling kernel: one thread per output element (flattened
// over channel, y, x).  Gathers source pixels in a window around the mapped
// input position, weights them with the selected filter (bicubic / box /
// triangle), and normalizes by the weight sum.  fx/fy are the
// input-size / output-size scale factors; `antialias` widens the filter
// footprint when downsampling.
template <typename Dtype>
__global__ void InterpolationKernel(
const int nthreads,
const int in_channelsize,
const int out_channelsize,
const Dtype* in_ptr,
const int in_width,
const int in_height,
const float fx,
const float fy,
Dtype* out_ptr,
const int out_width,
const int out_height,
int filter_type,
int kernel_width,
const bool antialias)
{
CUDA_KERNEL_LOOP(index, nthreads)
{
// Decompose the flat index into (channel, y_out, x_out).
int c = index / out_channelsize;
int x_out = (index % out_channelsize) % out_width;
int y_out = (index % out_channelsize) / out_width;
// Map output coords to input coords (pixel-center alignment).
// NOTE(review): the half-pixel offset uses the *other* axis's scale
// (fy in x_in, fx in y_in).  This looks like a typo — harmless when
// fx == fy, but skews results for anisotropic scaling; confirm intent.
float x_in = x_out * fx + fy / 2.0f - 0.5f;
float y_in = y_out * fy + fx / 2.0f - 0.5f;
int x_in_round = round(x_in);
int y_in_round = round(y_in);
Dtype sum=0;
Dtype wsum=0;
// When antialiasing a downsample, stretch the filter by the scale factor.
float ax = 1.0f / (antialias ? fx : 1.0f);
float ay = 1.0f / (antialias ? fy : 1.0f);
// Support radius of the (possibly stretched) filter window.
int rx = (fx < 1.0f) ? 2 : ceil(float(kernel_width)/ax);
int ry = (fy < 1.0f) ? 2 : ceil(float(kernel_width)/ay);
for(int y=y_in_round-ry; y<=y_in_round+ry; y++)
for(int x=x_in_round-rx; x<=x_in_round+rx; x++)
{
// Skip taps that fall outside the source image.
if(y<0 || x<0) continue;
if(y>=in_height || x>=in_width) continue;
float dx = x_in - x;
float dy = y_in - y;
// Separable 2-D weight from the chosen 1-D filter.
float w;
if(filter_type == FILTER_BICUBIC) w = ax*bicubicCoeff(ax*dx) * ay*bicubicCoeff(ay*dy);
else if(filter_type == FILTER_BOX) w = ax*boxCoeff(ax*dx) * ay*boxCoeff(ay*dy);
else w = ax*triangleCoeff(ax*dx) * ay*triangleCoeff(ay*dy);
sum += w * in_ptr[c*in_channelsize + y*in_width+x];
wsum += w;
}
// Normalize; emit 0 if no tap contributed any weight.
out_ptr[index] = (!wsum) ? 0 : (sum / wsum);
}
}
// Nearest-neighbor resampling kernel: one thread per output element.  Maps
// each output coordinate to the nearest input pixel and copies it.
// out_height is unused (kept for signature symmetry with
// InterpolationKernel).
template <typename Dtype>
__global__ void NearestNeighborKernel(
const int nthreads,
const int in_channelsize,
const int out_channelsize,
const Dtype* in_ptr,
const int in_width,
const int in_height,
const float fx,
const float fy,
Dtype* out_ptr,
const int out_width,
const int out_height)
{
CUDA_KERNEL_LOOP(index, nthreads)
{
// Decompose the flat index into (channel, y_out, x_out).
int c = index / out_channelsize;
int x_out = (index % out_channelsize) % out_width;
int y_out = (index % out_channelsize) / out_width;
// NOTE(review): as in InterpolationKernel, the half-pixel offset mixes
// fx/fy across axes — confirm intent.  Also, the rounded source index
// is not clamped to [0, in_width/in_height), so non-integral scale
// factors could read out of bounds at the image border; verify.
float x_in = x_out * fx + fy / 2.0f - 0.5f;
float y_in = y_out * fy + fx / 2.0f - 0.5f;
int x_in_round = round(x_in);
int y_in_round = round(y_in);
out_ptr[index] = in_ptr[c*in_channelsize + y_in_round*in_width+x_in_round];
}
}
// GPU forward pass: resamples bottom[0] (N x C x H x W) to the top blob's
// spatial size using the layer's configured resample type (NEAREST, CUBIC,
// or LINEAR).  Launches one of the two kernels above over all output
// elements on the default stream.
template <typename Dtype>
void ResampleLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
Dtype* top_data = top[0]->mutable_gpu_data(); // dest
int topwidth = top[0]->width();
int topheight = top[0]->height();
int topchannels = top[0]->channels();
int topcount = top[0]->count();
Dtype* bottom_data = bottom[0]->mutable_gpu_data(); // source
int bottomnum = (bottom)[0]->num();
int bottomchannels = (bottom)[0]->channels();
int bottomwidth = (bottom)[0]->width();
int bottomheight = (bottom)[0]->height();
int bottomcount = (bottom)[0]->count();
CHECK_EQ(topchannels, bottomchannels) << "ResampleLayer top channel count must match bottom channel count";
// Scale factors: input extent per unit of output extent (> 1 == downsample).
float fx = float(bottomwidth)/float(topwidth);
float fy = float(bottomheight)/float(topheight);
//int botsize = bottomwidth*bottomheight*bottomchannels*bottomnum;
// Total output elements across the whole batch; the kernels treat the
// (num, channel) dimensions as one flattened channel axis.
int topsize = topwidth*topheight*topchannels*bottomnum;
int topchannelsize = topwidth*topheight;
int botchannelsize = bottomwidth*bottomheight;
if(this->layer_param().resample_param().type() == ResampleParameter_ResampleType_NEAREST)
{
NearestNeighborKernel<Dtype><<<CAFFE_GET_BLOCKS(topsize), CAFFE_CUDA_NUM_THREADS>>>(
topsize,
botchannelsize,
topchannelsize,
(Dtype*)bottom_data,
bottomwidth,
bottomheight,
fx,
fy,
(Dtype*)top_data,
topwidth,
topheight
);
CUDA_POST_KERNEL_CHECK;
}
else if(this->layer_param().resample_param().type() == ResampleParameter_ResampleType_CUBIC || this->layer_param().resample_param().type() == ResampleParameter_ResampleType_LINEAR)
{
// Map the layer enum to the filter selector used inside the kernel.
// (FILTER_BOX is defined but not reachable from any layer type.)
int filter_type;
if(this->layer_param().resample_param().type() == ResampleParameter_ResampleType_CUBIC)
filter_type = FILTER_BICUBIC;
else if(this->layer_param().resample_param().type() == ResampleParameter_ResampleType_LINEAR)
filter_type = FILTER_TRIANGLE;
// Antialias only makes sense when shrinking, and only if enabled.
bool isDownsample = (fx > 1) || (fy > 1);
bool antialias = isDownsample && this->layer_param_.resample_param().antialias();
// Half-width (in taps) of the chosen 1-D filter.
int kernel_width;
if(filter_type == FILTER_BICUBIC) kernel_width = 4;
else if(filter_type == FILTER_BOX) kernel_width = 1;
else kernel_width = 2;
InterpolationKernel<Dtype><<<CAFFE_GET_BLOCKS(topsize), CAFFE_CUDA_NUM_THREADS>>>(
topsize,
botchannelsize,
topchannelsize,
(Dtype*)bottom_data,
bottomwidth,
bottomheight,
fx,
fy,
(Dtype*)top_data,
topwidth,
topheight,
filter_type,
kernel_width,
antialias);
CUDA_POST_KERNEL_CHECK;
}
else
LOG(FATAL) << "unsupported downsampling type";
}
// Resampling is forward-only in this implementation: aborts the process if a
// backward pass is ever requested.
template <typename Dtype>
void ResampleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
LOG(FATAL) << "ResampleLayer cannot do backward.";
}
INSTANTIATE_LAYER_GPU_FUNCS(ResampleLayer);
} // namespace caffe
// cv::gpu::GpuMat input(bottomheight, bottomwidth, CV_32FC3);
// float* input_ptr=(float*)input.data;
// int input_stride=input.step/4;
// BlobToOpenCV<Dtype><<<CAFFE_GET_BLOCKS(bottomwidth*bottomheight), CAFFE_CUDA_NUM_THREADS>>>(
// bottomwidth*bottomheight,
// (Dtype*)bottom_data,
// bottomwidth,
// bottomheight,
// input_stride,
// (Dtype*)input_ptr);
// cv::gpu::GpuMat output;
// cv::Size output_size;
// output_size.width = topwidth;
// output_size.height = topheight;
// cv::gpu::resize(input,output,output_size,0,0,interpolation,cv::gpu::Stream::Null(),false);
// float* output_ptr=(float*)output.data;
// int output_stride=output.step/4;
// OpenCVToBlob<Dtype><<<CAFFE_GET_BLOCKS(topwidth*topheight), CAFFE_CUDA_NUM_THREADS>>>(
// topwidth*topheight,
// (Dtype*)output_ptr,
// topwidth,
// topheight,
// output_stride,
// (Dtype*)top_data);
// top_data += topsize;
// bottom_data += botsize;
//template <typename Dtype>
//__global__ void BlobToOpenCV(
// const int nthreads,
// const Dtype* blob_ptr,
// const int width,
// const int height,
// const int stride,
// Dtype* mat_ptr)
//{
// CUDA_KERNEL_LOOP(index, nthreads)
// {
// int x=index % width;
// int y=index / width;
// for(int c=0; c<3; c++)
// mat_ptr[y*stride+x*3+c]=blob_ptr[((c*height)+y)*width+x];
// }
//}
//template <typename Dtype>
//__global__ void OpenCVToBlob(
// const int nthreads,
// const Dtype* mat_ptr,
// const int width,
// const int height,
// const int stride,
// Dtype* blob_ptr)
//{
// CUDA_KERNEL_LOOP(index, nthreads)
// {
// int x=index % width;
// int y=index / width;
// for(int c=0; c<3; c++)
// blob_ptr[((c*height)+y)*width+x]=mat_ptr[y*stride+x*3+c];
// }
//}
|
ef6e4c2de91187ee1ee2ae6f7796c52b51b479ca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_Riemann_kernel [6][1];
static int dims_Riemann_kernel_h [6][1] = {0};
//user function
// Per-face 1-D Riemann solver setup for the Euler equations (auto-generated
// OPS user kernel).  From the left (index 0) and right (index 1) states of
// density / momentum / total energy it computes sqrt-density-weighted
// averages of velocity and enthalpy (Roe-style averaging), then fills:
//   alam : the three characteristic speeds u-c, u, u+c
//   r    : the 3x3 right-eigenvector matrix (scaled by 1/c^2)
//   al   : the three wave strengths (scaled by c^2)
// `gam1` is a global constant — presumably gamma - 1 of the ideal-gas law;
// confirm against the file that defines it.
__device__
void Riemann_kernel_gpu(const ACC<double>& rho_new,
const ACC<double> &rhou_new,
const ACC<double>& rhoE_new,
ACC<double>& alam,
ACC<double>& r,
ACC<double>& al) {
double rl, rr, rho, u, hl, hr, h, Vsq, csq, c;
double dw1, dw2, dw3, delpc2, rdeluc;
// sqrt-density weights for the left/right states.
rl = sqrt(rho_new(0));
rr = sqrt(rho_new(1));
rho = rl + rr;
u = ((rhou_new(0) / rl) + (rhou_new(1) / rr)) / rho ;
// Left-state pressure and specific enthalpy.
double fni = rhou_new(0) * rhou_new(0) / rho_new(0) ;
double p = gam1 * (rhoE_new(0) - 0.5 * fni);
hl = (rhoE_new(0) + p) / rl ;
// Right-state pressure and specific enthalpy.
fni = rhou_new(1) * rhou_new(1) / rho_new(1) ;
p = gam1 * (rhoE_new(1) - 0.5 * fni);
hr = (rhoE_new(1) + p) / rr ;
h = (hl + hr)/rho;
// Averaged sound speed from the averaged enthalpy and velocity.
Vsq = u*u;
csq = gam1 * (h - 0.5 * Vsq);
c = sqrt(csq);
// Characteristic speeds.
alam(0,0) = u - c;
alam(1,0) = u;
alam(2,0) = u + c;
// Right-eigenvector matrix, stored row-major in the 9-component dat.
r(0,0) = 1.0;
r(1,0) = 1.0;
r(2,0) = 1.0;
r(3,0) = u - c;
r(4,0) = u;
r(5,0) = u + c;
r(6,0) = h - u * c;
r(7,0) = 0.5 * Vsq;
r(8,0) = h + u * c;
for (int m=0; m<9; m++)
r(m,0) = r(m,0) / csq;
// Jumps in the conserved variables across the face.
dw1 = rho_new(1) - rho_new(0);
dw2 = rhou_new(1) - rhou_new(0);
dw3 = rhoE_new(1) - rhoE_new(0);
// Project the jumps onto the characteristic fields.
delpc2 = gam1 * ( dw3 + 0.50 * Vsq * dw1 - u * dw2) / csq;
rdeluc = ( dw2 - u * dw1) / c ;
al(0,0) = 0.5 * (delpc2 - rdeluc);
al(1,0) = dw1 - delpc2 ;
al(2,0) = 0.5 * ( delpc2 + rdeluc );
for (int m=0; m<3; m++)
al(m,0) = al(m,0) * csq;
}
// Auto-generated OPS wrapper kernel: one thread per 1-D grid point.  Each
// raw pointer is advanced by the point index times that dat's component
// count (1, 1, 1, 3, 9, 3), wrapped in an ACC accessor, and handed to the
// user kernel; threads past size0 do nothing.
__global__ void ops_Riemann_kernel(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
int size0 ){
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
// Offset each dat to this thread's grid point.
arg0 += idx_x * 1*1;
arg1 += idx_x * 1*1;
arg2 += idx_x * 1*1;
arg3 += idx_x * 1*3;
arg4 += idx_x * 1*9;
arg5 += idx_x * 1*3;
// Bounds guard: the grid may overshoot the iteration range.
if (idx_x < size0) {
const ACC<double> argp0(arg0);
const ACC<double> argp1(arg1);
const ACC<double> argp2(arg2);
ACC<double> argp3(3, dims_Riemann_kernel[3][0], arg3);
ACC<double> argp4(9, dims_Riemann_kernel[4][0], arg4);
ACC<double> argp5(3, dims_Riemann_kernel[5][0], arg5);
Riemann_kernel_gpu(argp0, argp1, argp2, argp3,
argp4, argp5);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_Riemann_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5) {
#else
void ops_par_loop_Riemann_kernel_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[6] = { arg0, arg1, arg2, arg3, arg4, arg5};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,6,range,7)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(7,"Riemann_kernel");
OPS_kernels[7].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[1];
int end[1];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[1];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 6,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<1; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
int xdim3 = args[3].dat->size[0];
int xdim4 = args[4].dat->size[0];
int xdim5 = args[5].dat->size[0];
if (xdim0 != dims_Riemann_kernel_h[0][0] || xdim1 != dims_Riemann_kernel_h[1][0] || xdim2 != dims_Riemann_kernel_h[2][0] || xdim3 != dims_Riemann_kernel_h[3][0] || xdim4 != dims_Riemann_kernel_h[4][0] || xdim5 != dims_Riemann_kernel_h[5][0]) {
dims_Riemann_kernel_h[0][0] = xdim0;
dims_Riemann_kernel_h[1][0] = xdim1;
dims_Riemann_kernel_h[2][0] = xdim2;
dims_Riemann_kernel_h[3][0] = xdim3;
dims_Riemann_kernel_h[4][0] = xdim4;
dims_Riemann_kernel_h[5][0] = xdim5;
cutilSafeCall(hipMemcpyToSymbol( dims_Riemann_kernel, dims_Riemann_kernel_h, sizeof(dims_Riemann_kernel)));
}
int x_size = MAX(0,end[0]-start[0]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, 1, 1);
dim3 tblock(OPS_block_size_x,1,1);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
char *p_a[6];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
p_a[5] = (char *)args[5].data_d + base5;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 6);
ops_halo_exchanges(args,6,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[7].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0)
hipLaunchKernelGGL(( ops_Riemann_kernel), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],x_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[7].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 6);
ops_set_halo_dirtybit3(&args[3],range);
ops_set_halo_dirtybit3(&args[4],range);
ops_set_halo_dirtybit3(&args[5],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[7].mpi_time += t2-t1;
OPS_kernels[7].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[7].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[7].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[7].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[7].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[7].transfer += ops_compute_transfer(dim, start, end, &arg5);
}
}
#ifdef OPS_LAZY
// OPS_LAZY registration path: instead of executing immediately, build a
// kernel descriptor (range, args, hash) and enqueue it; the queued
// ops_par_loop_Riemann_kernel_execute runs it later.
void ops_par_loop_Riemann_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
// Kernel index 7 identifies Riemann_kernel in the OPS timing tables.
desc->index = 7;
// djb2-style hash (seed 5381) folded over the kernel id, the ranges and
// the dat indices below; used by the OPS runtime to identify loop instances.
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 7;
// 1D loop => 2 range entries (start, end).
for ( int i=0; i<2; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 6;
desc->args = (ops_arg*)malloc(6*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->function = ops_par_loop_Riemann_kernel_execute;
if (OPS_diags > 1) {
ops_timing_realloc(7,"Riemann_kernel");
}
// NOTE(review): desc and desc->args are not freed here -- ownership
// presumably transfers to ops_enqueue_kernel; confirm against the OPS runtime.
ops_enqueue_kernel(desc);
}
#endif
| ef6e4c2de91187ee1ee2ae6f7796c52b51b479ca.cu | //
// auto-generated by ops.py
//
__constant__ int dims_Riemann_kernel [6][1];
static int dims_Riemann_kernel_h [6][1] = {0};
//user function
// Approximate (Roe-type) Riemann solver for the 1D Euler equations,
// evaluated between the left state (accessor index 0) and right state
// (index 1) of the conserved variables. Per interface it produces:
//   alam -- the three characteristic speeds u-c, u, u+c
//   r    -- the 3x3 right-eigenvector matrix, flat row-major, scaled by 1/c^2
//   al   -- the three wave strengths, scaled by c^2
// gam1 is an external constant, presumably gamma - 1 of the ideal-gas
// EOS -- confirm against the application's constant definitions.
__device__
void Riemann_kernel_gpu(const ACC<double>& rho_new,
const ACC<double> &rhou_new,
const ACC<double>& rhoE_new,
ACC<double>& alam,
ACC<double>& r,
ACC<double>& al) {
double rl, rr, rho, u, hl, hr, h, Vsq, csq, c;
double dw1, dw2, dw3, delpc2, rdeluc;
// Square-root-of-density weights for the Roe averages.
rl = sqrt(rho_new(0));
rr = sqrt(rho_new(1));
rho = rl + rr;
// Roe-averaged velocity: (rl*uL + rr*uR) / (rl + rr).
u = ((rhou_new(0) / rl) + (rhou_new(1) / rr)) / rho ;
// Left-state pressure; hl carries the sqrt(rho)-weighted total enthalpy
// used in the Roe average below.
double fni = rhou_new(0) * rhou_new(0) / rho_new(0) ;
double p = gam1 * (rhoE_new(0) - 0.5 * fni);
hl = (rhoE_new(0) + p) / rl ;
// Same for the right state.
fni = rhou_new(1) * rhou_new(1) / rho_new(1) ;
p = gam1 * (rhoE_new(1) - 0.5 * fni);
hr = (rhoE_new(1) + p) / rr ;
// Roe-averaged enthalpy and the resulting sound speed.
h = (hl + hr)/rho;
Vsq = u*u;
csq = gam1 * (h - 0.5 * Vsq);
c = sqrt(csq);
// Characteristic speeds (eigenvalues).
alam(0,0) = u - c;
alam(1,0) = u;
alam(2,0) = u + c;
// Right eigenvectors, stored as a flat 3x3 block (rows of the matrix).
r(0,0) = 1.0;
r(1,0) = 1.0;
r(2,0) = 1.0;
r(3,0) = u - c;
r(4,0) = u;
r(5,0) = u + c;
r(6,0) = h - u * c;
r(7,0) = 0.5 * Vsq;
r(8,0) = h + u * c;
// Scale the whole eigenvector matrix by 1/c^2.
for (int m=0; m<9; m++)
r(m,0) = r(m,0) / csq;
// Jumps in the conserved variables across the interface.
dw1 = rho_new(1) - rho_new(0);
dw2 = rhou_new(1) - rhou_new(0);
dw3 = rhoE_new(1) - rhoE_new(0);
// Project the jumps onto the characteristic fields.
delpc2 = gam1 * ( dw3 + 0.50 * Vsq * dw1 - u * dw2) / csq;
rdeluc = ( dw2 - u * dw1) / c ;
al(0,0) = 0.5 * (delpc2 - rdeluc);
al(1,0) = dw1 - delpc2 ;
al(2,0) = 0.5 * ( delpc2 + rdeluc );
// Wave strengths are scaled by c^2, complementing the 1/c^2 scaling of r.
for (int m=0; m<3; m++)
al(m,0) = al(m,0) * csq;
}
__global__ void ops_Riemann_kernel(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
int size0 ){
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1;
arg1 += idx_x * 1*1;
arg2 += idx_x * 1*1;
arg3 += idx_x * 1*3;
arg4 += idx_x * 1*9;
arg5 += idx_x * 1*3;
if (idx_x < size0) {
const ACC<double> argp0(arg0);
const ACC<double> argp1(arg1);
const ACC<double> argp2(arg2);
ACC<double> argp3(3, dims_Riemann_kernel[3][0], arg3);
ACC<double> argp4(9, dims_Riemann_kernel[4][0], arg4);
ACC<double> argp5(3, dims_Riemann_kernel[5][0], arg5);
Riemann_kernel_gpu(argp0, argp1, argp2, argp3,
argp4, argp5);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_Riemann_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5) {
#else
void ops_par_loop_Riemann_kernel_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[6] = { arg0, arg1, arg2, arg3, arg4, arg5};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,6,range,7)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(7,"Riemann_kernel");
OPS_kernels[7].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[1];
int end[1];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[1];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 6,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<1; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
int xdim3 = args[3].dat->size[0];
int xdim4 = args[4].dat->size[0];
int xdim5 = args[5].dat->size[0];
if (xdim0 != dims_Riemann_kernel_h[0][0] || xdim1 != dims_Riemann_kernel_h[1][0] || xdim2 != dims_Riemann_kernel_h[2][0] || xdim3 != dims_Riemann_kernel_h[3][0] || xdim4 != dims_Riemann_kernel_h[4][0] || xdim5 != dims_Riemann_kernel_h[5][0]) {
dims_Riemann_kernel_h[0][0] = xdim0;
dims_Riemann_kernel_h[1][0] = xdim1;
dims_Riemann_kernel_h[2][0] = xdim2;
dims_Riemann_kernel_h[3][0] = xdim3;
dims_Riemann_kernel_h[4][0] = xdim4;
dims_Riemann_kernel_h[5][0] = xdim5;
cutilSafeCall(cudaMemcpyToSymbol( dims_Riemann_kernel, dims_Riemann_kernel_h, sizeof(dims_Riemann_kernel)));
}
int x_size = MAX(0,end[0]-start[0]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, 1, 1);
dim3 tblock(OPS_block_size_x,1,1);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
char *p_a[6];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
p_a[5] = (char *)args[5].data_d + base5;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 6);
ops_halo_exchanges(args,6,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[7].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0)
ops_Riemann_kernel<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],x_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[7].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 6);
ops_set_halo_dirtybit3(&args[3],range);
ops_set_halo_dirtybit3(&args[4],range);
ops_set_halo_dirtybit3(&args[5],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[7].mpi_time += t2-t1;
OPS_kernels[7].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[7].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[7].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[7].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[7].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[7].transfer += ops_compute_transfer(dim, start, end, &arg5);
}
}
#ifdef OPS_LAZY
void ops_par_loop_Riemann_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 7;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 7;
for ( int i=0; i<2; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 6;
desc->args = (ops_arg*)malloc(6*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->function = ops_par_loop_Riemann_kernel_execute;
if (OPS_diags > 1) {
ops_timing_realloc(7,"Riemann_kernel");
}
ops_enqueue_kernel(desc);
}
#endif
|
3934a109c2635eaabe7588dd36c2417d367fd9c8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <opencv2/core/cuda_devptrs.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/gpu/gpu.hpp>
#include <opencv2/gpu/stream_accessor.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/gpu/devmem2d.hpp>
#include <opencv2/gpu/gpumat.hpp>
#define MIDDLEWIDTH 100
#define LEFT 1256
#define RIGHT 723
#define LARGEST1 1340
#define LARGEST2 600
using namespace cv;
using namespace cv::gpu;
using namespace std;
// Blend the MIDDLEWIDTH-pixel-wide overlap strip between the left image
// (src1) and the right image (src2) into dst with a linear cross-fade:
// the weight of src1 falls from 1 to 0 across the strip while src2 rises.
// Only rows [0, limit) are blended; threads outside the strip exit early.
__global__ void stitch_kernel(const PtrStepSz<uchar3> src1, const PtrStepSz<uchar3> src2, PtrStep<uchar3> dst, int limit)
{
int abs_x = threadIdx.x + blockDim.x * blockIdx.x;
int abs_y = threadIdx.y + blockDim.y * blockIdx.y;
// abs_x indexes a column WITHIN the seam strip, not the full image.
if (abs_y < 0 || abs_y >= limit || abs_x < 0 || abs_x >= MIDDLEWIDTH) {
return;
}
// Corresponding columns of this seam pixel in the left and right images.
int pointOnLeft = abs_x + LEFT;
int pointOnRight = RIGHT - MIDDLEWIDTH + abs_x;
uchar3 value1 = src1(abs_y, pointOnLeft);
uchar3 value2 = src2(abs_y, pointOnRight);
// Per-channel linear blend: alpha for src1 is
// (LEFT + MIDDLEWIDTH - pointOnLeft) / MIDDLEWIDTH, and src2 gets 1 - alpha.
// NOTE(review): the names say red/green/blue for .x/.y/.z, but channel order
// is whatever the source Mats use (OpenCV default is BGR) -- naming only,
// the arithmetic is channel-agnostic.
unsigned char newRed = (((LEFT + MIDDLEWIDTH) - pointOnLeft) / (MIDDLEWIDTH*1.0)) * value1.x + ((pointOnLeft - LEFT) / (MIDDLEWIDTH*1.0)) * value2.x;
unsigned char newGreen = (((LEFT + MIDDLEWIDTH) - pointOnLeft) / (MIDDLEWIDTH*1.0)) * value1.y + ((pointOnLeft - LEFT) / (MIDDLEWIDTH*1.0)) * value2.y;
unsigned char newBlue = (((LEFT + MIDDLEWIDTH) - pointOnLeft) / (MIDDLEWIDTH*1.0)) * value1.z + ((pointOnLeft - LEFT) / (MIDDLEWIDTH*1.0)) * value2.z;
dst(abs_y, pointOnLeft) = make_uchar3(newRed, newGreen, newBlue);
}
// Launch stitch_kernel with one thread per pixel of src1; the kernel itself
// discards threads outside the MIDDLEWIDTH-wide blend strip and rows >= limit.
// Synchronizes only when launched on the default stream.
void stitch_caller(const PtrStepSz<uchar3>& src1, const PtrStepSz<uchar3>& src2, PtrStep<uchar3> dst, int limit, hipStream_t stream)
{
    int blockWidth = 32;
    int blockHeight = 8;
    dim3 blockSize(blockWidth, blockHeight);
    // Ceil-divide so partial bottom/right tiles are still covered: the
    // original truncating division left up to blockHeight-1 rows (and
    // blockWidth-1 columns) unprocessed when the image dimensions are not
    // exact multiples of the block size.
    int blocksY = (src1.rows + blockHeight - 1) / blockHeight;
    int blocksX = (src1.cols + blockWidth - 1) / blockWidth;
    const dim3 gridSize(blocksX, blocksY);
    stitch_kernel << <gridSize, blockSize, 0, stream >> >(src1, src2, dst, limit);
    if (stream == 0)
    {
        hipDeviceSynchronize();
    }
}
void stitch(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, int limit, Stream& stream = Stream::Null())
{
CV_Assert(src1.type() == CV_8UC3);
CV_Assert(src2.type() == CV_8UC3);
hipStream_t s = StreamAccessor::getStream(stream);
stitch_caller(src1, src2, dst, limit, s);
}
int main()
{
Mat imageRight = imread("D:\\Documents\\Images\\resulti_phi.jpg");
Mat imageLeft = imread("D:\\Documents\\Images\\resultii_phi.jpg");
int width = LEFT + MIDDLEWIDTH + imageRight.cols - RIGHT;
Mat combine(max(imageRight.rows, imageLeft.rows), width, CV_8UC3, Scalar(0, 0, 0));
GpuMat gpuMatRight, gpuMatLeft, output;
gpuMatRight.upload(imageRight);
gpuMatLeft.upload(imageLeft);
output.upload(combine);
GpuMat left_roi(output, Rect(0, 0, LEFT, gpuMatLeft.size().height));
Size middleSize(MIDDLEWIDTH, gpuMatLeft.size().height);
GpuMat middleZone(middleSize, CV_8UC3, Scalar(0, 0, 0));
GpuMat croppedImage1;
Rect roi1(0, 0, LEFT, imageLeft.rows);
croppedImage1 = gpuMatLeft(roi1);
croppedImage1.copyTo(left_roi);
GpuMat right_roi(output, Rect(LEFT + MIDDLEWIDTH, 0, imageRight.cols - RIGHT, imageRight.rows));
GpuMat croppedImage2;
Rect roi2(RIGHT, 0, imageRight.cols - RIGHT, imageRight.rows);
croppedImage2 = gpuMatRight(roi2);
croppedImage2.copyTo(right_roi);
stitch(gpuMatLeft, gpuMatRight, output, gpuMatLeft.rows);
output.download(combine);
imwrite("D:\\Documents\\Images\\temp.jpg", combine);
return 0;
} | 3934a109c2635eaabe7588dd36c2417d367fd9c8.cu | #include <opencv2/core/cuda_devptrs.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/gpu/gpu.hpp>
#include <opencv2/gpu/stream_accessor.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/gpu/devmem2d.hpp>
#include <opencv2/gpu/gpumat.hpp>
#define MIDDLEWIDTH 100
#define LEFT 1256
#define RIGHT 723
#define LARGEST1 1340
#define LARGEST2 600
using namespace cv;
using namespace cv::gpu;
using namespace std;
__global__ void stitch_kernel(const PtrStepSz<uchar3> src1, const PtrStepSz<uchar3> src2, PtrStep<uchar3> dst, int limit)
{
int abs_x = threadIdx.x + blockDim.x * blockIdx.x;
int abs_y = threadIdx.y + blockDim.y * blockIdx.y;
if (abs_y < 0 || abs_y >= limit || abs_x < 0 || abs_x >= MIDDLEWIDTH) {
return;
}
int pointOnLeft = abs_x + LEFT;
int pointOnRight = RIGHT - MIDDLEWIDTH + abs_x;
uchar3 value1 = src1(abs_y, pointOnLeft);
uchar3 value2 = src2(abs_y, pointOnRight);
unsigned char newRed = (((LEFT + MIDDLEWIDTH) - pointOnLeft) / (MIDDLEWIDTH*1.0)) * value1.x + ((pointOnLeft - LEFT) / (MIDDLEWIDTH*1.0)) * value2.x;
unsigned char newGreen = (((LEFT + MIDDLEWIDTH) - pointOnLeft) / (MIDDLEWIDTH*1.0)) * value1.y + ((pointOnLeft - LEFT) / (MIDDLEWIDTH*1.0)) * value2.y;
unsigned char newBlue = (((LEFT + MIDDLEWIDTH) - pointOnLeft) / (MIDDLEWIDTH*1.0)) * value1.z + ((pointOnLeft - LEFT) / (MIDDLEWIDTH*1.0)) * value2.z;
dst(abs_y, pointOnLeft) = make_uchar3(newRed, newGreen, newBlue);
}
// Launch stitch_kernel with one thread per pixel of src1; the kernel itself
// discards threads outside the MIDDLEWIDTH-wide blend strip and rows >= limit.
// Synchronizes only when launched on the default stream.
void stitch_caller(const PtrStepSz<uchar3>& src1, const PtrStepSz<uchar3>& src2, PtrStep<uchar3> dst, int limit, cudaStream_t stream)
{
    int blockWidth = 32;
    int blockHeight = 8;
    dim3 blockSize(blockWidth, blockHeight);
    // Ceil-divide so partial bottom/right tiles are still covered: the
    // original truncating division left up to blockHeight-1 rows (and
    // blockWidth-1 columns) unprocessed when the image dimensions are not
    // exact multiples of the block size.
    int blocksY = (src1.rows + blockHeight - 1) / blockHeight;
    int blocksX = (src1.cols + blockWidth - 1) / blockWidth;
    const dim3 gridSize(blocksX, blocksY);
    stitch_kernel << <gridSize, blockSize, 0, stream >> >(src1, src2, dst, limit);
    if (stream == 0)
    {
        cudaDeviceSynchronize();
    }
}
void stitch(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, int limit, Stream& stream = Stream::Null())
{
CV_Assert(src1.type() == CV_8UC3);
CV_Assert(src2.type() == CV_8UC3);
cudaStream_t s = StreamAccessor::getStream(stream);
stitch_caller(src1, src2, dst, limit, s);
}
int main()
{
Mat imageRight = imread("D:\\Documents\\Images\\resulti_phi.jpg");
Mat imageLeft = imread("D:\\Documents\\Images\\resultii_phi.jpg");
int width = LEFT + MIDDLEWIDTH + imageRight.cols - RIGHT;
Mat combine(max(imageRight.rows, imageLeft.rows), width, CV_8UC3, Scalar(0, 0, 0));
GpuMat gpuMatRight, gpuMatLeft, output;
gpuMatRight.upload(imageRight);
gpuMatLeft.upload(imageLeft);
output.upload(combine);
GpuMat left_roi(output, Rect(0, 0, LEFT, gpuMatLeft.size().height));
Size middleSize(MIDDLEWIDTH, gpuMatLeft.size().height);
GpuMat middleZone(middleSize, CV_8UC3, Scalar(0, 0, 0));
GpuMat croppedImage1;
Rect roi1(0, 0, LEFT, imageLeft.rows);
croppedImage1 = gpuMatLeft(roi1);
croppedImage1.copyTo(left_roi);
GpuMat right_roi(output, Rect(LEFT + MIDDLEWIDTH, 0, imageRight.cols - RIGHT, imageRight.rows));
GpuMat croppedImage2;
Rect roi2(RIGHT, 0, imageRight.cols - RIGHT, imageRight.rows);
croppedImage2 = gpuMatRight(roi2);
croppedImage2.copyTo(right_roi);
stitch(gpuMatLeft, gpuMatRight, output, gpuMatLeft.rows);
output.download(combine);
imwrite("D:\\Documents\\Images\\temp.jpg", combine);
return 0;
} |
0b30df23486bb70c2d462d53a12380f48ea1c538.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "saber/funcs/impl/cuda/saber_axpy.h"
#include "hip/hip_fp16.h"
namespace anakin{
namespace saber{
// Elementwise axpy with a per-feature-map scale:
//   dst[i] = scale[i / img_size] * x[i] + y[i]
// img_size is the number of elements per map (H*W), so all elements of one
// map share a single scale value.
// CUDA_KERNEL_LOOP is the framework's kernel-loop macro (defined outside
// this file; presumably a grid-stride loop over [0, n) -- confirm).
template <typename DataDtype>
__global__ void ker_axpy_fwd(int n, int img_size,
const DataDtype* scale, const DataDtype* x, const DataDtype* y, DataDtype* dst) {
CUDA_KERNEL_LOOP(idx, n) {
// Index of the feature map this element belongs to.
int scale_id = idx / img_size;
dst[idx] = scale[scale_id] * x[idx] + y[idx];
}
}
template <DataType OpDtype ,
DataType inDtype,
DataType outDtype,
typename LayOutType_op,
typename LayOutType_in,
typename LayOutType_out>
SaberStatus SaberAxpy<NV, OpDtype, inDtype, outDtype,\
LayOutType_op, LayOutType_in, LayOutType_out>::dispatch(const std::vector<DataTensor_in *>& inputs,
std::vector<DataTensor_out *>& outputs,
AxpyParam<OpTensor>& param) {
hipStream_t cuda_stream = this->_ctx.get_compute_stream();
if (!(inputs[1]->valid_shape() == outputs[0]->valid_shape())
|| !(inputs[2]->valid_shape() == outputs[0]->valid_shape())) {
return SaberUnKownError;
}
const InDataType* scale = inputs[0]->data();
const InDataType* x = inputs[1]->data();
const InDataType* y = inputs[2]->data();
OutDataType* dst = outputs[0]->mutable_data();
int img_size = outputs[0]->height() * outputs[0]->width();
int count = outputs[0]->valid_size();
if (inputs[0]->is_continue_mem() && outputs[0]->is_continue_mem()
&& inputs[1]->is_continue_mem() && inputs[2]->is_continue_mem()) {
hipLaunchKernelGGL(( ker_axpy_fwd<InDataType>), dim3(CUDA_GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, cuda_stream, \
count, img_size, scale, x, y, dst);
}
return SaberSuccess;
}
} //namespace anakin
} //namespace anakin
| 0b30df23486bb70c2d462d53a12380f48ea1c538.cu | #include "saber/funcs/impl/cuda/saber_axpy.h"
#include "cuda_fp16.h"
namespace anakin{
namespace saber{
template <typename DataDtype>
__global__ void ker_axpy_fwd(int n, int img_size,
const DataDtype* scale, const DataDtype* x, const DataDtype* y, DataDtype* dst) {
CUDA_KERNEL_LOOP(idx, n) {
int scale_id = idx / img_size;
dst[idx] = scale[scale_id] * x[idx] + y[idx];
}
}
template <DataType OpDtype ,
DataType inDtype,
DataType outDtype,
typename LayOutType_op,
typename LayOutType_in,
typename LayOutType_out>
SaberStatus SaberAxpy<NV, OpDtype, inDtype, outDtype,\
LayOutType_op, LayOutType_in, LayOutType_out>::dispatch(const std::vector<DataTensor_in *>& inputs,
std::vector<DataTensor_out *>& outputs,
AxpyParam<OpTensor>& param) {
cudaStream_t cuda_stream = this->_ctx.get_compute_stream();
if (!(inputs[1]->valid_shape() == outputs[0]->valid_shape())
|| !(inputs[2]->valid_shape() == outputs[0]->valid_shape())) {
return SaberUnKownError;
}
const InDataType* scale = inputs[0]->data();
const InDataType* x = inputs[1]->data();
const InDataType* y = inputs[2]->data();
OutDataType* dst = outputs[0]->mutable_data();
int img_size = outputs[0]->height() * outputs[0]->width();
int count = outputs[0]->valid_size();
if (inputs[0]->is_continue_mem() && outputs[0]->is_continue_mem()
&& inputs[1]->is_continue_mem() && inputs[2]->is_continue_mem()) {
ker_axpy_fwd<InDataType><<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>\
(count, img_size, scale, x, y, dst);
}
return SaberSuccess;
}
} //namespace anakin
} //namespace anakin
|
28c7f8753bceda97db3641332668c2f32591a11c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
// Micro-benchmark kernel: global thread 0 streams over a 1000-element
// __shared__ buffer in unrolled groups of 8 loads, then writes the sum of
// the FINAL group (A[992..999], since a..h are overwritten each iteration)
// to *x so the loads cannot be optimized away.
// NOTE(review): A is read without ever being written -- shared memory is
// uninitialized, so the stored value is indeterminate. This looks deliberate
// (load-latency measurement), but confirm.
// num_streams, addr and n are unused in the body.
__global__ void saxpy(unsigned num_streams, unsigned addr, int n, float *x)
{
__shared__ float A[1000];
int id = blockIdx.x*blockDim.x + threadIdx.x;
float a = 0, b = 0, c = 0, d = 0, e = 0, f = 0, g = 0, h = 0;
if (id == 0) {
// 1000 = 125 * 8, so the last iteration starts at i = 992 and the
// highest index touched is A[999] -- in bounds.
for (int i = 0 ; i < 1000 ; i += 8) {
a = A[i];
b = A[i + 1];
c = A[i + 2];
d = A[i + 3];
e = A[i + 4];
f = A[i + 5];
g = A[i + 6];
h = A[i + 7];
}
*x = a + b + c + d + e + f + g + h;
}
}
int main(void)
{
int N = 1000;
// Perform SAXPY on 1M elements
float *h_x = (float *)malloc(N*sizeof(float));
float *d_x = (float *)100;
float *d_x_copy;
hipMalloc(&d_x_copy, N*sizeof(float));
// hipMalloc(&d_x, 2*sizeof(float));
for (int i = 1 ; i <= N ; i++)
h_x[i-1] = (float)i;
hipMemcpy(d_x, h_x, N*sizeof(float), hipMemcpyHostToDevice);
float *h_dummy = (float *)malloc(sizeof(float));
float *d_dummy;
hipMalloc(&d_dummy, sizeof(float));
hipLaunchKernelGGL(( saxpy), dim3(1), dim3(8), 0, 0, 1, 100u, N, d_dummy);
hipMemcpy(h_dummy, d_dummy, sizeof(float), hipMemcpyDeviceToHost);
printf("%f\n", *h_dummy);
}
| 28c7f8753bceda97db3641332668c2f32591a11c.cu | #include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
__global__ void saxpy(unsigned num_streams, unsigned addr, int n, float *x)
{
__shared__ float A[1000];
int id = blockIdx.x*blockDim.x + threadIdx.x;
float a = 0, b = 0, c = 0, d = 0, e = 0, f = 0, g = 0, h = 0;
if (id == 0) {
for (int i = 0 ; i < 1000 ; i += 8) {
a = A[i];
b = A[i + 1];
c = A[i + 2];
d = A[i + 3];
e = A[i + 4];
f = A[i + 5];
g = A[i + 6];
h = A[i + 7];
}
*x = a + b + c + d + e + f + g + h;
}
}
int main(void)
{
int N = 1000;
// Perform SAXPY on 1M elements
float *h_x = (float *)malloc(N*sizeof(float));
float *d_x = (float *)100;
float *d_x_copy;
cudaMalloc(&d_x_copy, N*sizeof(float));
// cudaMalloc(&d_x, 2*sizeof(float));
for (int i = 1 ; i <= N ; i++)
h_x[i-1] = (float)i;
cudaMemcpy(d_x, h_x, N*sizeof(float), cudaMemcpyHostToDevice);
float *h_dummy = (float *)malloc(sizeof(float));
float *d_dummy;
cudaMalloc(&d_dummy, sizeof(float));
saxpy<<<1, 8>>>(1, 100u, N, d_dummy);
cudaMemcpy(h_dummy, d_dummy, sizeof(float), cudaMemcpyDeviceToHost);
printf("%f\n", *h_dummy);
}
|
ae6b3b5106005b75480103430430252f07e21f59.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include <cudnn.h>
#define NANO 1e9
#define CUDNN_CALL(x) do \
{ \
cudnnStatus_t ___s = (x); \
if (___s != CUDNN_STATUS_SUCCESS) \
{ \
fprintf(stderr, "%s:%d ERROR: %s\n", __FILE__, __LINE__, cudnnGetErrorString(___s)); \
exit(-1); \
} \
} while(0); \
// Forward declarations
void init_I(double *, int, int, int);
void init_F(double *, int, int, int, int);
double get_checksum(double *, int, int, int);
double c1(int, double *, int, int, int, double *, int, int, int);
double c2(int, double *, int, int, int, double *, int, int, int);
__device__ void convolve(int, double *, int, int, int, double *, int, int, double *);
__global__ void convolve_tiles_with_shared_mem(int, double *, int, int, int, double *, int, int, int, double *);
// Benchmark driver: builds the input tensor I (C x H x W) and filter bank F
// (K x C x FH x FW), then times `runs` repetitions of the cuDNN convolution
// (c2) and the hand-written tiled convolution (c1) and prints the averages.
int main(int argc, char ** argv)
{
    int C = 3, H = 1024, W = 1024, P = 1;
    int K = 64, FH = 3, FW = 3;
    // Heap-allocate the tensors: C*H*W doubles is 24 MB, far beyond typical
    // stack limits (the original `double I[C*H*W]` stack VLA overflows).
    double * I = (double *)malloc((size_t)C * H * W * sizeof(double));
    double * F = (double *)malloc((size_t)K * C * FH * FW * sizeof(double));
    init_I(I, C, H, W);
    init_F(F, K, C, FH, FW);
    // Execute programs and output results
    double c1_kernel_time = 0;
    double c2_kernel_time = 0;
    int runs = 5;
    int i;
    printf("C2");
    for (i = 0; i < runs; i++)
    {
        c2_kernel_time += c2(C, I, H, W, P, F, K, FH, FW);
    }
    printf("\n\nC1");
    for (i = 0; i < runs; i++)
    {
        c1_kernel_time += c1(C, I, H, W, P, F, K, FH, FW);
    }
    printf("\n\n<Time>: Conv %lf s. cuDNN %lf s.\n", c1_kernel_time / runs, c2_kernel_time / runs);
    free(I);
    free(F);
    return 0;
}
// Function to initialize I array
void init_I(double * I, int C, int H, int W)
{
int c, h, w;
for (c = 0; c < C; c++)
for (h = 0; h < H; h++)
for (w = 0; w < W; w++)
I[(c * H * W) + (h * W) + w] = c * (h + w);
}
// Function to initialize F array
void init_F(double * F, int K, int C, int H, int W)
{
int k, c, h, w;
for (k = 0; k < K; k++)
for (c = 0; c < C; c++)
for (h = 0; h < H; h++)
for (w = 0; w < W; w++)
F[(k * C * H * W) + (c * H * W) + (h * W) + w] = (c + k) * (h + w);
}
double c1(int C, double * I, int H, int W, int P, double * F, int K, int FH, int FW)
{
// Determine array sizes, declare device arrays and allocate device memory
size_t I_size = C * H * W * sizeof(double);
size_t F_size = K * C * FH * FW * sizeof(double);
size_t O_size = K * H * W * sizeof(double);
double O[O_size], * dev_I, * dev_F, * dev_O;
hipMalloc(&dev_I, I_size);
hipMalloc(&dev_F, F_size);
hipMalloc(&dev_O, O_size);
struct timespec start, end;
clock_gettime(CLOCK_MONOTONIC, &start);
hipMemcpy(dev_I, I, I_size, hipMemcpyHostToDevice);
hipDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, &end);
double to_dev_time = (end.tv_sec - start.tv_sec) + ((end.tv_nsec - start.tv_nsec) / NANO);
hipMemcpy(dev_F, F, F_size, hipMemcpyHostToDevice);
// Set device properties and call kernel
int block_size = 4;
dim3 dimGrid(ceil(H / block_size), ceil(W / block_size));
dim3 dimBlock(block_size, block_size, K);
size_t tile_size = C * (block_size + (2 * P)) * (block_size + (2 * P));
clock_gettime(CLOCK_MONOTONIC, &start);
hipLaunchKernelGGL(( convolve_tiles_with_shared_mem), dim3(dimGrid), dim3(dimBlock), tile_size, 0, C, dev_I, H, W, P, dev_F, K, FH, FW, dev_O);
hipDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, &end);
double kernel_time = (end.tv_sec - start.tv_sec) + ((end.tv_nsec - start.tv_nsec) / NANO);
// Copy output array to host, free device memory and output results
clock_gettime(CLOCK_MONOTONIC, &start);
hipMemcpy(O, dev_O, O_size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, &end);
double to_host_time = (end.tv_sec - start.tv_sec) + ((end.tv_nsec - start.tv_nsec) / NANO);
hipFree(dev_I);
hipFree(dev_F);
hipFree(dev_O);
double I_checksum = get_checksum(I, C, H, W);
double O_checksum = get_checksum(O, K, H, W);
printf("\n\nI = checksum: %lf\nCopy host -> dev kernel: %lf s.\ntime kernel: %lf s.\nCopy dev -> host kernel: %lf s.\nCUDA O = checksum: %lf", I_checksum, to_dev_time, kernel_time, to_host_time, O_checksum);
return kernel_time;
}
double c2(int C, double * I, int H, int W, int P, double * F, int K, int FH, int FW)
{
// Determine array sizes, declare device arrays and workspace and allocate device memory
size_t I_size = C * H * W * sizeof(double);
size_t F_size = K * C * FH * FW * sizeof(double);
size_t O_size = K * H * W * sizeof(double);
double O[O_size], * dev_I, * dev_F, * dev_O;
void * workspace;
hipMalloc(&dev_I, I_size);
hipMalloc(&dev_F, F_size);
hipMalloc(&dev_O, O_size);
struct timespec start, end;
clock_gettime(CLOCK_MONOTONIC, &start);
hipMemcpy(dev_I, I, I_size, hipMemcpyHostToDevice);
hipDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, &end);
double to_dev_time = (end.tv_sec - start.tv_sec) + ((end.tv_nsec - start.tv_nsec) / NANO);
hipMemcpy(dev_F, F, F_size, hipMemcpyHostToDevice);
// Setup and execute CUDNN based convolution
cudnnHandle_t cudnn;
CUDNN_CALL(cudnnCreate(&cudnn));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_DOUBLE, 1, C, H, W));
cudnnFilterDescriptor_t filter_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filter_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(filter_desc, CUDNN_DATA_DOUBLE, CUDNN_TENSOR_NCHW, K, C, FH, FW));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_DOUBLE, 1, K, H, W));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(conv_desc, P, P, 1, 1, 1, 1, CUDNN_CONVOLUTION, CUDNN_DATA_DOUBLE));
cudnnConvolutionFwdAlgo_t conv_algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(cudnn, in_desc, filter_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &conv_algo));
size_t workspace_size = 0;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(cudnn, in_desc, filter_desc, conv_desc, out_desc, conv_algo, &workspace_size));
hipMallocManaged(&workspace, workspace_size);
double alpha = 1, beta = 0;
clock_gettime(CLOCK_MONOTONIC, &start);
CUDNN_CALL(cudnnConvolutionForward(cudnn, &alpha, in_desc, dev_I, filter_desc, dev_F, conv_desc, conv_algo, workspace, workspace_size, &beta, out_desc, dev_O));
hipDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, &end);
double kernel_time = (end.tv_sec - start.tv_sec) + ((end.tv_nsec - start.tv_nsec) / NANO);
// Copy output array to host, free device memory and output results
clock_gettime(CLOCK_MONOTONIC, &start);
hipMemcpy(O, dev_O, O_size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, &end);
double to_host_time = (end.tv_sec - start.tv_sec) + ((end.tv_nsec - start.tv_nsec) / NANO);
hipFree(dev_I);
hipFree(dev_F);
hipFree(dev_O);
hipFree(workspace);
cudnnDestroyTensorDescriptor(in_desc);
cudnnDestroyFilterDescriptor(filter_desc);
cudnnDestroyTensorDescriptor(out_desc);
cudnnDestroyConvolutionDescriptor(conv_desc);
cudnnDestroy(cudnn);
double I_checksum = get_checksum(I, C, H, W);
double O_checksum = get_checksum(O, K, H, W);
printf("\n\nI = checksum: %lf\nCopy host -> dev kernel: %lf s.\ntime cudnn: %lf s.\nCopy dev -> host kernel: %lf s.\nCUDA O = checksum: %lf", I_checksum, to_dev_time, kernel_time, to_host_time, O_checksum);
return kernel_time;
}
// Function to compute the sum of all elements of I and O arrays
// Sums every element of a C x H x W tensor laid out contiguously in
// row-major (C, H, W) order and returns the total.
double get_checksum(double * tensor, int C, int H, int W)
{
const int total = C * H * W;
double sum = 0;
for (int idx = 0; idx < total; ++idx)
sum += tensor[idx];
return sum;
}
// CUDA kernel to perform convolution using tiles and shared memory
// Loads a (blockDim.x + 2P) x (blockDim.y + 2P) halo tile of the input image
// into dynamic shared memory, one plane per input channel, then performs the
// per-thread convolution via convolve(). Launched (see c1) with
// block = (block_size, block_size, K); only the threadIdx.z == 0 slice of
// threads populates the tile, and __syncthreads() publishes it to the K
// output-channel threads.
// NOTE(review): convolve() indexes its input with H/W strides and global
// coordinates, which matches the full image rather than the C x TH x TW
// tile passed to it below -- verify the intended data layout.
__global__ void convolve_tiles_with_shared_mem(int C, double * I, int H, int W, int P, double * F, int K, int FH, int FW, double * O)
{
// Declare and populate tile array in shared memory
extern __shared__ double tile[];
// Tile dimensions: interior block plus a halo of P cells on each side.
int TH = blockDim.x + (2 * P);
int TW = blockDim.y + (2 * P);
int k = threadIdx.z;
// Global image coordinates handled by this thread (x -> row, y -> column).
int h = (blockIdx.x * blockDim.x) + threadIdx.x;
int w = (blockIdx.y * blockDim.y) + threadIdx.y;
if (k == 0 && h < H && w < W)
{
// Position of this thread's pixel inside the padded tile.
int th = threadIdx.x + P;
int tw = threadIdx.y + P;
int c;
for (c = 0; c < C; c++)
{
// Interior element.
tile[(c * TH * TW) + (th * TW) + tw] = I[(c * H * W) + (h * W) + w];
// Threads on the block edges also fill the halo, one ring per pad step.
// NOTE(review): the guards below test h > 0 / h < H - 1 (etc.) for every
// pad offset; for P > 1 an offset of `pad` can still index outside the
// image when 0 < h < pad -- confirm this is only ever used with P == 1.
int pad;
for (pad = 1; pad <= P; pad++)
{
// Fill top rows and corners
if (threadIdx.x == 0)
{
if (h > 0)
tile[(c * TH * TW) + ((th - pad) * TW) + tw] = I[(c * H * W) + ((h - pad) * W) + w];
else
tile[(c * TH * TW) + ((th - pad) * TW) + tw] = 0;
// Top left corner
if (threadIdx.y == 0)
{
int h_pad, w_pad;
for (h_pad = pad; h_pad > 0; h_pad--)
for (w_pad = pad; w_pad > 0; w_pad--)
{
if (h > 0 && w > 0)
tile[(c * TH * TW) + ((th - h_pad) * TW) + (tw - w_pad)] = I[(c * H * W) + ((h - h_pad) * W) + (w - w_pad)];
else
tile[(c * TH * TW) + ((th - h_pad) * TW) + (tw - w_pad)] = 0;
}
}
// Top right corner
if (threadIdx.y == (blockDim.y - 1))
{
int h_pad, w_pad;
for (h_pad = pad; h_pad > 0; h_pad--)
for (w_pad = pad; w_pad > 0; w_pad--)
{
if (h > 0 && w < (W - 1))
tile[(c * TH * TW) + ((th - h_pad) * TW) + (tw + w_pad)] = I[(c * H * W) + ((h - h_pad) * W) + (w + w_pad)];
else
tile[(c * TH * TW) + ((th - h_pad) * TW) + (tw + w_pad)] = 0;
}
}
}
// Fill bottom rows and corners
if (threadIdx.x == (blockDim.x - 1))
{
if (h < (H - 1))
tile[(c * TH * TW) + ((th + pad) * TW) + tw] = I[(c * H * W) + ((h + pad) * W) + w];
else
tile[(c * TH * TW) + ((th + pad) * TW) + tw] = 0;
// Bottom left corner
if (threadIdx.y == 0)
{
int h_pad, w_pad;
for (h_pad = pad; h_pad > 0; h_pad--)
for (w_pad = pad; w_pad > 0; w_pad--)
{
if (h < (H - 1) && w > 0)
tile[(c * TH * TW) + ((th + h_pad) * TW) + (tw - w_pad)] = I[(c * H * W) + ((h + h_pad) * W) + (w - w_pad)];
else
tile[(c * TH * TW) + ((th + h_pad) * TW) + (tw - w_pad)] = 0;
}
}
// Bottom right corner
if (threadIdx.y == (blockDim.y - 1))
{
int h_pad, w_pad;
for (h_pad = pad; h_pad > 0; h_pad--)
for (w_pad = pad; w_pad > 0; w_pad--)
{
if (h < (H - 1) && w < (W - 1))
// NOTE(review): this writes (tw + w_pad) but reads column
// (w - w_pad); by symmetry with the top-right corner above the
// source column should very likely be (w + w_pad) -- confirm.
tile[(c * TH * TW) + ((th + h_pad) * TW) + (tw + w_pad)] = I[(c * H * W) + ((h + h_pad) * W) + (w - w_pad)];
else
tile[(c * TH * TW) + ((th + h_pad) * TW) + (tw + w_pad)] = 0;
}
}
}
// Fill left columns
if (threadIdx.y == 0)
{
if (w > 0)
tile[(c * TH * TW) + (th * TW) + (tw - pad)] = I[(c * H * W) + (h * W) + (w - pad)];
else
tile[(c * TH * TW) + (th * TW) + (tw - pad)] = 0;
}
// Fill right columns
if (threadIdx.y == (blockDim.y - 1))
{
if (w < (W - 1))
tile[(c * TH * TW) + (th * TW) + (tw + pad)] = I[(c * H * W) + (h * W) + (w + pad)];
else
tile[(c * TH * TW) + (th * TW) + (tw + pad)] = 0;
}
}
}
}
// All threads of the block (including k > 0, which skipped the load above)
// must reach this barrier before the tile is read.
__syncthreads();
// Perform convolution
convolve(C, tile, H, W, P, F, FH, FW, O);
}
// CUDA kernel to perform individual convolution computations
// Per-thread convolution helper: accumulates filter k (k = threadIdx.z)
// applied at this thread's global pixel over all C input channels and writes
// a single output element to O (K x H x W layout).
// NOTE(review): the caller passes the shared-memory tile as I, yet the
// indexing below uses full-image H/W strides with global coordinates --
// these layouts do not match; verify against the intended design.
__device__ void convolve(int C, double * I, int H, int W, int P, double * F, int FH, int FW, double * O)
{
double val = 0;
int k = threadIdx.z;
// Top-left corner of the receptive field (may lie off-image; guarded below).
int h = (blockIdx.x * blockDim.x) + threadIdx.x - P;
int w = (blockIdx.y * blockDim.y) + threadIdx.y - P;
int c, fh, fw, i, j;
for (c = 0; c < C; c++)
for (fh = 0; fh < FH; fh++)
{
i = h + fh;
for (fw = 0; fw < FW; fw++)
{
j = w + fw;
// Zero padding: taps outside the image contribute nothing.
if (i < 0 || i >= H || j < 0 || j >= W)
continue;
// Filter indices are flipped ((FH-1-fh), (FW-1-fw)): this computes a
// true convolution rather than cross-correlation.
val += I[(c * H * W) + (i * W) + j] * F[(k * C * FH * FW) + (c * FH * FW) + ((FH - 1 - fh) * FW) + (FW - 1 - fw)];
}
}
// Restore this thread's own output coordinates and store the result.
h += P;
w += P;
O[(k * H * W) + (h * W) + w] = val;
}
| ae6b3b5106005b75480103430430252f07e21f59.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <cuda.h>
#include <cudnn.h>
#define NANO 1e9
#define CUDNN_CALL(x) do \
{ \
cudnnStatus_t ___s = (x); \
if (___s != CUDNN_STATUS_SUCCESS) \
{ \
fprintf(stderr, "%s:%d ERROR: %s\n", __FILE__, __LINE__, cudnnGetErrorString(___s)); \
exit(-1); \
} \
} while(0); \
// Forward declarations
void init_I(double *, int, int, int);
void init_F(double *, int, int, int, int);
double get_checksum(double *, int, int, int);
double c1(int, double *, int, int, int, double *, int, int, int);
double c2(int, double *, int, int, int, double *, int, int, int);
__device__ void convolve(int, double *, int, int, int, double *, int, int, double *);
__global__ void convolve_tiles_with_shared_mem(int, double *, int, int, int, double *, int, int, int, double *);
// Entry point: builds a synthetic input image and filter bank, then
// benchmarks the hand-written tiled convolution (c1) against cuDNN (c2),
// averaging the kernel time over several runs.
int main(int argc, char * argv[])
{
// Initialize dimensions and I and F arrays
int C = 3, H = 1024, W = 1024, P = 1;
int K = 64, FH = 3, FW = 3;
// BUG FIX: I and F were variable-length stack arrays (~24 MB for I alone),
// well beyond typical stack limits; allocate them on the heap instead.
double * I = new double[C * H * W];
double * F = new double[K * C * FH * FW];
init_I(I, C, H, W);
init_F(F, K, C, FH, FW);
// Execute programs and output results
double c1_kernel_time = 0;
double c2_kernel_time = 0;
int runs = 5;
int i;
printf("C2");
for (i = 0; i < runs; i++)
{
c2_kernel_time += c2(C, I, H, W, P, F, K, FH, FW);
}
printf("\n\nC1");
for (i = 0; i < runs; i++)
{
c1_kernel_time += c1(C, I, H, W, P, F, K, FH, FW);
}
printf("\n\n<Time>: Conv %lf s. cuDNN %lf s.\n", c1_kernel_time / runs, c2_kernel_time / runs);
delete[] I;
delete[] F;
return 0;
}
// Function to initialize I array
// Fills the image tensor I (C x H x W, row-major) with the synthetic
// pattern I[c][h][w] = c * (h + w).
void init_I(double * I, int C, int H, int W)
{
const int plane = H * W;
for (int c = 0; c < C; ++c)
{
double * slice = I + (c * plane);
for (int h = 0; h < H; ++h)
for (int w = 0; w < W; ++w)
slice[(h * W) + w] = c * (h + w);
}
}
// Function to initialize F array
// Fills the filter bank F (K x C x H x W, row-major; here H/W are the
// filter height/width) with the pattern F[k][c][h][w] = (c + k) * (h + w).
void init_F(double * F, int K, int C, int H, int W)
{
const int plane = H * W;
const int volume = C * plane;
for (int k = 0; k < K; ++k)
for (int c = 0; c < C; ++c)
{
double * filter = F + (k * volume) + (c * plane);
for (int h = 0; h < H; ++h)
for (int w = 0; w < W; ++w)
filter[(h * W) + w] = (c + k) * (h + w);
}
}
// Runs the hand-written tiled shared-memory convolution of I (C x H x W,
// padding P) with filters F (K x C x FH x FW), printing checksums and
// timings. Returns the kernel execution time in seconds.
double c1(int C, double * I, int H, int W, int P, double * F, int K, int FH, int FW)
{
// Determine array sizes (in bytes), declare device arrays and allocate device memory
size_t I_size = C * H * W * sizeof(double);
size_t F_size = K * C * FH * FW * sizeof(double);
size_t O_size = K * H * W * sizeof(double);
// BUG FIX: O was `double O[O_size]` -- a stack VLA whose length was a
// *byte* count. Heap-allocate the correct number of elements instead.
double * O = new double[(size_t)K * H * W];
double * dev_I, * dev_F, * dev_O;
cudaMalloc(&dev_I, I_size);
cudaMalloc(&dev_F, F_size);
cudaMalloc(&dev_O, O_size);
// Time the host -> device copy of the input image.
struct timespec start, end;
clock_gettime(CLOCK_MONOTONIC, &start);
cudaMemcpy(dev_I, I, I_size, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, &end);
double to_dev_time = (end.tv_sec - start.tv_sec) + ((end.tv_nsec - start.tv_nsec) / NANO);
cudaMemcpy(dev_F, F, F_size, cudaMemcpyHostToDevice);
// Set device properties and call kernel. Note blockDim = (4, 4, K): with
// K = 64 this is exactly the 1024-threads-per-block hardware limit.
int block_size = 4;
// BUG FIX: ceil(H / block_size) performed *integer* division before the
// rounding, silently truncating for non-divisible sizes; use integer
// ceiling division.
dim3 dimGrid((H + block_size - 1) / block_size, (W + block_size - 1) / block_size);
dim3 dimBlock(block_size, block_size, K);
// BUG FIX: the dynamic shared-memory launch parameter is a size in *bytes*;
// the element count was previously passed without * sizeof(double).
size_t tile_size = C * (block_size + (2 * P)) * (block_size + (2 * P)) * sizeof(double);
clock_gettime(CLOCK_MONOTONIC, &start);
convolve_tiles_with_shared_mem<<<dimGrid, dimBlock, tile_size>>>(C, dev_I, H, W, P, dev_F, K, FH, FW, dev_O);
cudaDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, &end);
double kernel_time = (end.tv_sec - start.tv_sec) + ((end.tv_nsec - start.tv_nsec) / NANO);
// Copy output array to host (timed), free device memory and output results
clock_gettime(CLOCK_MONOTONIC, &start);
cudaMemcpy(O, dev_O, O_size, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, &end);
double to_host_time = (end.tv_sec - start.tv_sec) + ((end.tv_nsec - start.tv_nsec) / NANO);
cudaFree(dev_I);
cudaFree(dev_F);
cudaFree(dev_O);
double I_checksum = get_checksum(I, C, H, W);
double O_checksum = get_checksum(O, K, H, W);
printf("\n\nI = checksum: %lf\nCopy host -> dev kernel: %lf s.\ntime kernel: %lf s.\nCopy dev -> host kernel: %lf s.\nCUDA O = checksum: %lf", I_checksum, to_dev_time, kernel_time, to_host_time, O_checksum);
delete[] O;
return kernel_time;
}
// Runs a cuDNN forward convolution of I (C x H x W, zero padding P) with the
// filter bank F (K x C x FH x FW), printing checksums plus transfer/compute
// timings, and returns the convolution execution time in seconds.
// NOTE: cudnnGetConvolutionForwardAlgorithm targets cuDNN 7.x (removed in 8).
double c2(int C, double * I, int H, int W, int P, double * F, int K, int FH, int FW)
{
// Determine array sizes (in bytes), declare device arrays and workspace and allocate device memory
size_t I_size = C * H * W * sizeof(double);
size_t F_size = K * C * FH * FW * sizeof(double);
size_t O_size = K * H * W * sizeof(double);
// BUG FIX: O was previously `double O[O_size]` -- a stack VLA whose length
// was a *byte* count (8x too many elements, several GB of stack). Allocate
// the correct element count on the heap instead.
double * O = new double[(size_t)K * H * W];
double * dev_I, * dev_F, * dev_O;
void * workspace;
cudaMalloc(&dev_I, I_size);
cudaMalloc(&dev_F, F_size);
cudaMalloc(&dev_O, O_size);
// Time the host -> device copy of the input image.
struct timespec start, end;
clock_gettime(CLOCK_MONOTONIC, &start);
cudaMemcpy(dev_I, I, I_size, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, &end);
double to_dev_time = (end.tv_sec - start.tv_sec) + ((end.tv_nsec - start.tv_nsec) / NANO);
cudaMemcpy(dev_F, F, F_size, cudaMemcpyHostToDevice);
// Setup and execute CUDNN based convolution (NCHW layout, double precision,
// batch size 1, unit stride/dilation, true-convolution mode).
cudnnHandle_t cudnn;
CUDNN_CALL(cudnnCreate(&cudnn));
cudnnTensorDescriptor_t in_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_DOUBLE, 1, C, H, W));
cudnnFilterDescriptor_t filter_desc;
CUDNN_CALL(cudnnCreateFilterDescriptor(&filter_desc));
CUDNN_CALL(cudnnSetFilter4dDescriptor(filter_desc, CUDNN_DATA_DOUBLE, CUDNN_TENSOR_NCHW, K, C, FH, FW));
cudnnTensorDescriptor_t out_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_DOUBLE, 1, K, H, W));
cudnnConvolutionDescriptor_t conv_desc;
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc));
CUDNN_CALL(cudnnSetConvolution2dDescriptor(conv_desc, P, P, 1, 1, 1, 1, CUDNN_CONVOLUTION, CUDNN_DATA_DOUBLE));
cudnnConvolutionFwdAlgo_t conv_algo;
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm(cudnn, in_desc, filter_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &conv_algo));
size_t workspace_size = 0;
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(cudnn, in_desc, filter_desc, conv_desc, out_desc, conv_algo, &workspace_size));
cudaMallocManaged(&workspace, workspace_size);
double alpha = 1, beta = 0;
// Time the convolution itself.
clock_gettime(CLOCK_MONOTONIC, &start);
CUDNN_CALL(cudnnConvolutionForward(cudnn, &alpha, in_desc, dev_I, filter_desc, dev_F, conv_desc, conv_algo, workspace, workspace_size, &beta, out_desc, dev_O));
cudaDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, &end);
double kernel_time = (end.tv_sec - start.tv_sec) + ((end.tv_nsec - start.tv_nsec) / NANO);
// Copy output array to host (timed), free device memory and output results
clock_gettime(CLOCK_MONOTONIC, &start);
cudaMemcpy(O, dev_O, O_size, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, &end);
double to_host_time = (end.tv_sec - start.tv_sec) + ((end.tv_nsec - start.tv_nsec) / NANO);
cudaFree(dev_I);
cudaFree(dev_F);
cudaFree(dev_O);
cudaFree(workspace);
cudnnDestroyTensorDescriptor(in_desc);
cudnnDestroyFilterDescriptor(filter_desc);
cudnnDestroyTensorDescriptor(out_desc);
cudnnDestroyConvolutionDescriptor(conv_desc);
cudnnDestroy(cudnn);
double I_checksum = get_checksum(I, C, H, W);
double O_checksum = get_checksum(O, K, H, W);
printf("\n\nI = checksum: %lf\nCopy host -> dev kernel: %lf s.\ntime cudnn: %lf s.\nCopy dev -> host kernel: %lf s.\nCUDA O = checksum: %lf", I_checksum, to_dev_time, kernel_time, to_host_time, O_checksum);
delete[] O;
return kernel_time;
}
// Function to compute the sum of all elements of I and O arrays
// Sums every element of a C x H x W tensor laid out contiguously in
// row-major (C, H, W) order and returns the total.
double get_checksum(double * tensor, int C, int H, int W)
{
const int total = C * H * W;
double sum = 0;
for (int idx = 0; idx < total; ++idx)
sum += tensor[idx];
return sum;
}
// CUDA kernel to perform convolution using tiles and shared memory
// Loads a (blockDim.x + 2P) x (blockDim.y + 2P) halo tile of the input image
// into dynamic shared memory, one plane per input channel, then performs the
// per-thread convolution via convolve(). Launched (see c1) with
// block = (block_size, block_size, K); only the threadIdx.z == 0 slice of
// threads populates the tile, and __syncthreads() publishes it to the K
// output-channel threads.
// NOTE(review): convolve() indexes its input with H/W strides and global
// coordinates, which matches the full image rather than the C x TH x TW
// tile passed to it below -- verify the intended data layout.
__global__ void convolve_tiles_with_shared_mem(int C, double * I, int H, int W, int P, double * F, int K, int FH, int FW, double * O)
{
// Declare and populate tile array in shared memory
extern __shared__ double tile[];
// Tile dimensions: interior block plus a halo of P cells on each side.
int TH = blockDim.x + (2 * P);
int TW = blockDim.y + (2 * P);
int k = threadIdx.z;
// Global image coordinates handled by this thread (x -> row, y -> column).
int h = (blockIdx.x * blockDim.x) + threadIdx.x;
int w = (blockIdx.y * blockDim.y) + threadIdx.y;
if (k == 0 && h < H && w < W)
{
// Position of this thread's pixel inside the padded tile.
int th = threadIdx.x + P;
int tw = threadIdx.y + P;
int c;
for (c = 0; c < C; c++)
{
// Interior element.
tile[(c * TH * TW) + (th * TW) + tw] = I[(c * H * W) + (h * W) + w];
// Threads on the block edges also fill the halo, one ring per pad step.
// NOTE(review): the guards below test h > 0 / h < H - 1 (etc.) for every
// pad offset; for P > 1 an offset of `pad` can still index outside the
// image when 0 < h < pad -- confirm this is only ever used with P == 1.
int pad;
for (pad = 1; pad <= P; pad++)
{
// Fill top rows and corners
if (threadIdx.x == 0)
{
if (h > 0)
tile[(c * TH * TW) + ((th - pad) * TW) + tw] = I[(c * H * W) + ((h - pad) * W) + w];
else
tile[(c * TH * TW) + ((th - pad) * TW) + tw] = 0;
// Top left corner
if (threadIdx.y == 0)
{
int h_pad, w_pad;
for (h_pad = pad; h_pad > 0; h_pad--)
for (w_pad = pad; w_pad > 0; w_pad--)
{
if (h > 0 && w > 0)
tile[(c * TH * TW) + ((th - h_pad) * TW) + (tw - w_pad)] = I[(c * H * W) + ((h - h_pad) * W) + (w - w_pad)];
else
tile[(c * TH * TW) + ((th - h_pad) * TW) + (tw - w_pad)] = 0;
}
}
// Top right corner
if (threadIdx.y == (blockDim.y - 1))
{
int h_pad, w_pad;
for (h_pad = pad; h_pad > 0; h_pad--)
for (w_pad = pad; w_pad > 0; w_pad--)
{
if (h > 0 && w < (W - 1))
tile[(c * TH * TW) + ((th - h_pad) * TW) + (tw + w_pad)] = I[(c * H * W) + ((h - h_pad) * W) + (w + w_pad)];
else
tile[(c * TH * TW) + ((th - h_pad) * TW) + (tw + w_pad)] = 0;
}
}
}
// Fill bottom rows and corners
if (threadIdx.x == (blockDim.x - 1))
{
if (h < (H - 1))
tile[(c * TH * TW) + ((th + pad) * TW) + tw] = I[(c * H * W) + ((h + pad) * W) + w];
else
tile[(c * TH * TW) + ((th + pad) * TW) + tw] = 0;
// Bottom left corner
if (threadIdx.y == 0)
{
int h_pad, w_pad;
for (h_pad = pad; h_pad > 0; h_pad--)
for (w_pad = pad; w_pad > 0; w_pad--)
{
if (h < (H - 1) && w > 0)
tile[(c * TH * TW) + ((th + h_pad) * TW) + (tw - w_pad)] = I[(c * H * W) + ((h + h_pad) * W) + (w - w_pad)];
else
tile[(c * TH * TW) + ((th + h_pad) * TW) + (tw - w_pad)] = 0;
}
}
// Bottom right corner
if (threadIdx.y == (blockDim.y - 1))
{
int h_pad, w_pad;
for (h_pad = pad; h_pad > 0; h_pad--)
for (w_pad = pad; w_pad > 0; w_pad--)
{
if (h < (H - 1) && w < (W - 1))
// NOTE(review): this writes (tw + w_pad) but reads column
// (w - w_pad); by symmetry with the top-right corner above the
// source column should very likely be (w + w_pad) -- confirm.
tile[(c * TH * TW) + ((th + h_pad) * TW) + (tw + w_pad)] = I[(c * H * W) + ((h + h_pad) * W) + (w - w_pad)];
else
tile[(c * TH * TW) + ((th + h_pad) * TW) + (tw + w_pad)] = 0;
}
}
}
// Fill left columns
if (threadIdx.y == 0)
{
if (w > 0)
tile[(c * TH * TW) + (th * TW) + (tw - pad)] = I[(c * H * W) + (h * W) + (w - pad)];
else
tile[(c * TH * TW) + (th * TW) + (tw - pad)] = 0;
}
// Fill right columns
if (threadIdx.y == (blockDim.y - 1))
{
if (w < (W - 1))
tile[(c * TH * TW) + (th * TW) + (tw + pad)] = I[(c * H * W) + (h * W) + (w + pad)];
else
tile[(c * TH * TW) + (th * TW) + (tw + pad)] = 0;
}
}
}
}
// All threads of the block (including k > 0, which skipped the load above)
// must reach this barrier before the tile is read.
__syncthreads();
// Perform convolution
convolve(C, tile, H, W, P, F, FH, FW, O);
}
// CUDA kernel to perform individual convolution computations
// Per-thread convolution helper: accumulates filter k (k = threadIdx.z)
// applied at this thread's global pixel over all C input channels and writes
// a single output element to O (K x H x W layout).
// NOTE(review): the caller passes the shared-memory tile as I, yet the
// indexing below uses full-image H/W strides with global coordinates --
// these layouts do not match; verify against the intended design.
__device__ void convolve(int C, double * I, int H, int W, int P, double * F, int FH, int FW, double * O)
{
double val = 0;
int k = threadIdx.z;
// Top-left corner of the receptive field (may lie off-image; guarded below).
int h = (blockIdx.x * blockDim.x) + threadIdx.x - P;
int w = (blockIdx.y * blockDim.y) + threadIdx.y - P;
int c, fh, fw, i, j;
for (c = 0; c < C; c++)
for (fh = 0; fh < FH; fh++)
{
i = h + fh;
for (fw = 0; fw < FW; fw++)
{
j = w + fw;
// Zero padding: taps outside the image contribute nothing.
if (i < 0 || i >= H || j < 0 || j >= W)
continue;
// Filter indices are flipped ((FH-1-fh), (FW-1-fw)): this computes a
// true convolution rather than cross-correlation.
val += I[(c * H * W) + (i * W) + j] * F[(k * C * FH * FW) + (c * FH * FW) + ((FH - 1 - fh) * FW) + (FW - 1 - fw)];
}
}
// Restore this thread's own output coordinates and store the result.
h += P;
w += P;
O[(k * H * W) + (h * W) + w] = val;
}
|
a9dec32500aa1a32e02dce564f41b5bb0faeacea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kExp(float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x)
target[i] = __expf(gData[i]);
} | a9dec32500aa1a32e02dce564f41b5bb0faeacea.cu | #include "includes.h"
__global__ void kExp(float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x)
target[i] = __expf(gData[i]);
} |
1207731c7216cb49d258c85079c82dbf00816524.hip | // !!! This is a file automatically generated by hipify!!!
// Corresponding header file: /include/mirror_ops.h
#include <hip/hip_runtime.h>
#include <stdio.h>
/* Mirror operations */
// Writes a mirrored copy of inputChannel into outputChannel (both
// numRows x numCols, row-major).
// vertical == true  : flip left/right (mirror across the vertical axis)
// vertical == false : flip top/bottom (mirror across the horizontal axis)
// Expects a 2D launch covering at least numCols x numRows threads.
__global__
void mirror(const uchar4* const inputChannel, uchar4* outputChannel, int numRows, int numCols, bool vertical)
{
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
// Guard the grid tail: the launch rarely divides the image exactly.
if ( col >= numCols || row >= numRows )
{
return;
}
// BUG FIX: the mirrored coordinate was computed as (numRows - row) /
// (numCols - col), which writes one element past the end for row 0 / col 0
// (out-of-bounds) and never fills the last row/column. The correct
// reflection of index i over n elements is (n - 1 - i).
int row_new = vertical ? row : (numRows - 1 - row);
int col_new = vertical ? (numCols - 1 - col) : col;
int myId = row * numCols + col;
int myId_new = row_new * numCols + col_new;
outputChannel[myId_new] = inputChannel[myId];
}
// Host wrapper: mirrors the device image d_inputImageRGBA and returns the
// result in newly malloc'd *host* memory (caller owns and must free it).
// NOTE(review): this frees the caller's input device buffer before
// returning -- callers must not reuse d_inputImageRGBA. malloc/hipMalloc
// results are unchecked.
uchar4* mirror_ops(uchar4 *d_inputImageRGBA, size_t numRows, size_t numCols, bool vertical)
{
//Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize(4,4,1);
//Calculate Grid SIze (one extra block in each dimension covers the remainder)
int a=numCols/blockSize.x, b=numRows/blockSize.y;
const dim3 gridSize(a+1,b+1,1);
const size_t numPixels = numRows * numCols;
uchar4 *d_outputImageRGBA;
hipMalloc(&d_outputImageRGBA, sizeof(uchar4) * numPixels);
//Call mirror kernel.
hipLaunchKernelGGL(( mirror), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA, d_outputImageRGBA, numRows, numCols, vertical);
hipDeviceSynchronize();
//Initialize memory on host for output uchar4*
uchar4* h_out;
h_out = (uchar4*)malloc(sizeof(uchar4) * numPixels);
//copy output from device to host
hipMemcpy(h_out, d_outputImageRGBA, sizeof(uchar4) * numPixels, hipMemcpyDeviceToHost);
//cleanup memory on device
hipFree(d_inputImageRGBA);
hipFree(d_outputImageRGBA);
//return h_out
return h_out;
} | 1207731c7216cb49d258c85079c82dbf00816524.cu | // Corresponding header file: /include/mirror_ops.h
#include <cuda_runtime.h>
#include <stdio.h>
/* Mirror operations */
// Writes a mirrored copy of inputChannel into outputChannel (both
// numRows x numCols, row-major).
// vertical == true  : flip left/right (mirror across the vertical axis)
// vertical == false : flip top/bottom (mirror across the horizontal axis)
// Expects a 2D launch covering at least numCols x numRows threads.
__global__
void mirror(const uchar4* const inputChannel, uchar4* outputChannel, int numRows, int numCols, bool vertical)
{
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
// Guard the grid tail: the launch rarely divides the image exactly.
if ( col >= numCols || row >= numRows )
{
return;
}
// BUG FIX: the mirrored coordinate was computed as (numRows - row) /
// (numCols - col), which writes one element past the end for row 0 / col 0
// (out-of-bounds) and never fills the last row/column. The correct
// reflection of index i over n elements is (n - 1 - i).
int row_new = vertical ? row : (numRows - 1 - row);
int col_new = vertical ? (numCols - 1 - col) : col;
int myId = row * numCols + col;
int myId_new = row_new * numCols + col_new;
outputChannel[myId_new] = inputChannel[myId];
}
uchar4* mirror_ops(uchar4 *d_inputImageRGBA, size_t numRows, size_t numCols, bool vertical)
{
//Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize(4,4,1);
//Calculate Grid SIze
int a=numCols/blockSize.x, b=numRows/blockSize.y;
const dim3 gridSize(a+1,b+1,1);
const size_t numPixels = numRows * numCols;
uchar4 *d_outputImageRGBA;
cudaMalloc(&d_outputImageRGBA, sizeof(uchar4) * numPixels);
//Call mirror kernel.
mirror<<<gridSize, blockSize>>>(d_inputImageRGBA, d_outputImageRGBA, numRows, numCols, vertical);
cudaDeviceSynchronize();
//Initialize memory on host for output uchar4*
uchar4* h_out;
h_out = (uchar4*)malloc(sizeof(uchar4) * numPixels);
//copy output from device to host
cudaMemcpy(h_out, d_outputImageRGBA, sizeof(uchar4) * numPixels, cudaMemcpyDeviceToHost);
//cleanup memory on device
cudaFree(d_inputImageRGBA);
cudaFree(d_outputImageRGBA);
//return h_out
return h_out;
} |
f990c0f1f705726596aaf971e579201604053990.hip | // !!! This is a file automatically generated by hipify!!!
#include <cassert>
#include <stdint.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "fortran_array_2d.cuh"
// Working precision
typedef double wp_t;
// <<<number_of_blocks, size_of_block>>>
// Per-grid-point constants of the model mesh, held as FortranArray2D objects
// (allocated in cuda_initialise_grid_, deleted in cuda_finalise_).
// e1*/e2* are horizontal scale factors at the t/u/v/f points and e12* their
// products (cell areas) -- see k_initialise_grid. gphi* are presumably
// latitudes (NEMO naming; set uniformly to 50.0) -- confirm. xt/yt are
// cell-centre coordinates; ht/hu/hv hold the (constant) depth.
struct GridConstants
{
FortranArray2D<wp_t, 1, 1>* e1t = nullptr;
FortranArray2D<wp_t, 1, 1>* e2t = nullptr;
FortranArray2D<wp_t, 0, 1>* e1u = nullptr;
FortranArray2D<wp_t, 0, 1>* e2u = nullptr;
FortranArray2D<wp_t, 0, 0>* e1f = nullptr;
FortranArray2D<wp_t, 0, 0>* e2f = nullptr;
FortranArray2D<wp_t, 1, 0>* e1v = nullptr;
FortranArray2D<wp_t, 1, 0>* e2v = nullptr;
FortranArray2D<wp_t, 1, 1>* e12t = nullptr;
FortranArray2D<wp_t, 0, 1>* e12u = nullptr;
FortranArray2D<wp_t, 1, 0>* e12v = nullptr;
FortranArray2D<wp_t, 0, 1>* gphiu = nullptr;
FortranArray2D<wp_t, 1, 0>* gphiv = nullptr;
FortranArray2D<wp_t, 0, 0>* gphif = nullptr;
FortranArray2D<wp_t, 1, 1>* xt = nullptr;
FortranArray2D<wp_t, 1, 1>* yt = nullptr;
FortranArray2D<wp_t, 1, 1>* ht = nullptr;
FortranArray2D<wp_t, 0, 1>* hu = nullptr;
FortranArray2D<wp_t, 1, 0>* hv = nullptr;
// -1 = Water cell outside computational domain
// 0 = Land cell
// 1 = Water cell inside computational domain
FortranArray2D<int, 0, 0>* pt = nullptr;
GridConstants() {}
};
// Time-stepped model state: sea-surface heights and velocities at the
// current step (sshn*, un, vn) and the next step (ssha*, ua, va).
// Allocated in cuda_initialise_grid_, deleted in cuda_finalise_.
struct SimulationVariables
{
// Sea surface height - current values.
FortranArray2D<wp_t, 1, 1>* sshn = nullptr;
FortranArray2D<wp_t, 0, 1>* sshn_u = nullptr;
FortranArray2D<wp_t, 1, 0>* sshn_v = nullptr;
// Sea surface height - next step's values
FortranArray2D<wp_t, 1, 1>* ssha = nullptr;
FortranArray2D<wp_t, 0, 1>* ssha_u = nullptr;
FortranArray2D<wp_t, 1, 0>* ssha_v = nullptr;
// Velocities - current values
FortranArray2D<wp_t, 0, 1>* un = nullptr;
FortranArray2D<wp_t, 1, 0>* vn = nullptr;
// Velocities - next step's values
FortranArray2D<wp_t, 0, 1>* ua = nullptr;
FortranArray2D<wp_t, 1, 0>* va = nullptr;
SimulationVariables() {}
};
// Scalar run configuration, populated once via cuda_setup_model_params_
// before the grid is initialised.
struct ModelParameters
{
// Number of columns in model grid
int jpi = 0;
// Number of rows in model grid
int jpj = 0;
// Grid size in x and y directions (m)
wp_t dx = 0;
wp_t dy = 0;
// Constant depth (m)
wp_t dep_const = 0.0;
// First time step
int nit000 = 0;
// Final time step
int nitend = 0;
// Interval on which to save results
int irecord = 0;
// Size of time step (s)
wp_t rdt = 0.0;
// Bottom friction coefficient
wp_t cbfr = 0.0;
// Horizontal kinematic viscosity coefficient
wp_t visc = 0.0;
};
__global__ void
k_initialise_grid(const FortranArray2D<wp_t, 1, 1>& sshn,
const FortranArray2D<wp_t, 0, 1>& sshn_u,
const FortranArray2D<wp_t, 1, 0>& sshn_v,
const FortranArray2D<wp_t, 1, 1>& e1t,
const FortranArray2D<wp_t, 1, 1>& e2t,
const FortranArray2D<wp_t, 0, 1>& e1u,
const FortranArray2D<wp_t, 0, 1>& e2u,
const FortranArray2D<wp_t, 0, 0>& e1f,
const FortranArray2D<wp_t, 0, 0>& e2f,
const FortranArray2D<wp_t, 1, 0>& e1v,
const FortranArray2D<wp_t, 1, 0>& e2v,
const FortranArray2D<wp_t, 1, 1>& e12t,
const FortranArray2D<wp_t, 0, 1>& e12u,
const FortranArray2D<wp_t, 1, 0>& e12v,
const FortranArray2D<wp_t, 0, 1>& gphiu,
const FortranArray2D<wp_t, 1, 0>& gphiv,
const FortranArray2D<wp_t, 0, 0>& gphif,
const FortranArray2D<wp_t, 1, 1>& xt,
const FortranArray2D<wp_t, 1, 1>& yt,
const FortranArray2D<wp_t, 1, 1>& ht,
const FortranArray2D<wp_t, 0, 1>& hu,
const FortranArray2D<wp_t, 1, 0>& hv,
const FortranArray2D<int, 0, 0>& pt,
const int jpi,
const int jpj,
const wp_t dx,
const wp_t dy,
const wp_t dep_const);
__global__ void
k_setup_model_params(const int jpi,
const int jpj,
const wp_t dx,
const wp_t dy,
const wp_t dep_const,
const int nit000,
const int nitend,
const int irecord,
const wp_t rdt,
const wp_t cbfr,
const wp_t visc);
__global__ void
k_continuity();
__global__ void
k_boundary_conditions();
__global__ void
k_momentum();
__global__ void
k_next();
void
finalise();
FortranArray2D<wp_t, 0, 1>* global_array = nullptr;
// C-linkage entry points called from the Fortran driver (hence the
// trailing-underscore names). The kernel launches below use a fixed
// <<<1, 10>>> placeholder configuration -- the kernels they launch are
// still TODO stubs (see k_continuity et al. below).
extern "C"
{
void cuda_setup_model_params_(int jpi,
int jpj,
wp_t dx,
wp_t dy,
wp_t dep_const,
int nit000,
int nitend,
int irecord,
wp_t rdt,
wp_t cbfr,
wp_t visc);
void cuda_initialise_grid_();
void cuda_continuity_() {hipLaunchKernelGGL(( k_continuity), dim3(1), dim3(10), 0, 0, ); }
void cuda_boundary_conditions_() {hipLaunchKernelGGL(( k_boundary_conditions), dim3(1), dim3(10), 0, 0, ); }
// Unlike the other wrappers, this one blocks until the kernel completes.
void cuda_momentum_()
{
hipLaunchKernelGGL(( k_momentum), dim3(1), dim3(10), 0, 0, );
hipDeviceSynchronize();
}
void cuda_next_() {hipLaunchKernelGGL(( k_next), dim3(1), dim3(10), 0, 0, ); }
void cuda_finalise_();
};
GridConstants grid_constants;
SimulationVariables simulation_vars;
ModelParameters model_params;
// Allocates every grid-constant and simulation-variable array for the
// jpi x jpj model grid (read from model_params) and launches
// k_initialise_grid to fill them on the device.
// NOTE(review): the size check below only prints a diagnostic -- execution
// continues with the invalid sizes; confirm whether it should abort.
void
cuda_initialise_grid_()
{
const int jpi = model_params.jpi;
const int jpj = model_params.jpj;
if (jpi == 0 || jpj == 0) {
fprintf(stderr,
"Invalid grid size: (%d, %d); have you setup model params?",
jpi,
jpj);
}
printf(
"[CUDA](Host) Initialising grid constants and simluation variables.\n");
// Create and allocate the grid constants
grid_constants.e1t = new FortranArray2D<wp_t, 1, 1>(jpi, jpj);
grid_constants.e2t = new FortranArray2D<wp_t, 1, 1>(jpi, jpj);
grid_constants.e1u = new FortranArray2D<wp_t, 0, 1>(jpi, jpj);
grid_constants.e2u = new FortranArray2D<wp_t, 0, 1>(jpi, jpj);
grid_constants.e1f = new FortranArray2D<wp_t, 0, 0>(jpi, jpj);
grid_constants.e2f = new FortranArray2D<wp_t, 0, 0>(jpi, jpj);
grid_constants.e1v = new FortranArray2D<wp_t, 1, 0>(jpi, jpj);
grid_constants.e2v = new FortranArray2D<wp_t, 1, 0>(jpi, jpj);
grid_constants.e12t = new FortranArray2D<wp_t, 1, 1>(jpi, jpj);
grid_constants.e12u = new FortranArray2D<wp_t, 0, 1>(jpi, jpj);
grid_constants.e12v = new FortranArray2D<wp_t, 1, 0>(jpi, jpj);
grid_constants.gphiu = new FortranArray2D<wp_t, 0, 1>(jpi, jpj);
grid_constants.gphiv = new FortranArray2D<wp_t, 1, 0>(jpi, jpj);
grid_constants.gphif = new FortranArray2D<wp_t, 0, 0>(jpi, jpj);
grid_constants.xt = new FortranArray2D<wp_t, 1, 1>(jpi, jpj);
grid_constants.yt = new FortranArray2D<wp_t, 1, 1>(jpi, jpj);
grid_constants.ht = new FortranArray2D<wp_t, 1, 1>(jpi, jpj);
grid_constants.hu = new FortranArray2D<wp_t, 0, 1>(jpi, jpj);
grid_constants.hv = new FortranArray2D<wp_t, 1, 0>(jpi, jpj);
// pt carries one extra row/column for the boundary ring of t-cells.
grid_constants.pt = new FortranArray2D<int, 0, 0>(jpi + 1, jpj + 1);
// Create and allocate simulation variables
simulation_vars.sshn = new FortranArray2D<wp_t, 1, 1>(jpi, jpj);
simulation_vars.sshn_u = new FortranArray2D<wp_t, 0, 1>(jpi, jpj);
simulation_vars.sshn_v = new FortranArray2D<wp_t, 1, 0>(jpi, jpj);
simulation_vars.ssha = new FortranArray2D<wp_t, 1, 1>(jpi, jpj);
simulation_vars.ssha_u = new FortranArray2D<wp_t, 0, 1>(jpi, jpj);
simulation_vars.ssha_v = new FortranArray2D<wp_t, 1, 0>(jpi, jpj);
simulation_vars.un = new FortranArray2D<wp_t, 0, 1>(jpi, jpj);
simulation_vars.vn = new FortranArray2D<wp_t, 1, 0>(jpi, jpj);
simulation_vars.ua = new FortranArray2D<wp_t, 0, 1>(jpi, jpj);
simulation_vars.va = new FortranArray2D<wp_t, 1, 0>(jpi, jpj);
// Initialise simulation parameters
// NOTE(review): this is a 1D launch ((jpi + 2) blocks of (jpj + 2)
// threads) while k_initialise_grid derives jj from threadIdx.y with
// blockDim.y == 1 -- a 2D dim3 configuration appears intended; confirm.
// NOTE(review): arguments are host-side object references; this is only
// valid if FortranArray2D instances are device-accessible (e.g. unified
// memory) -- confirm.
hipLaunchKernelGGL(( k_initialise_grid), dim3(jpi + 2), dim3(jpj + 2), 0, 0, *simulation_vars.sshn,
*simulation_vars.sshn_u,
*simulation_vars.sshn_v,
*grid_constants.e1t,
*grid_constants.e2t,
*grid_constants.e1u,
*grid_constants.e2u,
*grid_constants.e1f,
*grid_constants.e2f,
*grid_constants.e1v,
*grid_constants.e2v,
*grid_constants.e12t,
*grid_constants.e12u,
*grid_constants.e12v,
*grid_constants.gphiu,
*grid_constants.gphiv,
*grid_constants.gphif,
*grid_constants.xt,
*grid_constants.yt,
*grid_constants.ht,
*grid_constants.hu,
*grid_constants.hv,
*grid_constants.pt,
jpi,
jpj,
model_params.dx,
model_params.dy,
model_params.dep_const);
hipDeviceSynchronize();
}
// Device-side initialisation of all grid constants and the initial sea
// surface heights at the u/v points. Expects a 2D thread decomposition
// covering at least (jpi + 2) x (jpj + 2) points; every thread handles one
// (ji, jj) grid point and bounds-checks itself.
// NOTE(review): sshn is read below but never written in this kernel --
// presumably zero-initialised by FortranArray2D's constructor; confirm.
__global__ void
k_initialise_grid(const FortranArray2D<wp_t, 1, 1>& sshn,
                  const FortranArray2D<wp_t, 0, 1>& sshn_u,
                  const FortranArray2D<wp_t, 1, 0>& sshn_v,
                  const FortranArray2D<wp_t, 1, 1>& e1t,
                  const FortranArray2D<wp_t, 1, 1>& e2t,
                  const FortranArray2D<wp_t, 0, 1>& e1u,
                  const FortranArray2D<wp_t, 0, 1>& e2u,
                  const FortranArray2D<wp_t, 0, 0>& e1f,
                  const FortranArray2D<wp_t, 0, 0>& e2f,
                  const FortranArray2D<wp_t, 1, 0>& e1v,
                  const FortranArray2D<wp_t, 1, 0>& e2v,
                  const FortranArray2D<wp_t, 1, 1>& e12t,
                  const FortranArray2D<wp_t, 0, 1>& e12u,
                  const FortranArray2D<wp_t, 1, 0>& e12v,
                  const FortranArray2D<wp_t, 0, 1>& gphiu,
                  const FortranArray2D<wp_t, 1, 0>& gphiv,
                  const FortranArray2D<wp_t, 0, 0>& gphif,
                  const FortranArray2D<wp_t, 1, 1>& xt,
                  const FortranArray2D<wp_t, 1, 1>& yt,
                  const FortranArray2D<wp_t, 1, 1>& ht,
                  const FortranArray2D<wp_t, 0, 1>& hu,
                  const FortranArray2D<wp_t, 1, 0>& hv,
                  const FortranArray2D<int, 0, 0>& pt,
                  const int jpi,
                  const int jpj,
                  const wp_t dx,
                  const wp_t dy,
                  const wp_t dep_const)
{
  // BUG FIX: the global indices were previously computed as
  //   threadIdx.x * blockIdx.x + blockDim.x
  // which multiplies the thread index by the block index and adds the block
  // *size* -- not a valid global index (e.g. every thread in block 0 mapped
  // to ji == blockDim.x). Use the standard blockIdx * blockDim + threadIdx
  // formula instead.
  int ji = blockIdx.x * blockDim.x + threadIdx.x;
  int jj = blockIdx.y * blockDim.y + threadIdx.y;

  // Setup the grid constants values.
  // Define model solid/open boundaries via the properties of t-cells.
  if (jj <= jpj + 1 && ji <= jpi + 1) {
    // All inner cells
    pt(ji, jj) = 1;

    // West, East and North have solid boundaries
    if (ji == 0 || ji == jpi + 1 || jj == jpj + 1) {
      pt(ji, jj) = 0;
    }

    // South open boundary
    if (jj == 0) {
      pt(ji, jj) = -1;
    }
  }

  if (ji <= jpi && jj <= jpj) {
    // 1:N, 1:M -- t-point scale factors, cell areas, coordinates and depth.
    if (ji > 0 && jj > 0) {
      e1t(ji, jj) = dx;
      e2t(ji, jj) = dy;
      e12t(ji, jj) = e1t(ji, jj) * e2t(ji, jj);

      // NOTE: The NEMOLite2D Fortran code was designed to handle a dx that
      // varies, indicating a non-linear physical grid size (different cells
      // have different sizes). Here we assume that the dx and dy are fixed and
      // not variant on the grid cell. This makes the calculation much easier
      // and makes parallelising the below xt, yt initilisation possible.
      xt(ji, jj) = e1t(ji, jj) * (static_cast<wp_t>(ji) - 0.5);
      yt(ji, jj) = e2t(ji, jj) * (static_cast<wp_t>(jj) - 0.5);

      ht(ji, jj) = dep_const;
    }

    // 0:N, 1:M -- u-point quantities.
    if (jj > 0) {
      e1u(ji, jj) = dx;
      e2u(ji, jj) = dy;
      e12u(ji, jj) = e1u(ji, jj) * e2u(ji, jj);

      gphiu(ji, jj) = 50.0;
      hu(ji, jj) = dep_const;
    }

    // 1:N, 0:M -- v-point quantities.
    if (ji > 0) {
      e1v(ji, jj) = dx;
      e2v(ji, jj) = dy;
      e12v(ji, jj) = e1v(ji, jj) * e2v(ji, jj);

      gphiv(ji, jj) = 50.0;
      hv(ji, jj) = dep_const;
    }

    // 0:N, 0:M -- f-point quantities.
    e1f(ji, jj) = dx;
    e2f(ji, jj) = dy;
    gphif(ji, jj) = 50.0;
  }

  // Setup the simulation variables initial values: area-weighted averages of
  // the t-point surface height onto the u and v points (clamped at edges).
  if (ji <= jpi && jj > 0 && jj <= jpj) {
    int itmp1 = min(ji + 1, jpi);
    int itmp2 = max(ji, 1);
    wp_t rtmp1 =
      e12t(itmp1, jj) * sshn(itmp1, jj) + e12t(itmp2, jj) * sshn(itmp2, jj);
    sshn_u(ji, jj) = 0.5 * rtmp1 / e12u(ji, jj);
  }

  if (ji > 0 && ji <= jpi && jj <= jpj) {
    int itmp1 = min(jj + 1, jpj);
    int itmp2 = max(jj, 1);
    wp_t rtmp1 =
      e12t(ji, itmp1) * sshn(ji, itmp1) + e12t(ji, itmp2) * sshn(ji, itmp2);
    sshn_v(ji, jj) = 0.5 * rtmp1 / e12v(ji, jj);
  }
}
// Host entry point (called from the Fortran driver): copies the scalar
// model parameters into the global `model_params` structure consumed by
// the later CUDA/HIP setup calls.  All fields are assigned, so this is
// equivalent to the previous whole-aggregate initialisation.
void
cuda_setup_model_params_(int jpi,
                         int jpj,
                         wp_t dx,
                         wp_t dy,
                         wp_t dep_const,
                         int nit000,
                         int nitend,
                         int irecord,
                         wp_t rdt,
                         wp_t cbfr,
                         wp_t visc)
{
  printf("[CUDA](Host) Initialising model params.\n");
  ModelParameters params;
  params.jpi = jpi;
  params.jpj = jpj;
  params.dx = dx;
  params.dy = dy;
  params.dep_const = dep_const;
  params.nit000 = nit000;
  params.nitend = nitend;
  params.irecord = irecord;
  params.rdt = rdt;
  params.cbfr = cbfr;
  params.visc = visc;
  model_params = params;
}
// Stub: continuity (sea-surface-height) update step — not yet implemented.
__global__ void
k_continuity()
{
  // TODO:
}
// Stub: momentum (velocity) update step — not yet implemented.
__global__ void
k_momentum()
{
  // TODO:
}
// Stub: application of open/solid boundary conditions — not yet implemented.
__global__ void
k_boundary_conditions()
{
  // TODO:
}
// Stub: field rotation to advance to the next time step — not yet implemented.
__global__ void
k_next()
{
  // TODO:
}
// Frees every array owned by the global grid-constant and
// simulation-variable structures, then resets the HIP device.
// Improvements over the previous version:
//  * the structs are reset after deletion so the stale pointers cannot be
//    double-deleted if this is ever called twice;
//  * the device-reset status is reported explicitly instead of via
//    assert(), which disappears under NDEBUG.
void
cuda_finalise_()
{
  // Clean up grid constants arrays.
  delete grid_constants.e1t;
  delete grid_constants.e2t;
  delete grid_constants.e1u;
  delete grid_constants.e2u;
  delete grid_constants.e1f;
  delete grid_constants.e2f;
  delete grid_constants.e1v;
  delete grid_constants.e2v;
  delete grid_constants.e12t;
  delete grid_constants.e12u;
  delete grid_constants.e12v;
  delete grid_constants.gphiu;
  delete grid_constants.gphiv;
  delete grid_constants.gphif;
  delete grid_constants.xt;
  delete grid_constants.yt;
  delete grid_constants.ht;
  delete grid_constants.hu;
  delete grid_constants.hv;
  delete grid_constants.pt;
  // Clean up simulation params arrays.
  delete simulation_vars.sshn;
  delete simulation_vars.sshn_u;
  delete simulation_vars.sshn_v;
  delete simulation_vars.ssha;
  delete simulation_vars.ssha_u;
  delete simulation_vars.ssha_v;
  delete simulation_vars.un;
  delete simulation_vars.vn;
  delete simulation_vars.ua;
  delete simulation_vars.va;
  // Reset the structs so repeated finalisation is a harmless no-op
  // (default member initialisers set every pointer back to nullptr).
  grid_constants = GridConstants();
  simulation_vars = SimulationVariables();
  hipError_t cudaStatus = hipDeviceReset();
  if (cudaStatus != hipSuccess) {
    fprintf(stderr,
            "[CUDA](Host) hipDeviceReset failed: %s\n",
            hipGetErrorString(cudaStatus));
  }
}
| f990c0f1f705726596aaf971e579201604053990.cu | #include <cassert>
#include <stdint.h>
#include <stdio.h>
#include <cuda_runtime.h>
#include "fortran_array_2d.cuh"
// Working precision
typedef double wp_t;
// <<<number_of_blocks, size_of_block>>>
// Time-invariant grid geometry and static model fields, held as pointers
// to FortranArray2D objects whose template parameters encode the Fortran
// lower bounds of each dimension.  The t/u/v/f suffixes follow the
// Arakawa C-grid naming used by NEMOLite2D.
struct GridConstants
{
  // Cell extents in the x direction (e1*) and y direction (e2*)
  // for each grid-point family.
  FortranArray2D<wp_t, 1, 1>* e1t = nullptr;
  FortranArray2D<wp_t, 1, 1>* e2t = nullptr;
  FortranArray2D<wp_t, 0, 1>* e1u = nullptr;
  FortranArray2D<wp_t, 0, 1>* e2u = nullptr;
  FortranArray2D<wp_t, 0, 0>* e1f = nullptr;
  FortranArray2D<wp_t, 0, 0>* e2f = nullptr;
  FortranArray2D<wp_t, 1, 0>* e1v = nullptr;
  FortranArray2D<wp_t, 1, 0>* e2v = nullptr;
  // Cell areas (product of the extents above).
  FortranArray2D<wp_t, 1, 1>* e12t = nullptr;
  FortranArray2D<wp_t, 0, 1>* e12u = nullptr;
  FortranArray2D<wp_t, 1, 0>* e12v = nullptr;
  // Latitudes at u/v/f points.
  FortranArray2D<wp_t, 0, 1>* gphiu = nullptr;
  FortranArray2D<wp_t, 1, 0>* gphiv = nullptr;
  FortranArray2D<wp_t, 0, 0>* gphif = nullptr;
  // Physical coordinates of t points.
  FortranArray2D<wp_t, 1, 1>* xt = nullptr;
  FortranArray2D<wp_t, 1, 1>* yt = nullptr;
  // Water depths at t/u/v points.
  FortranArray2D<wp_t, 1, 1>* ht = nullptr;
  FortranArray2D<wp_t, 0, 1>* hu = nullptr;
  FortranArray2D<wp_t, 1, 0>* hv = nullptr;
  // -1 = Water cell outside computational domain
  //  0 = Land cell
  //  1 = Water cell inside computational domain
  FortranArray2D<int, 0, 0>* pt = nullptr;
  GridConstants() {}
};
// Time-stepped model state, double-buffered: the *n arrays hold the
// current step's values, the *a ("after") arrays hold the next step's.
struct SimulationVariables
{
  // Sea surface height - current values.
  FortranArray2D<wp_t, 1, 1>* sshn = nullptr;
  FortranArray2D<wp_t, 0, 1>* sshn_u = nullptr;
  FortranArray2D<wp_t, 1, 0>* sshn_v = nullptr;
  // Sea surface height - next step's values
  FortranArray2D<wp_t, 1, 1>* ssha = nullptr;
  FortranArray2D<wp_t, 0, 1>* ssha_u = nullptr;
  FortranArray2D<wp_t, 1, 0>* ssha_v = nullptr;
  // Velocities - current values
  FortranArray2D<wp_t, 0, 1>* un = nullptr;
  FortranArray2D<wp_t, 1, 0>* vn = nullptr;
  // Velocities - next step's values
  FortranArray2D<wp_t, 0, 1>* ua = nullptr;
  FortranArray2D<wp_t, 1, 0>* va = nullptr;
  SimulationVariables() {}
};
// Scalar simulation parameters, populated once by cuda_setup_model_params_.
struct ModelParameters
{
  // Number of columns in model grid
  int jpi = 0;
  // Number of rows in model grid
  int jpj = 0;
  // Grid cell size in the x and y directions (m)
  wp_t dx = 0;
  wp_t dy = 0;
  // Constant depth (m)
  wp_t dep_const = 0.0;
  // First time step
  int nit000 = 0;
  // Final time step
  int nitend = 0;
  // Interval (in steps) on which to save results
  int irecord = 0;
  // Size of time step (s)
  wp_t rdt = 0.0;
  // Bottom friction coefficient
  wp_t cbfr = 0.0;
  // Horizontal kinematic viscosity coefficient
  wp_t visc = 0.0;
};
__global__ void
k_initialise_grid(const FortranArray2D<wp_t, 1, 1>& sshn,
const FortranArray2D<wp_t, 0, 1>& sshn_u,
const FortranArray2D<wp_t, 1, 0>& sshn_v,
const FortranArray2D<wp_t, 1, 1>& e1t,
const FortranArray2D<wp_t, 1, 1>& e2t,
const FortranArray2D<wp_t, 0, 1>& e1u,
const FortranArray2D<wp_t, 0, 1>& e2u,
const FortranArray2D<wp_t, 0, 0>& e1f,
const FortranArray2D<wp_t, 0, 0>& e2f,
const FortranArray2D<wp_t, 1, 0>& e1v,
const FortranArray2D<wp_t, 1, 0>& e2v,
const FortranArray2D<wp_t, 1, 1>& e12t,
const FortranArray2D<wp_t, 0, 1>& e12u,
const FortranArray2D<wp_t, 1, 0>& e12v,
const FortranArray2D<wp_t, 0, 1>& gphiu,
const FortranArray2D<wp_t, 1, 0>& gphiv,
const FortranArray2D<wp_t, 0, 0>& gphif,
const FortranArray2D<wp_t, 1, 1>& xt,
const FortranArray2D<wp_t, 1, 1>& yt,
const FortranArray2D<wp_t, 1, 1>& ht,
const FortranArray2D<wp_t, 0, 1>& hu,
const FortranArray2D<wp_t, 1, 0>& hv,
const FortranArray2D<int, 0, 0>& pt,
const int jpi,
const int jpj,
const wp_t dx,
const wp_t dy,
const wp_t dep_const);
__global__ void
k_setup_model_params(const int jpi,
const int jpj,
const wp_t dx,
const wp_t dy,
const wp_t dep_const,
const int nit000,
const int nitend,
const int irecord,
const wp_t rdt,
const wp_t cbfr,
const wp_t visc);
__global__ void
k_continuity();
__global__ void
k_boundary_conditions();
__global__ void
k_momentum();
__global__ void
k_next();
void
finalise();
FortranArray2D<wp_t, 0, 1>* global_array = nullptr;
// C-linkage entry points called from the Fortran driver.  The launch
// wrappers now call cudaGetLastError() after each launch: CUDA kernel
// launches return nothing, so configuration failures were previously
// swallowed silently.
extern "C"
{
  void cuda_setup_model_params_(int jpi,
                                int jpj,
                                wp_t dx,
                                wp_t dy,
                                wp_t dep_const,
                                int nit000,
                                int nitend,
                                int irecord,
                                wp_t rdt,
                                wp_t cbfr,
                                wp_t visc);
  void cuda_initialise_grid_();

  // Report a failed kernel launch (bad configuration etc.); internal helper.
  static void report_launch_error(const char* kernel_name)
  {
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
      fprintf(stderr,
              "[CUDA](Host) %s launch failed: %s\n",
              kernel_name,
              cudaGetErrorString(err));
    }
  }

  void cuda_continuity_()
  {
    k_continuity<<<1, 10>>>();
    report_launch_error("k_continuity");
  }

  void cuda_boundary_conditions_()
  {
    k_boundary_conditions<<<1, 10>>>();
    report_launch_error("k_boundary_conditions");
  }

  void cuda_momentum_()
  {
    k_momentum<<<1, 10>>>();
    report_launch_error("k_momentum");
    cudaDeviceSynchronize();
  }

  void cuda_next_()
  {
    k_next<<<1, 10>>>();
    report_launch_error("k_next");
  }

  void cuda_finalise_();
};
// File-scope singletons shared by all extern "C" entry points in this
// translation unit.
GridConstants grid_constants;
SimulationVariables simulation_vars;
ModelParameters model_params;
// Allocates all grid-constant and simulation-variable arrays for the
// (jpi x jpj) model grid and launches k_initialise_grid to fill them.
// Requires cuda_setup_model_params_ to have been called first.
// BUG FIX: previously an invalid (zero) grid size only printed a warning
// and then fell through to the allocations and the kernel launch; it now
// returns early.  The launch is also checked with cudaGetLastError().
void
cuda_initialise_grid_()
{
  const int jpi = model_params.jpi;
  const int jpj = model_params.jpj;
  if (jpi == 0 || jpj == 0) {
    fprintf(stderr,
            "Invalid grid size: (%d, %d); have you setup model params?\n",
            jpi,
            jpj);
    return;
  }
  printf(
    "[CUDA](Host) Initialising grid constants and simluation variables.\n");
  // Create and allocate the grid constants
  grid_constants.e1t = new FortranArray2D<wp_t, 1, 1>(jpi, jpj);
  grid_constants.e2t = new FortranArray2D<wp_t, 1, 1>(jpi, jpj);
  grid_constants.e1u = new FortranArray2D<wp_t, 0, 1>(jpi, jpj);
  grid_constants.e2u = new FortranArray2D<wp_t, 0, 1>(jpi, jpj);
  grid_constants.e1f = new FortranArray2D<wp_t, 0, 0>(jpi, jpj);
  grid_constants.e2f = new FortranArray2D<wp_t, 0, 0>(jpi, jpj);
  grid_constants.e1v = new FortranArray2D<wp_t, 1, 0>(jpi, jpj);
  grid_constants.e2v = new FortranArray2D<wp_t, 1, 0>(jpi, jpj);
  grid_constants.e12t = new FortranArray2D<wp_t, 1, 1>(jpi, jpj);
  grid_constants.e12u = new FortranArray2D<wp_t, 0, 1>(jpi, jpj);
  grid_constants.e12v = new FortranArray2D<wp_t, 1, 0>(jpi, jpj);
  grid_constants.gphiu = new FortranArray2D<wp_t, 0, 1>(jpi, jpj);
  grid_constants.gphiv = new FortranArray2D<wp_t, 1, 0>(jpi, jpj);
  grid_constants.gphif = new FortranArray2D<wp_t, 0, 0>(jpi, jpj);
  grid_constants.xt = new FortranArray2D<wp_t, 1, 1>(jpi, jpj);
  grid_constants.yt = new FortranArray2D<wp_t, 1, 1>(jpi, jpj);
  grid_constants.ht = new FortranArray2D<wp_t, 1, 1>(jpi, jpj);
  grid_constants.hu = new FortranArray2D<wp_t, 0, 1>(jpi, jpj);
  grid_constants.hv = new FortranArray2D<wp_t, 1, 0>(jpi, jpj);
  grid_constants.pt = new FortranArray2D<int, 0, 0>(jpi + 1, jpj + 1);
  // Create and allocate simulation variables
  simulation_vars.sshn = new FortranArray2D<wp_t, 1, 1>(jpi, jpj);
  simulation_vars.sshn_u = new FortranArray2D<wp_t, 0, 1>(jpi, jpj);
  simulation_vars.sshn_v = new FortranArray2D<wp_t, 1, 0>(jpi, jpj);
  simulation_vars.ssha = new FortranArray2D<wp_t, 1, 1>(jpi, jpj);
  simulation_vars.ssha_u = new FortranArray2D<wp_t, 0, 1>(jpi, jpj);
  simulation_vars.ssha_v = new FortranArray2D<wp_t, 1, 0>(jpi, jpj);
  simulation_vars.un = new FortranArray2D<wp_t, 0, 1>(jpi, jpj);
  simulation_vars.vn = new FortranArray2D<wp_t, 1, 0>(jpi, jpj);
  simulation_vars.ua = new FortranArray2D<wp_t, 0, 1>(jpi, jpj);
  simulation_vars.va = new FortranArray2D<wp_t, 1, 0>(jpi, jpj);
  // Initialise simulation parameters.
  // One block per i-column (jpi + 2), one thread per j-row (jpj + 2).
  // NOTE(review): the kernel receives references to host-heap
  // FortranArray2D objects; this assumes FortranArray2D places itself in
  // device-visible (e.g. managed) memory — confirm against its definition.
  k_initialise_grid<<<jpi + 2, jpj + 2>>>(*simulation_vars.sshn,
                                          *simulation_vars.sshn_u,
                                          *simulation_vars.sshn_v,
                                          *grid_constants.e1t,
                                          *grid_constants.e2t,
                                          *grid_constants.e1u,
                                          *grid_constants.e2u,
                                          *grid_constants.e1f,
                                          *grid_constants.e2f,
                                          *grid_constants.e1v,
                                          *grid_constants.e2v,
                                          *grid_constants.e12t,
                                          *grid_constants.e12u,
                                          *grid_constants.e12v,
                                          *grid_constants.gphiu,
                                          *grid_constants.gphiv,
                                          *grid_constants.gphif,
                                          *grid_constants.xt,
                                          *grid_constants.yt,
                                          *grid_constants.ht,
                                          *grid_constants.hu,
                                          *grid_constants.hv,
                                          *grid_constants.pt,
                                          jpi,
                                          jpj,
                                          model_params.dx,
                                          model_params.dy,
                                          model_params.dep_const);
  // Kernel launches fail silently; surface both launch-config and
  // asynchronous execution errors.
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    fprintf(stderr,
            "[CUDA](Host) k_initialise_grid launch failed: %s\n",
            cudaGetErrorString(err));
  }
  err = cudaDeviceSynchronize();
  if (err != cudaSuccess) {
    fprintf(stderr,
            "[CUDA](Host) k_initialise_grid execution failed: %s\n",
            cudaGetErrorString(err));
  }
}
// Fills the grid constants (cell sizes, areas, latitudes, depths, cell
// types) and the initial sshn_u/sshn_v values for one (ji, jj) grid point.
// Launch contract: <<<jpi + 2, jpj + 2>>> — a 1-D grid of 1-D blocks, one
// block per i-column and one thread per j-row.
// NOTE(review): fields are written through const references, so
// FortranArray2D::operator() presumably returns a mutable reference from a
// const object — confirm against fortran_array_2d.cuh.
__global__ void
k_initialise_grid(const FortranArray2D<wp_t, 1, 1>& sshn,
                  const FortranArray2D<wp_t, 0, 1>& sshn_u,
                  const FortranArray2D<wp_t, 1, 0>& sshn_v,
                  const FortranArray2D<wp_t, 1, 1>& e1t,
                  const FortranArray2D<wp_t, 1, 1>& e2t,
                  const FortranArray2D<wp_t, 0, 1>& e1u,
                  const FortranArray2D<wp_t, 0, 1>& e2u,
                  const FortranArray2D<wp_t, 0, 0>& e1f,
                  const FortranArray2D<wp_t, 0, 0>& e2f,
                  const FortranArray2D<wp_t, 1, 0>& e1v,
                  const FortranArray2D<wp_t, 1, 0>& e2v,
                  const FortranArray2D<wp_t, 1, 1>& e12t,
                  const FortranArray2D<wp_t, 0, 1>& e12u,
                  const FortranArray2D<wp_t, 1, 0>& e12v,
                  const FortranArray2D<wp_t, 0, 1>& gphiu,
                  const FortranArray2D<wp_t, 1, 0>& gphiv,
                  const FortranArray2D<wp_t, 0, 0>& gphif,
                  const FortranArray2D<wp_t, 1, 1>& xt,
                  const FortranArray2D<wp_t, 1, 1>& yt,
                  const FortranArray2D<wp_t, 1, 1>& ht,
                  const FortranArray2D<wp_t, 0, 1>& hu,
                  const FortranArray2D<wp_t, 1, 0>& hv,
                  const FortranArray2D<int, 0, 0>& pt,
                  const int jpi,
                  const int jpj,
                  const wp_t dx,
                  const wp_t dy,
                  const wp_t dep_const)
{
  // BUG FIX: the previous index computation
  //   ji = threadIdx.x * blockIdx.x + blockDim.x;
  //   jj = threadIdx.y * blockIdx.y + blockDim.y;
  // did not match the 1-D <<<jpi + 2, jpj + 2>>> launch: all .y components
  // are 0/1 there, so jj was the constant 1 and only one grid row was ever
  // initialised.  With one block per column and one thread per row, the
  // correct mapping is simply:
  int ji = blockIdx.x;  // ji in [0, jpi + 1]
  int jj = threadIdx.x; // jj in [0, jpj + 1]
  // Setup the grid constants values.
  // Define model solid/open boundaries via the properties of t-cells.
  if (jj <= jpj + 1 && ji <= jpi + 1) {
    // All inner cells
    pt(ji, jj) = 1;
    // West, East and North have solid boundaries
    if (ji == 0 || ji == jpi + 1 || jj == jpj + 1) {
      pt(ji, jj) = 0;
    }
    // South open boundary
    if (jj == 0) {
      pt(ji, jj) = -1;
    }
  }
  if (ji <= jpi && jj <= jpj) {
    // 1:N, 1:M
    if (ji > 0 && jj > 0) {
      e1t(ji, jj) = dx;
      e2t(ji, jj) = dy;
      e12t(ji, jj) = e1t(ji, jj) * e2t(ji, jj);
      // NOTE: The NEMOLite2D Fortran code was designed to handle a dx that
      // varies, indicating a non-linear physical grid size (different cells
      // have different sizes). Here we assume that the dx and dy are fixed and
      // not variant on the grid cell. This makes the calculation much easier
      // and makes parallelising the below xt, yt initilisation possible.
      xt(ji, jj) = e1t(ji, jj) * (static_cast<wp_t>(ji) - 0.5);
      yt(ji, jj) = e2t(ji, jj) * (static_cast<wp_t>(jj) - 0.5);
      ht(ji, jj) = dep_const;
    }
    // 0:N, 1:M
    if (jj > 0) {
      e1u(ji, jj) = dx;
      e2u(ji, jj) = dy;
      e12u(ji, jj) = e1u(ji, jj) * e2u(ji, jj);
      gphiu(ji, jj) = 50.0;
      hu(ji, jj) = dep_const;
    }
    // 1:N, 0:M
    if (ji > 0) {
      e1v(ji, jj) = dx;
      e2v(ji, jj) = dy;
      e12v(ji, jj) = e1v(ji, jj) * e2v(ji, jj);
      gphiv(ji, jj) = 50.0;
      hv(ji, jj) = dep_const;
    }
    // 0:N, 0:M
    e1f(ji, jj) = dx;
    e2f(ji, jj) = dy;
    gphif(ji, jj) = 50.0;
  }
  // Setup the simulation variables initial values.
  // sshn_u: area-weighted average of sshn on the two t-cells adjacent in x,
  // clamped to the [1, jpi] interior at the domain edges.
  if (ji <= jpi && jj > 0 && jj <= jpj) {
    int itmp1 = min(ji + 1, jpi);
    int itmp2 = max(ji, 1);
    wp_t rtmp1 =
      e12t(itmp1, jj) * sshn(itmp1, jj) + e12t(itmp2, jj) * sshn(itmp2, jj);
    sshn_u(ji, jj) = 0.5 * rtmp1 / e12u(ji, jj);
  }
  // sshn_v: same averaging in the y direction.
  if (ji > 0 && ji <= jpi && jj <= jpj) {
    int itmp1 = min(jj + 1, jpj);
    int itmp2 = max(jj, 1);
    wp_t rtmp1 =
      e12t(ji, itmp1) * sshn(ji, itmp1) + e12t(ji, itmp2) * sshn(ji, itmp2);
    sshn_v(ji, jj) = 0.5 * rtmp1 / e12v(ji, jj);
  }
}
// Host entry point (called from the Fortran driver): copies the scalar
// model parameters into the global `model_params` structure consumed by
// the later CUDA setup calls.  All fields are assigned, so this is
// equivalent to the previous whole-aggregate initialisation.
void
cuda_setup_model_params_(int jpi,
                         int jpj,
                         wp_t dx,
                         wp_t dy,
                         wp_t dep_const,
                         int nit000,
                         int nitend,
                         int irecord,
                         wp_t rdt,
                         wp_t cbfr,
                         wp_t visc)
{
  printf("[CUDA](Host) Initialising model params.\n");
  ModelParameters params;
  params.jpi = jpi;
  params.jpj = jpj;
  params.dx = dx;
  params.dy = dy;
  params.dep_const = dep_const;
  params.nit000 = nit000;
  params.nitend = nitend;
  params.irecord = irecord;
  params.rdt = rdt;
  params.cbfr = cbfr;
  params.visc = visc;
  model_params = params;
}
// Stub: continuity (sea-surface-height) update step — not yet implemented.
__global__ void
k_continuity()
{
  // TODO:
}
// Stub: momentum (velocity) update step — not yet implemented.
__global__ void
k_momentum()
{
  // TODO:
}
// Stub: application of open/solid boundary conditions — not yet implemented.
__global__ void
k_boundary_conditions()
{
  // TODO:
}
// Stub: field rotation to advance to the next time step — not yet implemented.
__global__ void
k_next()
{
  // TODO:
}
// Frees every array owned by the global grid-constant and
// simulation-variable structures, then resets the CUDA device.
// Improvements over the previous version:
//  * the structs are reset after deletion so the stale pointers cannot be
//    double-deleted if this is ever called twice;
//  * the device-reset status is reported explicitly instead of via
//    assert(), which disappears under NDEBUG.
void
cuda_finalise_()
{
  // Clean up grid constants arrays.
  delete grid_constants.e1t;
  delete grid_constants.e2t;
  delete grid_constants.e1u;
  delete grid_constants.e2u;
  delete grid_constants.e1f;
  delete grid_constants.e2f;
  delete grid_constants.e1v;
  delete grid_constants.e2v;
  delete grid_constants.e12t;
  delete grid_constants.e12u;
  delete grid_constants.e12v;
  delete grid_constants.gphiu;
  delete grid_constants.gphiv;
  delete grid_constants.gphif;
  delete grid_constants.xt;
  delete grid_constants.yt;
  delete grid_constants.ht;
  delete grid_constants.hu;
  delete grid_constants.hv;
  delete grid_constants.pt;
  // Clean up simulation params arrays.
  delete simulation_vars.sshn;
  delete simulation_vars.sshn_u;
  delete simulation_vars.sshn_v;
  delete simulation_vars.ssha;
  delete simulation_vars.ssha_u;
  delete simulation_vars.ssha_v;
  delete simulation_vars.un;
  delete simulation_vars.vn;
  delete simulation_vars.ua;
  delete simulation_vars.va;
  // Reset the structs so repeated finalisation is a harmless no-op
  // (default member initialisers set every pointer back to nullptr).
  grid_constants = GridConstants();
  simulation_vars = SimulationVariables();
  cudaError_t cudaStatus = cudaDeviceReset();
  if (cudaStatus != cudaSuccess) {
    fprintf(stderr,
            "[CUDA](Host) cudaDeviceReset failed: %s\n",
            cudaGetErrorString(cudaStatus));
  }
}
|
1c2414172fef0503729872bf1222c70b7b1c043e.hip | // !!! This is a file automatically generated by hipify!!!
/************************************************************************************
* Author: Tao Rui
* : V1.0 Linux
* :
*
************************************************************************************/
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <math.h>
#include <string.h>
#include "unistd.h"
#include "global_variables.cpp"
#include <unistd.h> //linux
//#include <direct.h> //windows
// Debug kernel: one thread per element of an xdim*ydim*zdim volume
// (grid = (xdim, ydim), block = zdim).  Prints every entry whose value
// lies outside [-1e-12, 1e-12], i.e. the effectively non-zero entries.
__global__ void dev_matrix(float *A, int xdim, int ydim, int zdim)
{
  const int ix = blockIdx.x;
  const int iy = blockIdx.y;
  const int iz = threadIdx.x;
  const float value = A[ix * ydim * zdim + iy * zdim + iz];
  if (value <= -1e-12 || value >= 1e-12) {
    printf("[%d][%d][%d] = %e\n", ix, iy, iz, value);
  }
}
// Host helper: launches dev_matrix over the whole xdim*ydim*zdim device
// array A and blocks until it completes, printing any errors.
// BUG FIX: the hipDeviceSynchronize() return value was previously
// discarded, so asynchronous in-kernel faults (e.g. out-of-bounds reads)
// went unreported; it is now checked like the launch itself.
void print_dev_matrix(float *A, int xdim, int ydim, int zdim)
{
  dim3 gridma(xdim, ydim);
  dim3 blockma(zdim);
  hipError_t cudaStatus = hipSuccess;
  hipLaunchKernelGGL(( dev_matrix), dim3(gridma), dim3(blockma), 0, 0, A, xdim, ydim, zdim);
  // Launch-configuration errors.
  cudaStatus = hipGetLastError();
  if (cudaStatus != hipSuccess)
  {
    printf("dev_print Failed: %s\n", hipGetErrorString(cudaStatus));
  }
  // Asynchronous execution errors.
  cudaStatus = hipDeviceSynchronize();
  if (cudaStatus != hipSuccess)
  {
    printf("dev_print sync Failed: %s\n", hipGetErrorString(cudaStatus));
  }
}
/************************************************************************************
* GPU
************************************************************************************/
// Launch geometry for gpu_UHyz: grid(npml, nx - 1), block(nz).
dim3 gridUHyz(npml, nx - 1);
dim3 blockUHyz(nz);
// Updates the CPML auxiliary field UHyz (d(Ez)/dy term of the Hx update)
// on both y-boundary slabs in one pass: the "l" indices address the low-y
// slab (columns 0..npml-1), the "r" indices the high-y slab.
// Sizes: UHyz[nx+1][ny][nz], RBHyz/RAHyz[nx-1][2*npml][nz], Ez[nx+1][ny+1][nz].
// (nx, ny, nz, npml, dy are file-scope constants from global_variables.cpp.)
// MATLAB reference:
//   UHyz(2:nx, [1:npml ny-npml+1:ny], :) = RBHyz .* UHyz(2:nx, [1:npml ny-npml+1:ny], :)
//     + RAHyz ./ dy .* (Ez(2:nx, [2:npml+1 ny-npml+2:ny+1], :) - Ez(2:nx, [1:npml ny-npml+1:ny], :));
__global__ void gpu_UHyz(float *UHyz, float *RBHyz, float *RAHyz, float *Ez)
{
  int ix = blockIdx.y; // ix in [0, nx - 1)
  int iy = blockIdx.x; // iy in [0, npml)
  int iz = threadIdx.x; // iz in [0, nz)
  int lid0 = (ix + 1)*ny*nz + iy * nz + iz; // checked!
  int rid0 = (ix + 1)*ny*nz + (iy + ny - npml) * nz + iz; //checked!
  int lid1 = ix * (2 * npml)*nz + iy * nz + iz; // checked!
  int rid1 = ix * (2 * npml)*nz + (iy + npml) * nz + iz; // checked!
  int lid2 = lid1; // checked!
  int rid2 = rid1; // checked!
  int lid3 = (ix + 1)*(ny + 1)*nz + (iy + 1)*nz + iz; // checked!
  int rid3 = (ix + 1)*(ny + 1)*nz + (iy + ny - npml + 1)*nz + iz; // checked!
  int lid4 = (ix + 1)*(ny + 1)*nz + iy * nz + iz; // checked!
  int rid4 = (ix + 1)*(ny + 1)*nz + (iy + ny - npml)*nz + iz; // checked!
  UHyz[lid0] = UHyz[lid0] * RBHyz[lid1] + RAHyz[lid2] * (Ez[lid3] - Ez[lid4]) / dy;
  UHyz[rid0] = UHyz[rid0] * RBHyz[rid1] + RAHyz[rid2] * (Ez[rid3] - Ez[rid4]) / dy;
}
// Launch geometry for gpu_UHzy: grid(nx - 1, ny), block(npml).
dim3 gridUHzy(nx - 1, ny);
dim3 blockUHzy(npml);
// Updates the CPML auxiliary field UHzy (d(Ey)/dz term of the Hx update)
// on both z-boundary slabs in one pass ("l" = low-z slab, "r" = high-z slab).
// Sizes: UHzy[nx+1][ny][nz], RBHzy/RAHzy[nx-1][ny][2*npml], Ey[nx+1][ny][nz+1].
// MATLAB reference:
//   UHzy(2:nx, :, [1:npml nz-npml+1:nz]) = RBHzy .* UHzy(2:nx, :, [1:npml nz-npml+1:nz])
//     + RAHzy ./ dz .* (Ey(2:nx, :, [2:npml+1 nz-npml+2:nz+1]) - Ey(2:nx, :, [1:npml nz-npml+1:nz]));
__global__ void gpu_UHzy(float *UHzy, float *RBHzy, float *RAHzy, float *Ey)
{
  int ix = blockIdx.x; // ix in [0, nx - 1)
  int iy = blockIdx.y; // iy in [0, ny)
  int iz = threadIdx.x; // iz in [0, npml)  (comment fixed: was mislabelled "ix")
  int lid0 = (ix + 1) * ny * nz + iy * nz + iz; //checked!
  int rid0 = (ix + 1) * ny * nz + iy * nz + iz + nz - npml; //checked!
  int lid1 = ix * ny * (2 * npml) + iy * (2 * npml) + iz; //checked!
  int rid1 = ix * ny * (2 * npml) + iy * (2 * npml) + iz + npml; //checked!
  int lid2 = lid1;
  int rid2 = rid1;
  int lid4 = (ix + 1) * ny * (nz + 1) + iy * (nz + 1) + iz; //checked!
  int rid4 = (ix + 1) * ny * (nz + 1) + iy * (nz + 1) + iz + nz - npml; //checked!
  int lid3 = lid4 + 1;
  int rid3 = rid4 + 1;
  UHzy[lid0] = UHzy[lid0] * RBHzy[lid1] + RAHzy[lid2] * (Ey[lid3] - Ey[lid4]) / dz;
  UHzy[rid0] = UHzy[rid0] * RBHzy[rid1] + RAHzy[rid2] * (Ey[rid3] - Ey[rid4]) / dz;
}
// Launch geometry for gpu_UHzx: grid(nx, ny - 1), block(npml).
dim3 gridUHzx(nx, ny - 1);
dim3 blockUHzx(npml);
// Updates the CPML auxiliary field UHzx (d(Ex)/dz term of the Hy update)
// on both z-boundary slabs in one pass ("l" = low-z slab, "r" = high-z slab).
// Sizes: UHzx[nx][ny+1][nz], RBHzx/RAHzx[nx][ny-1][2*npml], Ex[nx][ny+1][nz+1].
// MATLAB reference:
//   UHzx(:, 2:ny, [1:npml nz-npml+1:nz]) = RBHzx .* UHzx(:, 2:ny, [1:npml nz-npml+1:nz])
//     + RAHzx ./ dz .* (Ex(:, 2:ny, [2:npml+1 nz-npml+2:nz+1]) - Ex(:, 2:ny, [1:npml nz-npml+1:nz]));
__global__ void gpu_UHzx(float *UHzx, float *RBHzx, float *RAHzx, float *Ex)
{
  int ix = blockIdx.x; // ix in [0, nx)
  int iy = blockIdx.y; // iy in [0, ny - 1)
  int iz = threadIdx.x; // iz in [0, npml)
  int lid0 = ix * (ny + 1) * nz + (iy + 1) * nz + iz; // checked!
  int rid0 = ix * (ny + 1) * nz + (iy + 1) * nz + iz + nz - npml; // checked!
  int lid1 = ix * (ny - 1) * (2 * npml) + iy * (2 * npml) + iz; // checked!
  int rid1 = ix * (ny - 1) * (2 * npml) + iy * (2 * npml) + iz + npml; // checked!
  int lid2 = lid1;
  int rid2 = rid1;
  int lid4 = ix * (ny + 1) * (nz + 1) + (iy + 1) * (nz + 1) + iz; // checked!
  int rid4 = ix * (ny + 1) * (nz + 1) + (iy + 1) * (nz + 1) + iz + nz - npml; // checked!
  int lid3 = lid4 + 1;
  int rid3 = rid4 + 1;
  UHzx[lid0] = UHzx[lid0] * RBHzx[lid1] + RAHzx[lid2] * (Ex[lid3] - Ex[lid4]) / dz;
  UHzx[rid0] = UHzx[rid0] * RBHzx[rid1] + RAHzx[rid2] * (Ex[rid3] - Ex[rid4]) / dz;
}
// Launch geometry for gpu_UHxz: grid(npml, ny - 1), block(nz).
dim3 gridUHxz(npml, ny - 1);
dim3 blockUHxz(nz);
// Updates the CPML auxiliary field UHxz (d(Ez)/dx term of the Hy update)
// on both x-boundary slabs in one pass ("l" = low-x slab, "r" = high-x slab).
// Sizes: UHxz[nx][ny+1][nz], RBHxz/RAHxz[2*npml][ny-1][nz], Ez[nx+1][ny+1][nz].
// MATLAB reference:
//   UHxz([1:npml nx-npml+1:nx], 2:ny, :) = RBHxz .* UHxz([1:npml nx-npml+1:nx], 2:ny, :)
//     + RAHxz ./ dx .* (Ez([2:npml+1 nx-npml+2:nx+1], 2:ny, :) - Ez([1:npml nx-npml+1:nx], 2:ny, :));
__global__ void gpu_UHxz(float *UHxz, float *RBHxz, float *RAHxz, float *Ez)
{
  int ix = blockIdx.x; // ix in [0, npml)
  int iy = blockIdx.y; // iy in [0, ny - 1)
  int iz = threadIdx.x; // iz in [0, nz)
  int lid0 = ix * (ny + 1) * nz + (iy + 1) * nz + iz; // checked!
  int rid0 = (ix + nx - npml) * (ny + 1) * nz + (iy + 1) * nz + iz; // checked!
  int lid1 = ix * (ny - 1) * nz + iy * nz + iz; // checked!
  int rid1 = (ix + npml) * (ny - 1) * nz + iy * nz + iz; // checked!
  int lid2 = lid1;
  int rid2 = rid1;
  int lid4 = ix * (ny + 1) * nz + (iy + 1) * nz + iz; // checked!
  int rid4 = (ix + nx - npml) * (ny + 1) * nz + (iy + 1) * nz + iz; // checked!
  int lid3 = lid4 + (ny + 1) * nz;
  int rid3 = rid4 + (ny + 1) * nz;
  UHxz[lid0] = UHxz[lid0] * RBHxz[lid1] + RAHxz[lid2] * (Ez[lid3] - Ez[lid4]) / dx;
  UHxz[rid0] = UHxz[rid0] * RBHxz[rid1] + RAHxz[rid2] * (Ez[rid3] - Ez[rid4]) / dx;
}
// Launch geometry for gpu_UHxy: grid(npml, ny), block(nz - 1).
dim3 gridUHxy(npml, ny);
dim3 blockUHxy(nz - 1);
// Updates the CPML auxiliary field UHxy (d(Ey)/dx term of the Hz update)
// on both x-boundary slabs in one pass ("l" = low-x slab, "r" = high-x slab).
// Sizes: UHxy[nx][ny][nz+1], RBHxy/RAHxy[2*npml][ny][nz-1], Ey[nx+1][ny][nz+1].
// MATLAB reference:
//   UHxy([1:npml nx-npml+1:nx], :, 2:nz) = RBHxy .* UHxy([1:npml nx-npml+1:nx], :, 2:nz)
//     + RAHxy ./ dx .* (Ey([2:npml+1 nx-npml+2:nx+1], :, 2:nz) - Ey([1:npml nx-npml+1:nx], :, 2:nz));
__global__ void gpu_UHxy(float *UHxy, float *RBHxy, float *RAHxy, float *Ey)
{
  int ix = blockIdx.x; // ix in [0, npml)
  int iy = blockIdx.y; // iy in [0, ny)
  int iz = threadIdx.x; // iz in [0, nz - 1)
  int lid0 = ix * ny * (nz + 1) + iy * (nz + 1) + iz + 1; // checked!
  int rid0 = (ix + nx - npml) * ny * (nz + 1) + iy * (nz + 1) + iz + 1; //checked
  int lid1 = ix * ny * (nz - 1) + iy * (nz - 1) + iz; // checked!
  int rid1 = (ix + npml) * ny * (nz - 1) + iy * (nz - 1) + iz; // checked!
  int lid2 = lid1;
  int rid2 = rid1;
  int lid4 = ix * ny * (nz + 1) + iy * (nz + 1) + iz + 1; // checked!
  int rid4 = (ix + nx - npml) * ny * (nz + 1) + iy * (nz + 1) + iz + 1; // checked!
  int lid3 = lid4 + ny * (nz + 1);
  int rid3 = rid4 + ny * (nz + 1);
  UHxy[lid0] = UHxy[lid0] * RBHxy[lid1] + RAHxy[lid2] * (Ey[lid3] - Ey[lid4]) / dx;
  UHxy[rid0] = UHxy[rid0] * RBHxy[rid1] + RAHxy[rid2] * (Ey[rid3] - Ey[rid4]) / dx;
}
// Launch geometry for gpu_UHyx: grid(npml, nx), block(nz - 1).
dim3 gridUHyx(npml, nx);
dim3 blockUHyx(nz - 1);
// Updates the CPML auxiliary field UHyx (d(Ex)/dy term of the Hz update)
// on both y-boundary slabs in one pass ("l" = low-y slab, "r" = high-y slab).
// Sizes: UHyx[nx][ny][nz+1], RBHyx/RAHyx[nx][2*npml][nz-1], Ex[nx][ny+1][nz+1].
// MATLAB reference:
//   UHyx(:, [1:npml ny-npml+1:ny], 2:nz) = RBHyx .* UHyx(:, [1:npml ny-npml+1:ny], 2:nz)
//     + RAHyx ./ dy .* (Ex(:, [2:npml+1 ny-npml+2:ny+1], 2:nz) - Ex(:, [1:npml ny-npml+1:ny], 2:nz));
__global__ void gpu_UHyx(float *UHyx, float *RBHyx, float *RAHyx, float *Ex)
{
  int ix = blockIdx.y; // ix in [0, nx)
  int iy = blockIdx.x; // iy in [0, npml)
  int iz = threadIdx.x; // iz in [0, nz - 1)
  int lid0 = ix * ny * (nz + 1) + iy * (nz + 1) + iz + 1; // checked!
  int rid0 = ix * ny * (nz + 1) + (iy + ny - npml) * (nz + 1) + iz + 1; //checked!
  int lid1 = ix * (2 * npml) * (nz - 1) + iy * (nz - 1) + iz; // checked!
  int rid1 = ix * (2 * npml) * (nz - 1) + (iy + npml) * (nz - 1) + iz; // checked!
  int lid2 = lid1; // checked!
  int rid2 = rid1; // checked!
  int lid4 = ix * (ny + 1) * (nz + 1) + iy * (nz + 1) + iz + 1; // checked!
  int rid4 = ix * (ny + 1) * (nz + 1) + (iy + ny - npml) * (nz + 1) + iz + 1; // checked!
  int lid3 = lid4 + (nz + 1); // checked!
  int rid3 = rid4 + (nz + 1); // checked!
  UHyx[lid0] = UHyx[lid0] * RBHyx[lid1] + RAHyx[lid2] * (Ex[lid3] - Ex[lid4]) / dy;
  UHyx[rid0] = UHyx[rid0] * RBHyx[rid1] + RAHyx[rid2] * (Ex[rid3] - Ex[rid4]) / dy;
}
// Launch geometry for gpu_Hx: grid(nx - 1, ny), block(nz).
dim3 gridHx(nx - 1, ny);
dim3 blockHx(nz);
// FDTD magnetic-field update for Hx over the interior Hx(2:nx, :, :)
// (nx-1 * ny * nz points), combining the curl of E with the CPML
// auxiliary terms UHyz/UHzy computed by the kernels above.
__global__ void gpu_Hx(float *Hx, float *CPHx, float *CQHx, float *ky_Hx, float *kz_Hx, float *Ez, float *Ey, float *UHyz, float *UHzy)
{
  int ix = blockIdx.x + 1;
  int iy = blockIdx.y;
  int iz = threadIdx.x;
  int idx = ix * ny * nz + iy * nz + iz;
  int idxEz = ix * (ny + 1)*nz + iy * nz + iz;
  int idxEy = ix * ny * (nz + 1) + iy * (nz + 1) + iz;
  int deltaEz = nz; // +1 step along y in the Ez layout
  int deltaEy = 1;  // +1 step along z in the Ey layout
  float CQH = CQHx[idx];
  Hx[idx] = Hx[idx] * CPHx[idx]
    - CQH / ky_Hx[idx] * (Ez[idxEz + deltaEz] - Ez[idxEz]) / dy
    + CQH / kz_Hx[idx] * (Ey[idxEy + deltaEy] - Ey[idxEy]) / dz
    - CQH * UHyz[idx]
    + CQH * UHzy[idx];
}
// Launch geometry for gpu_Hy: grid(nx, ny - 1), block(nz).
dim3 gridHy(nx, ny - 1);
dim3 blockHy(nz);
// FDTD magnetic-field update for Hy over the interior Hy(:, 2:ny, :)
// (nx * ny-1 * nz points), combining the curl of E with the CPML
// auxiliary terms UHzx/UHxz.
__global__ void gpu_Hy(float *Hy, float *CPHy, float *CQHy, float *kz_Hy, float *kx_Hy, float *Ex, float *Ez, float *UHzx, float *UHxz)
{
  int ix = blockIdx.x;
  int iy = blockIdx.y + 1;
  int iz = threadIdx.x;
  int idx = ix * (ny + 1)*nz + iy * nz + iz;
  int idxEx = ix * (ny + 1)*(nz + 1) + iy * (nz + 1) + iz;
  int idxEz = ix * (ny + 1)*nz + iy * nz + iz;
  int deltaEx = 1;            // +1 step along z in the Ex layout
  int deltaEz = (ny + 1)*nz;  // +1 step along x in the Ez layout
  float CQH = CQHy[idx];
  Hy[idx] = Hy[idx] * CPHy[idx]
    - CQH / kz_Hy[idx] * (Ex[idxEx + deltaEx] - Ex[idxEx]) / dz
    + CQH / kx_Hy[idx] * (Ez[idxEz + deltaEz] - Ez[idxEz]) / dx
    - CQH * UHzx[idx]
    + CQH * UHxz[idx];
}
// Launch geometry for gpu_Hz: grid(nx, ny), block(nz - 1).
dim3 gridHz(nx, ny);
dim3 blockHz(nz - 1);
// FDTD magnetic-field update for Hz over the interior Hz(:, :, 2:nz)
// (nx * ny * nz-1 points); Hz is stored with extent nx * ny * (nz+1).
// Combines the curl of E with the CPML auxiliary terms UHxy/UHyx.
__global__ void gpu_Hz(float *Hz, float *CPHz, float *CQHz, float *kx_Hz, float *ky_Hz, float *Ey, float *Ex, float *UHxy, float *UHyx)
{
  int ix = blockIdx.x;
  int iy = blockIdx.y;
  int iz = threadIdx.x + 1;
  int idx = ix * ny * (nz + 1) + iy * (nz + 1) + iz;
  int idxEy = ix * ny * (nz + 1) + iy * (nz + 1) + iz;
  int idxEx = ix * (ny + 1) * (nz + 1) + iy * (nz + 1) + iz;
  int deltaEy = ny * (nz + 1); // +1 step along x in the Ey layout
  int deltaEx = nz + 1;        // +1 step along y in the Ex layout
  float CQH = CQHz[idx];
  Hz[idx] = Hz[idx] * CPHz[idx]
    - CQH / kx_Hz[idx] * (Ey[idxEy + deltaEy] - Ey[idxEy]) / dx
    + CQH / ky_Hz[idx] * (Ex[idxEx + deltaEx] - Ex[idxEx]) / dy
    - CQH * UHxy[idx]
    + CQH * UHyx[idx];
}
// Launch geometry for gpu_UEyz: grid(npml - 1, nx), block(nz - 1).
dim3 gridUEyz(npml - 1, nx);
dim3 blockUEyz(nz - 1);
// Updates the CPML auxiliary field UEyz (d(Hz)/dy term of the Ex update)
// on both y-boundary slabs in one pass ("l" = low-y slab, "r" = high-y slab).
// Sizes: UEyz[nx][ny+1][nz+1], RBEyz/RAEyz[nx][2*(npml-1)][nz-1], Hz[nx][ny][nz+1].
// MATLAB reference:
//   UEyz(:, [2:npml ny-npml+2:ny], 2:nz) = RBEyz .* UEyz(:, [2:npml ny-npml+2:ny], 2:nz)
//     + RAEyz ./ dy .* (Hz(:, [2:npml ny-npml+2:ny], 2:nz) - Hz(:, [1:npml-1 ny-npml+1:ny-1], 2:nz));
__global__ void gpu_UEyz(float *UEyz, float *RBEyz, float *RAEyz, float *Hz)
{
  int ix = blockIdx.y; // ix in [0, nx)
  int iy = blockIdx.x; // iy in [0, npml - 1)
  int iz = threadIdx.x; // iz in [0, nz - 1)
  int lid0 = ix * (ny + 1) * (nz + 1) + (iy + 1) * (nz + 1) + (iz + 1); // checked!
  int rid0 = ix * (ny + 1) * (nz + 1) + (iy + 1 + ny - npml) * (nz + 1) + (iz + 1); //checked!
  int lid1 = ix * (2 * (npml - 1)) * (nz - 1) + iy * (nz - 1) + iz; // checked!
  int rid1 = ix * (2 * (npml - 1)) * (nz - 1) + (iy + npml - 1) * (nz - 1) + iz; // checked!
  int lid2 = lid1; // checked!
  int rid2 = rid1; // checked!
  int lid4 = ix * ny * (nz + 1) + iy * (nz + 1) + (iz + 1); // checked!
  int rid4 = ix * ny * (nz + 1) + (iy + ny - npml) * (nz + 1) + (iz + 1); // checked!
  int lid3 = lid4 + (nz + 1); // checked!
  int rid3 = rid4 + (nz + 1); // checked!
  UEyz[lid0] = UEyz[lid0] * RBEyz[lid1] + RAEyz[lid2] * (Hz[lid3] - Hz[lid4]) / dy;
  UEyz[rid0] = UEyz[rid0] * RBEyz[rid1] + RAEyz[rid2] * (Hz[rid3] - Hz[rid4]) / dy;
}
// Launch geometry for gpu_UEyx: grid(npml - 1, nx), block(nz - 1).
// NOTE(review): the grid covers nx blocks and nz-1 threads, while the
// MATLAB reference spans 2:nx and all of 1:nz — confirm the launch bounds
// against the caller (out of view here).
dim3 gridUEyx(npml - 1, nx);
dim3 blockUEyx(nz - 1);
// Updates the CPML auxiliary field UEyx (d(Hx)/dy term of the Ez update)
// on both y-boundary slabs in one pass ("l" = low-y slab, "r" = high-y slab).
// Sizes: UEyx[nx+1][ny+1][nz], RBEyx/RAEyx[nx-1][2*(npml-1)][nz], Hx[nx+1][ny][nz].
// MATLAB reference:
//   UEyx(2:nx, [2:npml ny-npml+2:ny], :) = RBEyx .* UEyx(2:nx, [2:npml ny-npml+2:ny], :)
//     + RAEyx ./ dy .* (Hx(2:nx, [2:npml ny-npml+2:ny], :) - Hx(2:nx, [1:npml-1 ny-npml+1:ny-1], :));
__global__ void gpu_UEyx(float *UEyx, float *RBEyx, float *RAEyx, float *Hx)
{
  int ix = blockIdx.y; // ix in [0, nx)
  int iy = blockIdx.x; // iy in [0, npml - 1)
  int iz = threadIdx.x; // iz in [0, nz - 1)
  int lid0 = (ix + 1) * (ny + 1) * nz + (iy + 1) * nz + iz; // checked!
  int rid0 = (ix + 1) * (ny + 1) * nz + (iy + 1 + ny - npml) * nz + iz; //checked!
  int lid1 = ix * (2 * (npml - 1)) * nz + iy * nz + iz; // checked!
  int rid1 = ix * (2 * (npml - 1)) * nz + (iy + npml - 1) * nz + iz; // checked!
  int lid2 = lid1; // checked!
  int rid2 = rid1; // checked!
  int lid4 = (ix + 1) * ny * nz + iy * nz + iz; // checked!
  int rid4 = (ix + 1) * ny * nz + (iy + ny - npml) * nz + iz; // checked!
  int lid3 = lid4 + nz; // checked!
  int rid3 = rid4 + nz; // checked!
  UEyx[lid0] = UEyx[lid0] * RBEyx[lid1] + RAEyx[lid2] * (Hx[lid3] - Hx[lid4]) / dy;
  UEyx[rid0] = UEyx[rid0] * RBEyx[rid1] + RAEyx[rid2] * (Hx[rid3] - Hx[rid4]) / dy;
}
// Launch geometry for gpu_UExy: grid(npml - 1, ny - 1), block(nz).
dim3 gridUExy(npml - 1, ny - 1);
dim3 blockUExy(nz);
// Updates the CPML auxiliary field UExy (d(Hy)/dx term of the Ez update)
// on both x-boundary slabs in one pass ("l" = low-x slab, "r" = high-x slab).
// Sizes: UExy[nx+1][ny+1][nz], RBExy/RAExy[2*(npml-1)][ny-1][nz], Hy[nx][ny+1][nz].
// MATLAB reference:
//   UExy([2:npml nx-npml+2:nx], 2:ny, :) = RBExy .* UExy([2:npml nx-npml+2:nx], 2:ny, :)
//     + RAExy ./ dx .* (Hy([2:npml nx-npml+2:nx], 2:ny, :) - Hy([1:npml-1 nx-npml+1:nx-1], 2:ny, :));
__global__ void gpu_UExy(float *UExy, float *RBExy, float *RAExy, float *Hy)
{
  int ix = blockIdx.x; // ix in [0, npml - 1)
  int iy = blockIdx.y; // iy in [0, ny - 1)
  int iz = threadIdx.x; // iz in [0, nz)
  int lid0 = (ix + 1) * (ny + 1) * nz + (iy + 1) * nz + iz; // checked!
  int rid0 = (ix + 1 + nx - npml) * (ny + 1) * nz + (iy + 1) * nz + iz; //checked!
  int lid1 = ix * (ny - 1) * nz + iy * nz + iz; // checked!
  int rid1 = (ix + npml - 1) * (ny - 1) * nz + iy * nz + iz; // checked!
  int lid2 = lid1; // checked!
  int rid2 = rid1; // checked!
  int lid4 = ix * (ny + 1) * nz + (iy + 1) * nz + iz; // checked!
  int rid4 = (ix + nx - npml) * (ny + 1) * nz + (iy + 1) * nz + iz; // checked!
  int lid3 = lid4 + (ny + 1) * nz; // checked!
  int rid3 = rid4 + (ny + 1) * nz; // checked!
  UExy[lid0] = UExy[lid0] * RBExy[lid1] + RAExy[lid2] * (Hy[lid3] - Hy[lid4]) / dx;
  UExy[rid0] = UExy[rid0] * RBExy[rid1] + RAExy[rid2] * (Hy[rid3] - Hy[rid4]) / dx;
}
// Launch geometry for gpu_UExz: grid(npml - 1, ny), block(nz - 1).
dim3 gridUExz(npml - 1, ny);
dim3 blockUExz(nz - 1);
// Updates the CPML auxiliary field UExz (d(Hz)/dx term of the Ey update)
// on both x-boundary slabs in one pass ("l" = low-x slab, "r" = high-x slab).
// Sizes: UExz[nx+1][ny][nz+1], RBExz/RAExz[2*(npml-1)][ny][nz-1], Hz[nx][ny][nz+1].
// MATLAB reference:
//   UExz([2:npml nx-npml+2:nx], :, 2:nz) = RBExz .* UExz([2:npml nx-npml+2:nx], :, 2:nz)
//     + RAExz ./ dx .* (Hz([2:npml nx-npml+2:nx], :, 2:nz) - Hz([1:npml-1 nx-npml+1:nx-1], :, 2:nz));
__global__ void gpu_UExz(float *UExz, float *RBExz, float *RAExz, float *Hz)
{
  int ix = blockIdx.x; // ix in [0, npml - 1)
  int iy = blockIdx.y; // iy in [0, ny)
  int iz = threadIdx.x; // iz in [0, nz - 1)
  int lid0 = (ix + 1) * ny * (nz + 1) + iy * (nz + 1) + (iz + 1); // checked!
  int rid0 = (ix + 1 + nx - npml) * ny * (nz + 1) + iy * (nz + 1) + (iz + 1); //checked!
  int lid1 = ix * ny * (nz - 1) + iy * (nz - 1) + iz; // checked!
  int rid1 = (ix + npml - 1) * ny * (nz - 1) + iy * (nz - 1) + iz; // checked!
  int lid2 = lid1; // checked!
  int rid2 = rid1; // checked!
  int lid4 = ix * ny * (nz + 1) + iy * (nz + 1) + (iz + 1); // checked!
  int rid4 = (ix + nx - npml) * ny * (nz + 1) + iy * (nz + 1) + (iz + 1); // checked!
  int lid3 = lid4 + ny * (nz + 1); // checked!
  int rid3 = rid4 + ny * (nz + 1); // checked!
  UExz[lid0] = UExz[lid0] * RBExz[lid1] + RAExz[lid2] * (Hz[lid3] - Hz[lid4]) / dx;
  UExz[rid0] = UExz[rid0] * RBExz[rid1] + RAExz[rid2] * (Hz[rid3] - Hz[rid4]) / dx;
}
// Launch geometry for gpu_UEzx: grid(nx - 1, ny), block(npml - 1).
dim3 gridUEzx(nx - 1, ny);
dim3 blockUEzx(npml - 1);
// Updates the CPML auxiliary field UEzx (d(Hx)/dz term of the Ey update)
// on both z-boundary slabs in one pass ("l" = low-z slab, "r" = high-z slab).
// Sizes: UEzx[nx+1][ny][nz+1], RBEzx/RAEzx[nx-1][ny][2*(npml-1)], Hx[nx+1][ny][nz].
// MATLAB reference:
//   UEzx(2:nx, :, [2:npml nz-npml+2:nz]) = RBEzx .* UEzx(2:nx, :, [2:npml nz-npml+2:nz])
//     + RAEzx ./ dz .* (Hx(2:nx, :, [2:npml nz-npml+2:nz]) - Hx(2:nx, :, [1:npml-1 nz-npml+1:nz-1]));
__global__ void gpu_UEzx(float *UEzx, float *RBEzx, float *RAEzx, float *Hx)
{
  int ix = blockIdx.x; // ix in [0, nx - 1)   (comment fixed: ranges follow the launch geometry)
  int iy = blockIdx.y; // iy in [0, ny)
  int iz = threadIdx.x; // iz in [0, npml - 1)
  int lid0 = (ix + 1) * ny * (nz + 1) + iy * (nz + 1) + (iz + 1); // checked!
  int rid0 = (ix + 1) * ny * (nz + 1) + iy * (nz + 1) + (iz + 1 + nz - npml); //checked!
  int lid1 = ix * ny * (2 * (npml - 1)) + iy * (2 * (npml - 1)) + iz; // checked!
  int rid1 = ix * ny * (2 * (npml - 1)) + iy * (2 * (npml - 1)) + (iz + npml - 1); // checked!
  int lid2 = lid1; // checked!
  int rid2 = rid1; // checked!
  int lid4 = (ix + 1) * ny * nz + iy * nz + iz; // checked!
  int rid4 = (ix + 1) * ny * nz + iy * nz + (iz + nz - npml); // checked!
  int lid3 = lid4 + 1; // checked!
  int rid3 = rid4 + 1; // checked!
  UEzx[lid0] = UEzx[lid0] * RBEzx[lid1] + RAEzx[lid2] * (Hx[lid3] - Hx[lid4]) / dz;
  UEzx[rid0] = UEzx[rid0] * RBEzx[rid1] + RAEzx[rid2] * (Hx[rid3] - Hx[rid4]) / dz;
}
dim3 gridUEzy(nx, ny - 1);  // gpu_UEzy launch: one block per (x, y) node of the updated region (all x, MATLAB 2:ny)
dim3 blockUEzy(npml - 1);   // one thread per PML z layer (each thread updates both the low-z and high-z slab)
__global__ void gpu_UEzy(float *UEzy, float *RBEzy, float *RAEzy, float *Hy)
{
	/*
	CPML convolution update for UEzy (z-derivative of Hy term) inside the two
	z-direction PML slabs. Each thread updates one low-z cell and its paired
	high-z cell.
	Launch configuration:
	dim3 blockUEzy(npml - 1);
	dim3 gridUEzy(nx, ny - 1);
	Array shapes (per the original author's notes; see read_data_from_txt):
	in0 UEzy   nx, ny + 1, nz + 1
	in1 RBEzy  nx, ny - 1, 2*(npml-1)
	in2 RAEzy  nx, ny - 1, 2*(npml-1)
	in3 Hy     nx, ny + 1, nz
	Work volume: nx * (ny-1) * (npml-1)
	MATLAB equivalent:
	UEzy(:, 2:ny, [2:npml nz-npml+2:nz])=RBEzy.*UEzy(:, 2:ny, [2:npml nz-npml+2:nz])...
	+RAEzy./dz.*(Hy(:, 2:ny, [2:npml nz-npml+2:nz])-Hy(:, 2:ny, [1:npml-1 nz-npml+1:nz-1]));
	*/
	int ix = blockIdx.x; // ix in [0, nx): full x extent
	int iy = blockIdx.y; // iy in [0, ny - 1): MATLAB y-range 2:ny
	int iz = threadIdx.x; // iz in [0, npml - 1): one thread per PML z layer
	int lid0 = ix * (ny + 1) * (nz + 1) + (iy + 1) * (nz + 1) + (iz + 1); // UEzy at (ix, iy+1, iz+1): low-z slab
	int rid0 = ix * (ny + 1) * (nz + 1) + (iy + 1) * (nz + 1) + (iz + 1 + nz - npml); // UEzy, z shifted by nz-npml: high-z slab
	int lid1 = ix * (ny - 1) * (2 * (npml - 1)) + iy * (2 * (npml - 1)) + iz; // RBEzy, low half of its packed z dimension
	int rid1 = ix * (ny - 1) * (2 * (npml - 1)) + iy * (2 * (npml - 1)) + (iz + npml - 1); // RBEzy, high half
	int lid2 = lid1; // RAEzy shares RBEzy's layout
	int rid2 = rid1;
	int lid4 = ix * (ny + 1) * nz + (iy + 1) * nz + iz; // Hy at (ix, iy+1, iz): low-z slab
	int rid4 = ix * (ny + 1) * nz + (iy + 1) * nz + (iz + nz - npml); // Hy, high-z slab
	int lid3 = lid4 + 1; // one z step ahead of lid4 (for the z finite difference)
	int rid3 = rid4 + 1;
	UEzy[lid0] = UEzy[lid0] * RBEzy[lid1] + RAEzy[lid2] * (Hy[lid3] - Hy[lid4]) / dz;
	UEzy[rid0] = UEzy[rid0] * RBEzy[rid1] + RAEzy[rid2] * (Hy[rid3] - Hy[rid4]) / dz;
}
dim3 gridEx(nx, ny - 1);  // gpu_Ex launch: covers Ex(:, 2:ny, 2:nz)
dim3 blockEx(nz - 1);     // one thread per interior z index
__global__ void gpu_Ex(float *Ex, float *CAEx, float *CBEx, float *ky_Ex, float *kz_Ex, float *Hz, float *Hy, float *UEyz, float *UEzy)
{
	// FDTD update of the interior Ex nodes, Ex(:, 2:ny, 2:nz):
	// curl terms dHz/dy and dHy/dz plus the CPML accumulators UEyz / UEzy.
	// Launch: gridEx(nx, ny-1) x blockEx(nz-1).
	const int gx = blockIdx.x;          // x index, [0, nx)
	const int gy = blockIdx.y + 1;      // y index, [1, ny)
	const int gz = threadIdx.x + 1;     // z index, [1, nz)
	const int iE = gx * (ny + 1) * (nz + 1) + gy * (nz + 1) + gz;   // Ex/CAEx/CBEx/k*_Ex/UE* use (ny+1, nz+1) strides
	const int iHz = gx * ny * (nz + 1) + gy * (nz + 1) + gz;        // Hz uses (ny, nz+1) strides
	const int iHy = gx * (ny + 1) * nz + gy * nz + gz;              // Hy uses (ny+1, nz) strides
	const float cb = CBEx[iE];
	Ex[iE] = Ex[iE] * CAEx[iE]
		+ cb / ky_Ex[iE] * (Hz[iHz] - Hz[iHz - (nz + 1)]) / dy   // backward difference in y
		- cb / kz_Ex[iE] * (Hy[iHy] - Hy[iHy - 1]) / dz          // backward difference in z
		+ cb * UEyz[iE]
		- cb * UEzy[iE];
}
dim3 gridEy(nx - 1, ny);  // gpu_Ey launch: covers Ey(2:nx, :, 2:nz)
dim3 blockEy(nz - 1);     // one thread per interior z index
__global__ void gpu_Ey(float *Ey, float *CAEy, float *CBEy, float *kz_Ey, float *kx_Ey, float *Hx, float *Hz, float *UEzx, float *UExz)
{
	// FDTD update of the interior Ey nodes, Ey(2:nx, :, 2:nz):
	// curl terms dHx/dz and dHz/dx plus the CPML accumulators UEzx / UExz.
	// Launch: gridEy(nx-1, ny) x blockEy(nz-1).
	const int gx = blockIdx.x + 1;      // x index, [1, nx)
	const int gy = blockIdx.y;          // y index, [0, ny)
	const int gz = threadIdx.x + 1;     // z index, [1, nz)
	const int iE = gx * ny * (nz + 1) + gy * (nz + 1) + gz;    // Ey/CAEy/CBEy/k*_Ey/UE* use (ny, nz+1) strides
	const int iHx = gx * ny * nz + gy * nz + gz;               // Hx uses (ny, nz) strides
	const int iHz = gx * ny * (nz + 1) + gy * (nz + 1) + gz;   // Hz uses (ny, nz+1) strides
	const float cb = CBEy[iE];
	Ey[iE] = Ey[iE] * CAEy[iE]
		+ cb / kz_Ey[iE] * (Hx[iHx] - Hx[iHx - 1]) / dz               // backward difference in z
		- cb / kx_Ey[iE] * (Hz[iHz] - Hz[iHz - ny * (nz + 1)]) / dx   // backward difference in x
		+ cb * UEzx[iE]
		- cb * UExz[iE];
}
dim3 gridEz(nx - 1, ny - 1);  // gpu_Ez launch: covers Ez(2:nx, 2:ny, :)
dim3 blockEz(nz);             // one thread per z index
__global__ void gpu_Ez(float *Ez, float *CAEz, float *CBEz, float *kx_Ez, float *ky_Ez, float *Hy, float *Hx, float *UExy, float *UEyx)
{
	// FDTD update of the interior Ez nodes, Ez(2:nx, 2:ny, :):
	// curl terms dHy/dx and dHx/dy plus the CPML accumulators UExy / UEyx.
	// Launch: gridEz(nx-1, ny-1) x blockEz(nz).
	const int gx = blockIdx.x + 1;      // x index, [1, nx)
	const int gy = blockIdx.y + 1;      // y index, [1, ny)
	const int gz = threadIdx.x;         // z index, [0, nz)
	const int iE = gx * (ny + 1) * nz + gy * nz + gz;   // Ez/CAEz/CBEz/k*_Ez/UE* use (ny+1, nz) strides
	const int iHy = gx * (ny + 1) * nz + gy * nz + gz;  // Hy shares the (ny+1, nz) strides
	const int iHx = gx * ny * nz + gy * nz + gz;        // Hx uses (ny, nz) strides
	const float cb = CBEz[iE];
	Ez[iE] = Ez[iE] * CAEz[iE]
		+ cb / kx_Ez[iE] * (Hy[iHy] - Hy[iHy - (ny + 1) * nz]) / dx   // backward difference in x
		- cb / ky_Ez[iE] * (Hx[iHx] - Hx[iHx - nz]) / dy              // backward difference in y
		+ cb * UExy[iE]
		- cb * UEyx[iE];
}
dim3 grid_zheng_1(npmlc, ny - 2 * npml);         // gpu_zheng_1: x-boundary slabs, npmlc planes x interior y
dim3 grid_zheng_2(nx - 2 * npml, npmlc);         // gpu_zheng_2: y-boundary slabs, interior x x npmlc rows
dim3 grid_zheng_3(nx - 2 * npml, ny - 2 * npml); // gpu_zheng_3: z-boundary slabs, interior (x, y)
dim3 grid_zheng_last(nx - 2 * npml, ny - 2 * npml); // gpu_zheng_last: full interior snapshot
dim3 block_zheng_1(nz - 2 * npml);   // interior z extent
dim3 block_zheng_2(nz - 2 * npml);   // interior z extent
dim3 block_zheng_3(npmlc);           // npmlc z planes per boundary slab
dim3 block_zheng_last(nz - 2 * npml); // interior z extent
__global__ void gpu_zheng_1(
	float *dev_Ex_zheng, float *dev_Ey_zheng, float *dev_Ez_zheng,
	float *dev_Hx_zheng, float *dev_Hy_zheng, float *dev_Hz_zheng,
	float *dev_Ex, float *dev_Ey, float *dev_Ez,
	float *dev_Hx, float *dev_Hy, float *dev_Hz,
	int j)
{
	// Save the two x-boundary slabs (each npmlc cells thick, interior region only)
	// of all six field components into time slot j of the *_zheng snapshot buffers.
	// Launch: grid_zheng_1(npmlc, ny - 2*npml) x block_zheng_1(nz - 2*npml).
	const int ix = blockIdx.x;
	const int iy = blockIdx.y;
	const int iz = threadIdx.x;
	const int nyi = ny - 2 * npml;   // snapshot extent in y
	const int nzi = nz - 2 * npml;   // snapshot extent in z
	// Destination: low-x slab inside slot j, then the paired high-x slab.
	const int sLo = j * (2 * npmlc) * nyi * nzi + ix * nyi * nzi + iy * nzi + iz;
	const int sHi = sLo + npmlc * nyi * nzi;
	// Source: low-x cell (offset npml into the interior) in each field layout.
	const int eX = (ix + npml) * (ny + 1) * (nz + 1) + (iy + npml) * (nz + 1) + (iz + npml);
	const int eY = (ix + npml) * ny * (nz + 1) + (iy + npml) * (nz + 1) + (iz + npml);
	const int eZ = (ix + npml) * (ny + 1) * nz + (iy + npml) * nz + (iz + npml);
	const int hX = (ix + npml) * ny * nz + (iy + npml) * nz + (iz + npml);
	const int hY = eZ;   // Hy uses the same (ny+1, nz) strides as Ez
	const int hZ = eY;   // Hz uses the same (ny, nz+1) strides as Ey
	// Distance (in x planes) from the low-x slab to the high-x slab.
	const int shift = nx - 2 * npml - npmlc;
	dev_Ex_zheng[sLo] = dev_Ex[eX];
	dev_Ey_zheng[sLo] = dev_Ey[eY];
	dev_Ez_zheng[sLo] = dev_Ez[eZ];
	dev_Hx_zheng[sLo] = dev_Hx[hX];
	dev_Hy_zheng[sLo] = dev_Hy[hY];
	dev_Hz_zheng[sLo] = dev_Hz[hZ];
	dev_Ex_zheng[sHi] = dev_Ex[eX + (ny + 1) * (nz + 1) * shift];
	dev_Ey_zheng[sHi] = dev_Ey[eY + ny * (nz + 1) * shift];
	dev_Ez_zheng[sHi] = dev_Ez[eZ + (ny + 1) * nz * shift];
	dev_Hx_zheng[sHi] = dev_Hx[hX + ny * nz * shift];
	dev_Hy_zheng[sHi] = dev_Hy[hY + (ny + 1) * nz * shift];
	dev_Hz_zheng[sHi] = dev_Hz[hZ + ny * (nz + 1) * shift];
}
__global__ void gpu_zheng_2(
	float *dev_Ex_zheng, float *dev_Ey_zheng, float *dev_Ez_zheng,
	float *dev_Hx_zheng, float *dev_Hy_zheng, float *dev_Hz_zheng,
	float *dev_Ex, float *dev_Ey, float *dev_Ez,
	float *dev_Hx, float *dev_Hy, float *dev_Hz,
	int j)
{
	// Save the two y-boundary slabs (each npmlc cells thick, interior region only)
	// of all six field components into time slot j of the *_zheng snapshot buffers.
	// Launch: grid_zheng_2(nx - 2*npml, npmlc) x block_zheng_2(nz - 2*npml).
	const int ix = blockIdx.x;
	const int iy = blockIdx.y;
	const int iz = threadIdx.x;
	const int nxi = nx - 2 * npml;   // snapshot extent in x
	const int nzi = nz - 2 * npml;   // snapshot extent in z
	// Destination: low-y slab inside slot j, then the paired high-y slab.
	const int sLo = j * nxi * (2 * npmlc) * nzi + ix * (2 * npmlc) * nzi + iy * nzi + iz;
	const int sHi = sLo + npmlc * nzi;
	// Source: low-y cell (offset npml into the interior) in each field layout.
	const int eX = (ix + npml) * (ny + 1) * (nz + 1) + (iy + npml) * (nz + 1) + (iz + npml);
	const int eY = (ix + npml) * ny * (nz + 1) + (iy + npml) * (nz + 1) + (iz + npml);
	const int eZ = (ix + npml) * (ny + 1) * nz + (iy + npml) * nz + (iz + npml);
	const int hX = (ix + npml) * ny * nz + (iy + npml) * nz + (iz + npml);
	const int hY = eZ;   // Hy uses the same (ny+1, nz) strides as Ez
	const int hZ = eY;   // Hz uses the same (ny, nz+1) strides as Ey
	// Distance (in y rows) from the low-y slab to the high-y slab;
	// each field multiplies it by its own z-row stride (nz or nz+1).
	const int shift = ny - 2 * npml - npmlc;
	dev_Ex_zheng[sLo] = dev_Ex[eX];
	dev_Ey_zheng[sLo] = dev_Ey[eY];
	dev_Ez_zheng[sLo] = dev_Ez[eZ];
	dev_Hx_zheng[sLo] = dev_Hx[hX];
	dev_Hy_zheng[sLo] = dev_Hy[hY];
	dev_Hz_zheng[sLo] = dev_Hz[hZ];
	dev_Ex_zheng[sHi] = dev_Ex[eX + (nz + 1) * shift];
	dev_Ey_zheng[sHi] = dev_Ey[eY + (nz + 1) * shift];
	dev_Ez_zheng[sHi] = dev_Ez[eZ + nz * shift];
	dev_Hx_zheng[sHi] = dev_Hx[hX + nz * shift];
	dev_Hy_zheng[sHi] = dev_Hy[hY + nz * shift];
	dev_Hz_zheng[sHi] = dev_Hz[hZ + (nz + 1) * shift];
}
__global__ void gpu_zheng_3(
	float *dev_Ex_zheng, float *dev_Ey_zheng, float *dev_Ez_zheng,
	float *dev_Hx_zheng, float *dev_Hy_zheng, float *dev_Hz_zheng,
	float *dev_Ex, float *dev_Ey, float *dev_Ez,
	float *dev_Hx, float *dev_Hy, float *dev_Hz,
	int j)
{
	// Save the two z-boundary slabs (each npmlc cells thick, interior region only)
	// of all six field components into time slot j of the *_zheng snapshot buffers.
	// Launch: grid_zheng_3(nx - 2*npml, ny - 2*npml) x block_zheng_3(npmlc).
	const int ix = blockIdx.x;
	const int iy = blockIdx.y;
	const int iz = threadIdx.x;
	const int nxi = nx - 2 * npml;   // snapshot extent in x
	const int nyi = ny - 2 * npml;   // snapshot extent in y
	// Destination: low-z slab inside slot j, then the paired high-z slab.
	const int sLo = j * nxi * nyi * (2 * npmlc) + ix * nyi * (2 * npmlc) + iy * (2 * npmlc) + iz;
	const int sHi = sLo + npmlc;
	// Source: low-z cell (offset npml into the interior) in each field layout.
	const int eX = (ix + npml) * (ny + 1) * (nz + 1) + (iy + npml) * (nz + 1) + (iz + npml);
	const int eY = (ix + npml) * ny * (nz + 1) + (iy + npml) * (nz + 1) + (iz + npml);
	const int eZ = (ix + npml) * (ny + 1) * nz + (iy + npml) * nz + (iz + npml);
	const int hX = (ix + npml) * ny * nz + (iy + npml) * nz + (iz + npml);
	const int hY = eZ;   // Hy uses the same (ny+1, nz) strides as Ez
	const int hZ = eY;   // Hz uses the same (ny, nz+1) strides as Ey
	// Distance (in z cells, stride 1 for every field) to the high-z slab.
	const int shift = nz - 2 * npml - npmlc;
	dev_Ex_zheng[sLo] = dev_Ex[eX];
	dev_Ey_zheng[sLo] = dev_Ey[eY];
	dev_Ez_zheng[sLo] = dev_Ez[eZ];
	dev_Hx_zheng[sLo] = dev_Hx[hX];
	dev_Hy_zheng[sLo] = dev_Hy[hY];
	dev_Hz_zheng[sLo] = dev_Hz[hZ];
	dev_Ex_zheng[sHi] = dev_Ex[eX + shift];
	dev_Ey_zheng[sHi] = dev_Ey[eY + shift];
	dev_Ez_zheng[sHi] = dev_Ez[eZ + shift];
	dev_Hx_zheng[sHi] = dev_Hx[hX + shift];
	dev_Hy_zheng[sHi] = dev_Hy[hY + shift];
	dev_Hz_zheng[sHi] = dev_Hz[hZ + shift];
}
__global__ void gpu_zheng_last(
	float *dev_Ex_zheng, float *dev_Ey_zheng, float *dev_Ez_zheng,
	float *dev_Hx_zheng, float *dev_Hy_zheng, float *dev_Hz_zheng,
	float *dev_Ex, float *dev_Ey, float *dev_Ez,
	float *dev_Hx, float *dev_Hy, float *dev_Hz)
{
	// Snapshot the whole interior region (PML frame stripped) of all six fields
	// into the compact *_zheng buffers.
	// Launch: grid_zheng_last(nx - 2*npml, ny - 2*npml) x block_zheng_last(nz - 2*npml).
	const int ix = blockIdx.x;
	const int iy = blockIdx.y;
	const int iz = threadIdx.x;
	const int nyi = ny - 2 * npml;
	const int nzi = nz - 2 * npml;
	const int s = ix * nyi * nzi + iy * nzi + iz;   // compact snapshot index
	// Source indices (offset npml into each field's interior).
	const int eX = (ix + npml) * (ny + 1) * (nz + 1) + (iy + npml) * (nz + 1) + (iz + npml);
	const int eY = (ix + npml) * ny * (nz + 1) + (iy + npml) * (nz + 1) + (iz + npml);
	const int eZ = (ix + npml) * (ny + 1) * nz + (iy + npml) * nz + (iz + npml);
	const int hX = (ix + npml) * ny * nz + (iy + npml) * nz + (iz + npml);
	const int hY = eZ;   // Hy uses the same (ny+1, nz) strides as Ez
	const int hZ = eY;   // Hz uses the same (ny, nz+1) strides as Ey
	dev_Ex_zheng[s] = dev_Ex[eX];
	dev_Ey_zheng[s] = dev_Ey[eY];
	dev_Ez_zheng[s] = dev_Ez[eZ];
	dev_Hx_zheng[s] = dev_Hx[hX];
	dev_Hy_zheng[s] = dev_Hy[hY];
	dev_Hz_zheng[s] = dev_Hz[hZ];
}
__global__ void gpu_back_zheng_1(
	float *dev_Ex_zheng, float *dev_Ey_zheng, float *dev_Ez_zheng,
	float *dev_Hx_zheng, float *dev_Hy_zheng, float *dev_Hz_zheng,
	float *dev_Ex, float *dev_Ey, float *dev_Ez,
	float *dev_Hx, float *dev_Hy, float *dev_Hz,
	int j)
{
	// Inverse of gpu_zheng_1: restore the two x-boundary slabs of all six field
	// components from time slot j of the *_zheng snapshot buffers.
	// Launch: grid_zheng_1(npmlc, ny - 2*npml) x block_zheng_1(nz - 2*npml).
	const int ix = blockIdx.x;
	const int iy = blockIdx.y;
	const int iz = threadIdx.x;
	const int nyi = ny - 2 * npml;   // snapshot extent in y
	const int nzi = nz - 2 * npml;   // snapshot extent in z
	// Snapshot source: low-x slab inside slot j, then the paired high-x slab.
	const int sLo = j * (2 * npmlc) * nyi * nzi + ix * nyi * nzi + iy * nzi + iz;
	const int sHi = sLo + npmlc * nyi * nzi;
	// Field destination: low-x cell (offset npml into the interior).
	const int eX = (ix + npml) * (ny + 1) * (nz + 1) + (iy + npml) * (nz + 1) + (iz + npml);
	const int eY = (ix + npml) * ny * (nz + 1) + (iy + npml) * (nz + 1) + (iz + npml);
	const int eZ = (ix + npml) * (ny + 1) * nz + (iy + npml) * nz + (iz + npml);
	const int hX = (ix + npml) * ny * nz + (iy + npml) * nz + (iz + npml);
	const int hY = eZ;   // Hy uses the same (ny+1, nz) strides as Ez
	const int hZ = eY;   // Hz uses the same (ny, nz+1) strides as Ey
	// Distance (in x planes) from the low-x slab to the high-x slab.
	const int shift = nx - 2 * npml - npmlc;
	dev_Ex[eX] = dev_Ex_zheng[sLo];
	dev_Ey[eY] = dev_Ey_zheng[sLo];
	dev_Ez[eZ] = dev_Ez_zheng[sLo];
	dev_Hx[hX] = dev_Hx_zheng[sLo];
	dev_Hy[hY] = dev_Hy_zheng[sLo];
	dev_Hz[hZ] = dev_Hz_zheng[sLo];
	dev_Ex[eX + (ny + 1) * (nz + 1) * shift] = dev_Ex_zheng[sHi];
	dev_Ey[eY + ny * (nz + 1) * shift] = dev_Ey_zheng[sHi];
	dev_Ez[eZ + (ny + 1) * nz * shift] = dev_Ez_zheng[sHi];
	dev_Hx[hX + ny * nz * shift] = dev_Hx_zheng[sHi];
	dev_Hy[hY + (ny + 1) * nz * shift] = dev_Hy_zheng[sHi];
	dev_Hz[hZ + ny * (nz + 1) * shift] = dev_Hz_zheng[sHi];
}
__global__ void gpu_back_zheng_2(
	float *dev_Ex_zheng, float *dev_Ey_zheng, float *dev_Ez_zheng,
	float *dev_Hx_zheng, float *dev_Hy_zheng, float *dev_Hz_zheng,
	float *dev_Ex, float *dev_Ey, float *dev_Ez,
	float *dev_Hx, float *dev_Hy, float *dev_Hz,
	int j)
{
	// Inverse of gpu_zheng_2: restore the two y-boundary slabs of all six field
	// components from time slot j of the *_zheng snapshot buffers.
	// Launch: grid_zheng_2(nx - 2*npml, npmlc) x block_zheng_2(nz - 2*npml).
	const int ix = blockIdx.x;
	const int iy = blockIdx.y;
	const int iz = threadIdx.x;
	const int nxi = nx - 2 * npml;   // snapshot extent in x
	const int nzi = nz - 2 * npml;   // snapshot extent in z
	// Snapshot source: low-y slab inside slot j, then the paired high-y slab.
	const int sLo = j * nxi * (2 * npmlc) * nzi + ix * (2 * npmlc) * nzi + iy * nzi + iz;
	const int sHi = sLo + npmlc * nzi;
	// Field destination: low-y cell (offset npml into the interior).
	const int eX = (ix + npml) * (ny + 1) * (nz + 1) + (iy + npml) * (nz + 1) + (iz + npml);
	const int eY = (ix + npml) * ny * (nz + 1) + (iy + npml) * (nz + 1) + (iz + npml);
	const int eZ = (ix + npml) * (ny + 1) * nz + (iy + npml) * nz + (iz + npml);
	const int hX = (ix + npml) * ny * nz + (iy + npml) * nz + (iz + npml);
	const int hY = eZ;   // Hy uses the same (ny+1, nz) strides as Ez
	const int hZ = eY;   // Hz uses the same (ny, nz+1) strides as Ey
	// Distance (in y rows) from the low-y slab to the high-y slab;
	// each field multiplies it by its own z-row stride (nz or nz+1).
	const int shift = ny - 2 * npml - npmlc;
	dev_Ex[eX] = dev_Ex_zheng[sLo];
	dev_Ey[eY] = dev_Ey_zheng[sLo];
	dev_Ez[eZ] = dev_Ez_zheng[sLo];
	dev_Hx[hX] = dev_Hx_zheng[sLo];
	dev_Hy[hY] = dev_Hy_zheng[sLo];
	dev_Hz[hZ] = dev_Hz_zheng[sLo];
	dev_Ex[eX + (nz + 1) * shift] = dev_Ex_zheng[sHi];
	dev_Ey[eY + (nz + 1) * shift] = dev_Ey_zheng[sHi];
	dev_Ez[eZ + nz * shift] = dev_Ez_zheng[sHi];
	dev_Hx[hX + nz * shift] = dev_Hx_zheng[sHi];
	dev_Hy[hY + nz * shift] = dev_Hy_zheng[sHi];
	dev_Hz[hZ + (nz + 1) * shift] = dev_Hz_zheng[sHi];
}
__global__ void gpu_back_zheng_3(
	float *dev_Ex_zheng, float *dev_Ey_zheng, float *dev_Ez_zheng,
	float *dev_Hx_zheng, float *dev_Hy_zheng, float *dev_Hz_zheng,
	float *dev_Ex, float *dev_Ey, float *dev_Ez,
	float *dev_Hx, float *dev_Hy, float *dev_Hz,
	int j)
{
	// Inverse of gpu_zheng_3: restore the two z-boundary slabs of all six field
	// components from time slot j of the *_zheng snapshot buffers.
	// Launch: grid_zheng_3(nx - 2*npml, ny - 2*npml) x block_zheng_3(npmlc).
	const int ix = blockIdx.x;
	const int iy = blockIdx.y;
	const int iz = threadIdx.x;
	const int nxi = nx - 2 * npml;   // snapshot extent in x
	const int nyi = ny - 2 * npml;   // snapshot extent in y
	// Snapshot source: low-z slab inside slot j, then the paired high-z slab.
	const int sLo = j * nxi * nyi * (2 * npmlc) + ix * nyi * (2 * npmlc) + iy * (2 * npmlc) + iz;
	const int sHi = sLo + npmlc;
	// Field destination: low-z cell (offset npml into the interior).
	const int eX = (ix + npml) * (ny + 1) * (nz + 1) + (iy + npml) * (nz + 1) + (iz + npml);
	const int eY = (ix + npml) * ny * (nz + 1) + (iy + npml) * (nz + 1) + (iz + npml);
	const int eZ = (ix + npml) * (ny + 1) * nz + (iy + npml) * nz + (iz + npml);
	const int hX = (ix + npml) * ny * nz + (iy + npml) * nz + (iz + npml);
	const int hY = eZ;   // Hy uses the same (ny+1, nz) strides as Ez
	const int hZ = eY;   // Hz uses the same (ny, nz+1) strides as Ey
	// Distance (in z cells, stride 1 for every field) to the high-z slab.
	const int shift = nz - 2 * npml - npmlc;
	dev_Ex[eX] = dev_Ex_zheng[sLo];
	dev_Ey[eY] = dev_Ey_zheng[sLo];
	dev_Ez[eZ] = dev_Ez_zheng[sLo];
	dev_Hx[hX] = dev_Hx_zheng[sLo];
	dev_Hy[hY] = dev_Hy_zheng[sLo];
	dev_Hz[hZ] = dev_Hz_zheng[sLo];
	dev_Ex[eX + shift] = dev_Ex_zheng[sHi];
	dev_Ey[eY + shift] = dev_Ey_zheng[sHi];
	dev_Ez[eZ + shift] = dev_Ez_zheng[sHi];
	dev_Hx[hX + shift] = dev_Hx_zheng[sHi];
	dev_Hy[hY + shift] = dev_Hy_zheng[sHi];
	dev_Hz[hZ + shift] = dev_Hz_zheng[sHi];
}
__global__ void gpu_back_zheng_last(
	float *dev_Ex_zheng, float *dev_Ey_zheng, float *dev_Ez_zheng,
	float *dev_Hx_zheng, float *dev_Hy_zheng, float *dev_Hz_zheng,
	float *dev_Ex, float *dev_Ey, float *dev_Ez,
	float *dev_Hx, float *dev_Hy, float *dev_Hz)
{
	// Inverse of gpu_zheng_last: restore the whole interior region of all six
	// fields from the compact *_zheng buffers.
	// Launch: grid_zheng_last(nx - 2*npml, ny - 2*npml) x block_zheng_last(nz - 2*npml).
	const int ix = blockIdx.x;
	const int iy = blockIdx.y;
	const int iz = threadIdx.x;
	const int nyi = ny - 2 * npml;
	const int nzi = nz - 2 * npml;
	const int s = ix * nyi * nzi + iy * nzi + iz;   // compact snapshot index
	// Destination indices (offset npml into each field's interior).
	const int eX = (ix + npml) * (ny + 1) * (nz + 1) + (iy + npml) * (nz + 1) + (iz + npml);
	const int eY = (ix + npml) * ny * (nz + 1) + (iy + npml) * (nz + 1) + (iz + npml);
	const int eZ = (ix + npml) * (ny + 1) * nz + (iy + npml) * nz + (iz + npml);
	const int hX = (ix + npml) * ny * nz + (iy + npml) * nz + (iz + npml);
	const int hY = eZ;   // Hy uses the same (ny+1, nz) strides as Ez
	const int hZ = eY;   // Hz uses the same (ny, nz+1) strides as Ey
	dev_Ex[eX] = dev_Ex_zheng[s];
	dev_Ey[eY] = dev_Ey_zheng[s];
	dev_Ez[eZ] = dev_Ez_zheng[s];
	dev_Hx[hX] = dev_Hx_zheng[s];
	dev_Hy[hY] = dev_Hy_zheng[s];
	dev_Hz[hZ] = dev_Hz_zheng[s];
}
dim3 grid_fan_huanyuan(nx - 2 * npml, ny - 2 * npml); // gpu_fan_huanyuan: interior (x, y) extent
dim3 block_fan_huanyuan(nz - 2 * npml);               // interior z extent
__global__ void gpu_fan_huanyuan(float *dev_dst, float *dev_Ex)
{
	// Copy the interior (PML frame stripped) region of Ex into the compact
	// dev_dst array.
	// Launch: grid_fan_huanyuan(nx - 2*npml, ny - 2*npml) x block_fan_huanyuan(nz - 2*npml).
	const int nyi = ny - 2 * npml;
	const int nzi = nz - 2 * npml;
	const int dst = blockIdx.x * nyi * nzi + blockIdx.y * nzi + threadIdx.x;
	const int src = (blockIdx.x + npml) * (ny + 1) * (nz + 1)
		+ (blockIdx.y + npml) * (nz + 1)
		+ (threadIdx.x + npml);
	dev_dst[dst] = dev_Ex[src];
}
dim3 grid_HE1(nx - np - np, ny - np - np); // gpu_H1 / gpu_E1: np-inset (x, y) extent
dim3 block_HE1(nz - np - np);              // np-inset z extent
__global__ void gpu_H1(
	float *dev_Hx1, float *dev_Hy1, float *dev_Hz1,
	float *dev_Ex1, float *dev_Ey1, float *dev_Ez1,
	float *dev_CPHx, float *dev_CPHy, float *dev_CPHz,
	float *dev_CQHx, float *dev_CQHy, float *dev_CQHz)
{
	// H-field update on the np-inset subgrid.
	// Launch: grid_HE1(nx-2np, ny-2np) x block_HE1(nz-2np).
	// Update form: H = (1/CP) * H + (CQ/CP) * curl(E) terms (forward differences).
	const int ix = blockIdx.x + np;
	const int iy = blockIdx.y + np;
	const int iz = threadIdx.x + np;
	const int iHx = ix * ny * nz + iy * nz + iz;               // Hx: (ny, nz) strides
	const int iHy = ix * (ny + 1) * nz + iy * nz + iz;         // Hy: (ny+1, nz) strides
	const int iHz = ix * ny * (nz + 1) + iy * (nz + 1) + iz;   // Hz: (ny, nz+1) strides
	const int iEx = ix * (ny + 1) * (nz + 1) + iy * (nz + 1) + iz;
	const int iEy = ix * ny * (nz + 1) + iy * (nz + 1) + iz;
	const int iEz = ix * (ny + 1) * nz + iy * nz + iz;
	// Hx: +dEz/dy - dEy/dz
	const float px = 1 / dev_CPHx[iHx];   // reciprocal of CPHx
	const float qx = dev_CQHx[iHx];
	dev_Hx1[iHx] = px * dev_Hx1[iHx]
		+ px * qx / dy * (dev_Ez1[iEz + nz] - dev_Ez1[iEz])
		- px * qx / dz * (dev_Ey1[iEy + 1] - dev_Ey1[iEy]);
	// Hy: +dEx/dz - dEz/dx
	const float py = 1 / dev_CPHy[iHy];   // reciprocal of CPHy
	const float qy = dev_CQHy[iHy];
	dev_Hy1[iHy] = py * dev_Hy1[iHy]
		+ py * qy / dz * (dev_Ex1[iEx + 1] - dev_Ex1[iEx])
		- py * qy / dx * (dev_Ez1[iEz + (ny + 1) * nz] - dev_Ez1[iEz]);
	// Hz: +dEy/dx - dEx/dy
	const float pz = 1 / dev_CPHz[iHz];   // reciprocal of CPHz
	const float qz = dev_CQHz[iHz];
	dev_Hz1[iHz] = pz * dev_Hz1[iHz]
		+ pz * qz / dx * (dev_Ey1[iEy + ny * (nz + 1)] - dev_Ey1[iEy])
		- pz * qz / dy * (dev_Ex1[iEx + (nz + 1)] - dev_Ex1[iEx]);
}
__global__ void gpu_E1(
	float *dev_Hx1, float *dev_Hy1, float *dev_Hz1,
	float *dev_Ex1, float *dev_Ey1, float *dev_Ez1,
	float *dev_CAEx, float *dev_CAEy, float *dev_CAEz,
	float *dev_CBEx, float *dev_CBEy, float *dev_CBEz)
{
	// E-field update on the np-inset subgrid.
	// Launch: grid_HE1(nx-2np, ny-2np) x block_HE1(nz-2np).
	// Update form: E = (1/CA) * E + (CB/CA) * curl(H) terms (backward differences).
	const int ix = blockIdx.x + np;
	const int iy = blockIdx.y + np;
	const int iz = threadIdx.x + np;
	const int iHx = ix * ny * nz + iy * nz + iz;               // Hx: (ny, nz) strides
	const int iHy = ix * (ny + 1) * nz + iy * nz + iz;         // Hy: (ny+1, nz) strides
	const int iHz = ix * ny * (nz + 1) + iy * (nz + 1) + iz;   // Hz: (ny, nz+1) strides
	const int iEx = ix * (ny + 1) * (nz + 1) + iy * (nz + 1) + iz;
	const int iEy = ix * ny * (nz + 1) + iy * (nz + 1) + iz;
	const int iEz = ix * (ny + 1) * nz + iy * nz + iz;
	// Ex: +dHz/dy - dHy/dz
	const float px = 1 / dev_CAEx[iEx];   // reciprocal of CAEx
	const float qx = dev_CBEx[iEx];
	dev_Ex1[iEx] = px * dev_Ex1[iEx]
		+ px * qx / dy * (dev_Hz1[iHz] - dev_Hz1[iHz - (nz + 1)])
		- px * qx / dz * (dev_Hy1[iHy] - dev_Hy1[iHy - 1]);
	// Ey: +dHx/dz - dHz/dx
	const float py = 1 / dev_CAEy[iEy];   // reciprocal of CAEy
	const float qy = dev_CBEy[iEy];
	dev_Ey1[iEy] = py * dev_Ey1[iEy]
		+ py * qy / dz * (dev_Hx1[iHx] - dev_Hx1[iHx - 1])
		- py * qy / dx * (dev_Hz1[iHz] - dev_Hz1[iHz - ny * (nz + 1)]);
	// Ez: +dHy/dx - dHx/dy
	const float pz = 1 / dev_CAEz[iEz];   // reciprocal of CAEz
	const float qz = dev_CBEz[iEz];
	dev_Ez1[iEz] = pz * dev_Ez1[iEz]
		+ pz * qz / dx * (dev_Hy1[iHy] - dev_Hy1[iHy - (ny + 1) * nz])
		- pz * qz / dy * (dev_Hx1[iHx] - dev_Hx1[iHx - nz]);
}
dim3 grid_nzf(nx - 2 * npml, ny - 2 * npml); // gpu_nzf: interior (x, y) extent
dim3 block_nzf(nz - 2 * npml);               // interior z extent
__global__ void gpu_nzf(float *dev_dst, float *dev_src1, float *dev_src2)
{
	// Elementwise multiply-accumulate over interior-sized arrays:
	// dst[i] += src1[i] * src2[i].
	// Launch: grid_nzf(nx - 2*npml, ny - 2*npml) x block_nzf(nz - 2*npml).
	const int nzi = nz - 2 * npml;
	const int i = blockIdx.x * (ny - 2 * npml) * nzi + blockIdx.y * nzi + threadIdx.x;
	dev_dst[i] += dev_src1[i] * dev_src2[i];
}
void read_int(const char *name, int *a, int n1, int n2, int n3)
{
	/*
	 * Read n1*n2*n3 whitespace-separated integers from text file `name` into
	 * the row-major array a (element (i, j, k) at a[i*n2*n3 + j*n3 + k]).
	 * The file is consumed i-major, then k, then j -- the transposed order
	 * matches the column-major MATLAB dumps this program reads.
	 * On open failure, or on a short/malformed file, prints a diagnostic and
	 * returns, leaving the unread tail of `a` untouched.
	 */
	FILE *fp = fopen(name, "r");
	if (fp == NULL)
	{
		printf("fopen %s error! \n", name);
		return;
	}
	printf("fopen %s ok! \n", name);
	for (int i = 0; i < n1; i++)
	{
		for (int k = 0; k < n3; k++)
		{
			for (int j = 0; j < n2; j++)
			{
				// Fix: previously the fscanf result was ignored, so a short or
				// malformed file silently left the rest of `a` unfilled.
				if (fscanf(fp, "%d", &a[i * n2 * n3 + j * n3 + k]) != 1)
				{
					printf("read %s failed at element (%d,%d,%d)\n", name, i, j, k);
					fclose(fp);
					return;
				}
			}
		}
	}
	printf("read %s OK\n", name);
	fclose(fp);
}
void read_float(const char *name, float *a, int n1, int n2, int n3)
{
	/*
	 * Read n1*n2*n3 whitespace-separated floats from text file `name` into
	 * the row-major array a (element (i, j, k) at a[i*n2*n3 + j*n3 + k]).
	 * The file is consumed i-major, then k, then j -- the transposed order
	 * matches the column-major MATLAB dumps this program reads.
	 * On open failure, or on a short/malformed file, prints a diagnostic and
	 * returns, leaving the unread tail of `a` untouched.
	 */
	FILE *fp = fopen(name, "r");
	if (fp == NULL)
	{
		printf("fopen %s error! \n", name);
		return;
	}
	printf("fopen %s ok! \n", name);
	for (int i = 0; i < n1; i++)
	{
		for (int k = 0; k < n3; k++)
		{
			for (int j = 0; j < n2; j++)
			{
				// Fix: previously the fscanf result was ignored, so a short or
				// malformed file silently left the rest of `a` unfilled.
				if (fscanf(fp, "%f", a + i * n2 * n3 + j * n3 + k) != 1)
				{
					printf("read %s failed at element (%d,%d,%d)\n", name, i, j, k);
					fclose(fp);
					return;
				}
			}
		}
	}
	printf("read %s OK\n", name);
	fclose(fp);
}
void print_nzf(const char *name, float *a, int n1, int n2, int n3)
{
	/*
	 * Dump the row-major array a[n1][n2][n3] to text file `name`, one
	 * "%e " token per value, emitted k-major then j then i (the transposed,
	 * MATLAB column-major order used throughout this program's text files).
	 */
	FILE *fp = fopen(name, "w+");
	if (fp == NULL)
	{
		printf("fopen %s error! \n", name);
		return;
	}
	printf("fopen %s ok! \n", name);
	for (int k = 0; k < n3; k++)
		for (int j = 0; j < n2; j++)
			for (int i = 0; i < n1; i++)
				fprintf(fp, "%e ", a[i * n2 * n3 + j * n3 + k]);
	printf("print %s OK\n", name);
	fclose(fp);
}
void read_data_from_txt(bool pianYi)
{
	/*
	 * Load all host-side coefficient arrays, CPML recursion tables, source/
	 * receiver positions, and the source waveform from text files.
	 * pianYi == true  -> read from data_pianyi/  (migration/offset run; also
	 *                    reads the observed field E_obs written by a previous
	 *                    forward run -- see print_E_obs).
	 * pianYi == false -> read from data_zhengyan/ (forward run; no E_obs).
	 * The shape arguments passed here are the authoritative record of each
	 * array's (n1, n2, n3) dimensions used by the kernels above.
	 */
	if (pianYi)
	{
		read_float("data_pianyi/CAEx.txt", (float*)CAEx, nx, ny + 1, nz + 1);
		read_float("data_pianyi/CBEx.txt", (float*)CBEx, nx, ny + 1, nz + 1);
		read_float("data_pianyi/RAEyz.txt", (float*)RAEyz, nx, 2 * (npml - 1), nz - 1);
		read_float("data_pianyi/RBEyz.txt", (float*)RBEyz, nx, 2 * (npml - 1), nz - 1);
		read_float("data_pianyi/RAEzy.txt", (float*)RAEzy, nx, ny - 1, 2 * (npml - 1));
		read_float("data_pianyi/RBEzy.txt", (float*)RBEzy, nx, ny - 1, 2 * (npml - 1));
		read_float("data_pianyi/CAEy.txt", (float*)CAEy, nx + 1, ny, nz + 1);
		read_float("data_pianyi/CBEy.txt", (float*)CBEy, nx + 1, ny, nz + 1);
		read_float("data_pianyi/RAEzx.txt", (float*)RAEzx, nx - 1, ny, 2 * (npml - 1));
		read_float("data_pianyi/RBEzx.txt", (float*)RBEzx, nx - 1, ny, 2 * (npml - 1));
		read_float("data_pianyi/RAExz.txt", (float*)RAExz, 2 * (npml - 1), ny, nz - 1);
		read_float("data_pianyi/RBExz.txt", (float*)RBExz, 2 * (npml - 1), ny, nz - 1);
		read_float("data_pianyi/CAEz.txt", (float*)CAEz, nx + 1, ny + 1, nz);
		read_float("data_pianyi/CBEz.txt", (float*)CBEz, nx + 1, ny + 1, nz);
		read_float("data_pianyi/RAExy.txt", (float*)RAExy, 2 * (npml - 1), ny - 1, nz);
		read_float("data_pianyi/RBExy.txt", (float*)RBExy, 2 * (npml - 1), ny - 1, nz);
		read_float("data_pianyi/RAEyx.txt", (float*)RAEyx, nx - 1, 2 * (npml - 1), nz);
		read_float("data_pianyi/RBEyx.txt", (float*)RBEyx, nx - 1, 2 * (npml - 1), nz);
		read_float("data_pianyi/CPHx.txt", (float*)CPHx, nx + 1, ny, nz);
		read_float("data_pianyi/CQHx.txt", (float*)CQHx, nx + 1, ny, nz);
		read_float("data_pianyi/RAHyz.txt", (float*)RAHyz, nx - 1, 2 * npml, nz);
		read_float("data_pianyi/RBHyz.txt", (float*)RBHyz, nx - 1, 2 * npml, nz);
		read_float("data_pianyi/RAHzy.txt", (float*)RAHzy, nx - 1, ny, 2 * npml);
		read_float("data_pianyi/RBHzy.txt", (float*)RBHzy, nx - 1, ny, 2 * npml);
		read_float("data_pianyi/CPHy.txt", (float*)CPHy, nx, ny + 1, nz);
		read_float("data_pianyi/CQHy.txt", (float*)CQHy, nx, ny + 1, nz);
		read_float("data_pianyi/RAHzx.txt", (float*)RAHzx, nx, ny - 1, 2 * npml);
		read_float("data_pianyi/RBHzx.txt", (float*)RBHzx, nx, ny - 1, 2 * npml);
		read_float("data_pianyi/RAHxz.txt", (float*)RAHxz, 2 * npml, ny - 1, nz);
		read_float("data_pianyi/RBHxz.txt", (float*)RBHxz, 2 * npml, ny - 1, nz);
		read_float("data_pianyi/CPHz.txt", (float*)CPHz, nx, ny, nz + 1);
		read_float("data_pianyi/CQHz.txt", (float*)CQHz, nx, ny, nz + 1);
		read_float("data_pianyi/RAHxy.txt", (float*)RAHxy, 2 * npml, ny, nz - 1);
		read_float("data_pianyi/RBHxy.txt", (float*)RBHxy, 2 * npml, ny, nz - 1);
		read_float("data_pianyi/RAHyx.txt", (float*)RAHyx, nx, 2 * npml, nz - 1);
		read_float("data_pianyi/RBHyx.txt", (float*)RBHyx, nx, 2 * npml, nz - 1);
		read_float("data_pianyi/kx_Ey.txt", (float*)kx_Ey, nx + 1, ny, nz + 1);
		read_float("data_pianyi/kx_Ez.txt", (float*)kx_Ez, nx + 1, ny + 1, nz);
		read_float("data_pianyi/ky_Ex.txt", (float*)ky_Ex, nx, ny + 1, nz + 1);
		read_float("data_pianyi/ky_Ez.txt", (float*)ky_Ez, nx + 1, ny + 1, nz);
		read_float("data_pianyi/kz_Ex.txt", (float*)kz_Ex, nx, ny + 1, nz + 1);
		read_float("data_pianyi/kz_Ey.txt", (float*)kz_Ey, nx + 1, ny, nz + 1);
		read_float("data_pianyi/kx_Hy.txt", (float*)kx_Hy, nx, ny + 1, nz);
		read_float("data_pianyi/kx_Hz.txt", (float*)kx_Hz, nx, ny, nz + 1);
		read_float("data_pianyi/ky_Hx.txt", (float*)ky_Hx, nx + 1, ny, nz);
		read_float("data_pianyi/ky_Hz.txt", (float*)ky_Hz, nx, ny, nz + 1);
		read_float("data_pianyi/kz_Hx.txt", (float*)kz_Hx, nx + 1, ny, nz);
		read_float("data_pianyi/kz_Hy.txt", (float*)kz_Hy, nx, ny + 1, nz);
		// Source/receiver grid positions (szfsw entries each).
		read_int("data_pianyi/fswzx.txt", (int*)fswzx, 1, 1, szfsw);
		read_int("data_pianyi/fswzy.txt", (int*)fswzy, 1, 1, szfsw);
		read_int("data_pianyi/fswzz.txt", (int*)fswzz, 1, 1, szfsw);
		read_int("data_pianyi/jswzx.txt", (int*)jswzx, 1, 1, szfsw);
		read_int("data_pianyi/jswzy.txt", (int*)jswzy, 1, 1, szfsw);
		read_int("data_pianyi/jswzz.txt", (int*)jswzz, 1, 1, szfsw);
		// Source waveform (it time samples) and recorded field (it x szfsw).
		read_float("data_pianyi/source.txt", (float*)source, 1, 1, it);
		read_float("data_pianyi/E_obs.txt", (float*)E_obs, 1, it, szfsw);
	}
	else
	{
		read_float("data_zhengyan/CAEx.txt", (float*)CAEx, nx, ny + 1, nz + 1);
		read_float("data_zhengyan/CBEx.txt", (float*)CBEx, nx, ny + 1, nz + 1);
		read_float("data_zhengyan/RAEyz.txt", (float*)RAEyz, nx, 2 * (npml - 1), nz - 1);
		read_float("data_zhengyan/RBEyz.txt", (float*)RBEyz, nx, 2 * (npml - 1), nz - 1);
		read_float("data_zhengyan/RAEzy.txt", (float*)RAEzy, nx, ny - 1, 2 * (npml - 1));
		read_float("data_zhengyan/RBEzy.txt", (float*)RBEzy, nx, ny - 1, 2 * (npml - 1));
		read_float("data_zhengyan/CAEy.txt", (float*)CAEy, nx + 1, ny, nz + 1);
		read_float("data_zhengyan/CBEy.txt", (float*)CBEy, nx + 1, ny, nz + 1);
		read_float("data_zhengyan/RAEzx.txt", (float*)RAEzx, nx - 1, ny, 2 * (npml - 1));
		read_float("data_zhengyan/RBEzx.txt", (float*)RBEzx, nx - 1, ny, 2 * (npml - 1));
		read_float("data_zhengyan/RAExz.txt", (float*)RAExz, 2 * (npml - 1), ny, nz - 1);
		read_float("data_zhengyan/RBExz.txt", (float*)RBExz, 2 * (npml - 1), ny, nz - 1);
		read_float("data_zhengyan/CAEz.txt", (float*)CAEz, nx + 1, ny + 1, nz);
		read_float("data_zhengyan/CBEz.txt", (float*)CBEz, nx + 1, ny + 1, nz);
		read_float("data_zhengyan/RAExy.txt", (float*)RAExy, 2 * (npml - 1), ny - 1, nz);
		read_float("data_zhengyan/RBExy.txt", (float*)RBExy, 2 * (npml - 1), ny - 1, nz);
		read_float("data_zhengyan/RAEyx.txt", (float*)RAEyx, nx - 1, 2 * (npml - 1), nz);
		read_float("data_zhengyan/RBEyx.txt", (float*)RBEyx, nx - 1, 2 * (npml - 1), nz);
		read_float("data_zhengyan/CPHx.txt", (float*)CPHx, nx + 1, ny, nz);
		read_float("data_zhengyan/CQHx.txt", (float*)CQHx, nx + 1, ny, nz);
		read_float("data_zhengyan/RAHyz.txt", (float*)RAHyz, nx - 1, 2 * npml, nz);
		read_float("data_zhengyan/RBHyz.txt", (float*)RBHyz, nx - 1, 2 * npml, nz);
		read_float("data_zhengyan/RAHzy.txt", (float*)RAHzy, nx - 1, ny, 2 * npml);
		read_float("data_zhengyan/RBHzy.txt", (float*)RBHzy, nx - 1, ny, 2 * npml);
		read_float("data_zhengyan/CPHy.txt", (float*)CPHy, nx, ny + 1, nz);
		read_float("data_zhengyan/CQHy.txt", (float*)CQHy, nx, ny + 1, nz);
		read_float("data_zhengyan/RAHzx.txt", (float*)RAHzx, nx, ny - 1, 2 * npml);
		read_float("data_zhengyan/RBHzx.txt", (float*)RBHzx, nx, ny - 1, 2 * npml);
		read_float("data_zhengyan/RAHxz.txt", (float*)RAHxz, 2 * npml, ny - 1, nz);
		read_float("data_zhengyan/RBHxz.txt", (float*)RBHxz, 2 * npml, ny - 1, nz);
		read_float("data_zhengyan/CPHz.txt", (float*)CPHz, nx, ny, nz + 1);
		read_float("data_zhengyan/CQHz.txt", (float*)CQHz, nx, ny, nz + 1);
		read_float("data_zhengyan/RAHxy.txt", (float*)RAHxy, 2 * npml, ny, nz - 1);
		read_float("data_zhengyan/RBHxy.txt", (float*)RBHxy, 2 * npml, ny, nz - 1);
		read_float("data_zhengyan/RAHyx.txt", (float*)RAHyx, nx, 2 * npml, nz - 1);
		read_float("data_zhengyan/RBHyx.txt", (float*)RBHyx, nx, 2 * npml, nz - 1);
		read_float("data_zhengyan/kx_Ey.txt", (float*)kx_Ey, nx + 1, ny, nz + 1);
		read_float("data_zhengyan/kx_Ez.txt", (float*)kx_Ez, nx + 1, ny + 1, nz);
		read_float("data_zhengyan/ky_Ex.txt", (float*)ky_Ex, nx, ny + 1, nz + 1);
		read_float("data_zhengyan/ky_Ez.txt", (float*)ky_Ez, nx + 1, ny + 1, nz);
		read_float("data_zhengyan/kz_Ex.txt", (float*)kz_Ex, nx, ny + 1, nz + 1);
		read_float("data_zhengyan/kz_Ey.txt", (float*)kz_Ey, nx + 1, ny, nz + 1);
		read_float("data_zhengyan/kx_Hy.txt", (float*)kx_Hy, nx, ny + 1, nz);
		read_float("data_zhengyan/kx_Hz.txt", (float*)kx_Hz, nx, ny, nz + 1);
		read_float("data_zhengyan/ky_Hx.txt", (float*)ky_Hx, nx + 1, ny, nz);
		read_float("data_zhengyan/ky_Hz.txt", (float*)ky_Hz, nx, ny, nz + 1);
		read_float("data_zhengyan/kz_Hx.txt", (float*)kz_Hx, nx + 1, ny, nz);
		read_float("data_zhengyan/kz_Hy.txt", (float*)kz_Hy, nx, ny + 1, nz);
		// Source/receiver grid positions (szfsw entries each).
		read_int("data_zhengyan/fswzx.txt", (int*)fswzx, 1, 1, szfsw);
		read_int("data_zhengyan/fswzy.txt", (int*)fswzy, 1, 1, szfsw);
		read_int("data_zhengyan/fswzz.txt", (int*)fswzz, 1, 1, szfsw);
		read_int("data_zhengyan/jswzx.txt", (int*)jswzx, 1, 1, szfsw);
		read_int("data_zhengyan/jswzy.txt", (int*)jswzy, 1, 1, szfsw);
		read_int("data_zhengyan/jswzz.txt", (int*)jswzz, 1, 1, szfsw);
		// Source waveform (it time samples); forward runs produce E_obs
		// themselves, so nothing more to read here.
		read_float("data_zhengyan/source.txt", (float*)source, 1, 1, it);
	}
}
/*
 * Write the recorded receiver traces E_obs[it][szfsw] to disk:
 *   output/E_obs.txt      - dump with a small dimension header
 *   data_pianyi/E_obs.txt - plain matrix (consumed by the migration stage)
 * Output row i holds the complete time series of receiver i (E_obs is
 * stored time-major, so rows transpose it on the way out).
 *
 * Fix: on fopen failure we now skip writing/closing that file instead of
 * writing through a NULL FILE* (previous behavior was undefined).
 */
void print_E_obs()
{
	const char *name = "output/E_obs.txt";
	FILE *fp = fopen(name, "w+");
	if (fp == NULL) //
	{
		printf("fopen %s error! \n", name);
	}
	else
	{
		printf("print fopen %s ok! \n", name);
		fprintf(fp, "E_obs[%d][%d]\n", it, szfsw);
		fprintf(fp, " %d %d \n", szfsw, it);
		for (int i = 0; i < szfsw; i++)
		{
			for (int j = 0; j < it; j++)
			{
				fprintf(fp, "%8f ", E_obs[j][i]);
			}
			fprintf(fp, "\n");
		}
		printf("print %s OK\n", name);
		fclose(fp);
	}
	const char *name2 = "data_pianyi/E_obs.txt";
	fp = fopen(name2, "w+");
	if (fp == NULL) //
	{
		printf("fopen %s error! \n", name2);
	}
	else
	{
		printf("print fopen %s ok! \n", name2);
		for (int i = 0; i < szfsw; i++)
		{
			for (int j = 0; j < it; j++)
			{
				fprintf(fp, "%8f ", E_obs[j][i]);
			}
			fprintf(fp, "\n");
		}
		printf("print %s OK\n", name2);
		fclose(fp);
	}
	return;
}
/*
 * Allocate every device-side buffer used by the solver.
 *
 * Three groups:
 *   1. Device mirrors of host coefficient tables (CAE*/CBE*, CPH*/CQH*,
 *      RA*/RB* boundary coefficient pairs, k*_E*/k*_H* profiles), each
 *      sized with sizeof(hostArray).
 *   2. Field state on the staggered grid (E*, H*) plus the auxiliary
 *      U-arrays, each of which shares its field's element count.
 *   3. Only when pianYi is set (migration pass): snapshot fields
 *      (E*1/H*1), work buffers (fan, huanyuan, ns, zv, fv), the large
 *      per-time-step boundary history arrays (*_zheng_1/2/3) and the
 *      final-state arrays (*_zheng_last).
 *
 * Fixes vs. the previous version:
 *   - removed the dead status check that ran before any allocation
 *     (cudaStatus was just initialized to hipSuccess, so it could never fire);
 *   - element counts are computed in size_t so the 4-factor products for
 *     the "zheng" history buffers cannot overflow 32-bit int before the
 *     multiply by sizeof(float).
 * Allocation order and error messages are unchanged.
 */
void gpu_memory_malloc(bool pianYi)
{
	hipError_t cudaStatus = hipSuccess;

	// Mirror a host global array on the device: dev_<sym> gets sizeof(<sym>) bytes.
#define ALLOC_SYM(sym) \
	do { \
		cudaStatus = hipMalloc((void**)&dev_##sym, sizeof(sym)); \
		if (cudaStatus != hipSuccess) { printf("hipMalloc failed!"); } \
	} while (0)
	// Allocate `count` floats into an already-declared device pointer.
#define ALLOC_BUF(ptr, count) \
	do { \
		cudaStatus = hipMalloc((void**)&(ptr), (count) * sizeof(float)); \
		if (cudaStatus != hipSuccess) { printf("hipMalloc failed!"); } \
	} while (0)

	// --- E-field update coefficients and RA/RB boundary tables ---
	ALLOC_SYM(CAEx);  ALLOC_SYM(CBEx);
	ALLOC_SYM(RAEyz); ALLOC_SYM(RBEyz); ALLOC_SYM(RAEzy); ALLOC_SYM(RBEzy);
	ALLOC_SYM(CAEy);  ALLOC_SYM(CBEy);
	ALLOC_SYM(RAExz); ALLOC_SYM(RBExz); ALLOC_SYM(RAEzx); ALLOC_SYM(RBEzx);
	ALLOC_SYM(CAEz);  ALLOC_SYM(CBEz);
	ALLOC_SYM(RAExy); ALLOC_SYM(RBExy); ALLOC_SYM(RAEyx); ALLOC_SYM(RBEyx);
	// --- H-field update coefficients and RA/RB boundary tables ---
	ALLOC_SYM(CPHx);  ALLOC_SYM(CQHx);
	ALLOC_SYM(RAHyz); ALLOC_SYM(RBHyz); ALLOC_SYM(RAHzy); ALLOC_SYM(RBHzy);
	ALLOC_SYM(CPHy);  ALLOC_SYM(CQHy);
	ALLOC_SYM(RAHxz); ALLOC_SYM(RBHxz); ALLOC_SYM(RAHzx); ALLOC_SYM(RBHzx);
	ALLOC_SYM(CPHz);  ALLOC_SYM(CQHz);
	ALLOC_SYM(RAHxy); ALLOC_SYM(RBHxy); ALLOC_SYM(RAHyx); ALLOC_SYM(RBHyx);
	// --- k-profiles (one per field component and derivative direction) ---
	ALLOC_SYM(kx_Ey); ALLOC_SYM(kx_Ez); ALLOC_SYM(ky_Ex); ALLOC_SYM(ky_Ez);
	ALLOC_SYM(kz_Ex); ALLOC_SYM(kz_Ey); ALLOC_SYM(kx_Hy); ALLOC_SYM(kx_Hz);
	ALLOC_SYM(ky_Hx); ALLOC_SYM(ky_Hz); ALLOC_SYM(kz_Hx); ALLOC_SYM(kz_Hy);

	// Element counts on the staggered grid; each auxiliary U-array shares
	// the dimensions of the field component it accompanies.
	size_t szEx = (size_t)nx * (ny + 1) * (nz + 1);
	size_t szEy = (size_t)(nx + 1) * ny * (nz + 1);
	size_t szEz = (size_t)(nx + 1) * (ny + 1) * nz;
	size_t szHx = (size_t)(nx + 1) * ny * nz;
	size_t szHy = (size_t)nx * (ny + 1) * nz;
	size_t szHz = (size_t)nx * ny * (nz + 1);
	ALLOC_BUF(dev_Ex, szEx); ALLOC_BUF(dev_UEyz, szEx); ALLOC_BUF(dev_UEzy, szEx);
	ALLOC_BUF(dev_Ey, szEy); ALLOC_BUF(dev_UEzx, szEy); ALLOC_BUF(dev_UExz, szEy);
	ALLOC_BUF(dev_Ez, szEz); ALLOC_BUF(dev_UExy, szEz); ALLOC_BUF(dev_UEyx, szEz);
	ALLOC_BUF(dev_Hx, szHx); ALLOC_BUF(dev_UHyz, szHx); ALLOC_BUF(dev_UHzy, szHx);
	ALLOC_BUF(dev_Hy, szHy); ALLOC_BUF(dev_UHzx, szHy); ALLOC_BUF(dev_UHxz, szHy);
	ALLOC_BUF(dev_Hz, szHz); ALLOC_BUF(dev_UHxy, szHz); ALLOC_BUF(dev_UHyx, szHz);
	// Probe vector, observation matrix, and the source waveform.
	ALLOC_SYM(V);
	ALLOC_SYM(E_obs);
	ALLOC_SYM(source);

	if (pianYi)
	{
		ALLOC_SYM(fan); ALLOC_SYM(huanyuan);
		ALLOC_SYM(ns); ALLOC_SYM(zv); ALLOC_SYM(fv);
		ALLOC_SYM(Ex1); ALLOC_SYM(Ey1); ALLOC_SYM(Ez1);
		ALLOC_SYM(Hx1); ALLOC_SYM(Hy1); ALLOC_SYM(Hz1);

		// Per-time-step boundary history buffers: `it` snapshots of the
		// three slab orientations of the interior region, plus one
		// final-state ("last") volume per component.
		size_t inX = (size_t)(nx - 2 * npml);
		size_t inY = (size_t)(ny - 2 * npml);
		size_t inZ = (size_t)(nz - 2 * npml);
		size_t szZ1 = (size_t)it * (2 * npmlc) * inY * inZ;
		size_t szZ2 = (size_t)it * inX * (2 * npmlc) * inZ;
		size_t szZ3 = (size_t)it * inX * inY * (2 * npmlc);
		size_t szLast = inX * inY * inZ;
#define ALLOC_BIG(ptr, count) \
	do { \
		cudaStatus = hipMalloc((void**)&(ptr), (count) * sizeof(float)); \
		if (cudaStatus != hipSuccess) { printf("hipMalloc Super Big Array failed!"); } \
	} while (0)
		ALLOC_BIG(dev_Ex_zheng_1, szZ1); ALLOC_BIG(dev_Ex_zheng_2, szZ2); ALLOC_BIG(dev_Ex_zheng_3, szZ3);
		ALLOC_BIG(dev_Ey_zheng_1, szZ1); ALLOC_BIG(dev_Ey_zheng_2, szZ2); ALLOC_BIG(dev_Ey_zheng_3, szZ3);
		ALLOC_BIG(dev_Ez_zheng_1, szZ1); ALLOC_BIG(dev_Ez_zheng_2, szZ2); ALLOC_BIG(dev_Ez_zheng_3, szZ3);
		ALLOC_BIG(dev_Hx_zheng_1, szZ1); ALLOC_BIG(dev_Hx_zheng_2, szZ2); ALLOC_BIG(dev_Hx_zheng_3, szZ3);
		ALLOC_BIG(dev_Hy_zheng_1, szZ1); ALLOC_BIG(dev_Hy_zheng_2, szZ2); ALLOC_BIG(dev_Hy_zheng_3, szZ3);
		ALLOC_BIG(dev_Hz_zheng_1, szZ1); ALLOC_BIG(dev_Hz_zheng_2, szZ2); ALLOC_BIG(dev_Hz_zheng_3, szZ3);
		ALLOC_BIG(dev_Ex_zheng_last, szLast); ALLOC_BIG(dev_Ey_zheng_last, szLast); ALLOC_BIG(dev_Ez_zheng_last, szLast);
		ALLOC_BIG(dev_Hx_zheng_last, szLast); ALLOC_BIG(dev_Hy_zheng_last, szLast); ALLOC_BIG(dev_Hz_zheng_last, szLast);
#undef ALLOC_BIG
	}
#undef ALLOC_BUF
#undef ALLOC_SYM
}
// flag == 0 GPUE*, UE**, H*, UH**, (V, E_obs)
// flag == 1 GPUE*, UE**, H*, UH**, (V, E*_zheng_*, H*_zheng_*, E*_zheng_last, H*_zheng_last, fan, huanyuan)
// flag == 2 GPUE*, UE**, H*, UH**, (V, E*1, H*1)
// Reset per-shot device state.  The field arrays (E*, H*) and all auxiliary
// U-arrays are always cleared; `flag` selects the extra buffers to reset
// (see the table above this function's declaration).
void gpu_memory_set_zero(int flag)
{
	// Byte counts per field component; each U-array shares its field's size.
	const size_t bytesEx = nx * (ny + 1) * (nz + 1) * sizeof(float);
	const size_t bytesEy = (nx + 1) * ny * (nz + 1) * sizeof(float);
	const size_t bytesEz = (nx + 1) * (ny + 1) * nz * sizeof(float);
	const size_t bytesHx = (nx + 1) * ny * nz * sizeof(float);
	const size_t bytesHy = nx * (ny + 1) * nz * sizeof(float);
	const size_t bytesHz = nx * ny * (nz + 1) * sizeof(float);
	struct { float *ptr; size_t bytes; } always[] = {
		{ dev_Ex, bytesEx }, { dev_UEyz, bytesEx }, { dev_UEzy, bytesEx },
		{ dev_Ey, bytesEy }, { dev_UEzx, bytesEy }, { dev_UExz, bytesEy },
		{ dev_Ez, bytesEz }, { dev_UExy, bytesEz }, { dev_UEyx, bytesEz },
		{ dev_Hx, bytesHx }, { dev_UHyz, bytesHx }, { dev_UHzy, bytesHx },
		{ dev_Hy, bytesHy }, { dev_UHzx, bytesHy }, { dev_UHxz, bytesHy },
		{ dev_Hz, bytesHz }, { dev_UHxy, bytesHz }, { dev_UHyx, bytesHz },
	};
	for (int k = 0; k < (int)(sizeof(always) / sizeof(always[0])); k++)
	{
		hipMemset(always[k].ptr, 0, always[k].bytes);
	}
	if (flag == 0)
	{
		// Forward-modelling shot: also clear the probe vector and the
		// observation matrix.
		hipMemset(dev_V, 0, sizeof(V));
		hipMemset(dev_E_obs, 0, sizeof(E_obs));
		return;
	}
	if (flag == 1)
	{
		// Migration forward pass: clear probe vector, the boundary history
		// buffers for all six components, and the final-state volumes.
		hipMemset(dev_V, 0, sizeof(V));
		const size_t bytesZ1 = (it)*(2 * npmlc)*(ny - 2 * npml)*(nz - 2 * npml) * sizeof(float);
		const size_t bytesZ2 = (it)*(nx - 2 * npml)*(2 * npmlc)*(nz - 2 * npml) * sizeof(float);
		const size_t bytesZ3 = (it)*(nx - 2 * npml)*(ny - 2 * npml)*(2 * npmlc) * sizeof(float);
		float *zheng1[] = { dev_Ex_zheng_1, dev_Ey_zheng_1, dev_Ez_zheng_1,
		                    dev_Hx_zheng_1, dev_Hy_zheng_1, dev_Hz_zheng_1 };
		float *zheng2[] = { dev_Ex_zheng_2, dev_Ey_zheng_2, dev_Ez_zheng_2,
		                    dev_Hx_zheng_2, dev_Hy_zheng_2, dev_Hz_zheng_2 };
		float *zheng3[] = { dev_Ex_zheng_3, dev_Ey_zheng_3, dev_Ez_zheng_3,
		                    dev_Hx_zheng_3, dev_Hy_zheng_3, dev_Hz_zheng_3 };
		for (int k = 0; k < 6; k++)
		{
			hipMemset(zheng1[k], 0, bytesZ1);
			hipMemset(zheng2[k], 0, bytesZ2);
			hipMemset(zheng3[k], 0, bytesZ3);
		}
		const size_t sz_last = (nx - 2 * npml)*(ny - 2 * npml)*(nz - 2 * npml) * sizeof(float);
		float *lastBufs[] = { dev_Ex_zheng_last, dev_Ey_zheng_last, dev_Ez_zheng_last,
		                      dev_Hx_zheng_last, dev_Hy_zheng_last, dev_Hz_zheng_last,
		                      dev_fan, dev_huanyuan };
		for (int k = 0; k < (int)(sizeof(lastBufs) / sizeof(lastBufs[0])); k++)
		{
			hipMemset(lastBufs[k], 0, sz_last);
		}
		return;
	}
	if (flag == 2)
	{
		// Migration backward pass: clear the snapshot field copies.
		hipMemset(dev_Ex1, 0, sizeof(Ex1));
		hipMemset(dev_Ey1, 0, sizeof(Ey1));
		hipMemset(dev_Ez1, 0, sizeof(Ez1));
		hipMemset(dev_Hx1, 0, sizeof(Hx1));
		hipMemset(dev_Hy1, 0, sizeof(Hy1));
		hipMemset(dev_Hz1, 0, sizeof(Hz1));
	}
}
//
// flag == 0 CAE CBE RAE RBE CPH CQH RAH RBH k*_E* k*_H* source
// flag == 1 CAE CBE RAE RBE CPH CQH RAH RBH k*_E* k*_H* source
// Upload all host coefficient tables, k-profiles, and the source waveform
// to their device mirrors.  Every device pointer follows the dev_<host name>
// convention, so a single token-pasting helper covers them all.  When pianYi
// is set, the previously recorded observation matrix E_obs is uploaded too.
void gpu_memory_copy(bool pianYi)
{
	hipError_t cudaStatus;
	// Copy host global array <sym> into dev_<sym> and report any failure.
#define H2D(sym) \
	do { \
		cudaStatus = hipMemcpy(dev_##sym, sym, sizeof(sym), hipMemcpyHostToDevice); \
		if (cudaStatus != hipSuccess) { printf("hipMemcpy failed!"); } \
	} while (0)
	// E-field coefficients and boundary tables.
	H2D(CAEx);  H2D(CBEx);  H2D(RAEyz); H2D(RBEyz); H2D(RAEzy); H2D(RBEzy);
	H2D(CAEy);  H2D(CBEy);  H2D(RAExz); H2D(RBExz); H2D(RAEzx); H2D(RBEzx);
	H2D(CAEz);  H2D(CBEz);  H2D(RAExy); H2D(RBExy); H2D(RAEyx); H2D(RBEyx);
	// H-field coefficients and boundary tables.
	H2D(CPHx);  H2D(CQHx);  H2D(RAHyz); H2D(RBHyz); H2D(RAHzy); H2D(RBHzy);
	H2D(CPHy);  H2D(CQHy);  H2D(RAHxz); H2D(RBHxz); H2D(RAHzx); H2D(RBHzx);
	H2D(CPHz);  H2D(CQHz);  H2D(RAHxy); H2D(RBHxy); H2D(RAHyx); H2D(RBHyx);
	// k-profiles.
	H2D(kx_Ey); H2D(kx_Ez); H2D(ky_Ex); H2D(ky_Ez); H2D(kz_Ex); H2D(kz_Ey);
	H2D(kx_Hy); H2D(kx_Hz); H2D(ky_Hx); H2D(ky_Hz); H2D(kz_Hx); H2D(kz_Hy);
	// Source waveform.
	H2D(source);
	if (pianYi)
	{
		H2D(E_obs);
	}
#undef H2D
}
//
/*
 * Release every device allocation made by gpu_memory_malloc().
 *
 * Fixes vs. the previous version:
 *   - removed the hipFree() calls on fswzx/fswzy/fswzz/jswzx/jswzy/jswzz:
 *     those are host-side index arrays (filled by read_int() and
 *     dereferenced on the host, e.g. fswzx[i]), never hipMalloc'd, so
 *     freeing them was an invalid-pointer error;
 *   - added the missing frees of dev_Ex1..dev_Hz1, which are allocated
 *     under pianYi in gpu_memory_malloc() but were previously leaked.
 */
void gpu_memory_free(bool pianYi)
{
	// Field state and auxiliary arrays.
	hipFree(dev_Ex);
	hipFree(dev_Ey);
	hipFree(dev_Ez);
	hipFree(dev_UEyz);
	hipFree(dev_UEzy);
	hipFree(dev_UExz);
	hipFree(dev_UEzx);
	hipFree(dev_UExy);
	hipFree(dev_UEyx);
	hipFree(dev_Hx);
	hipFree(dev_Hy);
	hipFree(dev_Hz);
	hipFree(dev_UHyz);
	hipFree(dev_UHzy);
	hipFree(dev_UHxz);
	hipFree(dev_UHzx);
	hipFree(dev_UHxy);
	hipFree(dev_UHyx);
	// Coefficient tables.
	hipFree(dev_CAEx);
	hipFree(dev_CAEy);
	hipFree(dev_CAEz);
	hipFree(dev_CBEx);
	hipFree(dev_CBEy);
	hipFree(dev_CBEz);
	hipFree(dev_RAEyz);
	hipFree(dev_RAEzy);
	hipFree(dev_RAEzx);
	hipFree(dev_RAExz);
	hipFree(dev_RAExy);
	hipFree(dev_RAEyx);
	hipFree(dev_RBEyz);
	hipFree(dev_RBEzy);
	hipFree(dev_RBEzx);
	hipFree(dev_RBExz);
	hipFree(dev_RBExy);
	hipFree(dev_RBEyx);
	hipFree(dev_CPHx);
	hipFree(dev_CQHx);
	hipFree(dev_CPHy);
	hipFree(dev_CQHy);
	hipFree(dev_CPHz);
	hipFree(dev_CQHz);
	hipFree(dev_RAHyz);
	hipFree(dev_RAHzy);
	hipFree(dev_RAHzx);
	hipFree(dev_RAHxz);
	hipFree(dev_RAHxy);
	hipFree(dev_RAHyx);
	hipFree(dev_RBHyz);
	hipFree(dev_RBHzy);
	hipFree(dev_RBHzx);
	hipFree(dev_RBHxz);
	hipFree(dev_RBHxy);
	hipFree(dev_RBHyx);
	// NOTE: fswz*/jswz* intentionally NOT freed here — they are host arrays.
	// Observation / probe / source buffers.
	hipFree(dev_E_obs);
	hipFree(dev_V);
	hipFree(dev_source);
	// k-profiles.
	hipFree(dev_kx_Ey);
	hipFree(dev_kx_Ez);
	hipFree(dev_ky_Ex);
	hipFree(dev_ky_Ez);
	hipFree(dev_kz_Ex);
	hipFree(dev_kz_Ey);
	hipFree(dev_kx_Hy);
	hipFree(dev_kx_Hz);
	hipFree(dev_ky_Hx);
	hipFree(dev_ky_Hz);
	hipFree(dev_kz_Hx);
	hipFree(dev_kz_Hy);
	if (pianYi)
	{
		// Boundary history buffers.
		hipFree(dev_Ex_zheng_1);
		hipFree(dev_Ex_zheng_2);
		hipFree(dev_Ex_zheng_3);
		hipFree(dev_Ey_zheng_1);
		hipFree(dev_Ey_zheng_2);
		hipFree(dev_Ey_zheng_3);
		hipFree(dev_Ez_zheng_1);
		hipFree(dev_Ez_zheng_2);
		hipFree(dev_Ez_zheng_3);
		hipFree(dev_Hx_zheng_1);
		hipFree(dev_Hx_zheng_2);
		hipFree(dev_Hx_zheng_3);
		hipFree(dev_Hy_zheng_1);
		hipFree(dev_Hy_zheng_2);
		hipFree(dev_Hy_zheng_3);
		hipFree(dev_Hz_zheng_1);
		hipFree(dev_Hz_zheng_2);
		hipFree(dev_Hz_zheng_3);
		hipFree(dev_Ex_zheng_last);
		hipFree(dev_Ey_zheng_last);
		hipFree(dev_Ez_zheng_last);
		hipFree(dev_Hx_zheng_last);
		hipFree(dev_Hy_zheng_last);
		hipFree(dev_Hz_zheng_last);
		hipFree(dev_fan);
		hipFree(dev_huanyuan);
		hipFree(dev_ns);
		hipFree(dev_zv);
		hipFree(dev_fv);
		// Previously leaked: snapshot fields allocated under pianYi.
		hipFree(dev_Ex1);
		hipFree(dev_Ey1);
		hipFree(dev_Ez1);
		hipFree(dev_Hx1);
		hipFree(dev_Hy1);
		hipFree(dev_Hz1);
	}
}
// gpuUH H UE E
// One complete FDTD update step of the forward ("zhengyan") simulation.
// Launch order is the contract: the auxiliary UH** terms are refreshed from
// the current E fields, then the H components are advanced, then the UE**
// terms are refreshed from the new H fields, then the E components are
// advanced.  All launches go to the default stream, so stream ordering
// guarantees each kernel sees its predecessors' results.
// NOTE(review): the RA/RB pairs feeding the U-updates appear to be CPML
// (absorbing boundary) recursion coefficients — confirm against the setup code.
void zheng_yan()
{
hipError_t cudaStatus = hipSuccess;
// Auxiliary terms consumed by the H update (one per cross-derivative).
gpu_UHyz << < gridUHyz, blockUHyz >> > (dev_UHyz, dev_RBHyz, dev_RAHyz, dev_Ez);
gpu_UHzy << < gridUHzy, blockUHzy >> > (dev_UHzy, dev_RBHzy, dev_RAHzy, dev_Ey);
gpu_UHxy << < gridUHxy, blockUHxy >> > (dev_UHxy, dev_RBHxy, dev_RAHxy, dev_Ey);
gpu_UHxz << < gridUHxz, blockUHxz >> > (dev_UHxz, dev_RBHxz, dev_RAHxz, dev_Ez);
gpu_UHyx << < gridUHyx, blockUHyx >> > (dev_UHyx, dev_RBHyx, dev_RAHyx, dev_Ex);
gpu_UHzx << < gridUHzx, blockUHzx >> > (dev_UHzx, dev_RBHzx, dev_RAHzx, dev_Ex);
// Magnetic-field half step: each component reads the two E components of
// its curl plus the matching auxiliary terms.
gpu_Hx << < gridHx, blockHx >> > (dev_Hx, dev_CPHx, dev_CQHx, dev_ky_Hx, dev_kz_Hx, dev_Ez, dev_Ey, dev_UHyz, dev_UHzy);
gpu_Hy << < gridHy, blockHy >> > (dev_Hy, dev_CPHy, dev_CQHy, dev_kz_Hy, dev_kx_Hy, dev_Ex, dev_Ez, dev_UHzx, dev_UHxz);
gpu_Hz << < gridHz, blockHz >> > (dev_Hz, dev_CPHz, dev_CQHz, dev_kx_Hz, dev_ky_Hz, dev_Ey, dev_Ex, dev_UHxy, dev_UHyx);
// Auxiliary terms consumed by the E update, built from the new H fields.
gpu_UExy << < gridUExy, blockUExy >> > (dev_UExy, dev_RBExy, dev_RAExy, dev_Hy);
gpu_UExz << < gridUExz, blockUExz >> > (dev_UExz, dev_RBExz, dev_RAExz, dev_Hz);
gpu_UEyx << < gridUEyx, blockUEyx >> > (dev_UEyx, dev_RBEyx, dev_RAEyx, dev_Hx);
gpu_UEyz << < gridUEyz, blockUEyz >> > (dev_UEyz, dev_RBEyz, dev_RAEyz, dev_Hz);
gpu_UEzx << < gridUEzx, blockUEzx >> > (dev_UEzx, dev_RBEzx, dev_RAEzx, dev_Hx);
gpu_UEzy << < gridUEzy, blockUEzy >> > (dev_UEzy, dev_RBEzy, dev_RAEzy, dev_Hy);
// Electric-field half step.
gpu_Ex << < gridEx, blockEx >> > (dev_Ex, dev_CAEx, dev_CBEx, dev_ky_Ex, dev_kz_Ex, dev_Hz, dev_Hy, dev_UEyz, dev_UEzy);
gpu_Ey << < gridEy, blockEy >> > (dev_Ey, dev_CAEy, dev_CBEy, dev_kz_Ey, dev_kx_Ey, dev_Hx, dev_Hz, dev_UEzx, dev_UExz);
gpu_Ez << < gridEz, blockEz >> > (dev_Ez, dev_CAEz, dev_CBEz, dev_kx_Ez, dev_ky_Ez, dev_Hy, dev_Hx, dev_UExy, dev_UEyx);
// Wait for the step to finish so asynchronous kernel faults surface here.
hipDeviceSynchronize();
// ?
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess)
{
printf("Zhengyan Calc Failed: %s\n", hipGetErrorString(cudaStatus));
}
}
/*
 * Forward modelling pass ("calc 1"): for every source position i, clear the
 * field state, run `it` FDTD time steps, inject source(j) into Ex at the
 * source cell, and record Ex at the receiver cell into E_obs(j,i).
 * Results are written to disk via print_E_obs().
 *
 * Fix: removed the hipDeviceSynchronize() after each per-step memcpy —
 * zheng_yan() already synchronizes once per step, device-to-device copies on
 * the default stream are ordered with the kernel launches, and the
 * device-to-host hipMemcpy is blocking.  Syncing three extra times per time
 * step only serialized the host against work that was already ordered.
 */
hipError_t gpu_parallel_one()
{
	hipError_t cudaStatus = hipSuccess;
	int i, j;
	for (i = 0; i < szfsw; i++)
	{
		gpu_memory_set_zero(0); // flag == 0: clear fields/U-arrays plus V and E_obs
		for (j = 0; j < it; j++)
		{
			// Progress report every 50 steps.
			if (j % 50 == 0)
			{
				printf("i = %3d / %d, j = %4d / %d\n", i, szfsw, j, it);
			}
			// matlab: Ex(fswzx(i),fswzy(i),fswzz(i))=source(j);
			// fswz* hold 1-based MATLAB indices; Ex is nx x (ny+1) x (nz+1), row-major.
			int idxEx = (fswzx[i] - 1) * (ny + 1) * (nz + 1) + (fswzy[i] - 1) * (nz + 1) + (fswzz[i] - 1);
			cudaStatus = hipMemcpy(&(dev_Ex[idxEx]), &(dev_source[j]), sizeof(float), hipMemcpyDeviceToDevice);
			if (cudaStatus != hipSuccess) { printf("source --> Ex hipMemcpy failed: %s\n", hipGetErrorString(cudaStatus)); return cudaStatus; };
			// One FDTD step (zheng_yan synchronizes and checks kernel errors).
			zheng_yan();
			// matlab: V(j)=Ex(jswzx(i), jswzy(i), jswzz(i));
			idxEx = (jswzx[i] - 1) * (ny + 1) * (nz + 1) + (jswzy[i] - 1) * (nz + 1) + (jswzz[i] - 1);
			cudaStatus = hipMemcpy(&(dev_V[j]), &(dev_Ex[idxEx]), sizeof(float), hipMemcpyDeviceToDevice);
			if (cudaStatus != hipSuccess) { printf("Ex --> V hipMemcpy failed: %s\n", hipGetErrorString(cudaStatus)); return cudaStatus; };
			// matlab: E_obs(j,i) = V(j)   (blocking D2H copy, one float per step)
			cudaStatus = hipMemcpy(&(E_obs[j][i]), &(dev_V[j]), sizeof(float), hipMemcpyDeviceToHost);
			if (cudaStatus != hipSuccess) { printf("V --> E_obs hipMemcpy failed: %s\n", hipGetErrorString(cudaStatus)); return cudaStatus; };
		}
	}
	hipDeviceSynchronize();
	printf("finish calc 1 !\n");
	// Persist the observation matrix.
	print_E_obs();
	return cudaStatus;
}
hipError_t gpu_parallel_two()
{
    // Forward + back-propagation pass (offset/imaging run).
    // For every source position this does:
    //   part one: forward simulation, saving the boundary-shell snapshots
    //             (E*_zheng_1/2/3 per step) and the final volume (E*_zheng_last);
    //   part two: reversed-time loop that re-injects the observed field, restores
    //             the forward field from the snapshots, and accumulates the
    //             pointwise products ns (fan*huanyuan), zv (huanyuan^2) and
    //             fv (fan^2) -- presumably a cross-correlation imaging condition
    //             (TODO confirm against the matlab reference).
    // Returns the first HIP error encountered, or hipSuccess.
    hipError_t cudaStatus = hipSuccess;
    // Zero the device-side accumulators before summing over all sources.
    // NOTE(review): sizeof(ns/zv/fv) assumes the host symbols are true arrays
    // (full byte count), not pointers -- confirm at their declaration site.
    hipMemset(dev_ns, 0, sizeof(ns));
    hipMemset(dev_zv, 0, sizeof(zv));
    hipMemset(dev_fv, 0, sizeof(fv));
    cudaStatus = hipGetLastError();
    if (cudaStatus != hipSuccess)
    {
        printf("ns&zv&fv hipMemset Failed: %s\n", hipGetErrorString(cudaStatus));
        return cudaStatus;
    }
    int i, j;
    for (i = 0; i < szfsw; i++)
    {
        // --------------------- part one ---------------------
        // flag == 1 clears E*, UE**, H*, UH**, V, the snapshot buffers
        // (E*_zheng_*, H*_zheng_*, *_zheng_last), fan and huanyuan.
        gpu_memory_set_zero(1);
        hipDeviceSynchronize();
        for (j = 0; j < it; j++)
        {
            // Progress report every 50 steps.
            if (j % 50 == 0) { printf("i = %3d / %d, j = %4d / %d\n", i, szfsw, j, it); }
            // matlab: Ex(fswzx(i), fswzy(i), fswzz(i)) = source(j);
            int idxEx = (fswzx[i] - 1) * (ny + 1) * (nz + 1) + (fswzy[i] - 1) * (nz + 1) + (fswzz[i] - 1);
            cudaStatus = hipMemcpy(&(dev_Ex[idxEx]), &(dev_source[j]), sizeof(float), hipMemcpyDeviceToDevice);
            hipDeviceSynchronize();
            if (cudaStatus != hipSuccess)
            {
                printf("source --> Ex hipMemcpy failed: %s\n", hipGetErrorString(cudaStatus));
                return cudaStatus;
            }
            // One full field update on the GPU.
            zheng_yan();
            // Snapshot the three boundary shells for this time step, and
            // (continuously overwritten) the full volume for the last step.
            gpu_zheng_1 << <grid_zheng_1, block_zheng_1 >> > (
                dev_Ex_zheng_1, dev_Ey_zheng_1, dev_Ez_zheng_1,
                dev_Hx_zheng_1, dev_Hy_zheng_1, dev_Hz_zheng_1,
                dev_Ex, dev_Ey, dev_Ez,
                dev_Hx, dev_Hy, dev_Hz,
                j);
            gpu_zheng_2 << <grid_zheng_2, block_zheng_2 >> > (
                dev_Ex_zheng_2, dev_Ey_zheng_2, dev_Ez_zheng_2,
                dev_Hx_zheng_2, dev_Hy_zheng_2, dev_Hz_zheng_2,
                dev_Ex, dev_Ey, dev_Ez,
                dev_Hx, dev_Hy, dev_Hz,
                j);
            gpu_zheng_3 << <grid_zheng_3, block_zheng_3 >> > (
                dev_Ex_zheng_3, dev_Ey_zheng_3, dev_Ez_zheng_3,
                dev_Hx_zheng_3, dev_Hy_zheng_3, dev_Hz_zheng_3,
                dev_Ex, dev_Ey, dev_Ez,
                dev_Hx, dev_Hy, dev_Hz,
                j);
            gpu_zheng_last << <grid_zheng_last, block_zheng_last >> > (
                dev_Ex_zheng_last, dev_Ey_zheng_last, dev_Ez_zheng_last,
                dev_Hx_zheng_last, dev_Hy_zheng_last, dev_Hz_zheng_last,
                dev_Ex, dev_Ey, dev_Ez,
                dev_Hx, dev_Hy, dev_Hz);
            hipDeviceSynchronize();
        }
        cudaStatus = hipGetLastError();
        printf("--------------------- part one --------------------- : %s\n", hipGetErrorString(cudaStatus));
        // --------------------- part two ---------------------
        // flag == 2: clear the working fields for the reversed-time run.
        gpu_memory_set_zero(2);
        for (j = it - 1; j >= 0; j--)
        {
            if (j % 50 == 0) { printf("i = %3d / %d, j = %4d / %d\n", i, szfsw, j, it); }
            // matlab: Ex(fswzx(i), fswzy(i), fswzz(i)) = E_obs(j, i);
            int idxEx = (fswzx[i] - 1) * (ny + 1) * (nz + 1) + (fswzy[i] - 1) * (nz + 1) + (fswzz[i] - 1);
            int idxE_obs = j * szfsw + i;
            cudaStatus = hipMemcpy(&(dev_Ex[idxEx]), &(dev_E_obs[idxE_obs]), sizeof(float), hipMemcpyDeviceToDevice);
            hipDeviceSynchronize();
            if (cudaStatus != hipSuccess)
            {
                printf("E_obs --> Ex hipMemcpy failed: %s\n", hipGetErrorString(cudaStatus));
                return cudaStatus;
            }
            // Field update driven by the observed data.
            zheng_yan();
            hipDeviceSynchronize();
            // matlab: fan=Ex(npml+1:nx-npml,npml+1:ny-npml,npml+1:nz-npml);
            gpu_fan_huanyuan << <grid_fan_huanyuan, block_fan_huanyuan >> > (dev_fan, dev_Ex);
            hipDeviceSynchronize();
            if (j == it - 1)
            {
                // First reversed step: restore the full final-step volume.
                gpu_back_zheng_last << <grid_zheng_last, block_zheng_last >> > (
                    dev_Ex_zheng_last, dev_Ey_zheng_last, dev_Ez_zheng_last,
                    dev_Hx_zheng_last, dev_Hy_zheng_last, dev_Hz_zheng_last,
                    dev_Ex1, dev_Ey1, dev_Ez1,
                    dev_Hx1, dev_Hy1, dev_Hz1);
                hipDeviceSynchronize();
            }
            else // j < it - 1: restore boundary shells, then step the *1 fields backwards
            {
                gpu_back_zheng_1 << <grid_zheng_1, block_zheng_1 >> > (
                    dev_Ex_zheng_1, dev_Ey_zheng_1, dev_Ez_zheng_1,
                    dev_Hx_zheng_1, dev_Hy_zheng_1, dev_Hz_zheng_1,
                    dev_Ex1, dev_Ey1, dev_Ez1,
                    dev_Hx1, dev_Hy1, dev_Hz1,
                    j);
                gpu_back_zheng_2 << <grid_zheng_2, block_zheng_2 >> > (
                    dev_Ex_zheng_2, dev_Ey_zheng_2, dev_Ez_zheng_2,
                    dev_Hx_zheng_2, dev_Hy_zheng_2, dev_Hz_zheng_2,
                    dev_Ex1, dev_Ey1, dev_Ez1,
                    dev_Hx1, dev_Hy1, dev_Hz1,
                    j);
                gpu_back_zheng_3 << <grid_zheng_3, block_zheng_3 >> > (
                    dev_Ex_zheng_3, dev_Ey_zheng_3, dev_Ez_zheng_3,
                    dev_Hx_zheng_3, dev_Hy_zheng_3, dev_Hz_zheng_3,
                    dev_Ex1, dev_Ey1, dev_Ez1,
                    dev_Hx1, dev_Hy1, dev_Hz1,
                    j);
                hipDeviceSynchronize();
                // matlab: Ex1(fswzx(i), fswzy(i), fswzz(i)) = source(j);
                int idxEx1 = (fswzx[i] - 1) * (ny + 1) * (nz + 1) + (fswzy[i] - 1) * (nz + 1) + (fswzz[i] - 1);
                cudaStatus = hipMemcpy(&(dev_Ex1[idxEx1]), &(dev_source[j]), sizeof(float), hipMemcpyDeviceToDevice);
                if (cudaStatus != hipSuccess)
                {
                    printf("source --> Ex1 hipMemcpy failed: %s\n", hipGetErrorString(cudaStatus));
                    return cudaStatus;
                }
                gpu_H1 << <grid_HE1, block_HE1 >> > (
                    dev_Hx1, dev_Hy1, dev_Hz1,
                    dev_Ex1, dev_Ey1, dev_Ez1,
                    dev_CPHx, dev_CPHy, dev_CPHz,
                    dev_CQHx, dev_CQHy, dev_CQHz);
                gpu_E1 << <grid_HE1, block_HE1 >> > (
                    dev_Hx1, dev_Hy1, dev_Hz1,
                    dev_Ex1, dev_Ey1, dev_Ez1,
                    dev_CAEx, dev_CAEy, dev_CAEz,
                    dev_CBEx, dev_CBEy, dev_CBEz);
            }
            // matlab: huanyuan=Ex1(npml+1:nx-npml,npml+1:ny-npml,npml+1:nz-npml);
            gpu_fan_huanyuan << <grid_fan_huanyuan, block_fan_huanyuan >> > (dev_huanyuan, dev_Ex1);
            // Accumulate the pointwise products into the three volumes.
            gpu_nzf << <grid_nzf, block_nzf >> > (dev_ns, dev_huanyuan, dev_fan);
            gpu_nzf << <grid_nzf, block_nzf >> > (dev_zv, dev_huanyuan, dev_huanyuan);
            gpu_nzf << <grid_nzf, block_nzf >> > (dev_fv, dev_fan, dev_fan);
            hipDeviceSynchronize();
        }
        cudaStatus = hipGetLastError();
        printf("--------------------- part two --------------------- : %s\n", hipGetErrorString(cudaStatus));
    }
    // Copy the ns / fv / zv accumulators back to the host.
    cudaStatus = hipMemcpy(ns, dev_ns, sizeof(ns), hipMemcpyDeviceToHost);
    if (cudaStatus != hipSuccess)
    {
        printf("dev_ns --> ns hipMemcpy failed: %s\n", hipGetErrorString(cudaStatus));
        return cudaStatus;
    }
    cudaStatus = hipMemcpy(fv, dev_fv, sizeof(fv), hipMemcpyDeviceToHost);
    if (cudaStatus != hipSuccess)
    {
        printf("dev_fv --> fv hipMemcpy failed: %s\n", hipGetErrorString(cudaStatus));
        return cudaStatus;
    }
    cudaStatus = hipMemcpy(zv, dev_zv, sizeof(zv), hipMemcpyDeviceToHost);
    if (cudaStatus != hipSuccess)
    {
        // BUGFIX: message previously said "dev_zv --> ns" (copy-paste of the ns branch).
        printf("dev_zv --> zv hipMemcpy failed: %s\n", hipGetErrorString(cudaStatus));
        return cudaStatus;
    }
    printf("finish calc 2!\n");
    print_nzf("nzf/ns.txt", (float*)ns, nx - 2 * npml, ny - 2 * npml, nz - 2 * npml);
    print_nzf("nzf/fv.txt", (float*)fv, nx - 2 * npml, ny - 2 * npml, nz - 2 * npml);
    print_nzf("nzf/zv.txt", (float*)zv, nx - 2 * npml, ny - 2 * npml, nz - 2 * npml);
    return cudaStatus;
}
void gpu_parallel(bool pianYi)
{
    // Dispatch the simulation:
    //   pianYi == true  -> gpu_parallel_two (forward + back-propagation pass)
    //   pianYi == false -> gpu_parallel_one (forward-only pass)
    // and report whether the chosen pass completed without a HIP error.
    if (pianYi)
    {
        if (gpu_parallel_two() != hipSuccess)
        {
            printf("gpu_parallel_two failed!");
        }
        else
        {
            printf("gpu_parallel_two success!\n");
        }
    }
    else
    {
        if (gpu_parallel_one() != hipSuccess)
        {
            printf("gpu_parallel_one failed!");
        }
        else
        {
            printf("gpu_parallel_one success!\n");
        }
    }
}
/************************************************************************************
 * Program entry point
 ************************************************************************************/
int main()
{
    // Move into the data directory before reading inputs.
    // (Linux build; on Windows use _chdir/_getcwd instead.)
    if (chdir(path) != 0)
    {
        printf("chdir failed: cannot enter data directory!\n");
        return 1;
    }
    char str[80];
    // getcwd can fail (e.g. the path is longer than the buffer) and return NULL;
    // passing NULL to printf's %s is undefined behavior, so guard it.
    const char *cwd = getcwd(str, sizeof(str));
    printf("Current Dir: %s \n", (cwd != NULL) ? cwd : "(unknown)");
    // Load the matlab-exported input data from text files.
    read_data_from_txt(isPianYi);
    printf("Read Data From Txt OK ! \n");
    // Select the GPU.
    hipError_t cudaStatus = hipSetDevice(cudaDevice);
    if (cudaStatus != hipSuccess) { printf("hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); return 1; }
    else { printf("hipSetDevice success!\n"); }
    // Allocate device buffers and upload the host-side data.
    gpu_memory_malloc(isPianYi);
    gpu_memory_copy(isPianYi);
    // Run the simulation (forward only, or forward + back-propagation).
    gpu_parallel(isPianYi);
    // Release device buffers.
    gpu_memory_free(isPianYi);
    // Reset the GPU so the process exits cleanly.
    cudaStatus = hipDeviceReset();
    if (cudaStatus != hipSuccess) { printf("hipDeviceReset failed!"); return 1; }
    // Release host-side buffers.
    freeMemory();
    return 0;
}
* Author: Tao Rui
* 版本: V1.0 单卡,Linux版
* 说明:
* 计算第二部分的并行。
************************************************************************************/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <math.h>
#include <string.h>
#include "unistd.h"
#include "global_variables.cpp"
#include <unistd.h> //linux
//#include <direct.h> //windows
__global__ void dev_matrix(float *A, int xdim, int ydim, int zdim)
{
int i = blockIdx.x;
int j = blockIdx.y;
int k = threadIdx.x;
int idx = i * ydim * zdim + j * zdim + k;
if(A[idx] <=-1e-12 || A[idx] >=1e-12)
{
printf("[%d][%d][%d] = %e\n",i, j, k, A[idx]);
}
}
void print_dev_matrix(float *A, int xdim, int ydim, int zdim)
{
dim3 gridma(xdim, ydim);
dim3 blockma(zdim);
cudaError_t cudaStatus = cudaSuccess;
dev_matrix<<<gridma, blockma>>>(A, xdim, ydim, zdim);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess)
{
printf("dev_print Failed: %s\n", cudaGetErrorString(cudaStatus));
}
cudaDeviceSynchronize();
}
/************************************************************************************
* GPU计算单个矩阵的函数
************************************************************************************/
dim3 gridUHyz(npml, nx - 1);
dim3 blockUHyz(nz);
__global__ void gpu_UHyz(float *UHyz, float *RBHyz, float *RAHyz, float *Ez)
{
/*
in0 UHyz nx+1 ny nz
in1 RBHyz nx-1 2*npml nz
in2 RAHyz nx-1 2*npml nz
in3 Ez nx+1 ny+1 nz
UHyz = UHyz * RBHyz + RAHyz * (Ez - Ez) / dy
运算块大小 nx-1 * npml * nz
UHyz由5个矩阵相乘或相加得来。
y维分为了两块
UHyz(2:nx, [1:npml ny-npml+1:ny], :)=RBHyz .* UHyz(2:nx, [1:npml ny-npml+1:ny], :)...
+RAHyz ./ dy .* (Ez(2:nx, [2:npml+1 ny-npml+2:ny+1], :) - Ez(2:nx, [1:npml ny-npml+1:ny], :));
*/
int ix = blockIdx.y; // ix in [0, nx - 1)
int iy = blockIdx.x; // iy in [0, npml)
int iz = threadIdx.x; // iz in [0, nz)
int lid0 = (ix + 1)*ny*nz + iy * nz + iz; // checked!
int rid0 = (ix + 1)*ny*nz + (iy + ny - npml) * nz + iz; //checked!
int lid1 = ix * (2 * npml)*nz + iy * nz + iz; // checked!
int rid1 = ix * (2 * npml)*nz + (iy + npml) * nz + iz; // checked!
int lid2 = lid1; // checked!
int rid2 = rid1; // checked!
int lid3 = (ix + 1)*(ny + 1)*nz + (iy + 1)*nz + iz; // checked!
int rid3 = (ix + 1)*(ny + 1)*nz + (iy + ny - npml + 1)*nz + iz; // checked!
int lid4 = (ix + 1)*(ny + 1)*nz + iy * nz + iz; // checked!
int rid4 = (ix + 1)*(ny + 1)*nz + (iy + ny - npml)*nz + iz; // checked!
UHyz[lid0] = UHyz[lid0] * RBHyz[lid1] + RAHyz[lid2] * (Ez[lid3] - Ez[lid4]) / dy;
UHyz[rid0] = UHyz[rid0] * RBHyz[rid1] + RAHyz[rid2] * (Ez[rid3] - Ez[rid4]) / dy;
}
dim3 gridUHzy(nx - 1, ny);
dim3 blockUHzy(npml);
__global__ void gpu_UHzy(float *UHzy, float *RBHzy, float *RAHzy, float *Ey)
{
/*
in0 UHzy --size-- nx+1 ny nz
in1 RBHzy --size-- nx-1 ny 2*npml
in2 RAHzy --size-- nx-1 ny 2*npml
in3 Ey --size-- nx+1 ny nz+1
UHyz = UHyz * RBHyz + RAHyz * (Ez - Ez) / dy
运算块大小 nx-1 * ny * (5 *npml)
UHyz由5个矩阵相乘或相加得来。
z维分为了两块
UHzy(2:nx, :, [1:npml nz-npml+1:nz])=RBHzy.*UHzy(2:nx, :, [1:npml nz-npml+1:nz])
+RAHzy./dz.*(Ey(2:nx, :, [2:npml+1 nz-npml+2:nz+1])-Ey(2:nx, :, [1:npml nz-npml+1:nz]));
*/
int ix = blockIdx.x; // ix in [0, nx - 1)
int iy = blockIdx.y; // iy in [0, ny)
int iz = threadIdx.x; // ix in [0, npml)
int lid0 = (ix + 1) * ny * nz + iy * nz + iz; //checked!
int rid0 = (ix + 1) * ny * nz + iy * nz + iz + nz - npml; //checked!
int lid1 = ix * ny * (2 * npml) + iy * (2 * npml) + iz; //checked!
int rid1 = ix * ny * (2 * npml) + iy * (2 * npml) + iz + npml; //checked!
int lid2 = lid1;
int rid2 = rid1;
int lid4 = (ix + 1) * ny * (nz + 1) + iy * (nz + 1) + iz; //checked!
int rid4 = (ix + 1) * ny * (nz + 1) + iy * (nz + 1) + iz + nz - npml; //checked!
int lid3 = lid4 + 1;
int rid3 = rid4 + 1;
UHzy[lid0] = UHzy[lid0] * RBHzy[lid1] + RAHzy[lid2] * (Ey[lid3] - Ey[lid4]) / dz;
UHzy[rid0] = UHzy[rid0] * RBHzy[rid1] + RAHzy[rid2] * (Ey[rid3] - Ey[rid4]) / dz;
}
dim3 gridUHzx(nx, ny - 1);
dim3 blockUHzx(npml);
__global__ void gpu_UHzx(float *UHzx, float *RBHzx, float *RAHzx, float *Ex)
{
/*
in0 UHzx --size-- nx ny + 1 nz
in1 RBHzx --size-- nx ny - 1 2 * npml
in2 RAHzx --size-- nx ny - 1 2 * npml
in3 Ex --size-- nx ny + 1 nz + 1
UHzx = UHzx * RBHzx + RAHzx * (Ez - Ez) / dy
运算块大小 nx * ny - 1 * npml
UHzx由5个矩阵相乘或相加得来。
z维分为了两块 1:npml -npml:0
UHzx(:, 2:ny, [1:npml nz - npml + 1:nz])=RBHzx. * UHzx(:, 2:ny, [1:npml nz - npml + 1:nz])
+RAHzx./dz.*(Ex(:, 2:ny, [2:npml + 1 nz - npml + 2:nz + 1]) - Ex(:, 2:ny, [1:npml nz - npml + 1:nz]));
*/
int ix = blockIdx.x; // ix in [0, nx)
int iy = blockIdx.y; // iy in [0, ny - 1)
int iz = threadIdx.x; // iz in [0, npml)
int lid0 = ix * (ny + 1) * nz + (iy + 1) * nz + iz; // checked!
int rid0 = ix * (ny + 1) * nz + (iy + 1) * nz + iz + nz - npml; // checked!
int lid1 = ix * (ny - 1) * (2 * npml) + iy * (2 * npml) + iz; // checked!
int rid1 = ix * (ny - 1) * (2 * npml) + iy * (2 * npml) + iz + npml; // checked!
int lid2 = lid1;
int rid2 = rid1;
int lid4 = ix * (ny + 1) * (nz + 1) + (iy + 1) * (nz + 1) + iz; // checked!
int rid4 = ix * (ny + 1) * (nz + 1) + (iy + 1) * (nz + 1) + iz + nz - npml; // checked!
int lid3 = lid4 + 1;
int rid3 = rid4 + 1;
UHzx[lid0] = UHzx[lid0] * RBHzx[lid1] + RAHzx[lid2] * (Ex[lid3] - Ex[lid4]) / dz;
UHzx[rid0] = UHzx[rid0] * RBHzx[rid1] + RAHzx[rid2] * (Ex[rid3] - Ex[rid4]) / dz;
}
dim3 gridUHxz(npml, ny - 1);
dim3 blockUHxz(nz);
__global__ void gpu_UHxz(float *UHxz, float *RBHxz, float *RAHxz, float *Ez)
{
/*
in0 UHxz --size-- nx ny + 1 nz
in1 RBHxz --size-- 2*npml ny - 1 nz
in2 RAHxz --size-- 2*npml ny - 1 nz
in3 Ez --size-- nx + 1 ny + 1 nz
UHxz = UHxz * RBHxz + RAHxz * (Ez - Ez) / dx
运算块大小 npml * ny - 1 * nz
UHxz由5个矩阵相乘或相加得来。
x维分为了两块 1:npml -npml:0
UHxz([1:npml nx-npml+1:nx], 2:ny, :)=RBHxz.*UHxz([1:npml nx-npml+1:nx], 2:ny, :)...
+RAHxz./dx.*(Ez([2:npml+1 nx-npml+2:nx+1], 2:ny, :)-Ez([1:npml nx-npml+1:nx], 2:ny, :));
*/
int ix = blockIdx.x; // ix in [0, npml)
int iy = blockIdx.y; // iy in [0, ny - 1)
int iz = threadIdx.x; // iz in [0, nz)
int lid0 = ix * (ny + 1) * nz + (iy + 1) * nz + iz; // checked!
int rid0 = (ix + nx - npml) * (ny + 1) * nz + (iy + 1) * nz + iz; // checked!
int lid1 = ix * (ny - 1) * nz + iy * nz + iz; // checked!
int rid1 = (ix + npml) * (ny - 1) * nz + iy * nz + iz; // checked!
int lid2 = lid1;
int rid2 = rid1;
int lid4 = ix * (ny + 1) * nz + (iy + 1) * nz + iz; // checked!
int rid4 = (ix + nx - npml) * (ny + 1) * nz + (iy + 1) * nz + iz; // checked!
int lid3 = lid4 + (ny + 1) * nz;
int rid3 = rid4 + (ny + 1) * nz;
UHxz[lid0] = UHxz[lid0] * RBHxz[lid1] + RAHxz[lid2] * (Ez[lid3] - Ez[lid4]) / dx;
UHxz[rid0] = UHxz[rid0] * RBHxz[rid1] + RAHxz[rid2] * (Ez[rid3] - Ez[rid4]) / dx;
}
dim3 gridUHxy(npml, ny);
dim3 blockUHxy(nz - 1);
__global__ void gpu_UHxy(float *UHxy, float *RBHxy, float *RAHxy, float *Ey)
{
/*
in0 UHxy --size-- nx ny nz + 1
in1 RBHxy --size-- 2*npml ny nz - 1
in2 RAHxy --size-- 2*npml ny nz - 1
in3 EY --size-- nx + 1 ny nz + 1
UHxy = UHxy * RBHxy + RAHxy * (Ez - Ez) / dx
运算块大小 npml * ny * nz - 1
UHxy由5个矩阵相乘或相加得来。
x维分为了两块 1:npml -npml:0
UHxy([1:npml nx-npml+1:nx], :, 2:nz)=RBHxy.*UHxy([1:npml nx-npml+1:nx], :, 2:nz)...
+RAHxy./dx.*(Ey([2:npml+1 nx-npml+2:nx+1], :, 2:nz)-Ey([1:npml nx-npml+1:nx], :, 2:nz));
*/
int ix = blockIdx.x; // ix in [0, npml)
int iy = blockIdx.y; // iy in [0, ny)
int iz = threadIdx.x; // iz in [0, nz - 1)
int lid0 = ix * ny * (nz + 1) + iy * (nz + 1) + iz + 1; // checked!
int rid0 = (ix + nx - npml) * ny * (nz + 1) + iy * (nz + 1) + iz + 1; //checked
int lid1 = ix * ny * (nz - 1) + iy * (nz - 1) + iz; // checked!
int rid1 = (ix + npml) * ny * (nz - 1) + iy * (nz - 1) + iz; // checked!
int lid2 = lid1;
int rid2 = rid1;
int lid4 = ix * ny * (nz + 1) + iy * (nz + 1) + iz + 1; // checked!
int rid4 = (ix + nx - npml) * ny * (nz + 1) + iy * (nz + 1) + iz + 1; // checked!
int lid3 = lid4 + ny * (nz + 1);
int rid3 = rid4 + ny * (nz + 1);
UHxy[lid0] = UHxy[lid0] * RBHxy[lid1] + RAHxy[lid2] * (Ey[lid3] - Ey[lid4]) / dx;
UHxy[rid0] = UHxy[rid0] * RBHxy[rid1] + RAHxy[rid2] * (Ey[rid3] - Ey[rid4]) / dx;
}
dim3 gridUHyx(npml, nx);
dim3 blockUHyx(nz - 1);
__global__ void gpu_UHyx(float *UHyx, float *RBHyx, float *RAHyx, float *Ex)
{
/*
in0 UHyx nx ny nz + 1
in1 RBHyx nx 2*npml nz - 1
in2 RAHyx nx 2*npml nz - 1
in3 Ex nx ny + 1 nz + 1
UHyx = UHyx * RBHyx + RAHyx * (Ex - Ex) / dy
运算块大小 nx * npml * nz - 1
UHyx由5个矩阵相乘或相加得来。
y维分为了两块
UHyx(:, [1:npml ny-npml+1:ny], 2:nz)=RBHyx.*UHyx(:, [1:npml ny-npml+1:ny], 2:nz)...
+RAHyx./dy.*(Ex(:, [2:npml+1 ny-npml+2:ny+1], 2:nz)-Ex(:, [1:npml ny-npml+1:ny], 2:nz));
*/
int ix = blockIdx.y; // ix in [0, nx)
int iy = blockIdx.x; // iy in [0, npml)
int iz = threadIdx.x; // iz in [0, nz - 1)
int lid0 = ix * ny * (nz + 1) + iy * (nz + 1) + iz + 1; // checked!
int rid0 = ix * ny * (nz + 1) + (iy + ny - npml) * (nz + 1) + iz + 1; //checked!
int lid1 = ix * (2 * npml) * (nz - 1) + iy * (nz - 1) + iz; // checked!
int rid1 = ix * (2 * npml) * (nz - 1) + (iy + npml) * (nz - 1) + iz; // checked!
int lid2 = lid1; // checked!
int rid2 = rid1; // checked!
int lid4 = ix * (ny + 1) * (nz + 1) + iy * (nz + 1) + iz + 1; // checked!
int rid4 = ix * (ny + 1) * (nz + 1) + (iy + ny - npml) * (nz + 1) + iz + 1; // checked!
int lid3 = lid4 + (nz + 1); // checked!
int rid3 = rid4 + (nz + 1); // checked!
UHyx[lid0] = UHyx[lid0] * RBHyx[lid1] + RAHyx[lid2] * (Ex[lid3] - Ex[lid4]) / dy;
UHyx[rid0] = UHyx[rid0] * RBHyx[rid1] + RAHyx[rid2] * (Ex[rid3] - Ex[rid4]) / dy;
}
dim3 gridHx(nx - 1, ny);
dim3 blockHx(nz);
__global__ void gpu_Hx(float *Hx, float *CPHx, float *CQHx, float *ky_Hx, float *kz_Hx, float *Ez, float *Ey, float *UHyz, float *UHzy)
{
//
// * 运算块大小 nx - 1 * ny * nz
// * Hx(2:nx,:,:)
//
int ix = blockIdx.x + 1;
int iy = blockIdx.y;
int iz = threadIdx.x;
int idx = ix * ny * nz + iy * nz + iz;
int idxEz = ix * (ny + 1)*nz + iy * nz + iz;
int idxEy = ix * ny * (nz + 1) + iy * (nz + 1) + iz;
int deltaEz = nz;
int deltaEy = 1;
float CQH = CQHx[idx];
Hx[idx] = Hx[idx] * CPHx[idx]
- CQH / ky_Hx[idx] * (Ez[idxEz + deltaEz] - Ez[idxEz]) / dy
+ CQH / kz_Hx[idx] * (Ey[idxEy + deltaEy] - Ey[idxEy]) / dz
- CQH * UHyz[idx]
+ CQH * UHzy[idx];
}
dim3 gridHy(nx, ny - 1);
dim3 blockHy(nz);
__global__ void gpu_Hy(float *Hy, float *CPHy, float *CQHy, float *kz_Hy, float *kx_Hy, float *Ex, float *Ez, float *UHzx, float *UHxz)
{
//
// * 运算块大小 nx * ny -1 * nz
// * Hy(:,2:ny,:)
//
int ix = blockIdx.x;
int iy = blockIdx.y + 1;
int iz = threadIdx.x;
int idx = ix * (ny + 1)*nz + iy * nz + iz;
int idxEx = ix * (ny + 1)*(nz + 1) + iy * (nz + 1) + iz;
int idxEz = ix * (ny + 1)*nz + iy * nz + iz;
int deltaEx = 1;
int deltaEz = (ny + 1)*nz;
float CQH = CQHy[idx];
Hy[idx] = Hy[idx] * CPHy[idx]
- CQH / kz_Hy[idx] * (Ex[idxEx + deltaEx] - Ex[idxEx]) / dz
+ CQH / kx_Hy[idx] * (Ez[idxEz + deltaEz] - Ez[idxEz]) / dx
- CQH * UHzx[idx]
+ CQH * UHxz[idx];
}
dim3 gridHz(nx, ny);
dim3 blockHz(nz - 1);
__global__ void gpu_Hz(float *Hz, float *CPHz, float *CQHz, float *kx_Hz, float *ky_Hz, float *Ey, float *Ex, float *UHxy, float *UHyx)
{
//
// * 运算块大小 nx * ny * nz -1
// * Hz(:,;,2:nz)
// * Hz大小为nx ny nz+1
//
int ix = blockIdx.x;
int iy = blockIdx.y;
int iz = threadIdx.x + 1;
int idx = ix * ny * (nz + 1) + iy * (nz + 1) + iz;
int idxEy = ix * ny * (nz + 1) + iy * (nz + 1) + iz;
int idxEx = ix * (ny + 1) * (nz + 1) + iy * (nz + 1) + iz;
int deltaEy = ny * (nz + 1);
int deltaEx = nz + 1;
float CQH = CQHz[idx];
Hz[idx] = Hz[idx] * CPHz[idx]
- CQH / kx_Hz[idx] * (Ey[idxEy + deltaEy] - Ey[idxEy]) / dx
+ CQH / ky_Hz[idx] * (Ex[idxEx + deltaEx] - Ex[idxEx]) / dy
- CQH * UHxy[idx]
+ CQH * UHyx[idx];
}
dim3 gridUEyz(npml - 1, nx);
dim3 blockUEyz(nz - 1);
__global__ void gpu_UEyz(float *UEyz, float *RBEyz, float *RAEyz, float *Hz)
{
/*
dim3 blockUEyz(nz - 1);
dim3 gridUEyz(npml - 1, nx);
in0 UEyz nx ny + 1 nz + 1
in1 RBEyz nx 2*(npml-1) nz - 1
in2 RAEyz nx 2*(npml-1) nz - 1
in3 Hz nx ny nz + 1
运算块大小 nx * npml - 1 * nz - 1
UEyz(:, [2:npml ny-npml+2:ny], 2:nz)=RBEyz .* UEyz(:, [2:npml ny-npml+2:ny], 2:nz)...
+RAEyz ./ dy .* (Hz(:, [2:npml ny-npml+2:ny], 2:nz) - Hz(:, [1:npml-1 ny-npml+1:ny-1], 2:nz));
*/
int ix = blockIdx.y; // ix in [0, nx)
int iy = blockIdx.x; // iy in [0, npml - 1)
int iz = threadIdx.x; // iz in [0, nz - 1)
int lid0 = ix * (ny + 1) * (nz + 1) + (iy + 1) * (nz + 1) + (iz + 1); // checked!
int rid0 = ix * (ny + 1) * (nz + 1) + (iy + 1 + ny - npml) * (nz + 1) + (iz + 1); //checked!
int lid1 = ix * (2 * (npml - 1)) * (nz - 1) + iy * (nz - 1) + iz; // checked!
int rid1 = ix * (2 * (npml - 1)) * (nz - 1) + (iy + npml - 1) * (nz - 1) + iz; // checked!
int lid2 = lid1; // checked!
int rid2 = rid1; // checked!
int lid4 = ix * ny * (nz + 1) + iy * (nz + 1) + (iz + 1); // checked!
int rid4 = ix * ny * (nz + 1) + (iy + ny - npml) * (nz + 1) + (iz + 1); // checked!
int lid3 = lid4 + (nz + 1); // checked!
int rid3 = rid4 + (nz + 1); // checked!
UEyz[lid0] = UEyz[lid0] * RBEyz[lid1] + RAEyz[lid2] * (Hz[lid3] - Hz[lid4]) / dy;
UEyz[rid0] = UEyz[rid0] * RBEyz[rid1] + RAEyz[rid2] * (Hz[rid3] - Hz[rid4]) / dy;
}
dim3 gridUEyx(npml - 1, nx);
dim3 blockUEyx(nz - 1);
__global__ void gpu_UEyx(float *UEyx, float *RBEyx, float *RAEyx, float *Hx)
{
/*
dim3 blockUEyx(nz - 1);
dim3 gridUEyx(npml - 1, nx);
in0 UEyx nx + 1 ny + 1 nz
in1 RBEyx nx - 1 2*(npml-1) nz
in2 RAEyx nx - 1 2*(npml-1) nz
in3 Hx nx + 1 ny nz
运算块大小 nx * npml-1 * nz-1
UEyx(2:nx, [2:npml ny-npml+2:ny], :)=RBEyx .* UEyx(2:nx, [2:npml ny-npml+2:ny], :)...
+RAEyx ./ dy .* (Hx(2:nx, [2:npml ny-npml+2:ny], :) - Hx(2:nx, [1:npml-1 ny-npml+1:ny-1], :));
*/
int ix = blockIdx.y; // ix in [0, nx)
int iy = blockIdx.x; // iy in [0, npml - 1)
int iz = threadIdx.x; // iz in [0, nz - 1)
int lid0 = (ix + 1) * (ny + 1) * nz + (iy + 1) * nz + iz; // checked!
int rid0 = (ix + 1) * (ny + 1) * nz + (iy + 1 + ny - npml) * nz + iz; //checked!
int lid1 = ix * (2 * (npml - 1)) * nz + iy * nz + iz; // checked!
int rid1 = ix * (2 * (npml - 1)) * nz + (iy + npml - 1) * nz + iz; // checked!
int lid2 = lid1; // checked!
int rid2 = rid1; // checked!
int lid4 = (ix + 1) * ny * nz + iy * nz + iz; // checked!
int rid4 = (ix + 1) * ny * nz + (iy + ny - npml) * nz + iz; // checked!
int lid3 = lid4 + nz; // checked!
int rid3 = rid4 + nz; // checked!
UEyx[lid0] = UEyx[lid0] * RBEyx[lid1] + RAEyx[lid2] * (Hx[lid3] - Hx[lid4]) / dy;
UEyx[rid0] = UEyx[rid0] * RBEyx[rid1] + RAEyx[rid2] * (Hx[rid3] - Hx[rid4]) / dy;
}
dim3 gridUExy(npml - 1, ny - 1);
dim3 blockUExy(nz);
__global__ void gpu_UExy(float *UExy, float *RBExy, float *RAExy, float *Hy)
{
/*
dim3 blockUExy(nz);
dim3 gridUExy(npml - 1, ny - 1);
in0 UExy nx + 1 ny + 1 nz
in1 RBExy 2*(npml-1) ny - 1 nz
in2 RAExy 2*(npml-1) ny - 1 nz
in3 Hy nx ny + 1 nz
运算块大小 npml-1 * ny-1 * nz
UExy([2:npml nx-npml+2:nx], 2:ny, :)=RBExy .* UExy([2:npml nx-npml+2:nx], 2:ny, :)...
+RAExy ./ dx .* (Hy([2:npml nx-npml+2:nx], 2:ny, :) - Hy([1:npml-1 nx-npml+1:nx-1], 2:ny, :));
*/
int ix = blockIdx.x; // ix in [0, npml - 1)
int iy = blockIdx.y; // iy in [0, ny - 1)
int iz = threadIdx.x; // iz in [0, nz)
int lid0 = (ix + 1) * (ny + 1) * nz + (iy + 1) * nz + iz; // checked!
int rid0 = (ix + 1 + nx - npml) * (ny + 1) * nz + (iy + 1) * nz + iz; //checked!
int lid1 = ix * (ny - 1) * nz + iy * nz + iz; // checked!
int rid1 = (ix + npml - 1) * (ny - 1) * nz + iy * nz + iz; // checked!
int lid2 = lid1; // checked!
int rid2 = rid1; // checked!
int lid4 = ix * (ny + 1) * nz + (iy + 1) * nz + iz; // checked!
int rid4 = (ix + nx - npml) * (ny + 1) * nz + (iy + 1) * nz + iz; // checked!
int lid3 = lid4 + (ny + 1) * nz; // checked!
int rid3 = rid4 + (ny + 1) * nz; // checked!
UExy[lid0] = UExy[lid0] * RBExy[lid1] + RAExy[lid2] * (Hy[lid3] - Hy[lid4]) / dx;
UExy[rid0] = UExy[rid0] * RBExy[rid1] + RAExy[rid2] * (Hy[rid3] - Hy[rid4]) / dx;
}
dim3 gridUExz(npml - 1, ny);
dim3 blockUExz(nz - 1);
__global__ void gpu_UExz(float *UExz, float *RBExz, float *RAExz, float *Hz)
{
/*
dim3 blockUExz(nz - 1);
dim3 gridUExz(npml - 1, ny);
in0 UExz nx + 1 ny nz + 1
in1 RBExz 2*(npml-1) ny nz - 1
in2 RAExz 2*(npml-1) ny nz - 1
in3 Hz nx ny nz + 1
运算块大小 npml-1 * ny * nz-1
UExz([2:npml nx-npml+2:nx], :, 2:nz)=RBExz .* UExz([2:npml nx-npml+2:nx], :, 2:nz)...
+RAExz ./ dx .* (Hz([2:npml nx-npml+2:nx], :, 2:nz) - Hz([1:npml-1 nx-npml+1:nx-1], :, 2:nz));
*/
int ix = blockIdx.x; // ix in [0, npml - 1)
int iy = blockIdx.y; // iy in [0, ny)
int iz = threadIdx.x; // iz in [0, nz - 1)
int lid0 = (ix + 1) * ny * (nz + 1) + iy * (nz + 1) + (iz + 1); // checked!
int rid0 = (ix + 1 + nx - npml) * ny * (nz + 1) + iy * (nz + 1) + (iz + 1); //checked!
int lid1 = ix * ny * (nz - 1) + iy * (nz - 1) + iz; // checked!
int rid1 = (ix + npml - 1) * ny * (nz - 1) + iy * (nz - 1) + iz; // checked!
int lid2 = lid1; // checked!
int rid2 = rid1; // checked!
int lid4 = ix * ny * (nz + 1) + iy * (nz + 1) + (iz + 1); // checked!
int rid4 = (ix + nx - npml) * ny * (nz + 1) + iy * (nz + 1) + (iz + 1); // checked!
int lid3 = lid4 + ny * (nz + 1); // checked!
int rid3 = rid4 + ny * (nz + 1); // checked!
UExz[lid0] = UExz[lid0] * RBExz[lid1] + RAExz[lid2] * (Hz[lid3] - Hz[lid4]) / dx;
UExz[rid0] = UExz[rid0] * RBExz[rid1] + RAExz[rid2] * (Hz[rid3] - Hz[rid4]) / dx;
}
dim3 gridUEzx(nx - 1, ny);
dim3 blockUEzx(npml - 1);
__global__ void gpu_UEzx(float *UEzx, float *RBEzx, float *RAEzx, float *Hx)
{
/*
dim3 blockUEzx(npml - 1);
dim3 gridUEzx(nx - 1, ny);
in0 UEzx nx + 1 ny nz + 1
in1 RBEzx nx - 1 ny 2*(npml-1)
in2 RAEzx nx - 1 ny 2*(npml-1)
in3 Hx nx + 1 ny nz
运算块大小 nx-1 * ny * npml-1
UEzx(2:nx, :, [2:npml nz-npml+2:nz])=RBEzx .* UEzx(2:nx, :, [2:npml nz-npml+2:nz])...
+RAEzx ./ dz .* (Hx(2:nx, :, [2:npml nz-npml+2:nz]) - Hx(2:nx, :, [1:npml-1 nz-npml+1:nz-1]));
*/
int ix = blockIdx.x; // ix in [0, nx)
int iy = blockIdx.y; // iy in [0, npml - 1)
int iz = threadIdx.x; // iz in [0, nz)
int lid0 = (ix + 1) * ny * (nz + 1) + iy * (nz + 1) + (iz + 1); // checked!
int rid0 = (ix + 1) * ny * (nz + 1) + iy * (nz + 1) + (iz + 1 + nz - npml); //checked!
int lid1 = ix * ny * (2 * (npml - 1)) + iy * (2 * (npml - 1)) + iz; // checked!
int rid1 = ix * ny * (2 * (npml - 1)) + iy * (2 * (npml - 1)) + (iz + npml - 1); // checked!
int lid2 = lid1; // checked!
int rid2 = rid1; // checked!
int lid4 = (ix + 1) * ny * nz + iy * nz + iz; // checked!
int rid4 = (ix + 1) * ny * nz + iy * nz + (iz + nz - npml); // checked!
int lid3 = lid4 + 1; // checked!
int rid3 = rid4 + 1; // checked!
UEzx[lid0] = UEzx[lid0] * RBEzx[lid1] + RAEzx[lid2] * (Hx[lid3] - Hx[lid4]) / dz;
UEzx[rid0] = UEzx[rid0] * RBEzx[rid1] + RAEzx[rid2] * (Hx[rid3] - Hx[rid4]) / dz;
}
dim3 gridUEzy(nx, ny - 1);
dim3 blockUEzy(npml - 1);
__global__ void gpu_UEzy(float *UEzy, float *RBEzy, float *RAEzy, float *Hy)
{
/*
dim3 blockUEzy(npml - 1);
dim3 gridUEzy(nx, ny - 1);
in0 UEzy nx ny + 1 nz + 1
in1 RBEzy nx ny - 1 2*(npml-1)
in2 RAEzy nx ny - 1 2*(npml-1)
in3 Hy nx ny + 1 nz
运算块大小 nx * ny - 1 * npml-1
UEzy(:, 2:ny, [2:npml nz-npml+2:nz])=RBEzy.*UEzy(:, 2:ny, [2:npml nz-npml+2:nz])...
+RAEzy./dz.*(Hy(:, 2:ny, [2:npml nz-npml+2:nz])-Hy(:, 2:ny, [1:npml-1 nz-npml+1:nz-1]));
*/
int ix = blockIdx.x; // ix in [0, nx)
int iy = blockIdx.y; // iy in [0, npml - 1)
int iz = threadIdx.x; // iz in [0, nz)
int lid0 = ix * (ny + 1) * (nz + 1) + (iy + 1) * (nz + 1) + (iz + 1); // checked!
int rid0 = ix * (ny + 1) * (nz + 1) + (iy + 1) * (nz + 1) + (iz + 1 + nz - npml); //checked!
int lid1 = ix * (ny - 1) * (2 * (npml - 1)) + iy * (2 * (npml - 1)) + iz; // checked!
int rid1 = ix * (ny - 1) * (2 * (npml - 1)) + iy * (2 * (npml - 1)) + (iz + npml - 1); // checked!
int lid2 = lid1; // checked!
int rid2 = rid1; // checked!
int lid4 = ix * (ny + 1) * nz + (iy + 1) * nz + iz; // checked!
int rid4 = ix * (ny + 1) * nz + (iy + 1) * nz + (iz + nz - npml); // checked!
int lid3 = lid4 + 1; // checked!
int rid3 = rid4 + 1; // checked!
UEzy[lid0] = UEzy[lid0] * RBEzy[lid1] + RAEzy[lid2] * (Hy[lid3] - Hy[lid4]) / dz;
UEzy[rid0] = UEzy[rid0] * RBEzy[rid1] + RAEzy[rid2] * (Hy[rid3] - Hy[rid4]) / dz;
}
dim3 gridEx(nx, ny - 1);
dim3 blockEx(nz - 1);
__global__ void gpu_Ex(float *Ex, float *CAEx, float *CBEx, float *ky_Ex, float *kz_Ex, float *Hz, float *Hy, float *UEyz, float *UEzy)
{
//
// * dim3 blockEx(nz-1);
// * dim3 gridEx(nx, ny-1);
// * 运算块大小 nx * ny-1 * nz-1
// * Ex(:, 2:ny, 2:nz)
//
int ix = blockIdx.x; // ix in [0, nx)
int iy = blockIdx.y + 1; // iy in [1, ny)
int iz = threadIdx.x + 1; // iz in [1, nz)
int idx = ix * (ny + 1) * (nz + 1) + iy * (nz + 1) + iz;
int idxHz = ix * ny * (nz + 1) + iy * (nz + 1) + iz;
int idxHy = ix * (ny + 1)*nz + iy * nz + iz;
int deltaHz = nz + 1;
int deltaHy = 1;
float CBE = CBEx[idx];
Ex[idx] = Ex[idx] * CAEx[idx]
+ CBE / ky_Ex[idx] * (Hz[idxHz] - Hz[idxHz - deltaHz]) / dy
- CBE / kz_Ex[idx] * (Hy[idxHy] - Hy[idxHy - deltaHy]) / dz
+ CBE * UEyz[idx]
- CBE * UEzy[idx];
}
dim3 gridEy(nx - 1, ny);
dim3 blockEy(nz - 1);
__global__ void gpu_Ey(float *Ey, float *CAEy, float *CBEy, float *kz_Ey, float *kx_Ey, float *Hx, float *Hz, float *UEzx, float *UExz)
{
//
// * dim3 blockEy(nz-1);
// * dim3 gridEy(nx-1, ny);
// * 运算块大小 nx-1 * ny * nz-1
// * Ey(2:nx, :, 2:nz)
//
int ix = blockIdx.x + 1; // ix in [1, nx)
int iy = blockIdx.y; // iy in [0, ny)
int iz = threadIdx.x + 1; // iz in [1, nz)
int idx = ix * ny * (nz + 1) + iy * (nz + 1) + iz;
int idxHx = ix * ny * nz + iy * nz + iz;
int idxHz = ix * ny * (nz + 1) + iy * (nz + 1) + iz;
int deltaHx = 1;
int deltaHz = ny * (nz + 1);
float CBE = CBEy[idx];
Ey[idx] = Ey[idx] * CAEy[idx]
+ CBE / kz_Ey[idx] * (Hx[idxHx] - Hx[idxHx - deltaHx]) / dz
- CBE / kx_Ey[idx] * (Hz[idxHz] - Hz[idxHz - deltaHz]) / dx
+ CBE * UEzx[idx]
- CBE * UExz[idx];
}
dim3 gridEz(nx - 1, ny - 1);
dim3 blockEz(nz);
__global__ void gpu_Ez(float *Ez, float *CAEz, float *CBEz, float *kx_Ez, float *ky_Ez, float *Hy, float *Hx, float *UExy, float *UEyx)
{
//
// * dim3 blockEz(nz);
// * dim3 gridEz(nx-1, ny-1);
// * 运算块大小 nx-1 * ny-1 * nz
// * Ez(2:nx, 2:ny, :)
// * Ez大小为nx ny nz+1
//
int ix = blockIdx.x + 1; // ix in [1, nx)
int iy = blockIdx.y + 1; // iy in [1, ny)
int iz = threadIdx.x; // iz in [0, nz)
int idx = ix * (ny + 1) * nz + iy * nz + iz;
int idxHy = ix * (ny + 1) * nz + iy * nz + iz;
int idxHx = ix * ny * nz + iy * nz + iz;
int deltaHy = (ny + 1) * nz;
int deltaHx = nz;
float CBE = CBEz[idx];
Ez[idx] = Ez[idx] * CAEz[idx]
+ CBE / kx_Ez[idx] * (Hy[idxHy] - Hy[idxHy - deltaHy]) / dx
- CBE / ky_Ez[idx] * (Hx[idxHx] - Hx[idxHx - deltaHx]) / dy
+ CBE * UExy[idx]
- CBE * UEyx[idx];
}
dim3 grid_zheng_1(npmlc, ny - 2 * npml);
dim3 grid_zheng_2(nx - 2 * npml, npmlc);
dim3 grid_zheng_3(nx - 2 * npml, ny - 2 * npml);
dim3 grid_zheng_last(nx - 2 * npml, ny - 2 * npml);
dim3 block_zheng_1(nz - 2 * npml);
dim3 block_zheng_2(nz - 2 * npml);
dim3 block_zheng_3(npmlc);
dim3 block_zheng_last(nz - 2 * npml);
__global__ void gpu_zheng_1(
float *dev_Ex_zheng, float *dev_Ey_zheng, float *dev_Ez_zheng,
float *dev_Hx_zheng, float *dev_Hy_zheng, float *dev_Hz_zheng,
float *dev_Ex, float *dev_Ey, float *dev_Ez,
float *dev_Hx, float *dev_Hy, float *dev_Hz,
int j)
{
int ix = blockIdx.x;
int iy = blockIdx.y;
int iz = threadIdx.x;
int lidzheng; //**_zheng_* 前半部分的位置
int ridzheng; //**_zheng_* 后半部分的位置
int lidEx, lidEy, lidEz, lidHx, lidHy, lidHz;
int ridEx, ridEy, ridEz, ridHx, ridHy, ridHz;
lidzheng =
j * (2 * npmlc) * (ny - 2 * npml) * (nz - 2 * npml) +
ix * (ny - 2 * npml) * (nz - 2 * npml) +
iy * (nz - 2 * npml) +
iz;
lidEx =
(ix + npml) * (ny + 1) * (nz + 1) +
(iy + npml) * (nz + 1) +
(iz + npml);
lidEy =
(ix + npml) * (ny + 0) * (nz + 1) +
(iy + npml) * (nz + 1) +
(iz + npml);
lidEz =
(ix + npml) * (ny + 1) * (nz + 0) +
(iy + npml) * (nz + 0) +
(iz + npml);
lidHx =
(ix + npml) * (ny + 0) * (nz + 0) +
(iy + npml) * (nz + 0) +
(iz + npml);
lidHy =
(ix + npml) * (ny + 1) * (nz + 0) +
(iy + npml) * (nz + 0) +
(iz + npml);
lidHz =
(ix + npml) * (ny + 0) * (nz + 1) +
(iy + npml) * (nz + 1) +
(iz + npml);
ridzheng = lidzheng + (ny - 2 * npml) * (nz - 2 * npml) * (npmlc);
ridEx = lidEx + (ny + 1) * (nz + 1) * (nx - 2 * npml - npmlc);
ridEy = lidEy + (ny + 0) * (nz + 1) * (nx - 2 * npml - npmlc);
ridEz = lidEz + (ny + 1) * (nz + 0) * (nx - 2 * npml - npmlc);
ridHx = lidHx + (ny + 0) * (nz + 0) * (nx - 2 * npml - npmlc);
ridHy = lidHy + (ny + 1) * (nz + 0) * (nx - 2 * npml - npmlc);
ridHz = lidHz + (ny + 0) * (nz + 1) * (nx - 2 * npml - npmlc);
dev_Ex_zheng[lidzheng] = dev_Ex[lidEx];
dev_Ey_zheng[lidzheng] = dev_Ey[lidEy];
dev_Ez_zheng[lidzheng] = dev_Ez[lidEz];
dev_Hx_zheng[lidzheng] = dev_Hx[lidHx];
dev_Hy_zheng[lidzheng] = dev_Hy[lidHy];
dev_Hz_zheng[lidzheng] = dev_Hz[lidHz];
dev_Ex_zheng[ridzheng] = dev_Ex[ridEx];
dev_Ey_zheng[ridzheng] = dev_Ey[ridEy];
dev_Ez_zheng[ridzheng] = dev_Ez[ridEz];
dev_Hx_zheng[ridzheng] = dev_Hx[ridHx];
dev_Hy_zheng[ridzheng] = dev_Hy[ridHy];
dev_Hz_zheng[ridzheng] = dev_Hz[ridHz];
}
// Checkpoints the E/H fields of two npmlc-thick y-slabs (just inside the
// low-y and high-y PML regions) into the packed *_zheng buffers at
// time-slice j.  Inverse operation: gpu_back_zheng_2 restores the same
// cells — the index arithmetic in the two kernels must stay in exact
// correspondence.
// NOTE(review): the indexing implies gridDim = (nx - 2*npml, npmlc) and
// blockDim.x = nz - 2*npml — confirm against the launch site.
__global__ void gpu_zheng_2(
	float *dev_Ex_zheng, float *dev_Ey_zheng, float *dev_Ez_zheng,
	float *dev_Hx_zheng, float *dev_Hy_zheng, float *dev_Hz_zheng,
	float *dev_Ex, float *dev_Ey, float *dev_Ez,
	float *dev_Hx, float *dev_Hy, float *dev_Hz,
	int j)
{
	int ix = blockIdx.x;
	int iy = blockIdx.y;
	int iz = threadIdx.x;
	int lidzheng; // packed index of the cell in the front (low-y) slab
	int ridzheng; // packed index of the cell in the back (high-y) slab
	int lidEx, lidEy, lidEz, lidHx, lidHy, lidHz; // front-slab field indices
	int ridEx, ridEy, ridEz, ridHx, ridHy, ridHz; // back-slab field indices
	// Packed buffer layout per time-slice j:
	// (nx - 2*npml) x (2*npmlc) x (nz - 2*npml), z fastest.
	lidzheng =
		j * (nx - 2 * npml) * (2 * npmlc) * (nz - 2 * npml) +
		ix * (2 * npmlc) * (nz - 2 * npml) +
		iy * (nz - 2 * npml) +
		iz;
	// Field indices: each staggered-grid array has its own extents
	// (hence the ny/nz +0/+1 factors); +npml shifts into the interior.
	lidEx =
		(ix + npml) * (ny + 1) * (nz + 1) +
		(iy + npml) * (nz + 1) +
		(iz + npml);
	lidEy =
		(ix + npml) * (ny + 0) * (nz + 1) +
		(iy + npml) * (nz + 1) +
		(iz + npml);
	lidEz =
		(ix + npml) * (ny + 1) * (nz + 0) +
		(iy + npml) * (nz + 0) +
		(iz + npml);
	lidHx =
		(ix + npml) * (ny + 0) * (nz + 0) +
		(iy + npml) * (nz + 0) +
		(iz + npml);
	lidHy =
		(ix + npml) * (ny + 1) * (nz + 0) +
		(iy + npml) * (nz + 0) +
		(iz + npml);
	lidHz =
		(ix + npml) * (ny + 0) * (nz + 1) +
		(iy + npml) * (nz + 1) +
		(iz + npml);
	// Mirror cell: npmlc y-rows further along the packed buffer...
	ridzheng = lidzheng + (nz - 2 * npml) * (npmlc);
	// ...and (ny - 2*npml - npmlc) y-rows further in the full arrays
	// (per-row stride is nz+0 or nz+1 depending on the array).
	ridEx = lidEx + (nz + 1) * (ny - 2 * npml - npmlc);
	ridEy = lidEy + (nz + 1) * (ny - 2 * npml - npmlc);
	ridEz = lidEz + (nz + 0) * (ny - 2 * npml - npmlc);
	ridHx = lidHx + (nz + 0) * (ny - 2 * npml - npmlc);
	ridHy = lidHy + (nz + 0) * (ny - 2 * npml - npmlc);
	ridHz = lidHz + (nz + 1) * (ny - 2 * npml - npmlc);
	// Save: packed <- field, front slab then back slab.
	dev_Ex_zheng[lidzheng] = dev_Ex[lidEx];
	dev_Ey_zheng[lidzheng] = dev_Ey[lidEy];
	dev_Ez_zheng[lidzheng] = dev_Ez[lidEz];
	dev_Hx_zheng[lidzheng] = dev_Hx[lidHx];
	dev_Hy_zheng[lidzheng] = dev_Hy[lidHy];
	dev_Hz_zheng[lidzheng] = dev_Hz[lidHz];
	dev_Ex_zheng[ridzheng] = dev_Ex[ridEx];
	dev_Ey_zheng[ridzheng] = dev_Ey[ridEy];
	dev_Ez_zheng[ridzheng] = dev_Ez[ridEz];
	dev_Hx_zheng[ridzheng] = dev_Hx[ridHx];
	dev_Hy_zheng[ridzheng] = dev_Hy[ridHy];
	dev_Hz_zheng[ridzheng] = dev_Hz[ridHz];
}
// Checkpoints the E/H fields of two npmlc-thick z-slabs (just inside the
// low-z and high-z PML regions) into the packed *_zheng buffers at
// time-slice j.  Inverse operation: gpu_back_zheng_3 — keep the index
// arithmetic of both kernels in exact correspondence.
// NOTE(review): the indexing implies gridDim = (nx - 2*npml, ny - 2*npml)
// and blockDim.x = npmlc — confirm against the launch site.
__global__ void gpu_zheng_3(
	float *dev_Ex_zheng, float *dev_Ey_zheng, float *dev_Ez_zheng,
	float *dev_Hx_zheng, float *dev_Hy_zheng, float *dev_Hz_zheng,
	float *dev_Ex, float *dev_Ey, float *dev_Ez,
	float *dev_Hx, float *dev_Hy, float *dev_Hz,
	int j)
{
	int ix = blockIdx.x;
	int iy = blockIdx.y;
	int iz = threadIdx.x;
	int lidzheng; // packed index of the cell in the bottom (low-z) slab
	int ridzheng; // packed index of the cell in the top (high-z) slab
	int lidEx, lidEy, lidEz, lidHx, lidHy, lidHz; // bottom-slab field indices
	int ridEx, ridEy, ridEz, ridHx, ridHy, ridHz; // top-slab field indices
	// Packed buffer layout per time-slice j:
	// (nx - 2*npml) x (ny - 2*npml) x (2*npmlc), z fastest.
	lidzheng =
		j * (nx - 2 * npml) * (ny - 2 * npml) * (2 * npmlc) +
		ix * (ny - 2 * npml) * (2 * npmlc) +
		iy * (2 * npmlc) +
		iz;
	// Field indices: each staggered-grid array has its own extents
	// (hence the ny/nz +0/+1 factors); +npml shifts into the interior.
	lidEx =
		(ix + npml) * (ny + 1) * (nz + 1) +
		(iy + npml) * (nz + 1) +
		(iz + npml);
	lidEy =
		(ix + npml) * (ny + 0) * (nz + 1) +
		(iy + npml) * (nz + 1) +
		(iz + npml);
	lidEz =
		(ix + npml) * (ny + 1) * (nz + 0) +
		(iy + npml) * (nz + 0) +
		(iz + npml);
	lidHx =
		(ix + npml) * (ny + 0) * (nz + 0) +
		(iy + npml) * (nz + 0) +
		(iz + npml);
	lidHy =
		(ix + npml) * (ny + 1) * (nz + 0) +
		(iy + npml) * (nz + 0) +
		(iz + npml);
	lidHz =
		(ix + npml) * (ny + 0) * (nz + 1) +
		(iy + npml) * (nz + 1) +
		(iz + npml);
	// Mirror cell: z is the fastest-varying axis, so the offset is a
	// plain npmlc in the packed buffer and (nz - 2*npml - npmlc) in the
	// full arrays, identical for every field.
	ridzheng = lidzheng + (npmlc);
	ridEx = lidEx + (nz - 2 * npml - npmlc);
	ridEy = lidEy + (nz - 2 * npml - npmlc);
	ridEz = lidEz + (nz - 2 * npml - npmlc);
	ridHx = lidHx + (nz - 2 * npml - npmlc);
	ridHy = lidHy + (nz - 2 * npml - npmlc);
	ridHz = lidHz + (nz - 2 * npml - npmlc);
	// Save: packed <- field, bottom slab then top slab.
	dev_Ex_zheng[lidzheng] = dev_Ex[lidEx];
	dev_Ey_zheng[lidzheng] = dev_Ey[lidEy];
	dev_Ez_zheng[lidzheng] = dev_Ez[lidEz];
	dev_Hx_zheng[lidzheng] = dev_Hx[lidHx];
	dev_Hy_zheng[lidzheng] = dev_Hy[lidHy];
	dev_Hz_zheng[lidzheng] = dev_Hz[lidHz];
	dev_Ex_zheng[ridzheng] = dev_Ex[ridEx];
	dev_Ey_zheng[ridzheng] = dev_Ey[ridEy];
	dev_Ez_zheng[ridzheng] = dev_Ez[ridEz];
	dev_Hx_zheng[ridzheng] = dev_Hx[ridHx];
	dev_Hy_zheng[ridzheng] = dev_Hy[ridHy];
	dev_Hz_zheng[ridzheng] = dev_Hz[ridHz];
}
// Final-snapshot kernel: gathers the E and H fields of the whole
// interior (non-PML) region into the densely packed *_zheng buffers.
// One thread handles one interior cell; the indexing assumes
// gridDim = (nx - 2*npml, ny - 2*npml) and blockDim.x = nz - 2*npml.
__global__ void gpu_zheng_last(
	float *dev_Ex_zheng, float *dev_Ey_zheng, float *dev_Ez_zheng,
	float *dev_Hx_zheng, float *dev_Hy_zheng, float *dev_Hz_zheng,
	float *dev_Ex, float *dev_Ey, float *dev_Ez,
	float *dev_Hx, float *dev_Hy, float *dev_Hz)
{
	const int ix = blockIdx.x;
	const int iy = blockIdx.y;
	const int iz = threadIdx.x;
	// Destination: dense (nx-2npml) x (ny-2npml) x (nz-2npml) brick.
	const int dst = (ix * (ny - 2 * npml) + iy) * (nz - 2 * npml) + iz;
	// Source coordinates shifted npml cells into the full-size arrays.
	const int sx = ix + npml;
	const int sy = iy + npml;
	const int sz = iz + npml;
	// Each staggered field array has its own extents, so each gets its
	// own linear index (ny/nz +0/+1 factors differ per field).
	const int srcEx = (sx * (ny + 1) + sy) * (nz + 1) + sz;
	const int srcEy = (sx * (ny + 0) + sy) * (nz + 1) + sz;
	const int srcEz = (sx * (ny + 1) + sy) * (nz + 0) + sz;
	const int srcHx = (sx * (ny + 0) + sy) * (nz + 0) + sz;
	const int srcHy = (sx * (ny + 1) + sy) * (nz + 0) + sz;
	const int srcHz = (sx * (ny + 0) + sy) * (nz + 1) + sz;
	dev_Ex_zheng[dst] = dev_Ex[srcEx];
	dev_Ey_zheng[dst] = dev_Ey[srcEy];
	dev_Ez_zheng[dst] = dev_Ez[srcEz];
	dev_Hx_zheng[dst] = dev_Hx[srcHx];
	dev_Hy_zheng[dst] = dev_Hy[srcHy];
	dev_Hz_zheng[dst] = dev_Hz[srcHz];
}
// Restores the E/H fields of two npmlc-thick x-slabs (just inside the
// low-x and high-x PML regions) from the packed *_zheng checkpoint
// buffers at time-slice j.  Exact inverse of the gpu_zheng_1 save
// kernel: identical index arithmetic, assignment direction reversed.
// NOTE(review): the indexing implies gridDim = (npmlc, ny - 2*npml) and
// blockDim.x = nz - 2*npml — confirm against the launch site.
__global__ void gpu_back_zheng_1(
	float *dev_Ex_zheng, float *dev_Ey_zheng, float *dev_Ez_zheng,
	float *dev_Hx_zheng, float *dev_Hy_zheng, float *dev_Hz_zheng,
	float *dev_Ex, float *dev_Ey, float *dev_Ez,
	float *dev_Hx, float *dev_Hy, float *dev_Hz,
	int j)
{
	int ix = blockIdx.x;
	int iy = blockIdx.y;
	int iz = threadIdx.x;
	int lidzheng; // packed index of the cell in the left (low-x) slab
	int ridzheng; // packed index of the cell in the right (high-x) slab
	int lidEx, lidEy, lidEz, lidHx, lidHy, lidHz; // left-slab field indices
	int ridEx, ridEy, ridEz, ridHx, ridHy, ridHz; // right-slab field indices
	// Packed buffer layout per time-slice j:
	// (2*npmlc) x (ny - 2*npml) x (nz - 2*npml), z fastest.
	lidzheng =
		j * (2 * npmlc) * (ny - 2 * npml) * (nz - 2 * npml) +
		ix * (ny - 2 * npml) * (nz - 2 * npml) +
		iy * (nz - 2 * npml) +
		iz;
	// Field indices: each staggered-grid array has its own extents
	// (hence the ny/nz +0/+1 factors); +npml shifts into the interior.
	lidEx =
		(ix + npml) * (ny + 1) * (nz + 1) +
		(iy + npml) * (nz + 1) +
		(iz + npml);
	lidEy =
		(ix + npml) * (ny + 0) * (nz + 1) +
		(iy + npml) * (nz + 1) +
		(iz + npml);
	lidEz =
		(ix + npml) * (ny + 1) * (nz + 0) +
		(iy + npml) * (nz + 0) +
		(iz + npml);
	lidHx =
		(ix + npml) * (ny + 0) * (nz + 0) +
		(iy + npml) * (nz + 0) +
		(iz + npml);
	lidHy =
		(ix + npml) * (ny + 1) * (nz + 0) +
		(iy + npml) * (nz + 0) +
		(iz + npml);
	lidHz =
		(ix + npml) * (ny + 0) * (nz + 1) +
		(iy + npml) * (nz + 1) +
		(iz + npml);
	// Mirror cell: npmlc x-planes further along the packed buffer...
	ridzheng = lidzheng + (ny - 2 * npml) * (nz - 2 * npml) * (npmlc);
	// ...and (nx - 2*npml - npmlc) x-planes further in the full arrays
	// (per-plane stride depends on each array's y/z extents).
	ridEx = lidEx + (ny + 1) * (nz + 1) * (nx - 2 * npml - npmlc);
	ridEy = lidEy + (ny + 0) * (nz + 1) * (nx - 2 * npml - npmlc);
	ridEz = lidEz + (ny + 1) * (nz + 0) * (nx - 2 * npml - npmlc);
	ridHx = lidHx + (ny + 0) * (nz + 0) * (nx - 2 * npml - npmlc);
	ridHy = lidHy + (ny + 1) * (nz + 0) * (nx - 2 * npml - npmlc);
	ridHz = lidHz + (ny + 0) * (nz + 1) * (nx - 2 * npml - npmlc);
	// Restore: field <- packed, left slab then right slab.
	dev_Ex[lidEx] = dev_Ex_zheng[lidzheng];
	dev_Ey[lidEy] = dev_Ey_zheng[lidzheng];
	dev_Ez[lidEz] = dev_Ez_zheng[lidzheng];
	dev_Hx[lidHx] = dev_Hx_zheng[lidzheng];
	dev_Hy[lidHy] = dev_Hy_zheng[lidzheng];
	dev_Hz[lidHz] = dev_Hz_zheng[lidzheng];
	dev_Ex[ridEx] = dev_Ex_zheng[ridzheng];
	dev_Ey[ridEy] = dev_Ey_zheng[ridzheng];
	dev_Ez[ridEz] = dev_Ez_zheng[ridzheng];
	dev_Hx[ridHx] = dev_Hx_zheng[ridzheng];
	dev_Hy[ridHy] = dev_Hy_zheng[ridzheng];
	dev_Hz[ridHz] = dev_Hz_zheng[ridzheng];
}
// Restores the E/H fields of two npmlc-thick y-slabs from the packed
// *_zheng checkpoint buffers at time-slice j.  Exact inverse of
// gpu_zheng_2: identical index arithmetic, assignment direction
// reversed.
__global__ void gpu_back_zheng_2(
	float *dev_Ex_zheng, float *dev_Ey_zheng, float *dev_Ez_zheng,
	float *dev_Hx_zheng, float *dev_Hy_zheng, float *dev_Hz_zheng,
	float *dev_Ex, float *dev_Ey, float *dev_Ez,
	float *dev_Hx, float *dev_Hy, float *dev_Hz,
	int j)
{
	int ix = blockIdx.x;
	int iy = blockIdx.y;
	int iz = threadIdx.x;
	int lidzheng; // packed index of the cell in the front (low-y) slab
	int ridzheng; // packed index of the cell in the back (high-y) slab
	int lidEx, lidEy, lidEz, lidHx, lidHy, lidHz; // front-slab field indices
	int ridEx, ridEy, ridEz, ridHx, ridHy, ridHz; // back-slab field indices
	// Packed buffer layout per time-slice j:
	// (nx - 2*npml) x (2*npmlc) x (nz - 2*npml), z fastest.
	lidzheng =
		j * (nx - 2 * npml) * (2 * npmlc) * (nz - 2 * npml) +
		ix * (2 * npmlc) * (nz - 2 * npml) +
		iy * (nz - 2 * npml) +
		iz;
	// Field indices into each staggered-grid array, shifted npml cells
	// into the interior.
	lidEx =
		(ix + npml) * (ny + 1) * (nz + 1) +
		(iy + npml) * (nz + 1) +
		(iz + npml);
	lidEy =
		(ix + npml) * (ny + 0) * (nz + 1) +
		(iy + npml) * (nz + 1) +
		(iz + npml);
	lidEz =
		(ix + npml) * (ny + 1) * (nz + 0) +
		(iy + npml) * (nz + 0) +
		(iz + npml);
	lidHx =
		(ix + npml) * (ny + 0) * (nz + 0) +
		(iy + npml) * (nz + 0) +
		(iz + npml);
	lidHy =
		(ix + npml) * (ny + 1) * (nz + 0) +
		(iy + npml) * (nz + 0) +
		(iz + npml);
	lidHz =
		(ix + npml) * (ny + 0) * (nz + 1) +
		(iy + npml) * (nz + 1) +
		(iz + npml);
	// Mirror cell: npmlc y-rows further in the packed buffer, and
	// (ny - 2*npml - npmlc) y-rows further in the full arrays.
	ridzheng = lidzheng + (nz - 2 * npml) * (npmlc);
	ridEx = lidEx + (nz + 1) * (ny - 2 * npml - npmlc);
	ridEy = lidEy + (nz + 1) * (ny - 2 * npml - npmlc);
	ridEz = lidEz + (nz + 0) * (ny - 2 * npml - npmlc);
	ridHx = lidHx + (nz + 0) * (ny - 2 * npml - npmlc);
	ridHy = lidHy + (nz + 0) * (ny - 2 * npml - npmlc);
	ridHz = lidHz + (nz + 1) * (ny - 2 * npml - npmlc);
	// Restore: field <- packed, front slab then back slab.
	dev_Ex[lidEx] = dev_Ex_zheng[lidzheng];
	dev_Ey[lidEy] = dev_Ey_zheng[lidzheng];
	dev_Ez[lidEz] = dev_Ez_zheng[lidzheng];
	dev_Hx[lidHx] = dev_Hx_zheng[lidzheng];
	dev_Hy[lidHy] = dev_Hy_zheng[lidzheng];
	dev_Hz[lidHz] = dev_Hz_zheng[lidzheng];
	dev_Ex[ridEx] = dev_Ex_zheng[ridzheng];
	dev_Ey[ridEy] = dev_Ey_zheng[ridzheng];
	dev_Ez[ridEz] = dev_Ez_zheng[ridzheng];
	dev_Hx[ridHx] = dev_Hx_zheng[ridzheng];
	dev_Hy[ridHy] = dev_Hy_zheng[ridzheng];
	dev_Hz[ridHz] = dev_Hz_zheng[ridzheng];
}
// Restores the E/H fields of two npmlc-thick z-slabs from the packed
// *_zheng checkpoint buffers at time-slice j.  Exact inverse of
// gpu_zheng_3: identical index arithmetic, assignment direction
// reversed.
__global__ void gpu_back_zheng_3(
	float *dev_Ex_zheng, float *dev_Ey_zheng, float *dev_Ez_zheng,
	float *dev_Hx_zheng, float *dev_Hy_zheng, float *dev_Hz_zheng,
	float *dev_Ex, float *dev_Ey, float *dev_Ez,
	float *dev_Hx, float *dev_Hy, float *dev_Hz,
	int j)
{
	int ix = blockIdx.x;
	int iy = blockIdx.y;
	int iz = threadIdx.x;
	int lidzheng; // packed index of the cell in the bottom (low-z) slab
	int ridzheng; // packed index of the cell in the top (high-z) slab
	int lidEx, lidEy, lidEz, lidHx, lidHy, lidHz; // bottom-slab field indices
	int ridEx, ridEy, ridEz, ridHx, ridHy, ridHz; // top-slab field indices
	// Packed buffer layout per time-slice j:
	// (nx - 2*npml) x (ny - 2*npml) x (2*npmlc), z fastest.
	lidzheng =
		j * (nx - 2 * npml) * (ny - 2 * npml) * (2 * npmlc) +
		ix * (ny - 2 * npml) * (2 * npmlc) +
		iy * (2 * npmlc) +
		iz;
	// Field indices into each staggered-grid array, shifted npml cells
	// into the interior.
	lidEx =
		(ix + npml) * (ny + 1) * (nz + 1) +
		(iy + npml) * (nz + 1) +
		(iz + npml);
	lidEy =
		(ix + npml) * (ny + 0) * (nz + 1) +
		(iy + npml) * (nz + 1) +
		(iz + npml);
	lidEz =
		(ix + npml) * (ny + 1) * (nz + 0) +
		(iy + npml) * (nz + 0) +
		(iz + npml);
	lidHx =
		(ix + npml) * (ny + 0) * (nz + 0) +
		(iy + npml) * (nz + 0) +
		(iz + npml);
	lidHy =
		(ix + npml) * (ny + 1) * (nz + 0) +
		(iy + npml) * (nz + 0) +
		(iz + npml);
	lidHz =
		(ix + npml) * (ny + 0) * (nz + 1) +
		(iy + npml) * (nz + 1) +
		(iz + npml);
	// Mirror cell: z is the fastest axis, so plain scalar offsets —
	// npmlc in the packed buffer, (nz - 2*npml - npmlc) in the fields.
	ridzheng = lidzheng + (npmlc);
	ridEx = lidEx + (nz - 2 * npml - npmlc);
	ridEy = lidEy + (nz - 2 * npml - npmlc);
	ridEz = lidEz + (nz - 2 * npml - npmlc);
	ridHx = lidHx + (nz - 2 * npml - npmlc);
	ridHy = lidHy + (nz - 2 * npml - npmlc);
	ridHz = lidHz + (nz - 2 * npml - npmlc);
	// Restore: field <- packed, bottom slab then top slab.
	dev_Ex[lidEx] = dev_Ex_zheng[lidzheng];
	dev_Ey[lidEy] = dev_Ey_zheng[lidzheng];
	dev_Ez[lidEz] = dev_Ez_zheng[lidzheng];
	dev_Hx[lidHx] = dev_Hx_zheng[lidzheng];
	dev_Hy[lidHy] = dev_Hy_zheng[lidzheng];
	dev_Hz[lidHz] = dev_Hz_zheng[lidzheng];
	dev_Ex[ridEx] = dev_Ex_zheng[ridzheng];
	dev_Ey[ridEy] = dev_Ey_zheng[ridzheng];
	dev_Ez[ridEz] = dev_Ez_zheng[ridzheng];
	dev_Hx[ridHx] = dev_Hx_zheng[ridzheng];
	dev_Hy[ridHy] = dev_Hy_zheng[ridzheng];
	dev_Hz[ridHz] = dev_Hz_zheng[ridzheng];
}
// Final-snapshot restore: scatters the densely packed *_zheng buffers
// back into the interior (non-PML) region of the full E/H field arrays.
// Inverse of gpu_zheng_last; one thread per interior cell, indexing
// assumes gridDim = (nx - 2*npml, ny - 2*npml), blockDim.x = nz - 2*npml.
__global__ void gpu_back_zheng_last(
	float *dev_Ex_zheng, float *dev_Ey_zheng, float *dev_Ez_zheng,
	float *dev_Hx_zheng, float *dev_Hy_zheng, float *dev_Hz_zheng,
	float *dev_Ex, float *dev_Ey, float *dev_Ez,
	float *dev_Hx, float *dev_Hy, float *dev_Hz)
{
	const int ix = blockIdx.x;
	const int iy = blockIdx.y;
	const int iz = threadIdx.x;
	// Source: dense (nx-2npml) x (ny-2npml) x (nz-2npml) brick.
	const int src = (ix * (ny - 2 * npml) + iy) * (nz - 2 * npml) + iz;
	// Destination coordinates shifted npml cells into the full arrays.
	const int dx_ = ix + npml;
	const int dy_ = iy + npml;
	const int dz_ = iz + npml;
	// Per-field linear indices — extents differ per staggered array.
	const int dstEx = (dx_ * (ny + 1) + dy_) * (nz + 1) + dz_;
	const int dstEy = (dx_ * (ny + 0) + dy_) * (nz + 1) + dz_;
	const int dstEz = (dx_ * (ny + 1) + dy_) * (nz + 0) + dz_;
	const int dstHx = (dx_ * (ny + 0) + dy_) * (nz + 0) + dz_;
	const int dstHy = (dx_ * (ny + 1) + dy_) * (nz + 0) + dz_;
	const int dstHz = (dx_ * (ny + 0) + dy_) * (nz + 1) + dz_;
	dev_Ex[dstEx] = dev_Ex_zheng[src];
	dev_Ey[dstEy] = dev_Ey_zheng[src];
	dev_Ez[dstEz] = dev_Ez_zheng[src];
	dev_Hx[dstHx] = dev_Hx_zheng[src];
	dev_Hy[dstHy] = dev_Hy_zheng[src];
	dev_Hz[dstHz] = dev_Hz_zheng[src];
}
// Launch configuration for gpu_fan_huanyuan: one block per interior
// (x, y) column, one thread per interior z cell.
dim3 grid_fan_huanyuan(nx - 2 * npml, ny - 2 * npml);
dim3 block_fan_huanyuan(nz - 2 * npml);
// Gathers the interior (non-PML) part of the Ex field into the densely
// packed buffer dev_dst.  Launched with grid_fan_huanyuan /
// block_fan_huanyuan: one thread per interior cell.
__global__ void gpu_fan_huanyuan(float *dev_dst, float *dev_Ex)
{
	const int ix = blockIdx.x;
	const int iy = blockIdx.y;
	const int iz = threadIdx.x;
	// Dense destination index over the (nx-2npml, ny-2npml, nz-2npml) brick.
	const int dst = (ix * (ny - 2 * npml) + iy) * (nz - 2 * npml) + iz;
	// Ex source index in the full (nx, ny+1, nz+1) array, shifted by npml.
	const int src = ((ix + npml) * (ny + 1) + (iy + npml)) * (nz + 1) + (iz + npml);
	dev_dst[dst] = dev_Ex[src];
}
// Launch configuration for the gpu_H1/gpu_E1 update kernels: covers the
// grid minus an np-cell border on every side.
dim3 grid_HE1(nx - np - np, ny - np - np);
dim3 block_HE1(nz - np - np);
// One magnetic-field (Hx1/Hy1/Hz1) update step from the spatial
// differences of E1, over the region inside an np-cell border
// (launched with grid_HE1/block_HE1).  Per component:
//   H = (1/CPH) * H + (CQH/CPH) * (dE_a/da - dE_b/db)
// with CPH/CQH read per-cell from the coefficient arrays.
__global__ void gpu_H1(
	float *dev_Hx1, float *dev_Hy1, float *dev_Hz1,
	float *dev_Ex1, float *dev_Ey1, float *dev_Ez1,
	float *dev_CPHx, float *dev_CPHy, float *dev_CPHz,
	float *dev_CQHx, float *dev_CQHy, float *dev_CQHz)
{
	int ix = blockIdx.x;
	int iy = blockIdx.y;
	int iz = threadIdx.x;
	// Per-field linear indices; the ny/nz +0/+1 factors encode each
	// staggered array's extents, +np shifts past the border.
	int idxHx1 = (ix + np)*(ny + 0)*(nz + 0) + (iy + np)*(nz + 0) + (iz + np);
	int idxHy1 = (ix + np)*(ny + 1)*(nz + 0) + (iy + np)*(nz + 0) + (iz + np);
	int idxHz1 = (ix + np)*(ny + 0)*(nz + 1) + (iy + np)*(nz + 1) + (iz + np);
	int idxEx1 = (ix + np)*(ny + 1)*(nz + 1) + (iy + np)*(nz + 1) + (iz + np);
	int idxEy1 = (ix + np)*(ny + 0)*(nz + 1) + (iy + np)*(nz + 1) + (iz + np);
	int idxEz1 = (ix + np)*(ny + 1)*(nz + 0) + (iy + np)*(nz + 0) + (iz + np);
	// Strides from an E sample to the forward neighbor used by each
	// finite difference (per that E array's own layout).
	int delEz1_Hx1 = nz;
	int delEy1_Hx1 = 1;
	int delEx1_Hy1 = 1;
	int delEz1_Hy1 = (ny + 1) * nz;
	int delEy1_Hz1 = ny * (nz + 1);
	int delEx1_Hz1 = nz + 1;
	const float rfCPHx = 1 / dev_CPHx[idxHx1];// reciprocal of fCPHx
	const float fCQHx = dev_CQHx[idxHx1];
	// Hx <- curl terms dEz/dy - dEy/dz
	dev_Hx1[idxHx1] = rfCPHx * dev_Hx1[idxHx1]
		+ rfCPHx * fCQHx / dy * (dev_Ez1[idxEz1 + delEz1_Hx1] - dev_Ez1[idxEz1])
		- rfCPHx * fCQHx / dz * (dev_Ey1[idxEy1 + delEy1_Hx1] - dev_Ey1[idxEy1]);
	const float rfCPHy = 1 / dev_CPHy[idxHy1];// reciprocal of fCPHy
	const float fCQHy = dev_CQHy[idxHy1];
	// Hy <- curl terms dEx/dz - dEz/dx
	dev_Hy1[idxHy1] = rfCPHy * dev_Hy1[idxHy1]
		+ rfCPHy * fCQHy / dz * (dev_Ex1[idxEx1 + delEx1_Hy1] - dev_Ex1[idxEx1])
		- rfCPHy * fCQHy / dx * (dev_Ez1[idxEz1 + delEz1_Hy1] - dev_Ez1[idxEz1]);
	const float rfCPHz = 1 / dev_CPHz[idxHz1];// reciprocal of fCPHz
	const float fCQHz = dev_CQHz[idxHz1];
	// Hz <- curl terms dEy/dx - dEx/dy
	dev_Hz1[idxHz1] = rfCPHz * dev_Hz1[idxHz1]
		+ rfCPHz * fCQHz / dx * (dev_Ey1[idxEy1 + delEy1_Hz1] - dev_Ey1[idxEy1])
		- rfCPHz * fCQHz / dy * (dev_Ex1[idxEx1 + delEx1_Hz1] - dev_Ex1[idxEx1]);
}
// One electric-field (Ex1/Ey1/Ez1) update step from the spatial
// differences of H1, over the region inside an np-cell border
// (launched with grid_HE1/block_HE1).  Per component:
//   E = (1/CAE) * E + (CBE/CAE) * (dH_a/da - dH_b/db)
// using backward differences (the - delH offsets), mirroring gpu_H1.
__global__ void gpu_E1(
	float *dev_Hx1, float *dev_Hy1, float *dev_Hz1,
	float *dev_Ex1, float *dev_Ey1, float *dev_Ez1,
	float *dev_CAEx, float *dev_CAEy, float *dev_CAEz,
	float *dev_CBEx, float *dev_CBEy, float *dev_CBEz)
{
	int ix = blockIdx.x;
	int iy = blockIdx.y;
	int iz = threadIdx.x;
	// Per-field linear indices; extents differ per staggered array,
	// +np shifts past the border.
	int idxHx1 = (ix + np)*(ny + 0)*(nz + 0) + (iy + np)*(nz + 0) + (iz + np);
	int idxHy1 = (ix + np)*(ny + 1)*(nz + 0) + (iy + np)*(nz + 0) + (iz + np);
	int idxHz1 = (ix + np)*(ny + 0)*(nz + 1) + (iy + np)*(nz + 1) + (iz + np);
	int idxEx1 = (ix + np)*(ny + 1)*(nz + 1) + (iy + np)*(nz + 1) + (iz + np);
	int idxEy1 = (ix + np)*(ny + 0)*(nz + 1) + (iy + np)*(nz + 1) + (iz + np);
	int idxEz1 = (ix + np)*(ny + 1)*(nz + 0) + (iy + np)*(nz + 0) + (iz + np);
	// Strides from an H sample back to the previous neighbor used by
	// each backward finite difference.
	int delHz1_Ex1 = nz + 1;
	int delHy1_Ex1 = 1;
	int delHx1_Ey1 = 1;
	int delHz1_Ey1 = ny * (nz + 1);
	int delHy1_Ez1 = (ny + 1) * nz;
	int delHx1_Ez1 = nz;
	const float rfCAEx = 1 / dev_CAEx[idxEx1];// reciprocal of fCAEx
	const float fCBEx = dev_CBEx[idxEx1];
	// Ex <- curl terms dHz/dy - dHy/dz
	dev_Ex1[idxEx1] = rfCAEx * dev_Ex1[idxEx1]
		+ rfCAEx * fCBEx / dy * (dev_Hz1[idxHz1] - dev_Hz1[idxHz1 - delHz1_Ex1])
		- rfCAEx * fCBEx / dz * (dev_Hy1[idxHy1] - dev_Hy1[idxHy1 - delHy1_Ex1]);
	const float rfCAEy = 1 / dev_CAEy[idxEy1];// reciprocal of fCAEy
	const float fCBEy = dev_CBEy[idxEy1];
	// Ey <- curl terms dHx/dz - dHz/dx
	dev_Ey1[idxEy1] = rfCAEy * dev_Ey1[idxEy1]
		+ rfCAEy * fCBEy / dz * (dev_Hx1[idxHx1] - dev_Hx1[idxHx1 - delHx1_Ey1])
		- rfCAEy * fCBEy / dx * (dev_Hz1[idxHz1] - dev_Hz1[idxHz1 - delHz1_Ey1]);
	const float rfCAEz = 1 / dev_CAEz[idxEz1];// reciprocal of fCAEz
	const float fCBEz = dev_CBEz[idxEz1];
	// Ez <- curl terms dHy/dx - dHx/dy
	dev_Ez1[idxEz1] = rfCAEz * dev_Ez1[idxEz1]
		+ rfCAEz * fCBEz / dx * (dev_Hy1[idxHy1] - dev_Hy1[idxHy1 - delHy1_Ez1])
		- rfCAEz * fCBEz / dy * (dev_Hx1[idxHx1] - dev_Hx1[idxHx1 - delHx1_Ez1]);
}
// Launch configuration for gpu_nzf: one thread per cell of the dense
// interior (non-PML) volume.
dim3 grid_nzf(nx - 2 * npml, ny - 2 * npml);
dim3 block_nzf(nz - 2 * npml);
// Accumulates the element-wise product of dev_src1 and dev_src2 into
// dev_dst over the dense interior volume (grid_nzf/block_nzf layout).
__global__ void gpu_nzf(float *dev_dst, float *dev_src1, float *dev_src2)
{
	const int span = nz - 2 * npml;
	const int idx =
		(blockIdx.x * (ny - 2 * npml) + blockIdx.y) * span + threadIdx.x;
	dev_dst[idx] += dev_src1[idx] * dev_src2[idx];
}
/* Reads an n1*n2*n3 integer volume from a whitespace-separated text
 * file into `a`.  The on-disk order is i (slowest), then k, then j;
 * values are stored at a[i*n2*n3 + j*n3 + k], i.e. the j/k loops are
 * swapped relative to the storage layout.
 * On open failure, or if the file is short/malformed, prints a message
 * and returns early, leaving the unread tail of `a` untouched. */
void read_int(const char *name, int *a, int n1, int n2, int n3)
{
	FILE *fp = fopen(name, "r");
	if (fp == NULL) // bail out if the file could not be opened
	{
		printf("fopen %s error! \n", name);
		return;
	}
	printf("fopen %s ok! \n", name);
	for (int i = 0; i < n1; i++)
	{
		for (int k = 0; k < n3; k++)
		{
			for (int j = 0; j < n2; j++)
			{
				// read a[i][j][k]; bug fix: the fscanf result was
				// previously ignored, so a short file silently left
				// garbage in the array — now we stop with a message
				if (fscanf(fp, "%d", &a[i * n2*n3 + j * n3 + k]) != 1)
				{
					printf("read %s error: short or malformed file\n", name);
					fclose(fp);
					return;
				}
			}
		}
	}
	printf("read %s OK\n", name);
	fclose(fp);
	return;
}
/* Reads an n1*n2*n3 float volume from a whitespace-separated text file
 * into `a`.  Same layout convention as read_int: on-disk order is
 * i (slowest), then k, then j; storage index is a[i*n2*n3 + j*n3 + k].
 * On open failure, or if the file is short/malformed, prints a message
 * and returns early, leaving the unread tail of `a` untouched. */
void read_float(const char *name, float *a, int n1, int n2, int n3)
{
	FILE *fp = fopen(name, "r");
	if (fp == NULL) // bail out if the file could not be opened
	{
		printf("fopen %s error! \n", name);
		return;
	}
	printf("fopen %s ok! \n", name);
	for (int i = 0; i < n1; i++)
	{
		for (int k = 0; k < n3; k++)
		{
			for (int j = 0; j < n2; j++)
			{
				// read a[i][j][k]; bug fix: the fscanf result was
				// previously ignored, so a short file silently left
				// garbage in the array — now we stop with a message
				if (fscanf(fp, "%f", a + i * n2*n3 + j * n3 + k) != 1)
				{
					printf("read %s error: short or malformed file\n", name);
					fclose(fp);
					return;
				}
			}
		}
	}
	printf("read %s OK\n", name);
	fclose(fp);
	return;
}
/* Writes the n1*n2*n3 float volume `a` to a text file in "%e " format.
 * Output order is k (slowest), then j, then i, reading a[i][j][k] from
 * the i-major layout a[i*n2*n3 + j*n3 + k].  Prints a message and
 * returns if the file cannot be opened. */
void print_nzf(const char *name, float *a, int n1, int n2, int n3)
{
	FILE *fp = fopen(name, "w+");
	if (fp == NULL)
	{
		printf("fopen %s error! \n", name);
		return;
	}
	printf("fopen %s ok! \n", name);
	for (int k = 0; k < n3; k++)
		for (int j = 0; j < n2; j++)
			for (int i = 0; i < n1; i++)
				fprintf(fp, "%e ", a[i * n2 * n3 + j * n3 + k]);
	printf("print %s OK\n", name);
	fclose(fp);
}
/* Loads every coefficient table, CPML recursion/stretching array,
 * source/receiver position list and the source wavelet from text files.
 * pianYi == true reads from data_pianyi/ (and additionally the observed
 * record E_obs); pianYi == false reads from data_zhengyan/.  Both
 * directories contain identically-shaped files, so the two original
 * copy-pasted branches are collapsed into one list driven by a
 * directory prefix. */
void read_data_from_txt(bool pianYi)
{
	const char *dir = pianYi ? "data_pianyi" : "data_zhengyan";
	char path[256];
	// Helpers: build "<dir>/<file>" and delegate to read_float/read_int.
#define READ_F(file, arr, n1, n2, n3) \
	do { snprintf(path, sizeof(path), "%s/%s", dir, (file)); \
	     read_float(path, (float*)(arr), (n1), (n2), (n3)); } while (0)
#define READ_I(file, arr, n1, n2, n3) \
	do { snprintf(path, sizeof(path), "%s/%s", dir, (file)); \
	     read_int(path, (int*)(arr), (n1), (n2), (n3)); } while (0)
	// E-field update coefficients and CPML E recursion coefficients.
	READ_F("CAEx.txt", CAEx, nx, ny + 1, nz + 1);
	READ_F("CBEx.txt", CBEx, nx, ny + 1, nz + 1);
	READ_F("RAEyz.txt", RAEyz, nx, 2 * (npml - 1), nz - 1);
	READ_F("RBEyz.txt", RBEyz, nx, 2 * (npml - 1), nz - 1);
	READ_F("RAEzy.txt", RAEzy, nx, ny - 1, 2 * (npml - 1));
	READ_F("RBEzy.txt", RBEzy, nx, ny - 1, 2 * (npml - 1));
	READ_F("CAEy.txt", CAEy, nx + 1, ny, nz + 1);
	READ_F("CBEy.txt", CBEy, nx + 1, ny, nz + 1);
	READ_F("RAEzx.txt", RAEzx, nx - 1, ny, 2 * (npml - 1));
	READ_F("RBEzx.txt", RBEzx, nx - 1, ny, 2 * (npml - 1));
	READ_F("RAExz.txt", RAExz, 2 * (npml - 1), ny, nz - 1);
	READ_F("RBExz.txt", RBExz, 2 * (npml - 1), ny, nz - 1);
	READ_F("CAEz.txt", CAEz, nx + 1, ny + 1, nz);
	READ_F("CBEz.txt", CBEz, nx + 1, ny + 1, nz);
	READ_F("RAExy.txt", RAExy, 2 * (npml - 1), ny - 1, nz);
	READ_F("RBExy.txt", RBExy, 2 * (npml - 1), ny - 1, nz);
	READ_F("RAEyx.txt", RAEyx, nx - 1, 2 * (npml - 1), nz);
	READ_F("RBEyx.txt", RBEyx, nx - 1, 2 * (npml - 1), nz);
	// H-field update coefficients and CPML H recursion coefficients.
	READ_F("CPHx.txt", CPHx, nx + 1, ny, nz);
	READ_F("CQHx.txt", CQHx, nx + 1, ny, nz);
	READ_F("RAHyz.txt", RAHyz, nx - 1, 2 * npml, nz);
	READ_F("RBHyz.txt", RBHyz, nx - 1, 2 * npml, nz);
	READ_F("RAHzy.txt", RAHzy, nx - 1, ny, 2 * npml);
	READ_F("RBHzy.txt", RBHzy, nx - 1, ny, 2 * npml);
	READ_F("CPHy.txt", CPHy, nx, ny + 1, nz);
	READ_F("CQHy.txt", CQHy, nx, ny + 1, nz);
	READ_F("RAHzx.txt", RAHzx, nx, ny - 1, 2 * npml);
	READ_F("RBHzx.txt", RBHzx, nx, ny - 1, 2 * npml);
	READ_F("RAHxz.txt", RAHxz, 2 * npml, ny - 1, nz);
	READ_F("RBHxz.txt", RBHxz, 2 * npml, ny - 1, nz);
	READ_F("CPHz.txt", CPHz, nx, ny, nz + 1);
	READ_F("CQHz.txt", CQHz, nx, ny, nz + 1);
	READ_F("RAHxy.txt", RAHxy, 2 * npml, ny, nz - 1);
	READ_F("RBHxy.txt", RBHxy, 2 * npml, ny, nz - 1);
	READ_F("RAHyx.txt", RAHyx, nx, 2 * npml, nz - 1);
	READ_F("RBHyx.txt", RBHyx, nx, 2 * npml, nz - 1);
	// CPML coordinate-stretching factors, one per field/direction pair.
	READ_F("kx_Ey.txt", kx_Ey, nx + 1, ny, nz + 1);
	READ_F("kx_Ez.txt", kx_Ez, nx + 1, ny + 1, nz);
	READ_F("ky_Ex.txt", ky_Ex, nx, ny + 1, nz + 1);
	READ_F("ky_Ez.txt", ky_Ez, nx + 1, ny + 1, nz);
	READ_F("kz_Ex.txt", kz_Ex, nx, ny + 1, nz + 1);
	READ_F("kz_Ey.txt", kz_Ey, nx + 1, ny, nz + 1);
	READ_F("kx_Hy.txt", kx_Hy, nx, ny + 1, nz);
	READ_F("kx_Hz.txt", kx_Hz, nx, ny, nz + 1);
	READ_F("ky_Hx.txt", ky_Hx, nx + 1, ny, nz);
	READ_F("ky_Hz.txt", ky_Hz, nx, ny, nz + 1);
	READ_F("kz_Hx.txt", kz_Hx, nx + 1, ny, nz);
	READ_F("kz_Hy.txt", kz_Hy, nx, ny + 1, nz);
	// Source / receiver grid positions and the source wavelet.
	READ_I("fswzx.txt", fswzx, 1, 1, szfsw);
	READ_I("fswzy.txt", fswzy, 1, 1, szfsw);
	READ_I("fswzz.txt", fswzz, 1, 1, szfsw);
	READ_I("jswzx.txt", jswzx, 1, 1, szfsw);
	READ_I("jswzy.txt", jswzy, 1, 1, szfsw);
	READ_I("jswzz.txt", jswzz, 1, 1, szfsw);
	READ_F("source.txt", source, 1, 1, it);
	if (pianYi)
	{
		// the observed record exists only for the data_pianyi run
		READ_F("E_obs.txt", E_obs, 1, it, szfsw);
	}
#undef READ_F
#undef READ_I
}
/* Writes one receiver-by-timestep table of E_obs to the given stream:
 * szfsw rows (one per receiver), it columns (one per time step),
 * transposing the E_obs[it][szfsw] storage order. */
static void print_E_obs_table(FILE *fp)
{
	for (int i = 0; i < szfsw; i++)
	{
		for (int j = 0; j < it; j++)
		{
			fprintf(fp, "%8f ", E_obs[j][i]);
		}
		fprintf(fp, "\n");
	}
}
/* Dumps the observed-field record E_obs to two text files: a copy with
 * a header under output/, and a headerless copy under data_pianyi/
 * that read_data_from_txt() later reads back.
 * Bug fix: the original printed an fopen-failure message but then kept
 * going and wrote through the NULL FILE*; now each failure returns. */
void print_E_obs()
{
	const char *name = "output/E_obs.txt";
	FILE *fp = fopen(name, "w+");
	if (fp == NULL)
	{
		printf("fopen %s error! \n", name);
		return; // previously fell through and crashed on fprintf(NULL,...)
	}
	printf("print fopen %s ok! \n", name);
	fprintf(fp, "输出E_obs[%d][%d]\n", it, szfsw);
	fprintf(fp, "共有 %d 行 %d 列 \n", szfsw, it);
	print_E_obs_table(fp);
	printf("print %s OK\n", name);
	fclose(fp);
	const char *name2 = "data_pianyi/E_obs.txt";
	fp = fopen(name2, "w+");
	if (fp == NULL)
	{
		printf("fopen %s error! \n", name2);
		return; // same missing-return bug as above
	}
	printf("print fopen %s ok! \n", name2);
	print_E_obs_table(fp);
	printf("print %s OK\n", name2);
	fclose(fp);
	return;
}
void gpu_memory_malloc(bool pianYi)
{
cudaError_t cudaStatus = cudaSuccess;
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!"); }
//原来内存中存在的数组,数组大小用内存数组大小就行
cudaStatus = cudaMalloc((void**)&dev_CAEx, sizeof(CAEx));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_CBEx, sizeof(CBEx));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_RAEyz, sizeof(RAEyz));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_RBEyz, sizeof(RBEyz));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_RAEzy, sizeof(RAEzy));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_RBEzy, sizeof(RBEzy));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_CAEy, sizeof(CAEy));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_CBEy, sizeof(CBEy));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_RAExz, sizeof(RAExz));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_RBExz, sizeof(RBExz));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_RAEzx, sizeof(RAEzx));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_RBEzx, sizeof(RBEzx));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_CAEz, sizeof(CAEz));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_CBEz, sizeof(CBEz));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_RAExy, sizeof(RAExy));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_RBExy, sizeof(RBExy));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_RAEyx, sizeof(RAEyx));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_RBEyx, sizeof(RBEyx));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_CPHx, sizeof(CPHx));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_CQHx, sizeof(CQHx));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_RAHyz, sizeof(RAHyz));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_RBHyz, sizeof(RBHyz));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_RAHzy, sizeof(RAHzy));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_RBHzy, sizeof(RBHzy));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_CPHy, sizeof(CPHy));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_CQHy, sizeof(CQHy));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_RAHxz, sizeof(RAHxz));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_RBHxz, sizeof(RBHxz));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_RAHzx, sizeof(RAHzx));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_RBHzx, sizeof(RBHzx));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_CPHz, sizeof(CPHz));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_CQHz, sizeof(CQHz));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_RAHxy, sizeof(RAHxy));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_RBHxy, sizeof(RBHxy));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_RAHyx, sizeof(RAHyx));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_RBHyx, sizeof(RBHyx));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_kx_Ey, sizeof(kx_Ey));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_kx_Ez, sizeof(kx_Ez));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_ky_Ex, sizeof(ky_Ex));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_ky_Ez, sizeof(ky_Ez));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_kz_Ex, sizeof(kz_Ex));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_kz_Ey, sizeof(kz_Ey));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_kx_Hy, sizeof(kx_Hy));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_kx_Hz, sizeof(kx_Hz));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_ky_Hx, sizeof(ky_Hx));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_ky_Hz, sizeof(ky_Hz));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_kz_Hx, sizeof(kz_Hx));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_kz_Hy, sizeof(kz_Hy));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
//Arrays created only in GPU device memory; they have no pre-existing host-side counterpart
int szEx = nx * (ny + 1)*(nz + 1);
int szEy = (nx + 1)*ny*(nz + 1);
int szEz = (nx + 1)*(ny + 1)*nz;
int szHx = (nx + 1)*ny*nz;
int szHy = nx * (ny + 1)*nz;
int szHz = nx * ny*(nz + 1);
cudaStatus = cudaMalloc((void**)&dev_Ex, szEx * sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_UEyz, szEx * sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_UEzy, szEx * sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_Ey, szEy * sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_UEzx, szEy * sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_UExz, szEy * sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_Ez, szEz * sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_UExy, szEz * sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_UEyx, szEz * sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_Hx, szHx * sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_UHyz, szHx * sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_UHzy, szHx * sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_Hy, szHy * sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_UHzx, szHy * sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_UHxz, szHy * sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_Hz, szHz * sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_UHxy, szHz * sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_UHyx, szHz * sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_V, sizeof(V));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_E_obs, sizeof(E_obs));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_source, sizeof(source));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
// Variables needed only by the second (migration / pianYi) parallel stage
if(pianYi)
{
cudaStatus = cudaMalloc((void**)&dev_fan, sizeof(fan));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_huanyuan, sizeof(huanyuan));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_ns, sizeof(ns));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_zv, sizeof(zv));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_fv, sizeof(fv));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_Ex1, sizeof(Ex1));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_Ey1, sizeof(Ey1));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_Ez1, sizeof(Ez1));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_Hx1, sizeof(Hx1));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_Hy1, sizeof(Hy1));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
cudaStatus = cudaMalloc((void**)&dev_Hz1, sizeof(Hz1));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!");}
// Very large arrays: per-time-step boundary-field history buffers
cudaStatus = cudaMalloc((void**)&dev_Ex_zheng_1, (it)*(2 * npmlc)*(ny - 2 * npml)*(nz - 2 * npml) * sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc Super Big Array failed!");}
cudaStatus = cudaMalloc((void**)&dev_Ex_zheng_2, (it)*(nx - 2 * npml)*(2 * npmlc)*(nz - 2 * npml) * sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc Super Big Array failed!");}
cudaStatus = cudaMalloc((void**)&dev_Ex_zheng_3, (it)*(nx - 2 * npml)*(ny - 2 * npml)*(2 * npmlc) * sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc Super Big Array failed!");}
cudaStatus = cudaMalloc((void**)&dev_Ey_zheng_1, (it)*(2 * npmlc)*(ny - 2 * npml)*(nz - 2 * npml) * sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc Super Big Array failed!");}
cudaStatus = cudaMalloc((void**)&dev_Ey_zheng_2, (it)*(nx - 2 * npml)*(2 * npmlc)*(nz - 2 * npml) * sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc Super Big Array failed!");}
cudaStatus = cudaMalloc((void**)&dev_Ey_zheng_3, (it)*(nx - 2 * npml)*(ny - 2 * npml)*(2 * npmlc) * sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc Super Big Array failed!");}
cudaStatus = cudaMalloc((void**)&dev_Ez_zheng_1, (it)*(2 * npmlc)*(ny - 2 * npml)*(nz - 2 * npml) * sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc Super Big Array failed!");}
cudaStatus = cudaMalloc((void**)&dev_Ez_zheng_2, (it)*(nx - 2 * npml)*(2 * npmlc)*(nz - 2 * npml) * sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc Super Big Array failed!");}
cudaStatus = cudaMalloc((void**)&dev_Ez_zheng_3, (it)*(nx - 2 * npml)*(ny - 2 * npml)*(2 * npmlc) * sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc Super Big Array failed!");}
cudaStatus = cudaMalloc((void**)&dev_Hx_zheng_1, (it)*(2 * npmlc)*(ny - 2 * npml)*(nz - 2 * npml) * sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc Super Big Array failed!");}
cudaStatus = cudaMalloc((void**)&dev_Hx_zheng_2, (it)*(nx - 2 * npml)*(2 * npmlc)*(nz - 2 * npml) * sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc Super Big Array failed!");}
cudaStatus = cudaMalloc((void**)&dev_Hx_zheng_3, (it)*(nx - 2 * npml)*(ny - 2 * npml)*(2 * npmlc) * sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc Super Big Array failed!");}
cudaStatus = cudaMalloc((void**)&dev_Hy_zheng_1, (it)*(2 * npmlc)*(ny - 2 * npml)*(nz - 2 * npml) * sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc Super Big Array failed!");}
cudaStatus = cudaMalloc((void**)&dev_Hy_zheng_2, (it)*(nx - 2 * npml)*(2 * npmlc)*(nz - 2 * npml) * sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc Super Big Array failed!");}
cudaStatus = cudaMalloc((void**)&dev_Hy_zheng_3, (it)*(nx - 2 * npml)*(ny - 2 * npml)*(2 * npmlc) * sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc Super Big Array failed!");}
cudaStatus = cudaMalloc((void**)&dev_Hz_zheng_1, (it)*(2 * npmlc)*(ny - 2 * npml)*(nz - 2 * npml) * sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc Super Big Array failed!");}
cudaStatus = cudaMalloc((void**)&dev_Hz_zheng_2, (it)*(nx - 2 * npml)*(2 * npmlc)*(nz - 2 * npml) * sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc Super Big Array failed!");}
cudaStatus = cudaMalloc((void**)&dev_Hz_zheng_3, (it)*(nx - 2 * npml)*(ny - 2 * npml)*(2 * npmlc) * sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc Super Big Array failed!");}
cudaStatus = cudaMalloc((void**)&dev_Ex_zheng_last, (nx - 2 * npml)*(ny - 2 * npml)*(nz - 2 * npml) * sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc Super Big Array failed!");}
cudaStatus = cudaMalloc((void**)&dev_Ey_zheng_last, (nx - 2 * npml)*(ny - 2 * npml)*(nz - 2 * npml) * sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc Super Big Array failed!");}
cudaStatus = cudaMalloc((void**)&dev_Ez_zheng_last, (nx - 2 * npml)*(ny - 2 * npml)*(nz - 2 * npml) * sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc Super Big Array failed!");}
cudaStatus = cudaMalloc((void**)&dev_Hx_zheng_last, (nx - 2 * npml)*(ny - 2 * npml)*(nz - 2 * npml) * sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc Super Big Array failed!");}
cudaStatus = cudaMalloc((void**)&dev_Hy_zheng_last, (nx - 2 * npml)*(ny - 2 * npml)*(nz - 2 * npml) * sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc Super Big Array failed!");}
cudaStatus = cudaMalloc((void**)&dev_Hz_zheng_last, (nx - 2 * npml)*(ny - 2 * npml)*(nz - 2 * npml) * sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc Super Big Array failed!");}
}
}
// flag == 0: zero the device-side E*, UE**, H*, UH** (plus V and E_obs)
// flag == 1: zero the device-side E*, UE**, H*, UH** (plus V, E*_zheng_*, H*_zheng_*, E*_zheng_last, H*_zheng_last, fan, huanyuan)
// flag == 2: zero the device-side E*, UE**, H*, UH** (plus E*1 and H*1; note V is NOT touched in this mode)
// Zero the device-side field arrays between runs. The E*, UE**, H*, UH**
// arrays are always cleared; `flag` selects which additional buffers are
// cleared on top of that:
//   flag == 0: V and E_obs                                   (forward run)
//   flag == 1: V, the E*/H*_zheng_1..3 boundary histories, the
//              *_zheng_last snapshots, fan and huanyuan      (migration, part one)
//   flag == 2: the E*1 / H*1 back-propagation fields         (migration, part two)
void gpu_memory_set_zero(int flag)
{
    // Report (without aborting) any cudaMemset failure, matching the
    // error-reporting convention used by the other gpu_memory_* helpers.
    // Return values are checked directly; cudaGetLastError() is deliberately
    // avoided here because it would clear sticky error state that callers
    // (e.g. gpu_parallel_two) inspect afterwards.
#define ZERO_CHECK(call) do { if ((call) != cudaSuccess) { printf("cudaMemset failed!"); } } while (0)
    // Element counts of the staggered-grid field arrays; these mirror the
    // sizes used at allocation time (int arithmetic kept identical on purpose).
    int szEx = nx * (ny + 1)*(nz + 1);
    int szEy = (nx + 1)*ny*(nz + 1);
    int szEz = (nx + 1)*(ny + 1)*nz;
    int szHx = (nx + 1)*ny*nz;
    int szHy = nx * (ny + 1)*nz;
    int szHz = nx * ny*(nz + 1);
    ZERO_CHECK(cudaMemset(dev_Ex, 0, szEx * sizeof(float)));
    ZERO_CHECK(cudaMemset(dev_UEyz, 0, szEx * sizeof(float)));
    ZERO_CHECK(cudaMemset(dev_UEzy, 0, szEx * sizeof(float)));
    ZERO_CHECK(cudaMemset(dev_Ey, 0, szEy * sizeof(float)));
    ZERO_CHECK(cudaMemset(dev_UEzx, 0, szEy * sizeof(float)));
    ZERO_CHECK(cudaMemset(dev_UExz, 0, szEy * sizeof(float)));
    ZERO_CHECK(cudaMemset(dev_Ez, 0, szEz * sizeof(float)));
    ZERO_CHECK(cudaMemset(dev_UExy, 0, szEz * sizeof(float)));
    ZERO_CHECK(cudaMemset(dev_UEyx, 0, szEz * sizeof(float)));
    ZERO_CHECK(cudaMemset(dev_Hx, 0, szHx * sizeof(float)));
    ZERO_CHECK(cudaMemset(dev_UHyz, 0, szHx * sizeof(float)));
    ZERO_CHECK(cudaMemset(dev_UHzy, 0, szHx * sizeof(float)));
    ZERO_CHECK(cudaMemset(dev_Hy, 0, szHy * sizeof(float)));
    ZERO_CHECK(cudaMemset(dev_UHzx, 0, szHy * sizeof(float)));
    ZERO_CHECK(cudaMemset(dev_UHxz, 0, szHy * sizeof(float)));
    ZERO_CHECK(cudaMemset(dev_Hz, 0, szHz * sizeof(float)));
    ZERO_CHECK(cudaMemset(dev_UHxy, 0, szHz * sizeof(float)));
    ZERO_CHECK(cudaMemset(dev_UHyx, 0, szHz * sizeof(float)));
    if (flag == 0)
    {
        ZERO_CHECK(cudaMemset(dev_V, 0, sizeof(V)));
        ZERO_CHECK(cudaMemset(dev_E_obs, 0, sizeof(E_obs)));
    }
    else if (flag == 1)
    {
        ZERO_CHECK(cudaMemset(dev_V, 0, sizeof(V)));
        // Per-time-step boundary-field histories (one slab per orientation).
        ZERO_CHECK(cudaMemset(dev_Ex_zheng_1, 0, (it)*(2 * npmlc)*(ny - 2 * npml)*(nz - 2 * npml) * sizeof(float)));
        ZERO_CHECK(cudaMemset(dev_Ex_zheng_2, 0, (it)*(nx - 2 * npml)*(2 * npmlc)*(nz - 2 * npml) * sizeof(float)));
        ZERO_CHECK(cudaMemset(dev_Ex_zheng_3, 0, (it)*(nx - 2 * npml)*(ny - 2 * npml)*(2 * npmlc) * sizeof(float)));
        ZERO_CHECK(cudaMemset(dev_Ey_zheng_1, 0, (it)*(2 * npmlc)*(ny - 2 * npml)*(nz - 2 * npml) * sizeof(float)));
        ZERO_CHECK(cudaMemset(dev_Ey_zheng_2, 0, (it)*(nx - 2 * npml)*(2 * npmlc)*(nz - 2 * npml) * sizeof(float)));
        ZERO_CHECK(cudaMemset(dev_Ey_zheng_3, 0, (it)*(nx - 2 * npml)*(ny - 2 * npml)*(2 * npmlc) * sizeof(float)));
        ZERO_CHECK(cudaMemset(dev_Ez_zheng_1, 0, (it)*(2 * npmlc)*(ny - 2 * npml)*(nz - 2 * npml) * sizeof(float)));
        ZERO_CHECK(cudaMemset(dev_Ez_zheng_2, 0, (it)*(nx - 2 * npml)*(2 * npmlc)*(nz - 2 * npml) * sizeof(float)));
        ZERO_CHECK(cudaMemset(dev_Ez_zheng_3, 0, (it)*(nx - 2 * npml)*(ny - 2 * npml)*(2 * npmlc) * sizeof(float)));
        ZERO_CHECK(cudaMemset(dev_Hx_zheng_1, 0, (it)*(2 * npmlc)*(ny - 2 * npml)*(nz - 2 * npml) * sizeof(float)));
        ZERO_CHECK(cudaMemset(dev_Hx_zheng_2, 0, (it)*(nx - 2 * npml)*(2 * npmlc)*(nz - 2 * npml) * sizeof(float)));
        ZERO_CHECK(cudaMemset(dev_Hx_zheng_3, 0, (it)*(nx - 2 * npml)*(ny - 2 * npml)*(2 * npmlc) * sizeof(float)));
        ZERO_CHECK(cudaMemset(dev_Hy_zheng_1, 0, (it)*(2 * npmlc)*(ny - 2 * npml)*(nz - 2 * npml) * sizeof(float)));
        ZERO_CHECK(cudaMemset(dev_Hy_zheng_2, 0, (it)*(nx - 2 * npml)*(2 * npmlc)*(nz - 2 * npml) * sizeof(float)));
        ZERO_CHECK(cudaMemset(dev_Hy_zheng_3, 0, (it)*(nx - 2 * npml)*(ny - 2 * npml)*(2 * npmlc) * sizeof(float)));
        ZERO_CHECK(cudaMemset(dev_Hz_zheng_1, 0, (it)*(2 * npmlc)*(ny - 2 * npml)*(nz - 2 * npml) * sizeof(float)));
        ZERO_CHECK(cudaMemset(dev_Hz_zheng_2, 0, (it)*(nx - 2 * npml)*(2 * npmlc)*(nz - 2 * npml) * sizeof(float)));
        ZERO_CHECK(cudaMemset(dev_Hz_zheng_3, 0, (it)*(nx - 2 * npml)*(ny - 2 * npml)*(2 * npmlc) * sizeof(float)));
        // Final-volume snapshots plus the fan/huanyuan work arrays all share
        // the interior-region size.
        size_t sz_last = (nx - 2 * npml)*(ny - 2 * npml)*(nz - 2 * npml) * sizeof(float);
        ZERO_CHECK(cudaMemset(dev_Ex_zheng_last, 0, sz_last));
        ZERO_CHECK(cudaMemset(dev_Ey_zheng_last, 0, sz_last));
        ZERO_CHECK(cudaMemset(dev_Ez_zheng_last, 0, sz_last));
        ZERO_CHECK(cudaMemset(dev_Hx_zheng_last, 0, sz_last));
        ZERO_CHECK(cudaMemset(dev_Hy_zheng_last, 0, sz_last));
        ZERO_CHECK(cudaMemset(dev_Hz_zheng_last, 0, sz_last));
        ZERO_CHECK(cudaMemset(dev_fan, 0, sz_last));
        ZERO_CHECK(cudaMemset(dev_huanyuan, 0, sz_last));
    }
    else if (flag == 2)
    {
        ZERO_CHECK(cudaMemset(dev_Ex1, 0, sizeof(Ex1)));
        ZERO_CHECK(cudaMemset(dev_Ey1, 0, sizeof(Ey1)));
        ZERO_CHECK(cudaMemset(dev_Ez1, 0, sizeof(Ez1)));
        ZERO_CHECK(cudaMemset(dev_Hx1, 0, sizeof(Hx1)));
        ZERO_CHECK(cudaMemset(dev_Hy1, 0, sizeof(Hy1)));
        ZERO_CHECK(cudaMemset(dev_Hz1, 0, sizeof(Hz1)));
    }
#undef ZERO_CHECK
}
// Copy host variables into GPU device memory.
// Always copied: CAE*, CBE*, RAE**, RBE**, CPH*, CQH*, RAH**, RBH**, k*_E*, k*_H*, source.
// When pianYi is true the observed-field record E_obs is copied as well.
// Upload the host-side coefficient tables (CAE*/CBE*, RAE**/RBE**, CPH*/CQH*,
// RAH**/RBH**), the k*_E* / k*_H* stretching arrays and the source waveform
// into their device-side counterparts. When pianYi is set, E_obs is uploaded
// too (it is only needed by the migration stage).
void gpu_memory_copy(bool pianYi)
{
    cudaError_t cudaStatus;
    // Every table follows the naming pattern dev_<name> (device) / <name>
    // (host), so a helper macro keeps the long transfer list readable while
    // preserving the exact per-copy error report of the original code.
#define H2D(name) do { \
    cudaStatus = cudaMemcpy(dev_##name, name, sizeof(name), cudaMemcpyHostToDevice); \
    if (cudaStatus != cudaSuccess) { printf("cudaMemcpy failed!"); } \
} while (0)
    // E-field update coefficients and their CPML auxiliary tables.
    H2D(CAEx);  H2D(CBEx);
    H2D(RAEyz); H2D(RBEyz);
    H2D(RAEzy); H2D(RBEzy);
    H2D(CAEy);  H2D(CBEy);
    H2D(RAExz); H2D(RBExz);
    H2D(RAEzx); H2D(RBEzx);
    H2D(CAEz);  H2D(CBEz);
    H2D(RAExy); H2D(RBExy);
    H2D(RAEyx); H2D(RBEyx);
    // H-field update coefficients and their CPML auxiliary tables.
    H2D(CPHx);  H2D(CQHx);
    H2D(RAHyz); H2D(RBHyz);
    H2D(RAHzy); H2D(RBHzy);
    H2D(CPHy);  H2D(CQHy);
    H2D(RAHxz); H2D(RBHxz);
    H2D(RAHzx); H2D(RBHzx);
    H2D(CPHz);  H2D(CQHz);
    H2D(RAHxy); H2D(RBHxy);
    H2D(RAHyx); H2D(RBHyx);
    // Coordinate-stretching factors for the E- and H-field updates.
    H2D(kx_Ey); H2D(kx_Ez);
    H2D(ky_Ex); H2D(ky_Ez);
    H2D(kz_Ex); H2D(kz_Ey);
    H2D(kx_Hy); H2D(kx_Hz);
    H2D(ky_Hx); H2D(ky_Hz);
    H2D(kz_Hx); H2D(kz_Hy);
    // Source time series.
    H2D(source);
    if (pianYi)
    {
        // Recorded observations, required only for migration.
        H2D(E_obs);
    }
#undef H2D
}
// Free GPU device memory
// Release all GPU device allocations made for the simulation. The pianYi
// branch additionally releases the migration-only buffers.
// Fix: dev_Ex1..dev_Hz1 are allocated in the pianYi branch of the allocator
// but were never freed here — they are now released as well.
void gpu_memory_free(bool pianYi)
{
    // Field arrays and CPML auxiliary arrays.
    cudaFree(dev_Ex);
    cudaFree(dev_Ey);
    cudaFree(dev_Ez);
    cudaFree(dev_UEyz);
    cudaFree(dev_UEzy);
    cudaFree(dev_UExz);
    cudaFree(dev_UEzx);
    cudaFree(dev_UExy);
    cudaFree(dev_UEyx);
    cudaFree(dev_Hx);
    cudaFree(dev_Hy);
    cudaFree(dev_Hz);
    cudaFree(dev_UHyz);
    cudaFree(dev_UHzy);
    cudaFree(dev_UHxz);
    cudaFree(dev_UHzx);
    cudaFree(dev_UHxy);
    cudaFree(dev_UHyx);
    // Update-coefficient tables.
    cudaFree(dev_CAEx);
    cudaFree(dev_CAEy);
    cudaFree(dev_CAEz);
    cudaFree(dev_CBEx);
    cudaFree(dev_CBEy);
    cudaFree(dev_CBEz);
    cudaFree(dev_RAEyz);
    cudaFree(dev_RAEzy);
    cudaFree(dev_RAEzx);
    cudaFree(dev_RAExz);
    cudaFree(dev_RAExy);
    cudaFree(dev_RAEyx);
    cudaFree(dev_RBEyz);
    cudaFree(dev_RBEzy);
    cudaFree(dev_RBEzx);
    cudaFree(dev_RBExz);
    cudaFree(dev_RBExy);
    cudaFree(dev_RBEyx);
    cudaFree(dev_CPHx);
    cudaFree(dev_CQHx);
    cudaFree(dev_CPHy);
    cudaFree(dev_CQHy);
    cudaFree(dev_CPHz);
    cudaFree(dev_CQHz);
    cudaFree(dev_RAHyz);
    cudaFree(dev_RAHzy);
    cudaFree(dev_RAHzx);
    cudaFree(dev_RAHxz);
    cudaFree(dev_RAHxy);
    cudaFree(dev_RAHyx);
    cudaFree(dev_RBHyz);
    cudaFree(dev_RBHzy);
    cudaFree(dev_RBHzx);
    cudaFree(dev_RBHxz);
    cudaFree(dev_RBHxy);
    cudaFree(dev_RBHyx);
    // NOTE(review): fswzx..jswzz are also indexed on the host (e.g. fswzx[i]
    // in gpu_parallel_one), so freeing them with cudaFree assumes they were
    // allocated with cudaMallocManaged / cudaHostAlloc — confirm.
    cudaFree(fswzx);
    cudaFree(fswzy);
    cudaFree(fswzz);
    cudaFree(jswzx);
    cudaFree(jswzy);
    cudaFree(jswzz);
    cudaFree(dev_E_obs);
    cudaFree(dev_V);
    cudaFree(dev_source);
    // Coordinate-stretching factor arrays.
    cudaFree(dev_kx_Ey);
    cudaFree(dev_kx_Ez);
    cudaFree(dev_ky_Ex);
    cudaFree(dev_ky_Ez);
    cudaFree(dev_kz_Ex);
    cudaFree(dev_kz_Ey);
    cudaFree(dev_kx_Hy);
    cudaFree(dev_kx_Hz);
    cudaFree(dev_ky_Hx);
    cudaFree(dev_ky_Hz);
    cudaFree(dev_kz_Hx);
    cudaFree(dev_kz_Hy);
    if (pianYi)
    {
        // Migration-only buffers: boundary-field histories ...
        cudaFree(dev_Ex_zheng_1);
        cudaFree(dev_Ex_zheng_2);
        cudaFree(dev_Ex_zheng_3);
        cudaFree(dev_Ey_zheng_1);
        cudaFree(dev_Ey_zheng_2);
        cudaFree(dev_Ey_zheng_3);
        cudaFree(dev_Ez_zheng_1);
        cudaFree(dev_Ez_zheng_2);
        cudaFree(dev_Ez_zheng_3);
        cudaFree(dev_Hx_zheng_1);
        cudaFree(dev_Hx_zheng_2);
        cudaFree(dev_Hx_zheng_3);
        cudaFree(dev_Hy_zheng_1);
        cudaFree(dev_Hy_zheng_2);
        cudaFree(dev_Hy_zheng_3);
        cudaFree(dev_Hz_zheng_1);
        cudaFree(dev_Hz_zheng_2);
        cudaFree(dev_Hz_zheng_3);
        // ... final-volume snapshots ...
        cudaFree(dev_Ex_zheng_last);
        cudaFree(dev_Ey_zheng_last);
        cudaFree(dev_Ez_zheng_last);
        cudaFree(dev_Hx_zheng_last);
        cudaFree(dev_Hy_zheng_last);
        cudaFree(dev_Hz_zheng_last);
        // ... imaging work arrays and accumulators ...
        cudaFree(dev_fan);
        cudaFree(dev_huanyuan);
        cudaFree(dev_ns);
        cudaFree(dev_zv);
        cudaFree(dev_fv);
        // ... and the back-propagation fields (previously leaked).
        cudaFree(dev_Ex1);
        cudaFree(dev_Ey1);
        cudaFree(dev_Ez1);
        cudaFree(dev_Hx1);
        cudaFree(dev_Hy1);
        cudaFree(dev_Hz1);
    }
}
// One FDTD time step on the GPU: update UH**, then H*, then UE**, then E*
// Advance the FDTD simulation by one time step entirely on the device.
// Launch order is significant: the CPML auxiliary H terms (UH**) are updated
// from the current E fields, then the H fields, then the auxiliary E terms
// (UE**) from the new H fields, and finally the E fields. Kernels within each
// group are independent; ordering across groups is guaranteed by the default
// stream. Grid/block configurations (gridUHyz etc.) are file-level globals.
void zheng_yan()
{
cudaError_t cudaStatus = cudaSuccess;
// CPML auxiliary terms feeding the H-field update.
gpu_UHyz << < gridUHyz, blockUHyz >> > (dev_UHyz, dev_RBHyz, dev_RAHyz, dev_Ez);
gpu_UHzy << < gridUHzy, blockUHzy >> > (dev_UHzy, dev_RBHzy, dev_RAHzy, dev_Ey);
gpu_UHxy << < gridUHxy, blockUHxy >> > (dev_UHxy, dev_RBHxy, dev_RAHxy, dev_Ey);
gpu_UHxz << < gridUHxz, blockUHxz >> > (dev_UHxz, dev_RBHxz, dev_RAHxz, dev_Ez);
gpu_UHyx << < gridUHyx, blockUHyx >> > (dev_UHyx, dev_RBHyx, dev_RAHyx, dev_Ex);
gpu_UHzx << < gridUHzx, blockUHzx >> > (dev_UHzx, dev_RBHzx, dev_RAHzx, dev_Ex);
// H-field half-step update from curl(E) and the auxiliary terms.
gpu_Hx << < gridHx, blockHx >> > (dev_Hx, dev_CPHx, dev_CQHx, dev_ky_Hx, dev_kz_Hx, dev_Ez, dev_Ey, dev_UHyz, dev_UHzy);
gpu_Hy << < gridHy, blockHy >> > (dev_Hy, dev_CPHy, dev_CQHy, dev_kz_Hy, dev_kx_Hy, dev_Ex, dev_Ez, dev_UHzx, dev_UHxz);
gpu_Hz << < gridHz, blockHz >> > (dev_Hz, dev_CPHz, dev_CQHz, dev_kx_Hz, dev_ky_Hz, dev_Ey, dev_Ex, dev_UHxy, dev_UHyx);
// CPML auxiliary terms feeding the E-field update (use the new H fields).
gpu_UExy << < gridUExy, blockUExy >> > (dev_UExy, dev_RBExy, dev_RAExy, dev_Hy);
gpu_UExz << < gridUExz, blockUExz >> > (dev_UExz, dev_RBExz, dev_RAExz, dev_Hz);
gpu_UEyx << < gridUEyx, blockUEyx >> > (dev_UEyx, dev_RBEyx, dev_RAEyx, dev_Hx);
gpu_UEyz << < gridUEyz, blockUEyz >> > (dev_UEyz, dev_RBEyz, dev_RAEyz, dev_Hz);
gpu_UEzx << < gridUEzx, blockUEzx >> > (dev_UEzx, dev_RBEzx, dev_RAEzx, dev_Hx);
gpu_UEzy << < gridUEzy, blockUEzy >> > (dev_UEzy, dev_RBEzy, dev_RAEzy, dev_Hy);
// E-field half-step update from curl(H) and the auxiliary terms.
gpu_Ex << < gridEx, blockEx >> > (dev_Ex, dev_CAEx, dev_CBEx, dev_ky_Ex, dev_kz_Ex, dev_Hz, dev_Hy, dev_UEyz, dev_UEzy);
gpu_Ey << < gridEy, blockEy >> > (dev_Ey, dev_CAEy, dev_CBEy, dev_kz_Ey, dev_kx_Ey, dev_Hx, dev_Hz, dev_UEzx, dev_UExz);
gpu_Ez << < gridEz, blockEz >> > (dev_Ez, dev_CAEz, dev_CBEz, dev_kx_Ez, dev_ky_Ez, dev_Hy, dev_Hx, dev_UExy, dev_UEyx);
cudaDeviceSynchronize();
// Did any of the kernel launches / executions above fail?
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess)
{
printf("Zhengyan Calc Failed: %s\n", cudaGetErrorString(cudaStatus));
}
}
// Forward-modelling pass: for each source position i, inject the source
// waveform into Ex, time-step the fields `it` times with zheng_yan(), record
// the Ex value at the receiver into dev_V each step, and store the trace into
// E_obs(:, i). Results are written to disk via print_E_obs().
// Returns the last CUDA status so the caller can report failure.
// Improvement: the per-time-step 4-byte device->host copy into E_obs[j][i]
// has been replaced by one bulk dev_V -> V transfer per source position
// (same values, far fewer PCIe round trips) — the same pattern the file
// already uses for ns/zv/fv.
cudaError_t gpu_parallel_one()
{
    cudaError_t cudaStatus = cudaSuccess;
    int i, j;
    for (i = 0; i < szfsw; i++)
    {
        gpu_memory_set_zero(0); // clear device-side E*, UE**, H*, UH**, V and E_obs
        for (j = 0; j < it; j++)
        {
            // Progress report every 50 time steps.
            if (j % 50 == 0)
            {
                printf("i = %3d / %d, j = %4d / %d\n", i, szfsw, j, it);
            }
            // matlab: Ex(fswzx(i),fswzy(i),fswzz(i)) = source(j);  (device to device)
            // fswz* hold 1-based MATLAB indices, hence the -1 offsets.
            int idxEx = (fswzx[i] - 1) * (ny + 1) * (nz + 1) + (fswzy[i] - 1) * (nz + 1) + (fswzz[i] - 1);
            cudaStatus = cudaMemcpy(&(dev_Ex[idxEx]), &(dev_source[j]), sizeof(float), cudaMemcpyDeviceToDevice);
            if (cudaStatus != cudaSuccess) { printf("source --> Ex cudaMemcpy failed: %s\n", cudaGetErrorString(cudaStatus)); return cudaStatus; }
            cudaDeviceSynchronize();
            // Advance the FDTD fields by one time step on the GPU.
            zheng_yan();
            // matlab: V(j) = Ex(jswzx(i), jswzy(i), jswzz(i));  (device to device)
            idxEx = (jswzx[i] - 1) * (ny + 1) * (nz + 1) + (jswzy[i] - 1) * (nz + 1) + (jswzz[i] - 1);
            cudaStatus = cudaMemcpy(&(dev_V[j]), &(dev_Ex[idxEx]), sizeof(float), cudaMemcpyDeviceToDevice);
            if (cudaStatus != cudaSuccess) { printf("Ex --> V cudaMemcpy failed: %s\n", cudaGetErrorString(cudaStatus)); return cudaStatus; }
            cudaDeviceSynchronize();
        }
        // matlab: E_obs(:, i) = V;  — one bulk device-to-host transfer per
        // source position, then scatter into the E_obs column on the host.
        cudaStatus = cudaMemcpy(V, dev_V, sizeof(V), cudaMemcpyDeviceToHost);
        if (cudaStatus != cudaSuccess) { printf("V --> E_obs cudaMemcpy failed: %s\n", cudaGetErrorString(cudaStatus)); return cudaStatus; }
        for (j = 0; j < it; j++)
        {
            E_obs[j][i] = V[j];
        }
    }
    cudaDeviceSynchronize();
    printf("finish calc 1 !\n");
    // Write the recorded observations to disk.
    print_E_obs();
    return cudaStatus;
}
// Reverse-time-migration pass. For every source position i:
//   part one: forward-model the wavefield, storing the boundary fields
//             (E*/H*_zheng_1..3) every time step and a final-volume snapshot
//             (*_zheng_last);
//   part two: back-propagate the observed data (E_obs) while reconstructing
//             the forward field from the stored boundaries, and accumulate
//             the imaging condition into ns and the normalisers zv / fv.
// Returns the last CUDA status so the caller can report failure.
// Fix: the error message for the dev_zv -> zv copy wrongly said "ns".
cudaError_t gpu_parallel_two()
{
    cudaError_t cudaStatus = cudaSuccess;
    // Clear the ns / zv / fv accumulators on the device.
    cudaMemset(dev_ns, 0, sizeof(ns));
    cudaMemset(dev_zv, 0, sizeof(zv));
    cudaMemset(dev_fv, 0, sizeof(fv));
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess)
    {
        printf("ns&zv&fv cudaMemset Failed: %s\n", cudaGetErrorString(cudaStatus));
        return cudaStatus;
    }
    int i, j;
    for (i = 0; i < szfsw; i++)
    {
        // --------------------- part one ---------------------
        // flag == 1: zero E*, UE**, H*, UH** plus V, the E*/H*_zheng_* boundary
        // histories, the *_zheng_last snapshots, fan and huanyuan.
        gpu_memory_set_zero(1);
        cudaDeviceSynchronize();
        for (j = 0; j < it; j++)
        {
            // Progress report every 50 time steps.
            if (j%50 == 0) { printf("i = %3d / %d, j = %4d / %d\n", i, szfsw, j, it); }
            // matlab: Ex(fswzx(i), fswzy(i), fswzz(i)) = source(j);
            // fswz* hold 1-based MATLAB indices, hence the -1 offsets.
            int idxEx = (fswzx[i] - 1) * (ny + 1) * (nz + 1) + (fswzy[i] - 1) * (nz + 1) + (fswzz[i] - 1);
            cudaStatus = cudaMemcpy(&(dev_Ex[idxEx]), &(dev_source[j]), sizeof(float), cudaMemcpyDeviceToDevice);
            cudaDeviceSynchronize();
            if (cudaStatus != cudaSuccess)
            {
                printf("source --> Ex cudaMemcpy failed: %s\n", cudaGetErrorString(cudaStatus));
                return cudaStatus;
            }
            // Advance the FDTD fields by one time step on the GPU.
            zheng_yan();
            // Save the boundary slabs for this time step so the forward field
            // can be rebuilt during back-propagation.
            gpu_zheng_1 << <grid_zheng_1, block_zheng_1 >> > (
                dev_Ex_zheng_1, dev_Ey_zheng_1, dev_Ez_zheng_1,
                dev_Hx_zheng_1, dev_Hy_zheng_1, dev_Hz_zheng_1,
                dev_Ex, dev_Ey, dev_Ez,
                dev_Hx, dev_Hy, dev_Hz,
                j);
            gpu_zheng_2 << <grid_zheng_2, block_zheng_2 >> > (
                dev_Ex_zheng_2, dev_Ey_zheng_2, dev_Ez_zheng_2,
                dev_Hx_zheng_2, dev_Hy_zheng_2, dev_Hz_zheng_2,
                dev_Ex, dev_Ey, dev_Ez,
                dev_Hx, dev_Hy, dev_Hz,
                j);
            gpu_zheng_3 << <grid_zheng_3, block_zheng_3 >> > (
                dev_Ex_zheng_3, dev_Ey_zheng_3, dev_Ez_zheng_3,
                dev_Hx_zheng_3, dev_Hy_zheng_3, dev_Hz_zheng_3,
                dev_Ex, dev_Ey, dev_Ez,
                dev_Hx, dev_Hy, dev_Hz,
                j);
            // Keep a snapshot of the interior volume (overwritten each step;
            // after the loop it holds the final-time field).
            gpu_zheng_last << <grid_zheng_last, block_zheng_last >> > (
                dev_Ex_zheng_last, dev_Ey_zheng_last, dev_Ez_zheng_last,
                dev_Hx_zheng_last, dev_Hy_zheng_last, dev_Hz_zheng_last,
                dev_Ex, dev_Ey, dev_Ez,
                dev_Hx, dev_Hy, dev_Hz);
            cudaDeviceSynchronize();
        }
        cudaStatus = cudaGetLastError();
        printf("--------------------- part one --------------------- : %s\n", cudaGetErrorString(cudaStatus));
        // --------------------- part two ---------------------
        // flag == 2: zero E*, UE**, H*, UH** plus E*1 / H*1.
        gpu_memory_set_zero(2);
        for (j = it - 1; j >= 0; j--)
        {
            if (j%50==0) { printf("i = %3d / %d, j = %4d / %d\n", i, szfsw, j, it); }
            // matlab: Ex(fswzx(i), fswzy(i), fswzz(i)) = E_obs(j, i);
            // inject the observed trace, reversed in time.
            int idxEx = (fswzx[i] - 1) * (ny + 1) * (nz + 1) + (fswzy[i] - 1) * (nz + 1) + (fswzz[i] - 1);
            int idxE_obs = j * szfsw + i;
            cudaStatus = cudaMemcpy(&(dev_Ex[idxEx]), &(dev_E_obs[idxE_obs]), sizeof(float), cudaMemcpyDeviceToDevice);
            cudaDeviceSynchronize();
            if (cudaStatus != cudaSuccess)
            {
                printf("E_obs --> Ex cudaMemcpy failed: %s\n", cudaGetErrorString(cudaStatus));
                return cudaStatus;
            }
            // Back-propagate the receiver field by one step.
            zheng_yan();
            cudaDeviceSynchronize();
            // matlab: fan = Ex(npml+1:nx-npml, npml+1:ny-npml, npml+1:nz-npml);
            gpu_fan_huanyuan << <grid_fan_huanyuan, block_fan_huanyuan >> > (dev_fan, dev_Ex);
            cudaDeviceSynchronize();
            if (j == it - 1)
            {
                // Seed the reconstruction from the stored final-time snapshot.
                gpu_back_zheng_last << <grid_zheng_last, block_zheng_last >> > (
                    dev_Ex_zheng_last, dev_Ey_zheng_last, dev_Ez_zheng_last,
                    dev_Hx_zheng_last, dev_Hy_zheng_last, dev_Hz_zheng_last,
                    dev_Ex1, dev_Ey1, dev_Ez1,
                    dev_Hx1, dev_Hy1, dev_Hz1);
                cudaDeviceSynchronize();
            }
            else //j < it - 1
            {
                // Restore the saved boundary slabs for time step j before
                // stepping the reconstructed field backwards.
                gpu_back_zheng_1 << <grid_zheng_1, block_zheng_1 >> > (
                    dev_Ex_zheng_1, dev_Ey_zheng_1, dev_Ez_zheng_1,
                    dev_Hx_zheng_1, dev_Hy_zheng_1, dev_Hz_zheng_1,
                    dev_Ex1, dev_Ey1, dev_Ez1,
                    dev_Hx1, dev_Hy1, dev_Hz1,
                    j);
                gpu_back_zheng_2 << <grid_zheng_2, block_zheng_2 >> > (
                    dev_Ex_zheng_2, dev_Ey_zheng_2, dev_Ez_zheng_2,
                    dev_Hx_zheng_2, dev_Hy_zheng_2, dev_Hz_zheng_2,
                    dev_Ex1, dev_Ey1, dev_Ez1,
                    dev_Hx1, dev_Hy1, dev_Hz1,
                    j);
                gpu_back_zheng_3 << <grid_zheng_3, block_zheng_3 >> > (
                    dev_Ex_zheng_3, dev_Ey_zheng_3, dev_Ez_zheng_3,
                    dev_Hx_zheng_3, dev_Hy_zheng_3, dev_Hz_zheng_3,
                    dev_Ex1, dev_Ey1, dev_Ez1,
                    dev_Hx1, dev_Hy1, dev_Hz1,
                    j);
                cudaDeviceSynchronize();
                // matlab: Ex1(fswzx(i), fswzy(i), fswzz(i)) = source(j);
                int idxEx1 = (fswzx[i] - 1) * (ny + 1) * (nz + 1) + (fswzy[i] - 1) * (nz + 1) + (fswzz[i] - 1);
                cudaStatus = cudaMemcpy(&(dev_Ex1[idxEx1]), &(dev_source[j]), sizeof(float), cudaMemcpyDeviceToDevice);
                if (cudaStatus != cudaSuccess)
                {
                    printf("source --> Ex1 cudaMemcpy failed: %s\n", cudaGetErrorString(cudaStatus));
                    return cudaStatus;
                }
                // Step the reconstructed forward field backwards in time.
                gpu_H1 << <grid_HE1, block_HE1 >> > (
                    dev_Hx1, dev_Hy1, dev_Hz1,
                    dev_Ex1, dev_Ey1, dev_Ez1,
                    dev_CPHx, dev_CPHy, dev_CPHz,
                    dev_CQHx, dev_CQHy, dev_CQHz);
                gpu_E1 << <grid_HE1, block_HE1 >> > (
                    dev_Hx1, dev_Hy1, dev_Hz1,
                    dev_Ex1, dev_Ey1, dev_Ez1,
                    dev_CAEx, dev_CAEy, dev_CAEz,
                    dev_CBEx, dev_CBEy, dev_CBEz);
            }
            // matlab: huanyuan = Ex1(npml+1:nx-npml, npml+1:ny-npml, npml+1:nz-npml);
            gpu_fan_huanyuan << <grid_fan_huanyuan, block_fan_huanyuan >> > (dev_huanyuan, dev_Ex1);
            // Accumulate the cross-correlation image and the normalisers:
            // ns += huanyuan*fan, zv += huanyuan^2, fv += fan^2.
            gpu_nzf << <grid_nzf, block_nzf >> > (dev_ns, dev_huanyuan, dev_fan);
            gpu_nzf << <grid_nzf, block_nzf >> > (dev_zv, dev_huanyuan, dev_huanyuan);
            gpu_nzf << <grid_nzf, block_nzf >> > (dev_fv, dev_fan, dev_fan);
            cudaDeviceSynchronize();
        }
        cudaStatus = cudaGetLastError();
        printf("--------------------- part two --------------------- : %s\n", cudaGetErrorString(cudaStatus));
    }
    // Copy ns / fv / zv from the device back to host memory.
    cudaStatus = cudaMemcpy(ns, dev_ns, sizeof(ns), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess)
    {
        printf("dev_ns --> ns cudaMemcpy failed: %s\n", cudaGetErrorString(cudaStatus));
        return cudaStatus;
    }
    cudaStatus = cudaMemcpy(fv, dev_fv, sizeof(fv), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess)
    {
        printf("dev_fv --> fv cudaMemcpy failed: %s\n", cudaGetErrorString(cudaStatus));
        return cudaStatus;
    }
    cudaStatus = cudaMemcpy(zv, dev_zv, sizeof(zv), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess)
    {
        // Fixed: this message previously said "dev_zv --> ns".
        printf("dev_zv --> zv cudaMemcpy failed: %s\n", cudaGetErrorString(cudaStatus));
        return cudaStatus;
    }
    printf("finish calc 2!\n");
    print_nzf("nzf/ns.txt", (float*)ns, nx - 2 * npml, ny - 2 * npml, nz - 2 * npml);
    print_nzf("nzf/fv.txt", (float*)fv, nx - 2 * npml, ny - 2 * npml, nz - 2 * npml);
    print_nzf("nzf/zv.txt", (float*)zv, nx - 2 * npml, ny - 2 * npml, nz - 2 * npml);
    return cudaStatus;
}
// Dispatches to the offset ("pianYi") or non-offset GPU pipeline and reports
// the outcome on stdout.
void gpu_parallel(bool pianYi)
{
    if (pianYi)
    {
        cudaError_t status = gpu_parallel_two();
        if (status == cudaSuccess) { printf("gpu_parallel_two success!\n"); }
        else { printf("gpu_parallel_two failed!"); }
    }
    else
    {
        cudaError_t status = gpu_parallel_one();
        if (status == cudaSuccess) { printf("gpu_parallel_one success!\n"); }
        else { printf("gpu_parallel_one failed!"); }
    }
}
/************************************************************************************
 * Entry point: load the MATLAB-exported data, run the GPU pipeline, then clean up.
 ************************************************************************************/
int main()
{
// Switch the working directory (Linux calls; the commented _chdir/_getcwd
// pair are the Windows equivalents).
chdir(path); //linux
//_chdir(path);
char str[80];
printf("Current Dir: %s \n",getcwd(str, 80)); //linux
//printf("Current Dir: %s \n", _getcwd(str, 80));
// Read the input data exported from MATLAB as text files.
read_data_from_txt(isPianYi);
printf("Read Data From Txt OK ! \n");
// Select the GPU used for the computation.
cudaError_t cudaStatus = cudaSetDevice(cudaDevice);
if (cudaStatus != cudaSuccess) { printf("cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); return 1; }
else { printf("cudaSetDevice success!\n"); }
// Allocate device memory and copy the host data to the GPU.
gpu_memory_malloc(isPianYi);
gpu_memory_copy(isPianYi);
// Run the GPU computation and write the results to files.
gpu_parallel(isPianYi);
// Release device memory.
gpu_memory_free(isPianYi);
// Reset the GPU before exiting.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) { printf("cudaDeviceReset failed!"); return 1; }
// Release host memory.
freeMemory();
return 0;
} |
f63985fa622720821b13d413f04415821db9d1fb.hip | // !!! This is a file automatically generated by hipify!!!
/* Code completed by Nerea Alamar & Antonio Marco Rodrigo*/
//Librerias de Thrust
#include <thrust/extrema.h>
#include <thrust/device_vector.h>
#include <thrust/scan.h>
//Librerias de Cuda
#include <hip/hip_runtime.h>
#include <iostream>
#include "device_launch_parameters.h"
//Para evitar el subrayado rojo en Visual por intellisense en __syncthreads y en atomicAdd
#pragma once
#ifdef __INTELLISENSE__
void __syncthreads();
void atomicAdd(unsigned int*, unsigned int);
#endif
//Tamaño de bloque optimo para imagenes de resolución 2x1 (ej: waterfall_bg)
#define BLOCK_SIZE 128;
//Cambiar este valor si se quieren usar los kernels o las funciones thrust:
// 1-> Thrust
// 0-> Kernels
#define THRUST 0
//Usamos checkCudaErrors como en la prctica anterior para detectar fallos de cuda
#define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__)
// Aborts with a file/line diagnostic whenever a HIP API call reports an error
// (used through the checkCudaErrors macro above).
template<typename T>
void check(T err, const char* const func, const char* const file, const int line) {
    if (err == hipSuccess)
        return;
    std::cerr << "CUDA error at: " << file << ":" << line << std::endl;
    std::cerr << hipGetErrorString(err) << " " << func << std::endl;
    exit(1);
}
//////////////////////////////////////////////////////////////////////////
// KERNELS //
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
// SCAN KERNEL //
//////////////////////////////////////////////////////////////////////////
/* Este kernel SCAN lleva a cabo un exclusive scan mediante el metodo de Hillis Steele
de manera que movemos los valores a la derecha y ponemos un 0 en el principio para
que sea exclusive. Recibe como parametro de entrada el histograma generado en el paso
anterior, y consigue la distribucion acumulativa*/
/* Exclusive scan (Hillis-Steele) over the histogram to build the cumulative
   distribution. Double-buffered in dynamic shared memory; the input is
   shifted one slot right with a leading 0 so the scan is exclusive.
   Launch contract (see calculate_cdf): a single block of exactly numBins
   threads with 2 * numBins * sizeof(unsigned int) bytes of shared memory. */
__global__ void scan(unsigned int* output, const unsigned int* input, int numBins)
{
    int thread = threadIdx.x;
    // Fix: buffer the unsigned int counts in an unsigned int array — the
    // original staged them in a float array, silently converting on every
    // access and losing precision for counts >= 2^24.
    extern __shared__ unsigned int shArrayU[];
    // 'up' and 'down' select the two halves of the double buffer and swap
    // roles on each pass.
    int up = 0;
    int down = 1;
    // Exclusive scan: shift right and force a 0 into slot 0.
    if (thread > 0)
        shArrayU[thread] = input[thread - 1];
    else
        shArrayU[thread] = 0;
    __syncthreads();
    // Hillis-Steele: after the k-th pass each slot holds the sum of the 2^k
    // preceding elements; the offset doubles every pass.
    for (int off = 1; off < numBins; off <<= 1)
    {
        up = 1 - up;   // 1 <-> 0
        down = 1 - up; // 0 <-> 1
        if (thread >= off)
            shArrayU[numBins*up + thread] = shArrayU[numBins*down + thread] + shArrayU[numBins*down + thread - off];
        else
            shArrayU[numBins*up + thread] = shArrayU[numBins*down + thread];
        __syncthreads();
    }
    // Publish the cumulative distribution.
    output[thread] = shArrayU[up*numBins + thread];
}
//////////////////////////////////////////////////////////////////////////
// HISTOGRAMA KERNEL //
//////////////////////////////////////////////////////////////////////////
/* Con este kernel calculamos el histograma dados: el numero de bins, los valores de rango de luminancias y sus valores minimo y maximo
(en concreto, solo nos hara falta el valor minimo y el rango)*/
/* Builds the luminance histogram: each thread maps one pixel's
   log-luminance value to a bin index and increments that bin atomically. */
__global__ void histo(unsigned int* output, const float * input, int numBins, int resolution, float lumMin, float lumRange)
{
    int i = threadIdx.x + blockDim.x*blockIdx.x;
    // Ignore threads past the end of the image (the grid is rounded up).
    if (i >= resolution)
        return;
    int bin = (input[i] - lumMin) / lumRange * numBins;
    // Fix: the maximum luminance maps to bin == numBins, one past the end of
    // the histogram; clamp so it lands in the last bin instead of corrupting
    // adjacent memory. Also guards against small negative rounding results.
    if (bin < 0) bin = 0;
    if (bin > numBins - 1) bin = numBins - 1;
    // Atomic increment avoids races between threads hitting the same bin.
    atomicAdd(&(output[bin]), 1);
}
//////////////////////////////////////////////////////////////////////////
// MAX & MIN FUNCTIONS //
//////////////////////////////////////////////////////////////////////////
/* funciones que calculan el maximo y el minimo de dos valores dados, respectivamente.
Sern necesarias para el kernel de calculo de maximo y minimo valor de luminancia*/
//Devuelve el minimo de los dos valores pasados como parametro
// Device helper: returns the smaller of two floats.
__device__ float minimize(float a, float b)
{
    return (a < b) ? a : b;
}
//Devuelve el maximo de los dos valores pasados como parametro
// Device helper: returns the larger of two floats.
__device__ float maximize(float a, float b)
{
    return (a > b) ? a : b;
}
//////////////////////////////////////////////////////////////////////////
// REDUCE KERNEL //
//////////////////////////////////////////////////////////////////////////
/*Este kernel calculara el valor maximo o minimo del vector de luminancias pasado como parametro.
Para evitar crear dos kernels practicamente iguales, le aadimos un tercer parametro bool llamado "greater",
cuyo valor decidira si lo que estamos calculando es el minimo o el maximo*/
/* Tree reduction: each block reduces blockDim.x elements of 'input' in
   shared memory and writes its partial result to output[blockIdx.x].
   'greater' selects max (true) or min (false) so one kernel serves both.
   Requires blockDim.x floats of dynamic shared memory; the pairwise halving
   assumes blockDim.x is a power of two (the host always launches 128).
   NOTE(review): input[i] is read without a bounds check, and the host side
   (minmax) rounds the grid up with ceil — the last block reads past the end
   of the array whenever the length is not a multiple of blockDim.x; fix by
   passing the element count and substituting the identity value. */
__global__ void reduce(float* output, const float * input, bool greater)
{
// Global index (one element per thread) and block-local index.
int i = threadIdx.x + blockDim.x*blockIdx.x;
int thread = threadIdx.x;
// Stage this block's slice in dynamic shared memory.
extern __shared__ float shArrayR[];
shArrayR[thread] = input[i];
__syncthreads();
// Pairwise reduction: each pass halves the active range until element 0
// holds the block's extreme value (>>1 is /2 done with a bit shift).
for (int s = blockDim.x >> 1; s > 0; s >>= 1)
{
if (thread < s)
{
// 'greater' == true -> max, false -> min.
if (greater)
shArrayR[thread] = maximize(shArrayR[thread], shArrayR[thread + s]);
else
shArrayR[thread] = minimize(shArrayR[thread], shArrayR[thread + s]);
}
__syncthreads();
}
// Thread 0 publishes the block's partial result.
if (thread == 0)
output[blockIdx.x] = shArrayR[0];
}
//////////////////////////////////////////////////////////////////////////
// FUNCIONES //
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
// FUNCION MINMAX //
//////////////////////////////////////////////////////////////////////////
/*Esta funcion recibe como parametro el vector de luminancias, el tamao de la imagen
y greater, que decidira si estamos buscando el minimo o el maximo. La funcion llamara al
kernel reduce para dicha tarea*/
/* Host driver for the min/max reduction: repeatedly launches the 'reduce'
   kernel, shrinking 'size' elements to 'gridsize' partial results per pass,
   until a single value remains. 'greater' selects max (true) or min (false).
   NOTE(review): the grid is rounded up with ceil, so the final partial block
   of 'reduce' reads past the end of its input when the size is not a
   multiple of the block size — see the kernel's note. */
float minmax(const float* const d_logLuminance, int resolution, bool greater)
{
    int size = resolution;
    // Partial results from the previous pass (NULL on the first pass, where
    // the raw luminance vector itself is the input).
    float* half = NULL;
    // Threads per block / blocks per grid for the current pass.
    int blocksize = BLOCK_SIZE;
    int gridsize = ceil(1.0f*size / blocksize);
    // Dynamic shared memory: one float per thread.
    int sharedMemory = blocksize * sizeof(float);
    while (true)
    {
        // Output buffer for this pass: one partial result per block.
        float* deviceResult;
        checkCudaErrors(hipMalloc(&deviceResult, gridsize * sizeof(float)));
        // First pass reduces the full luminance vector; later passes reduce
        // the previous pass's partial results.
        if (half == NULL)
            reduce << <gridsize, blocksize, sharedMemory >> > (deviceResult, d_logLuminance, greater);
        else
            reduce << <gridsize, blocksize, sharedMemory >> > (deviceResult, half, greater);
        hipDeviceSynchronize();
        // The previous pass's buffer is no longer needed.
        if (half != NULL)
            checkCudaErrors(hipFree(half));
        half = deviceResult;
        if (gridsize == 1)
        {
            // Copy the single remaining value back to the host.
            float hostResult;
            checkCudaErrors(hipMemcpy(&hostResult, deviceResult, sizeof(float), hipMemcpyDeviceToHost));
            // Fix: release the final device buffer — the original returned
            // without freeing it, leaking one allocation per call.
            checkCudaErrors(hipFree(deviceResult));
            return hostResult;
        }
        // Next pass reduces the gridsize partial results.
        size = gridsize;
        gridsize = ceil(1.0f*size / blocksize);
    }
}
//////////////////////////////////////////////////////////////////////////
// FUNCION HISTOGRAMA //
//////////////////////////////////////////////////////////////////////////
/* Esta funcion recibe como parametros el vector de luminancias, el numero de bins, el tamao, el valor minimo
de dicho vector calculado de la reduccion y el rango (calculado una vez obtenido el valor minimo y maximo) para
calcular el histograma mediante la llamada al kernel de histograma*/
/* Allocates a zeroed numBins-entry histogram on the device and fills it by
   launching the 'histo' kernel over the luminance vector. Ownership of the
   returned buffer passes to the caller, who must hipFree it. */
unsigned int* histograma(const float* const d_logLuminance, int numBins, int resolution, float lumMin, float lumRange)
{
    // Device histogram, zero-initialised so the atomic increments start clean.
    unsigned int* bins;
    const size_t bytes = numBins * sizeof(unsigned int);
    checkCudaErrors(hipMalloc(&bins, bytes));
    checkCudaErrors(hipMemset(bins, 0, bytes));
    // One thread per pixel; the grid is rounded up to cover the whole image.
    const int threads = BLOCK_SIZE;
    const int blocks = ceil(1.0f*resolution / threads);
    histo << <blocks, threads, 0 >> > (bins, d_logLuminance, numBins, resolution, lumMin, lumRange);
    hipDeviceSynchronize();
    return bins;
}
//////////////////////////////////////////////////////////////////////////
// FUNCION CALCULATE CDF //
//////////////////////////////////////////////////////////////////////////
/* Funcion principal que obtiene como parametros de entrada los valores de luminancia de la imagen,
el numero de filas y de columnas, y el numero de bins que usaremos para calcular el histograma. Esta
funcion se encargara de llamar a todas las funciones y kernels anteriores para obtener la distribucion
acumulativa siguiendo los pasos del TODO*/
void calculate_cdf(const float* const d_logLuminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
/* TODO
1) Encontrar el valor mximo y mnimo de luminancia en min_logLum and max_logLum a partir del canal logLuminance
2) Obtener el rango a representar
3) Generar un histograma de todos los valores del canal logLuminance usando la formula
bin = (Lum [i] - lumMin) / lumRange * numBins
4) Realizar un exclusive scan en el histograma para obtener la distribucin acumulada (cdf)
de los valores de luminancia. Se debe almacenar en el puntero c_cdf
*/
//1) Encontrar el valor mximo y mnimo de luminancia en min_logLum and max_logLum a partir del canal logLuminance
//Definimos el tamao de la imagen
int resolution = numRows * numCols;
// Si la CTE THRUST vale 1, utilizamos las funciones thrust para calcular l mximo y mnimo en lugar de los kernels
if (THRUST)
{
thrust::device_ptr<const float> minLuminance = thrust::device_pointer_cast(d_logLuminance);
thrust::device_vector<float> minLuminanceVector(minLuminance, minLuminance + resolution);
min_logLum = *thrust::min_element(minLuminanceVector.begin(), minLuminanceVector.end());
max_logLum = *thrust::max_element(minLuminanceVector.begin(), minLuminanceVector.end());
}
//llamamos a la funcion minmax dos veces, pasandole un diferente valor para el parametro "greater", para calcular
//el minimo y el maximo valor del vector de luminancias
else
{
min_logLum = minmax(d_logLuminance, resolution, false);
max_logLum = minmax(d_logLuminance, resolution, true);
}
//2) Obtener el rango a representar
//simplemente restamos el valor minimo del vector de luminancias al valor maximo del mismo
//float range = maxim - minim;
float range = max_logLum - min_logLum;
//3) Generar un histograma de todos los valores del canal logLuminance usando la formula
//bin = (Lum[i] - lumMin) / lumRange * numBins
//llamamos a la funcion histograma pasanole el valor minimo y el rango previamente calculados
unsigned int* histoResult = histograma(d_logLuminance, numBins, resolution, min_logLum, range);
//4) Realizar un exclusive scan en el histograma para obtener la distribucin acumulada (cdf)
//de los valores de luminancia.Se debe almacenar en el puntero c_cdf
//Utilizamos la funcion que nos ofrece thrust para realizar el exclusive scan
if (THRUST)
{
thrust::device_ptr<unsigned int> histoResultPTR = thrust::device_pointer_cast(histoResult);
thrust::device_vector<int> histoResultPTRVector(histoResultPTR, histoResultPTR + numBins);
thrust::exclusive_scan(histoResultPTRVector.begin(), histoResultPTRVector.end(), d_cdf);
}
//llamamos directamente a nuestro scan kernel pasandole el histograma previamente calculado
//obteniendo asi el valor final deseado
else
scan << <1, numBins, 2 * numBins * sizeof(unsigned int) >> > (d_cdf, histoResult, numBins);
checkCudaErrors(hipFree(histoResult));
} | f63985fa622720821b13d413f04415821db9d1fb.cu | /* Code completed by Nerea Alamar & Antonio Marco Rodrigo*/
//Librerias de Thrust
#include <thrust/extrema.h>
#include <thrust/device_vector.h>
#include <thrust/scan.h>
//Librerias de Cuda
#include <cuda_runtime.h>
#include <iostream>
#include "device_launch_parameters.h"
//Para evitar el subrayado rojo en Visual por intellisense en __syncthreads y en atomicAdd
#pragma once
#ifdef __INTELLISENSE__
void __syncthreads();
void atomicAdd(unsigned int*, unsigned int);
#endif
//Tamaño de bloque optimo para imagenes de resolución 2x1 (ej: waterfall_bg)
#define BLOCK_SIZE 128;
//Cambiar este valor si se quieren usar los kernels o las funciones thrust:
// 1-> Thrust
// 0-> Kernels
#define THRUST 0
//Usamos checkCudaErrors como en la práctica anterior para detectar fallos de cuda
#define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__)
// Aborts with a file/line diagnostic whenever a CUDA API call reports an
// error (used through the checkCudaErrors macro above).
template<typename T>
void check(T err, const char* const func, const char* const file, const int line) {
    if (err == cudaSuccess)
        return;
    std::cerr << "CUDA error at: " << file << ":" << line << std::endl;
    std::cerr << cudaGetErrorString(err) << " " << func << std::endl;
    exit(1);
}
//////////////////////////////////////////////////////////////////////////
// KERNELS //
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
// SCAN KERNEL //
//////////////////////////////////////////////////////////////////////////
/* Este kernel SCAN lleva a cabo un exclusive scan mediante el metodo de Hillis Steele
de manera que movemos los valores a la derecha y ponemos un 0 en el principio para
que sea exclusive. Recibe como parametro de entrada el histograma generado en el paso
anterior, y consigue la distribucion acumulativa*/
/* Exclusive scan (Hillis-Steele) over the histogram to build the cumulative
   distribution. Double-buffered in dynamic shared memory; the input is
   shifted one slot right with a leading 0 so the scan is exclusive.
   Launch contract (see calculate_cdf): a single block of exactly numBins
   threads with 2 * numBins * sizeof(unsigned int) bytes of shared memory. */
__global__ void scan(unsigned int* output, const unsigned int* input, int numBins)
{
    int thread = threadIdx.x;
    // Fix: buffer the unsigned int counts in an unsigned int array — the
    // original staged them in a float array, silently converting on every
    // access and losing precision for counts >= 2^24.
    extern __shared__ unsigned int shArrayU[];
    // 'up' and 'down' select the two halves of the double buffer and swap
    // roles on each pass.
    int up = 0;
    int down = 1;
    // Exclusive scan: shift right and force a 0 into slot 0.
    if (thread > 0)
        shArrayU[thread] = input[thread - 1];
    else
        shArrayU[thread] = 0;
    __syncthreads();
    // Hillis-Steele: after the k-th pass each slot holds the sum of the 2^k
    // preceding elements; the offset doubles every pass.
    for (int off = 1; off < numBins; off <<= 1)
    {
        up = 1 - up;   // 1 <-> 0
        down = 1 - up; // 0 <-> 1
        if (thread >= off)
            shArrayU[numBins*up + thread] = shArrayU[numBins*down + thread] + shArrayU[numBins*down + thread - off];
        else
            shArrayU[numBins*up + thread] = shArrayU[numBins*down + thread];
        __syncthreads();
    }
    // Publish the cumulative distribution.
    output[thread] = shArrayU[up*numBins + thread];
}
//////////////////////////////////////////////////////////////////////////
// HISTOGRAMA KERNEL //
//////////////////////////////////////////////////////////////////////////
/* Con este kernel calculamos el histograma dados: el numero de bins, los valores de rango de luminancias y sus valores minimo y maximo
(en concreto, solo nos hara falta el valor minimo y el rango)*/
/* Builds the luminance histogram: each thread maps one pixel's
   log-luminance value to a bin index and increments that bin atomically. */
__global__ void histo(unsigned int* output, const float * input, int numBins, int resolution, float lumMin, float lumRange)
{
    int i = threadIdx.x + blockDim.x*blockIdx.x;
    // Ignore threads past the end of the image (the grid is rounded up).
    if (i >= resolution)
        return;
    int bin = (input[i] - lumMin) / lumRange * numBins;
    // Fix: the maximum luminance maps to bin == numBins, one past the end of
    // the histogram; clamp so it lands in the last bin instead of corrupting
    // adjacent memory. Also guards against small negative rounding results.
    if (bin < 0) bin = 0;
    if (bin > numBins - 1) bin = numBins - 1;
    // Atomic increment avoids races between threads hitting the same bin.
    atomicAdd(&(output[bin]), 1);
}
//////////////////////////////////////////////////////////////////////////
// MAX & MIN FUNCTIONS //
//////////////////////////////////////////////////////////////////////////
/* funciones que calculan el maximo y el minimo de dos valores dados, respectivamente.
Serán necesarias para el kernel de calculo de maximo y minimo valor de luminancia*/
//Devuelve el minimo de los dos valores pasados como parametro
// Device helper: returns the smaller of two floats.
__device__ float minimize(float a, float b)
{
    return (a < b) ? a : b;
}
//Devuelve el maximo de los dos valores pasados como parametro
// Device helper: returns the larger of two floats.
__device__ float maximize(float a, float b)
{
    return (a > b) ? a : b;
}
//////////////////////////////////////////////////////////////////////////
// REDUCE KERNEL //
//////////////////////////////////////////////////////////////////////////
/*Este kernel calculara el valor maximo o minimo del vector de luminancias pasado como parametro.
Para evitar crear dos kernels practicamente iguales, le añadimos un tercer parametro bool llamado "greater",
cuyo valor decidira si lo que estamos calculando es el minimo o el maximo*/
/* Tree reduction: each block reduces blockDim.x elements of 'input' in
   shared memory and writes its partial result to output[blockIdx.x].
   'greater' selects max (true) or min (false) so one kernel serves both.
   Requires blockDim.x floats of dynamic shared memory; the pairwise halving
   assumes blockDim.x is a power of two (the host always launches 128).
   NOTE(review): input[i] is read without a bounds check, and the host side
   (minmax) rounds the grid up with ceil — the last block reads past the end
   of the array whenever the length is not a multiple of blockDim.x; fix by
   passing the element count and substituting the identity value. */
__global__ void reduce(float* output, const float * input, bool greater)
{
// Global index (one element per thread) and block-local index.
int i = threadIdx.x + blockDim.x*blockIdx.x;
int thread = threadIdx.x;
// Stage this block's slice in dynamic shared memory.
extern __shared__ float shArrayR[];
shArrayR[thread] = input[i];
__syncthreads();
// Pairwise reduction: each pass halves the active range until element 0
// holds the block's extreme value (>>1 is /2 done with a bit shift).
for (int s = blockDim.x >> 1; s > 0; s >>= 1)
{
if (thread < s)
{
// 'greater' == true -> max, false -> min.
if (greater)
shArrayR[thread] = maximize(shArrayR[thread], shArrayR[thread + s]);
else
shArrayR[thread] = minimize(shArrayR[thread], shArrayR[thread + s]);
}
__syncthreads();
}
// Thread 0 publishes the block's partial result.
if (thread == 0)
output[blockIdx.x] = shArrayR[0];
}
//////////////////////////////////////////////////////////////////////////
// FUNCIONES //
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
// FUNCION MINMAX //
//////////////////////////////////////////////////////////////////////////
/*Esta funcion recibe como parametro el vector de luminancias, el tamaño de la imagen
y greater, que decidira si estamos buscando el minimo o el maximo. La funcion llamara al
kernel reduce para dicha tarea*/
/* Host driver for the min/max reduction: repeatedly launches the 'reduce'
   kernel, shrinking 'size' elements to 'gridsize' partial results per pass,
   until a single value remains. 'greater' selects max (true) or min (false).
   NOTE(review): the grid is rounded up with ceil, so the final partial block
   of 'reduce' reads past the end of its input when the size is not a
   multiple of the block size — see the kernel's note. */
float minmax(const float* const d_logLuminance, int resolution, bool greater)
{
    int size = resolution;
    // Partial results from the previous pass (NULL on the first pass, where
    // the raw luminance vector itself is the input).
    float* half = NULL;
    // Threads per block / blocks per grid for the current pass.
    int blocksize = BLOCK_SIZE;
    int gridsize = ceil(1.0f*size / blocksize);
    // Dynamic shared memory: one float per thread.
    int sharedMemory = blocksize * sizeof(float);
    while (true)
    {
        // Output buffer for this pass: one partial result per block.
        float* deviceResult;
        checkCudaErrors(cudaMalloc(&deviceResult, gridsize * sizeof(float)));
        // First pass reduces the full luminance vector; later passes reduce
        // the previous pass's partial results.
        if (half == NULL)
            reduce << <gridsize, blocksize, sharedMemory >> > (deviceResult, d_logLuminance, greater);
        else
            reduce << <gridsize, blocksize, sharedMemory >> > (deviceResult, half, greater);
        cudaDeviceSynchronize();
        // The previous pass's buffer is no longer needed.
        if (half != NULL)
            checkCudaErrors(cudaFree(half));
        half = deviceResult;
        if (gridsize == 1)
        {
            // Copy the single remaining value back to the host.
            float hostResult;
            checkCudaErrors(cudaMemcpy(&hostResult, deviceResult, sizeof(float), cudaMemcpyDeviceToHost));
            // Fix: release the final device buffer — the original returned
            // without freeing it, leaking one allocation per call.
            checkCudaErrors(cudaFree(deviceResult));
            return hostResult;
        }
        // Next pass reduces the gridsize partial results.
        size = gridsize;
        gridsize = ceil(1.0f*size / blocksize);
    }
}
//////////////////////////////////////////////////////////////////////////
// FUNCION HISTOGRAMA //
//////////////////////////////////////////////////////////////////////////
/* Esta funcion recibe como parametros el vector de luminancias, el numero de bins, el tamaño, el valor minimo
de dicho vector calculado de la reduccion y el rango (calculado una vez obtenido el valor minimo y maximo) para
calcular el histograma mediante la llamada al kernel de histograma*/
/* Allocates a zeroed numBins-entry histogram on the device and fills it by
   launching the 'histo' kernel over the luminance vector. Ownership of the
   returned buffer passes to the caller, who must cudaFree it. */
unsigned int* histograma(const float* const d_logLuminance, int numBins, int resolution, float lumMin, float lumRange)
{
    // Device histogram, zero-initialised so the atomic increments start clean.
    unsigned int* bins;
    const size_t bytes = numBins * sizeof(unsigned int);
    checkCudaErrors(cudaMalloc(&bins, bytes));
    checkCudaErrors(cudaMemset(bins, 0, bytes));
    // One thread per pixel; the grid is rounded up to cover the whole image.
    const int threads = BLOCK_SIZE;
    const int blocks = ceil(1.0f*resolution / threads);
    histo << <blocks, threads, 0 >> > (bins, d_logLuminance, numBins, resolution, lumMin, lumRange);
    cudaDeviceSynchronize();
    return bins;
}
//////////////////////////////////////////////////////////////////////////
// FUNCION CALCULATE CDF //
//////////////////////////////////////////////////////////////////////////
/* Funcion principal que obtiene como parametros de entrada los valores de luminancia de la imagen,
el numero de filas y de columnas, y el numero de bins que usaremos para calcular el histograma. Esta
funcion se encargara de llamar a todas las funciones y kernels anteriores para obtener la distribucion
acumulativa siguiendo los pasos del TODO*/
/* Top-level driver: computes the cumulative distribution (CDF) of the
   log-luminance channel into d_cdf and reports the min/max luminance through
   the reference parameters. Steps: (1) min/max reduction, (2) range,
   (3) histogram with bin = (lum - lumMin) / lumRange * numBins,
   (4) exclusive scan of the histogram. The THRUST macro selects between the
   thrust library (1) and the hand-written kernels (0) for steps 1 and 4. */
void calculate_cdf(const float* const d_logLuminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
// Total number of pixels in the image.
int resolution = numRows * numCols;
// Step 1: min and max luminance — either via thrust's min/max_element ...
if (THRUST)
{
thrust::device_ptr<const float> minLuminance = thrust::device_pointer_cast(d_logLuminance);
thrust::device_vector<float> minLuminanceVector(minLuminance, minLuminance + resolution);
min_logLum = *thrust::min_element(minLuminanceVector.begin(), minLuminanceVector.end());
max_logLum = *thrust::max_element(minLuminanceVector.begin(), minLuminanceVector.end());
}
// ... or via two passes of the hand-written reduction ('greater' selects
// min vs max).
else
{
min_logLum = minmax(d_logLuminance, resolution, false);
max_logLum = minmax(d_logLuminance, resolution, true);
}
// Step 2: luminance range to be represented.
float range = max_logLum - min_logLum;
// Step 3: device histogram (caller of histograma owns the buffer).
unsigned int* histoResult = histograma(d_logLuminance, numBins, resolution, min_logLum, range);
// Step 4: exclusive scan of the histogram into d_cdf.
// NOTE(review): this branch copies the unsigned int histogram into a
// device_vector<int> — a signed/unsigned mismatch; scanning histoResult
// directly (or using unsigned int) looks intended — TODO confirm.
if (THRUST)
{
thrust::device_ptr<unsigned int> histoResultPTR = thrust::device_pointer_cast(histoResult);
thrust::device_vector<int> histoResultPTRVector(histoResultPTR, histoResultPTR + numBins);
thrust::exclusive_scan(histoResultPTRVector.begin(), histoResultPTRVector.end(), d_cdf);
}
// The hand-written scan runs as a single block of numBins threads with a
// double buffer in shared memory, so this path requires numBins to fit in
// one block (<= the device's max threads per block).
else
scan << <1, numBins, 2 * numBins * sizeof(unsigned int) >> > (d_cdf, histoResult, numBins);
checkCudaErrors(cudaFree(histoResult));
} |
8701a424987bcafdfc6aa3b2334d79fb2d3284af.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <math_functions.h>
#define N 2//64
// Device kernel for the verification testcase: stores __exp10f(y) == 10^y
// into x[threadIdx.x]; with y == 2 every touched slot becomes 100.
__global__ void foo(float *x, float y) {
x[threadIdx.x] = __exp10f(y); // pow(10,y), in this case pow(10,2) = 100
}
// Host driver: copies an (uninitialised) buffer to the device, runs foo to
// fill it with 10^2 = 100, copies it back and asserts each element != 100.
// NOTE(review): the assertion is expected to FAIL by construction — this is
// an ESBMC verification benchmark checking that the tool reports the
// violation, not a functional test.
int main(void){
float *A;
float *dev_A;
// Byte count for N floats (declared float; converted implicitly at each use).
float size= N*sizeof(float);
A=(float*)malloc(size);
hipMalloc((void**)&dev_A, size);
// A is never initialised before this copy; the kernel overwrites it anyway.
hipMemcpy(dev_A, A, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( foo), dim3(1),dim3(N), 0, 0, dev_A, 2);
//ESBMC_verify_kernel_f(foo, 1, N, dev_A, 2);
hipMemcpy(A, dev_A, size, hipMemcpyDeviceToHost);
printf("\n");
for(int t=0; t<N; t++){
printf("%.1f ", A[t]);
assert (A[t] != 100);
}
hipFree(dev_A);
free(A);
return 0;
}
| 8701a424987bcafdfc6aa3b2334d79fb2d3284af.cu | #include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <math_functions.h>
#define N 2//64
// Device kernel for the verification testcase: stores __exp10f(y) == 10^y
// into x[threadIdx.x]; with y == 2 every touched slot becomes 100.
__global__ void foo(float *x, float y) {
x[threadIdx.x] = __exp10f(y); // pow(10,y), in this case pow(10,2) = 100
}
// Host driver: copies an (uninitialised) buffer to the device, runs foo to
// fill it with 10^2 = 100, copies it back and asserts each element != 100.
// NOTE(review): the assertion is expected to FAIL by construction — this is
// an ESBMC verification benchmark checking that the tool reports the
// violation, not a functional test.
int main(void){
float *A;
float *dev_A;
// Byte count for N floats (declared float; converted implicitly at each use).
float size= N*sizeof(float);
A=(float*)malloc(size);
cudaMalloc((void**)&dev_A, size);
// A is never initialised before this copy; the kernel overwrites it anyway.
cudaMemcpy(dev_A, A, size, cudaMemcpyHostToDevice);
foo<<<1,N>>>(dev_A, 2);
//ESBMC_verify_kernel_f(foo, 1, N, dev_A, 2);
cudaMemcpy(A, dev_A, size, cudaMemcpyDeviceToHost);
printf("\n");
for(int t=0; t<N; t++){
printf("%.1f ", A[t]);
assert (A[t] != 100);
}
cudaFree(dev_A);
free(A);
return 0;
}
|
93725028b19a246334b652c455ce54aaa2c0f4eb.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "sumKernel.hip"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
/* Auto-generated benchmark driver: times sumKernel launches across a sweep
   of matrix sizes (matrices_) and block shapes (blocks_), printing
   [microseconds, (block), (matrix)] tuples.
   NOTE(review): argv[1] is parsed without checking argc — running with no
   argument reads past the argument vector.
   NOTE(review): the hipMalloc sizes are XSIZE*YSIZE bytes, not
   XSIZE*YSIZE*sizeof(float), and the buffers are never hipFree'd between
   iterations — confirm against the original generator. */
int main(int argc, char **argv) {
hipSetDevice(0);
// Number of matrix sizes to sweep, taken from the command line.
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *input = NULL;
hipMalloc(&input, XSIZE*YSIZE);
float *result = NULL;
hipMalloc(&result, XSIZE*YSIZE);
int batchSize = XSIZE*YSIZE;
int numberEntries = 1;
int numberIterations = 1;
// Round the problem size up to the next multiple of the block shape.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// Cold launch; hipFree(0) forces lazy context creation first.
hipFree(0);hipLaunchKernelGGL((
sumKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, input,result,batchSize,numberEntries,numberIterations);
hipDeviceSynchronize();
// Warm-up: 10 untimed launches.
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
sumKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, input,result,batchSize,numberEntries,numberIterations);
}
// Timed region: 1000 launches with no sync inside, so this measures launch
// submission plus whatever the queue absorbs — TODO confirm intent.
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
sumKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, input,result,batchSize,numberEntries,numberIterations);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "sumKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *input = NULL;
cudaMalloc(&input, XSIZE*YSIZE);
float *result = NULL;
cudaMalloc(&result, XSIZE*YSIZE);
int batchSize = XSIZE*YSIZE;
int numberEntries = 1;
int numberIterations = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
sumKernel<<<gridBlock,threadBlock>>>(input,result,batchSize,numberEntries,numberIterations);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
sumKernel<<<gridBlock,threadBlock>>>(input,result,batchSize,numberEntries,numberIterations);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
sumKernel<<<gridBlock,threadBlock>>>(input,result,batchSize,numberEntries,numberIterations);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.